From 21e50a3f9d5cc349525ff6703e3f4f485dc0004b Mon Sep 17 00:00:00 2001
From: jojohappy
Date: Tue, 3 Jul 2018 15:37:22 +0800
Subject: [PATCH 1/6] Upgrade k8s client to kubernetes-1.11.0

Signed-off-by: jojohappy
---
 discovery/kubernetes/endpoints.go | 2 +-
 discovery/kubernetes/endpoints_test.go | 2 +-
 discovery/kubernetes/ingress.go | 2 +-
 discovery/kubernetes/ingress_test.go | 2 +-
 discovery/kubernetes/kubernetes.go | 7 +-
 discovery/kubernetes/node.go | 7 +-
 discovery/kubernetes/node_test.go | 2 +-
 discovery/kubernetes/pod.go | 5 +-
 discovery/kubernetes/pod_test.go | 2 +-
 discovery/kubernetes/service.go | 2 +-
 discovery/kubernetes/service_test.go | 2 +-
 pkg/textparse/testdata.txt | 5 +-
 vendor/github.com/golang/protobuf/LICENSE | 3 -
 .../golang/protobuf/jsonpb/jsonpb.go | 563 +-
 .../github.com/golang/protobuf/proto/Makefile | 43 -
 .../github.com/golang/protobuf/proto/clone.go | 46 +-
 .../golang/protobuf/proto/decode.go | 668 +-
 .../golang/protobuf/proto/discard.go | 350 +
 .../golang/protobuf/proto/encode.go | 1202 +-
 .../github.com/golang/protobuf/proto/equal.go | 30 +-
 .../golang/protobuf/proto/extensions.go | 208 +-
 .../github.com/golang/protobuf/proto/lib.go | 70 +-
 .../golang/protobuf/proto/message_set.go | 81 +-
 .../golang/protobuf/proto/pointer_reflect.go | 645 +-
 .../golang/protobuf/proto/pointer_unsafe.go | 402 +-
 .../golang/protobuf/proto/properties.go | 424 +-
 .../golang/protobuf/proto/table_marshal.go | 2736 +
 .../golang/protobuf/proto/table_merge.go | 654 +
 .../golang/protobuf/proto/table_unmarshal.go | 2048 +
 .../github.com/golang/protobuf/proto/text.go | 61 +-
 .../golang/protobuf/proto/text_parser.go | 75 +-
 .../protoc-gen-go/descriptor/Makefile | 36 -
 .../protoc-gen-go/descriptor/descriptor.pb.go | 1356 +-
 .../protoc-gen-go/descriptor/descriptor.proto | 872 +
 .../github.com/golang/protobuf/ptypes/any.go | 141 +
 .../golang/protobuf/ptypes/any/any.pb.go | 106 +-
 .../golang/protobuf/ptypes/any/any.proto | 11 +-
 .../github.com/golang/protobuf/ptypes/doc.go | 35 +
 .../golang/protobuf/ptypes/duration.go | 102 +
 .../protobuf/ptypes/duration/duration.pb.go | 159 +
 .../protobuf/ptypes/duration/duration.proto | 117 +
 .../protobuf/ptypes/struct/struct.pb.go | 440 +
 .../protobuf/ptypes/struct/struct.proto | 96 +
 .../golang/protobuf/ptypes/timestamp.go | 134 +
 .../protobuf/ptypes/timestamp/timestamp.pb.go | 175 +
 .../protobuf/ptypes/timestamp/timestamp.proto | 133 +
 vendor/github.com/google/btree/LICENSE | 202 +
 vendor/github.com/google/btree/README.md | 12 +
 vendor/github.com/google/btree/btree.go | 881 +
 vendor/github.com/googleapis/gnostic/LICENSE | 203 +
 .../googleapis/gnostic/OpenAPIv2/OpenAPIv2.go | 8847 ++
 .../gnostic/OpenAPIv2/OpenAPIv2.pb.go | 4455 +
 .../gnostic/OpenAPIv2/OpenAPIv2.proto | 663 +
 .../googleapis/gnostic/OpenAPIv2/README.md | 16 +
 .../gnostic/OpenAPIv2/openapi-2.0.json | 1610 +
 .../googleapis/gnostic/compiler/README.md | 3 +
 .../googleapis/gnostic/compiler/context.go | 43 +
 .../googleapis/gnostic/compiler/error.go | 61 +
 .../gnostic/compiler/extension-handler.go | 101 +
 .../googleapis/gnostic/compiler/helpers.go | 197 +
 .../googleapis/gnostic/compiler/main.go | 16 +
 .../googleapis/gnostic/compiler/reader.go | 175 +
 .../gnostic/extensions/COMPILE-EXTENSION.sh | 5 +
 .../googleapis/gnostic/extensions/README.md | 5 +
 .../gnostic/extensions/extension.pb.go | 218 +
 .../gnostic/extensions/extension.proto | 93 +
 .../gnostic/extensions/extensions.go | 82 +
 .../gregjones/httpcache/LICENSE.txt | 7 +
 .../github.com/gregjones/httpcache/README.md | 25 +
.../httpcache/diskcache/diskcache.go | 61 + .../gregjones/httpcache/httpcache.go | 551 + vendor/github.com/hashicorp/golang-lru/2q.go | 223 + .../github.com/hashicorp/golang-lru/LICENSE | 362 + .../github.com/hashicorp/golang-lru/README.md | 25 + vendor/github.com/hashicorp/golang-lru/arc.go | 257 + vendor/github.com/hashicorp/golang-lru/doc.go | 21 + vendor/github.com/hashicorp/golang-lru/lru.go | 110 + .../hashicorp/golang-lru/simplelru/lru.go | 161 + .../golang-lru/simplelru/lru_interface.go | 37 + vendor/github.com/json-iterator/go/Gopkg.lock | 30 +- vendor/github.com/json-iterator/go/Gopkg.toml | 13 +- vendor/github.com/json-iterator/go/README.md | 3 + .../go/{feature_adapter.go => adapter.go} | 18 +- .../go/{feature_any.go => any.go} | 82 +- .../go/{feature_any_array.go => any_array.go} | 0 .../go/{feature_any_bool.go => any_bool.go} | 0 .../go/{feature_any_float.go => any_float.go} | 0 .../go/{feature_any_int32.go => any_int32.go} | 0 .../go/{feature_any_int64.go => any_int64.go} | 0 ...{feature_any_invalid.go => any_invalid.go} | 0 .../go/{feature_any_nil.go => any_nil.go} | 0 .../{feature_any_number.go => any_number.go} | 0 .../{feature_any_object.go => any_object.go} | 0 .../go/{feature_any_string.go => any_str.go} | 2 +- .../{feature_any_uint32.go => any_uint32.go} | 0 .../{feature_any_uint64.go => any_uint64.go} | 0 .../go/{feature_config.go => config.go} | 171 +- .../go/feature_config_with_sync_map.go | 65 - .../go/feature_config_without_sync_map.go | 71 - .../json-iterator/go/feature_json_number.go | 31 - .../json-iterator/go/feature_reflect.go | 607 - .../json-iterator/go/feature_reflect_array.go | 110 - .../json-iterator/go/feature_reflect_map.go | 260 - .../go/feature_reflect_native.go | 789 - .../json-iterator/go/feature_reflect_slice.go | 143 - .../go/{feature_iter.go => iter.go} | 0 .../{feature_iter_array.go => iter_array.go} | 0 .../{feature_iter_float.go => iter_float.go} | 0 .../go/{feature_iter_int.go => iter_int.go} | 6 + ...{feature_iter_object.go => iter_object.go} | 12 +- .../go/{feature_iter_skip.go => iter_skip.go} | 0 ...ter_skip_sloppy.go => iter_skip_sloppy.go} | 0 ...ter_skip_strict.go => iter_skip_strict.go} | 0 .../{feature_iter_string.go => iter_str.go} | 0 .../go/{feature_pool.go => pool.go} | 35 +- vendor/github.com/json-iterator/go/reflect.go | 330 + .../json-iterator/go/reflect_array.go | 104 + .../json-iterator/go/reflect_dynamic.go | 70 + ...lect_extension.go => reflect_extension.go} | 253 +- .../json-iterator/go/reflect_json_number.go | 112 + .../go/reflect_json_raw_message.go | 60 + .../json-iterator/go/reflect_map.go | 318 + .../json-iterator/go/reflect_marshaler.go | 218 + .../json-iterator/go/reflect_native.go | 451 + ...eflect_optional.go => reflect_optional.go} | 93 +- .../json-iterator/go/reflect_slice.go | 99 + ...t_decoder.go => reflect_struct_decoder.go} | 138 +- ...ct_object.go => reflect_struct_encoder.go} | 150 +- .../go/{feature_stream.go => stream.go} | 5 + ...eature_stream_float.go => stream_float.go} | 0 .../{feature_stream_int.go => stream_int.go} | 8 +- ...feature_stream_string.go => stream_str.go} | 0 .../github.com/modern-go/concurrent/LICENSE | 201 + .../github.com/modern-go/concurrent/README.md | 49 + .../modern-go/concurrent/executor.go | 14 + .../modern-go/concurrent/go_above_19.go | 15 + .../modern-go/concurrent/go_below_19.go | 33 + vendor/github.com/modern-go/concurrent/log.go | 13 + .../github.com/modern-go/concurrent/test.sh | 12 + .../concurrent/unbounded_executor.go | 119 + 
.../github.com/modern-go/reflect2/Gopkg.lock | 15 + .../github.com/modern-go/reflect2/Gopkg.toml | 35 + vendor/github.com/modern-go/reflect2/LICENSE | 201 + .../github.com/modern-go/reflect2/README.md | 71 + .../modern-go/reflect2/go_above_17.go | 8 + .../modern-go/reflect2/go_above_19.go | 14 + .../modern-go/reflect2/go_below_17.go | 9 + .../modern-go/reflect2/go_below_19.go | 14 + .../github.com/modern-go/reflect2/reflect2.go | 298 + .../modern-go/reflect2/reflect2_amd64.s | 0 .../modern-go/reflect2/reflect2_kind.go | 30 + .../modern-go/reflect2/relfect2_386.s | 0 .../modern-go/reflect2/relfect2_amd64p32.s | 0 .../modern-go/reflect2/relfect2_arm.s | 0 .../modern-go/reflect2/relfect2_arm64.s | 0 .../modern-go/reflect2/relfect2_mips64x.s | 0 .../modern-go/reflect2/relfect2_mipsx.s | 0 .../modern-go/reflect2/relfect2_ppc64x.s | 0 .../modern-go/reflect2/relfect2_s390x.s | 0 .../modern-go/reflect2/safe_field.go | 58 + .../github.com/modern-go/reflect2/safe_map.go | 101 + .../modern-go/reflect2/safe_slice.go | 92 + .../modern-go/reflect2/safe_struct.go | 29 + .../modern-go/reflect2/safe_type.go | 78 + vendor/github.com/modern-go/reflect2/test.sh | 12 + .../github.com/modern-go/reflect2/type_map.go | 103 + .../modern-go/reflect2/unsafe_array.go | 65 + .../modern-go/reflect2/unsafe_eface.go | 59 + .../modern-go/reflect2/unsafe_field.go | 74 + .../modern-go/reflect2/unsafe_iface.go | 64 + .../modern-go/reflect2/unsafe_link.go | 70 + .../modern-go/reflect2/unsafe_map.go | 138 + .../modern-go/reflect2/unsafe_ptr.go | 46 + .../modern-go/reflect2/unsafe_slice.go | 177 + .../modern-go/reflect2/unsafe_struct.go | 59 + .../modern-go/reflect2/unsafe_type.go | 85 + vendor/github.com/peterbourgon/diskv/LICENSE | 19 + .../github.com/peterbourgon/diskv/README.md | 191 + .../peterbourgon/diskv/compression.go | 64 + vendor/github.com/peterbourgon/diskv/diskv.go | 726 + vendor/github.com/peterbourgon/diskv/index.go | 115 + .../x/crypto/ssh/terminal/terminal.go | 951 + .../golang.org/x/crypto/ssh/terminal/util.go | 114 + .../x/crypto/ssh/terminal/util_bsd.go | 12 + .../x/crypto/ssh/terminal/util_linux.go | 10 + .../x/crypto/ssh/terminal/util_plan9.go | 58 + .../x/crypto/ssh/terminal/util_solaris.go | 124 + .../x/crypto/ssh/terminal/util_windows.go | 103 + .../x/sys/windows/asm_windows_386.s | 13 + .../x/sys/windows/asm_windows_amd64.s | 13 + .../golang.org/x/sys/windows/dll_windows.go | 378 + .../golang.org/x/sys/windows/env_windows.go | 29 + vendor/golang.org/x/sys/windows/eventlog.go | 20 + .../golang.org/x/sys/windows/exec_windows.go | 97 + .../x/sys/windows/memory_windows.go | 26 + vendor/golang.org/x/sys/windows/mksyscall.go | 7 + vendor/golang.org/x/sys/windows/race.go | 30 + vendor/golang.org/x/sys/windows/race0.go | 25 + .../x/sys/windows/security_windows.go | 478 + vendor/golang.org/x/sys/windows/service.go | 165 + vendor/golang.org/x/sys/windows/str.go | 22 + vendor/golang.org/x/sys/windows/syscall.go | 74 + .../x/sys/windows/syscall_windows.go | 1153 + .../golang.org/x/sys/windows/types_windows.go | 1353 + .../x/sys/windows/types_windows_386.go | 22 + .../x/sys/windows/types_windows_amd64.go | 22 + .../x/sys/windows/zsyscall_windows.go | 2700 + vendor/k8s.io/api/LICENSE | 202 + .../api/admissionregistration/v1alpha1/doc.go | 25 + .../v1alpha1/generated.pb.go | 1028 + .../v1alpha1/generated.proto | 108 + .../v1alpha1/register.go | 51 + .../admissionregistration/v1alpha1/types.go | 106 + .../v1alpha1/types_swagger_doc_generated.go | 71 + .../v1alpha1/zz_generated.deepcopy.go | 145 + 
.../api/admissionregistration/v1beta1/doc.go | 25 + .../v1beta1/generated.pb.go | 2150 + .../v1beta1/generated.proto | 263 + .../admissionregistration/v1beta1/register.go | 53 + .../admissionregistration/v1beta1/types.go | 281 + .../v1beta1/types_swagger_doc_generated.go | 125 + .../v1beta1/zz_generated.deepcopy.go | 317 + .../authentication => api/apps}/v1/doc.go | 6 +- vendor/k8s.io/api/apps/v1/generated.pb.go | 6945 ++ vendor/k8s.io/api/apps/v1/generated.proto | 701 + vendor/k8s.io/api/apps/v1/register.go | 60 + vendor/k8s.io/api/apps/v1/types.go | 819 + .../apps/v1/types_swagger_doc_generated.go | 365 + .../api/apps/v1/zz_generated.deepcopy.go | 856 + .../apps/v1beta1}/doc.go | 6 +- .../apis => api}/apps/v1beta1/generated.pb.go | 2912 +- .../apis => api}/apps/v1beta1/generated.proto | 204 +- .../pkg/apis => api}/apps/v1beta1/register.go | 11 +- .../pkg/apis => api}/apps/v1beta1/types.go | 261 +- .../v1beta1/types_swagger_doc_generated.go | 109 +- .../api/apps/v1beta1/zz_generated.deepcopy.go | 658 + .../apps/v1beta1 => api/apps/v1beta2}/doc.go | 5 +- .../k8s.io/api/apps/v1beta2/generated.pb.go | 7586 ++ .../k8s.io/api/apps/v1beta2/generated.proto | 752 + vendor/k8s.io/api/apps/v1beta2/register.go | 61 + vendor/k8s.io/api/apps/v1beta2/types.go | 877 + .../v1beta2/types_swagger_doc_generated.go | 396 + .../api/apps/v1beta2/zz_generated.deepcopy.go | 923 + vendor/k8s.io/api/authentication/v1/doc.go | 20 + .../api/authentication/v1/generated.pb.go | 2148 + .../authentication/v1}/generated.proto | 69 +- .../authentication/v1/register.go | 10 +- vendor/k8s.io/api/authentication/v1/types.go | 168 + .../v1/types_swagger_doc_generated.go | 45 +- .../v1/zz_generated.deepcopy.go | 239 + .../k8s.io/api/authentication/v1beta1/doc.go | 20 + .../authentication/v1beta1/generated.pb.go | 500 +- .../authentication/v1beta1}/generated.proto | 7 +- .../authentication/v1beta1/register.go | 9 +- .../authentication/v1beta1/types.go | 7 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../v1beta1/zz_generated.deepcopy.go | 139 + .../pkg/apis => api}/authorization/v1/doc.go | 5 +- .../api/authorization/v1/generated.pb.go | 3528 + .../authorization/v1/generated.proto | 100 +- .../apis => api}/authorization/v1/register.go | 14 +- .../apis => api}/authorization/v1/types.go | 112 +- .../v1/types_swagger_doc_generated.go | 60 +- .../authorization/v1/zz_generated.deepcopy.go | 398 + .../authorization/v1beta1}/doc.go | 5 +- .../api/authorization/v1beta1/generated.pb.go | 3529 + .../authorization/v1beta1/generated.proto | 100 +- .../authorization/v1beta1/register.go | 14 +- .../authorization/v1beta1/types.go | 112 +- .../v1beta1/types_swagger_doc_generated.go | 60 +- .../v1beta1/zz_generated.deepcopy.go | 398 + .../v1beta1 => api/autoscaling/v1}/doc.go | 6 +- .../autoscaling/v1/generated.pb.go | 2238 +- .../autoscaling/v1/generated.proto | 112 +- .../apis => api}/autoscaling/v1/register.go | 9 +- .../pkg/apis => api}/autoscaling/v1/types.go | 124 +- .../v1/types_swagger_doc_generated.go | 63 +- .../autoscaling/v1/zz_generated.deepcopy.go | 569 + vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 20 + .../api/autoscaling/v2beta1/generated.pb.go | 6732 ++ .../autoscaling/v2beta1}/generated.proto | 276 +- .../api/autoscaling/v2beta1/register.go | 52 + .../k8s.io/api/autoscaling/v2beta1/types.go | 583 + .../v2beta1/types_swagger_doc_generated.go | 321 + .../v2beta1/zz_generated.deepcopy.go | 813 + .../pkg/apis/apps => api/batch/v1}/doc.go | 5 +- .../pkg/apis => api}/batch/v1/generated.pb.go | 517 +- .../pkg/apis => 
api}/batch/v1/generated.proto | 69 +- .../pkg/apis => api}/batch/v1/register.go | 9 +- .../pkg/apis => api}/batch/v1/types.go | 75 +- .../batch/v1/types_swagger_doc_generated.go | 94 + .../api/batch/v1/zz_generated.deepcopy.go | 215 + vendor/k8s.io/api/batch/v1beta1/doc.go | 20 + .../k8s.io/api/batch/v1beta1/generated.pb.go | 1509 + .../k8s.io/api/batch/v1beta1/generated.proto | 138 + vendor/k8s.io/api/batch/v1beta1/register.go | 53 + vendor/k8s.io/api/batch/v1beta1/types.go | 158 + .../v1beta1/types_swagger_doc_generated.go | 96 + .../batch/v1beta1/zz_generated.deepcopy.go | 214 + vendor/k8s.io/api/batch/v2alpha1/doc.go | 20 + .../batch/v2alpha1/generated.pb.go | 461 +- .../batch/v2alpha1/generated.proto | 56 +- .../apis => api}/batch/v2alpha1/register.go | 11 +- .../pkg/apis => api}/batch/v2alpha1/types.go | 55 +- .../v2alpha1/types_swagger_doc_generated.go | 34 +- .../batch/v2alpha1/zz_generated.deepcopy.go | 214 + .../certificates/v1beta1}/doc.go | 5 +- .../certificates/v1beta1/generated.pb.go | 605 +- .../certificates/v1beta1/generated.proto | 8 +- .../certificates/v1beta1/register.go | 12 +- .../certificates/v1beta1/types.go | 11 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../v1beta1/zz_generated.deepcopy.go | 194 + .../api/core/v1/annotation_key_constants.go | 81 + .../defaults.go => api/core/v1/doc.go} | 12 +- .../pkg/api => api/core}/v1/generated.pb.go | 29899 ++++--- .../pkg/api => api/core}/v1/generated.proto | 1799 +- vendor/k8s.io/api/core/v1/objectreference.go | 33 + .../pkg/api => api/core}/v1/register.go | 7 +- vendor/k8s.io/api/core/v1/resource.go | 56 + vendor/k8s.io/api/core/v1/taint.go | 33 + vendor/k8s.io/api/core/v1/toleration.go | 56 + .../pkg/api => api/core}/v1/types.go | 2102 +- .../core}/v1/types_swagger_doc_generated.go | 1000 +- .../api/core/v1/zz_generated.deepcopy.go | 6065 ++ vendor/k8s.io/api/events/v1beta1/doc.go | 21 + .../k8s.io/api/events/v1beta1/generated.pb.go | 1306 + .../k8s.io/api/events/v1beta1/generated.proto | 122 + vendor/k8s.io/api/events/v1beta1/register.go | 53 + vendor/k8s.io/api/events/v1beta1/types.go | 122 + .../v1beta1/types_swagger_doc_generated.go | 73 + .../events/v1beta1/zz_generated.deepcopy.go | 125 + vendor/k8s.io/api/extensions/v1beta1/doc.go | 20 + .../extensions/v1beta1/generated.pb.go | 6506 +- .../extensions/v1beta1/generated.proto | 454 +- .../extensions/v1beta1/register.go | 13 +- .../apis => api}/extensions/v1beta1/types.go | 559 +- .../v1beta1/types_swagger_doc_generated.go | 282 +- .../v1beta1/zz_generated.deepcopy.go | 1598 + vendor/k8s.io/api/networking/v1/doc.go | 20 + .../k8s.io/api/networking/v1/generated.pb.go | 1869 + .../k8s.io/api/networking/v1/generated.proto | 197 + vendor/k8s.io/api/networking/v1/register.go | 53 + vendor/k8s.io/api/networking/v1/types.go | 203 + .../v1/types_swagger_doc_generated.go | 113 + .../networking/v1/zz_generated.deepcopy.go | 282 + .../pkg/apis => api}/policy/v1beta1/doc.go | 5 +- .../k8s.io/api/policy/v1beta1/generated.pb.go | 4056 + .../k8s.io/api/policy/v1beta1/generated.proto | 334 + .../apis => api}/policy/v1beta1/register.go | 11 +- vendor/k8s.io/api/policy/v1beta1/types.go | 393 + .../v1beta1/types_swagger_doc_generated.go | 210 + .../policy/v1beta1/zz_generated.deepcopy.go | 485 + .../apis/rbac/v1beta1 => api/rbac/v1}/doc.go | 5 +- vendor/k8s.io/api/rbac/v1/generated.pb.go | 2749 + .../v1beta1 => api/rbac/v1}/generated.proto | 41 +- .../pkg/apis/rbac => api/rbac/v1}/register.go | 22 +- .../rbac/v1beta1 => api/rbac/v1}/types.go | 46 +- 
.../rbac/v1}/types_swagger_doc_generated.go | 22 +- .../api/rbac/v1/zz_generated.deepcopy.go | 393 + .../apis/rbac => api/rbac/v1alpha1}/doc.go | 5 +- .../rbac/v1alpha1/generated.pb.go | 1253 +- .../rbac/v1alpha1/generated.proto | 38 +- .../apis => api}/rbac/v1alpha1/register.go | 9 +- .../pkg/apis => api}/rbac/v1alpha1/types.go | 44 +- .../v1alpha1/types_swagger_doc_generated.go | 20 +- .../rbac/v1alpha1/zz_generated.deepcopy.go | 393 + vendor/k8s.io/api/rbac/v1beta1/doc.go | 21 + .../apis => api}/rbac/v1beta1/generated.pb.go | 1252 +- .../k8s.io/api/rbac/v1beta1/generated.proto | 200 + .../pkg/apis => api}/rbac/v1beta1/register.go | 9 +- .../apis/rbac => api/rbac/v1beta1}/types.go | 151 +- .../v1beta1/types_swagger_doc_generated.go | 158 + .../api/rbac/v1beta1/zz_generated.deepcopy.go | 393 + vendor/k8s.io/api/scheduling/v1alpha1/doc.go | 21 + .../api/scheduling/v1alpha1/generated.pb.go | 641 + .../api/scheduling/v1alpha1/generated.proto | 68 + .../api/scheduling/v1alpha1/register.go | 52 + .../k8s.io/api/scheduling/v1alpha1/types.go | 66 + .../v1alpha1/types_swagger_doc_generated.go | 52 + .../v1alpha1/zz_generated.deepcopy.go | 84 + .../scheduling/v1beta1/doc.go} | 16 +- .../api/scheduling/v1beta1/generated.pb.go | 641 + .../api/scheduling/v1beta1/generated.proto | 68 + .../k8s.io/api/scheduling/v1beta1/register.go | 52 + vendor/k8s.io/api/scheduling/v1beta1/types.go | 66 + .../v1beta1/types_swagger_doc_generated.go | 52 + .../v1beta1/zz_generated.deepcopy.go | 84 + .../pkg/apis => api}/settings/v1alpha1/doc.go | 5 +- .../settings/v1alpha1/generated.pb.go | 304 +- .../settings/v1alpha1/generated.proto | 20 +- .../k8s.io/api/settings/v1alpha1/register.go | 52 + .../apis => api}/settings/v1alpha1/types.go | 11 +- .../v1alpha1/types_swagger_doc_generated.go | 8 +- .../v1alpha1/zz_generated.deepcopy.go | 131 + .../pkg/apis => api}/storage/v1/doc.go | 2 + vendor/k8s.io/api/storage/v1/generated.pb.go | 980 + .../apis => api}/storage/v1/generated.proto | 41 +- .../pkg/apis => api}/storage/v1/register.go | 9 +- vendor/k8s.io/api/storage/v1/types.go | 106 + .../storage/v1/types_swagger_doc_generated.go | 56 + .../api/storage/v1/zz_generated.deepcopy.go | 131 + vendor/k8s.io/api/storage/v1alpha1/doc.go | 20 + .../api/storage/v1alpha1/generated.pb.go | 1523 + .../api/storage/v1alpha1/generated.proto | 128 + .../storage}/v1alpha1/register.go | 9 +- vendor/k8s.io/api/storage/v1alpha1/types.go | 126 + .../v1alpha1/types_swagger_doc_generated.go | 93 + .../storage/v1alpha1/zz_generated.deepcopy.go | 186 + .../storage => api/storage/v1beta1}/doc.go | 4 +- .../api/storage/v1beta1/generated.pb.go | 2263 + .../api/storage/v1beta1/generated.proto | 190 + .../apis => api}/storage/v1beta1/register.go | 12 +- vendor/k8s.io/api/storage/v1beta1/types.go | 213 + .../v1beta1/types_swagger_doc_generated.go | 119 + .../storage/v1beta1/zz_generated.deepcopy.go | 292 + .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 5 +- .../apimachinery/pkg/api/errors/errors.go | 151 +- .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 1 - .../apimachinery/pkg/api/meta/default.go | 51 - .../apimachinery/pkg/api/meta/errors.go | 20 +- .../k8s.io/apimachinery/pkg/api/meta/help.go | 6 + .../apimachinery/pkg/api/meta/interfaces.go | 27 +- .../k8s.io/apimachinery/pkg/api/meta/lazy.go | 104 + .../k8s.io/apimachinery/pkg/api/meta/meta.go | 105 +- .../pkg/api/meta/multirestmapper.go | 25 +- .../apimachinery/pkg/api/meta/priority.go | 34 +- .../apimachinery/pkg/api/meta/restmapper.go | 72 +- .../apimachinery/pkg/api/meta/unstructured.go | 31 - 
.../apimachinery/pkg/api/resource/OWNERS | 3 +- .../pkg/api/resource/generated.pb.go | 46 +- .../pkg/api/resource/generated.proto | 3 +- .../apimachinery/pkg/api/resource/math.go | 13 - .../apimachinery/pkg/api/resource/quantity.go | 67 +- .../api/resource/zz_generated.deepcopy.go} | 16 +- .../pkg/apimachinery/announced/announced.go | 99 - .../apimachinery/announced/group_factory.go | 252 - .../apimachinery/pkg/apimachinery/doc.go | 20 - .../pkg/apimachinery/registered/registered.go | 376 - .../apimachinery/pkg/apimachinery/types.go | 87 - .../apis/meta/internalversion/conversion.go | 77 + .../pkg/apis/meta/internalversion/doc.go | 20 + .../pkg/apis/meta/internalversion/register.go | 105 + .../pkg/apis/meta/internalversion/types.go | 70 + .../zz_generated.conversion.go | 118 + .../internalversion/zz_generated.deepcopy.go | 106 + .../apimachinery/pkg/apis/meta/v1/OWNERS | 2 - .../pkg/apis/meta/v1/controller_ref.go | 54 + .../pkg/apis/meta/v1/conversion.go | 56 +- .../apimachinery/pkg/apis/meta/v1/doc.go | 2 +- .../apimachinery/pkg/apis/meta/v1/duration.go | 5 +- .../pkg/apis/meta/v1/generated.pb.go | 3266 +- .../pkg/apis/meta/v1/generated.proto | 204 +- .../pkg/apis/meta/v1/group_version.go | 24 +- .../apimachinery/pkg/apis/meta/v1/helpers.go | 6 +- .../apimachinery/pkg/apis/meta/v1/labels.go | 28 +- .../apimachinery/pkg/apis/meta/v1/meta.go | 73 +- .../pkg/apis/meta/v1/micro_time.go | 183 + .../pkg/apis/meta/v1/micro_time_proto.go | 72 + .../apimachinery/pkg/apis/meta/v1/register.go | 15 +- .../apimachinery/pkg/apis/meta/v1/time.go | 45 +- .../pkg/apis/meta/v1/time_proto.go | 13 +- .../apimachinery/pkg/apis/meta/v1/types.go | 210 +- .../meta/v1/types_swagger_doc_generated.go | 102 +- .../pkg/apis/meta/v1/unstructured/helpers.go | 451 + .../apis/meta/v1/unstructured/unstructured.go | 608 +- .../meta/v1/unstructured/unstructured_list.go | 188 + .../v1/unstructured/zz_generated.deepcopy.go | 55 + .../apimachinery/pkg/apis/meta/v1/watch.go | 9 + .../pkg/apis/meta/v1/well_known_labels.go | 84 - .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 1174 +- .../pkg/apis/meta/v1/zz_generated.defaults.go | 4 +- .../pkg/apis/meta/v1beta1/conversion.go | 27 + .../pkg/apis/meta/v1beta1/deepcopy.go} | 31 +- .../apimachinery/pkg/apis/meta/v1beta1/doc.go | 22 + .../pkg/apis/meta/v1beta1/generated.pb.go | 633 + .../pkg/apis/meta/v1beta1/generated.proto | 58 + .../pkg/apis/meta/v1beta1}/register.go | 41 +- .../pkg/apis/meta/v1beta1/types.go | 161 + .../v1beta1/types_swagger_doc_generated.go | 104 + .../meta/v1beta1/zz_generated.deepcopy.go | 190 + .../meta}/v1beta1/zz_generated.defaults.go | 4 +- .../k8s.io/apimachinery/pkg/conversion/OWNERS | 10 - .../apimachinery/pkg/conversion/cloner.go | 249 - .../apimachinery/pkg/conversion/converter.go | 55 - .../pkg/conversion/queryparams/convert.go | 12 +- .../apimachinery/pkg/fields/selector.go | 79 +- .../k8s.io/apimachinery/pkg/labels/labels.go | 4 +- .../apimachinery/pkg/labels/selector.go | 69 +- .../pkg/labels/zz_generated.deepcopy.go | 42 + .../k8s.io/apimachinery/pkg/openapi/common.go | 160 - vendor/k8s.io/apimachinery/pkg/runtime/OWNERS | 19 - .../k8s.io/apimachinery/pkg/runtime/codec.go | 18 +- .../apimachinery/pkg/runtime/codec_check.go | 8 +- .../apimachinery/pkg/runtime/conversion.go | 15 + .../apimachinery/pkg/runtime/converter.go | 805 + .../apimachinery/pkg/runtime/embedded.go | 6 + .../k8s.io/apimachinery/pkg/runtime/error.go | 33 +- .../apimachinery/pkg/runtime/extension.go | 5 +- .../apimachinery/pkg/runtime/generated.pb.go | 236 +- 
.../apimachinery/pkg/runtime/generated.proto | 5 +- .../apimachinery/pkg/runtime/interfaces.go | 39 +- .../apimachinery/pkg/runtime/register.go | 2 +- .../pkg/runtime/schema/generated.pb.go | 38 +- .../pkg/runtime/schema/generated.proto | 2 +- .../pkg/runtime/schema/group_version.go | 24 + .../k8s.io/apimachinery/pkg/runtime/scheme.go | 403 +- .../pkg/runtime/serializer/json/json.go | 104 +- .../serializer/versioning/versioning.go | 47 +- .../pkg/runtime/swagger_doc_generator.go | 2 +- .../k8s.io/apimachinery/pkg/runtime/types.go | 6 +- .../pkg/runtime/zz_generated.deepcopy.go | 122 +- .../apimachinery/pkg/types/namespacedname.go | 17 - .../apimachinery/pkg/util/cache/cache.go | 83 + .../pkg/util/cache/lruexpirecache.go | 102 + .../pkg}/util/clock/clock.go | 63 +- .../k8s.io/apimachinery/pkg/util/diff/diff.go | 55 +- .../apimachinery/pkg/util/errors/errors.go | 19 + .../pkg/util/intstr/generated.pb.go | 139 +- .../pkg/util/intstr/generated.proto | 2 +- .../apimachinery/pkg/util/intstr/intstr.go | 22 +- .../k8s.io/apimachinery/pkg/util/json/json.go | 12 + .../apimachinery/pkg/util/mergepatch/OWNERS | 5 + .../pkg/util/mergepatch/errors.go | 102 + .../apimachinery/pkg/util/mergepatch/util.go | 133 + .../k8s.io/apimachinery/pkg/util/net/http.go | 269 +- .../apimachinery/pkg/util/net/interface.go | 314 +- .../apimachinery/pkg/util/net/port_range.go | 66 +- .../k8s.io/apimachinery/pkg/util/net/util.go | 14 +- .../k8s.io/apimachinery/pkg/util/rand/rand.go | 49 +- .../apimachinery/pkg/util/runtime/runtime.go | 10 +- .../k8s.io/apimachinery/pkg/util/sets/byte.go | 6 +- .../k8s.io/apimachinery/pkg/util/sets/doc.go | 4 +- .../apimachinery/pkg/util/sets/empty.go | 4 +- .../k8s.io/apimachinery/pkg/util/sets/int.go | 6 +- .../apimachinery/pkg/util/sets/int64.go | 6 +- .../apimachinery/pkg/util/sets/string.go | 6 +- .../pkg/util/strategicpatch/OWNERS | 5 + .../pkg/util/strategicpatch/errors.go | 49 + .../pkg/util/strategicpatch/meta.go | 194 + .../pkg/util/strategicpatch/patch.go | 2174 + .../pkg/util/strategicpatch/types.go | 193 + .../pkg/util/validation/field/errors.go | 7 +- .../pkg/util/validation/validation.go | 72 +- .../k8s.io/apimachinery/pkg/util/wait/wait.go | 93 +- .../apimachinery/pkg/util/yaml/decoder.go | 6 +- .../apimachinery/pkg/version/helpers.go | 88 + .../k8s.io/apimachinery/pkg/watch/filter.go | 6 +- vendor/k8s.io/apimachinery/pkg/watch/mux.go | 13 +- vendor/k8s.io/apimachinery/pkg/watch/until.go | 6 +- vendor/k8s.io/apimachinery/pkg/watch/watch.go | 5 +- .../pkg/watch/zz_generated.deepcopy.go | 42 + .../third_party/forked/golang/json/OWNERS | 5 + .../third_party/forked/golang/json/fields.go | 513 + .../forked/golang/reflect/deep_equal.go | 2 +- .../third_party/forked/golang/reflect/type.go | 91 - vendor/k8s.io/client-go/CHANGELOG.md | 420 + vendor/k8s.io/client-go/CONTRIBUTING.md | 7 + vendor/k8s.io/client-go/INSTALL.md | 162 + vendor/k8s.io/client-go/OWNERS | 15 + vendor/k8s.io/client-go/README.md | 192 + vendor/k8s.io/client-go/SECURITY_CONTACTS | 17 + vendor/k8s.io/client-go/code-of-conduct.md | 3 + .../client-go/discovery/cached_discovery.go | 282 + .../client-go/discovery/discovery_client.go | 235 +- .../client-go/discovery/fake/discovery.go | 64 +- vendor/k8s.io/client-go/discovery/helper.go | 61 +- .../k8s.io/client-go/discovery/restmapper.go | 320 - .../client-go/discovery/round_tripper.go | 51 + .../client-go/discovery/unstructured.go | 80 +- .../k8s.io/client-go/kubernetes/clientset.go | 520 +- vendor/k8s.io/client-go/kubernetes/doc.go | 4 +- 
.../kubernetes/fake/clientset_generated.go | 168 +- .../k8s.io/client-go/kubernetes/fake/doc.go | 4 +- .../client-go/kubernetes/fake/register.go | 112 +- vendor/k8s.io/client-go/kubernetes/import.go | 19 + .../kubernetes/import_known_versions.go | 42 - .../k8s.io/client-go/kubernetes/scheme/doc.go | 4 +- .../client-go/kubernetes/scheme/register.go | 69 +- .../v1alpha1/admissionregistration_client.go | 90 + .../v1alpha1}/doc.go | 6 +- .../v1alpha1}/fake/doc.go | 4 +- .../fake/fake_admissionregistration_client.go | 40 + .../fake/fake_initializerconfiguration.go | 120 + .../v1alpha1/generated_expansion.go | 21 + .../v1alpha1/initializerconfiguration.go | 147 + .../v1beta1/admissionregistration_client.go | 95 + .../admissionregistration/v1beta1/doc.go | 20 + .../admissionregistration/v1beta1/fake/doc.go | 20 + .../fake/fake_admissionregistration_client.go | 44 + .../fake/fake_mutatingwebhookconfiguration.go | 120 + .../fake_validatingwebhookconfiguration.go | 120 + .../v1beta1/generated_expansion.go} | 12 +- .../v1beta1/mutatingwebhookconfiguration.go | 147 + .../v1beta1/validatingwebhookconfiguration.go | 147 + .../kubernetes/typed/apps/v1/apps_client.go | 110 + .../typed/apps/v1/controllerrevision.go | 157 + .../kubernetes/typed/apps/v1/daemonset.go | 174 + .../kubernetes/typed/apps/v1/deployment.go | 174 + .../client-go/kubernetes/typed/apps/v1/doc.go | 20 + .../kubernetes/typed/apps/v1/fake/doc.go | 20 + .../typed/apps/v1/fake/fake_apps_client.go | 56 + .../apps/v1/fake/fake_controllerrevision.go | 128 + .../typed/apps/v1/fake/fake_daemonset.go | 140 + .../typed/apps/v1/fake/fake_deployment.go | 140 + .../typed/apps/v1/fake/fake_replicaset.go | 140 + .../typed/apps/v1/fake/fake_statefulset.go | 140 + .../typed/apps/v1/generated_expansion.go} | 18 +- .../kubernetes/typed/apps/v1/replicaset.go | 174 + .../kubernetes/typed/apps/v1/statefulset.go | 174 + .../typed/apps/v1beta1/apps_client.go | 11 +- .../typed/apps/v1beta1/controllerrevision.go | 157 + .../typed/apps/v1beta1/deployment.go | 78 +- .../kubernetes/typed/apps/v1beta1/doc.go | 4 +- .../kubernetes/typed/apps/v1beta1/fake/doc.go | 4 +- .../apps/v1beta1/fake/fake_apps_client.go | 8 +- .../v1beta1/fake/fake_controllerrevision.go | 128 + .../apps/v1beta1/fake/fake_deployment.go | 106 +- .../typed/apps/v1beta1/fake/fake_scale.go | 4 +- .../apps/v1beta1/fake/fake_statefulset.go | 106 +- .../typed/apps/v1beta1/generated_expansion.go | 6 +- .../kubernetes/typed/apps/v1beta1/scale.go | 4 +- .../typed/apps/v1beta1/statefulset.go | 78 +- .../typed/apps/v1beta2/apps_client.go | 115 + .../typed/apps/v1beta2/controllerrevision.go | 157 + .../typed/apps/v1beta2/daemonset.go | 174 + .../typed/apps/v1beta2/deployment.go | 174 + .../kubernetes/typed/apps/v1beta2}/doc.go | 8 +- .../kubernetes/typed/apps/v1beta2/fake/doc.go | 20 + .../apps/v1beta2/fake/fake_apps_client.go | 60 + .../v1beta2/fake/fake_controllerrevision.go | 128 + .../typed/apps/v1beta2/fake/fake_daemonset.go | 140 + .../apps/v1beta2/fake/fake_deployment.go | 140 + .../apps/v1beta2/fake/fake_replicaset.go | 140 + .../typed/apps/v1beta2/fake/fake_scale.go} | 14 +- .../apps/v1beta2/fake/fake_statefulset.go | 162 + .../typed/apps/v1beta2/generated_expansion.go | 31 + .../typed/apps/v1beta2/replicaset.go | 174 + .../kubernetes/typed/apps/v1beta2/scale.go | 48 + .../typed/apps/v1beta2/statefulset.go | 205 + .../v1/authentication_client.go | 6 +- .../kubernetes/typed/authentication/v1/doc.go | 4 +- .../typed/authentication/v1/fake/doc.go | 4 +- .../v1/fake/fake_authentication_client.go | 4 +- 
.../v1/fake/fake_tokenreview.go | 4 +- .../v1/fake/fake_tokenreview_expansion.go | 2 +- .../authentication/v1/generated_expansion.go | 4 +- .../typed/authentication/v1/tokenreview.go | 4 +- .../v1/tokenreview_expansion.go | 2 +- .../v1beta1/authentication_client.go | 6 +- .../typed/authentication/v1beta1/doc.go | 4 +- .../typed/authentication/v1beta1/fake/doc.go | 4 +- .../fake/fake_authentication_client.go | 4 +- .../v1beta1/fake/fake_tokenreview.go | 4 +- .../fake/fake_tokenreview_expansion.go | 2 +- .../v1beta1/generated_expansion.go | 4 +- .../authentication/v1beta1/tokenreview.go | 4 +- .../v1beta1/tokenreview_expansion.go | 2 +- .../authorization/v1/authorization_client.go | 11 +- .../kubernetes/typed/authorization/v1/doc.go | 4 +- .../typed/authorization/v1/fake/doc.go | 4 +- .../v1/fake/fake_authorization_client.go | 8 +- .../v1/fake/fake_localsubjectaccessreview.go | 4 +- ...fake_localsubjectaccessreview_expansion.go | 2 +- .../v1/fake/fake_selfsubjectaccessreview.go | 4 +- .../fake_selfsubjectaccessreview_expansion.go | 2 +- .../v1/fake/fake_selfsubjectrulesreview.go} | 14 +- .../fake_selfsubjectrulesreview_expansion.go} | 17 +- .../v1/fake/fake_subjectaccessreview.go | 4 +- .../fake_subjectaccessreview_expansion.go | 2 +- .../authorization/v1/generated_expansion.go | 4 +- .../v1/localsubjectaccessreview.go | 4 +- .../v1/localsubjectaccessreview_expansion.go | 2 +- .../v1/selfsubjectaccessreview.go | 4 +- .../v1/selfsubjectaccessreview_expansion.go | 2 +- .../v1/selfsubjectrulesreview.go | 46 + .../v1/selfsubjectrulesreview_expansion.go} | 23 +- .../authorization/v1/subjectaccessreview.go | 4 +- .../v1/subjectaccessreview_expansion.go | 2 +- .../v1beta1/authorization_client.go | 11 +- .../typed/authorization/v1beta1/doc.go | 4 +- .../typed/authorization/v1beta1/fake/doc.go | 4 +- .../v1beta1/fake/fake_authorization_client.go | 8 +- .../fake/fake_localsubjectaccessreview.go | 4 +- ...fake_localsubjectaccessreview_expansion.go | 2 +- .../fake/fake_selfsubjectaccessreview.go | 4 +- .../fake_selfsubjectaccessreview_expansion.go | 2 +- .../fake/fake_selfsubjectrulesreview.go} | 14 +- .../fake_selfsubjectrulesreview_expansion.go | 27 + .../v1beta1/fake/fake_subjectaccessreview.go | 4 +- .../fake_subjectaccessreview_expansion.go | 2 +- .../v1beta1/generated_expansion.go | 4 +- .../v1beta1/localsubjectaccessreview.go | 4 +- .../localsubjectaccessreview_expansion.go | 2 +- .../v1beta1/selfsubjectaccessreview.go | 4 +- .../selfsubjectaccessreview_expansion.go | 2 +- .../v1beta1/selfsubjectrulesreview.go | 46 + .../selfsubjectrulesreview_expansion.go} | 23 +- .../v1beta1/subjectaccessreview.go | 4 +- .../v1beta1/subjectaccessreview_expansion.go | 2 +- .../autoscaling/v1/autoscaling_client.go | 6 +- .../kubernetes/typed/autoscaling/v1/doc.go | 4 +- .../typed/autoscaling/v1/fake/doc.go | 4 +- .../v1/fake/fake_autoscaling_client.go | 4 +- .../v1/fake/fake_horizontalpodautoscaler.go | 130 +- .../autoscaling/v1/generated_expansion.go | 4 +- .../autoscaling/v1/horizontalpodautoscaler.go | 78 +- .../autoscaling_client.go | 36 +- .../typed/autoscaling/v2beta1/doc.go | 20 + .../typed/autoscaling/v2beta1/fake/doc.go | 20 + .../fake/fake_autoscaling_client.go | 12 +- .../fake/fake_horizontalpodautoscaler.go | 130 +- .../generated_expansion.go | 6 +- .../horizontalpodautoscaler.go | 110 +- .../kubernetes/typed/batch/v1/batch_client.go | 6 +- .../kubernetes/typed/batch/v1/doc.go | 4 +- .../kubernetes/typed/batch/v1/fake/doc.go | 4 +- .../typed/batch/v1/fake/fake_batch_client.go | 4 +- 
.../typed/batch/v1/fake/fake_job.go | 130 +- .../typed/batch/v1/generated_expansion.go | 4 +- .../kubernetes/typed/batch/v1/job.go | 78 +- .../typed/batch/v1beta1/batch_client.go | 90 + .../kubernetes/typed/batch/v1beta1/cronjob.go | 174 + .../kubernetes/typed/batch/v1beta1/doc.go | 20 + .../typed/batch/v1beta1/fake/doc.go | 20 + .../batch/v1beta1/fake/fake_batch_client.go | 40 + .../typed/batch/v1beta1/fake/fake_cronjob.go | 140 + .../batch/v1beta1/generated_expansion.go} | 7 +- .../typed/batch/v2alpha1/batch_client.go | 6 +- .../typed/batch/v2alpha1/cronjob.go | 78 +- .../kubernetes/typed/batch/v2alpha1/doc.go | 4 +- .../typed/batch/v2alpha1/fake/doc.go | 4 +- .../batch/v2alpha1/fake/fake_batch_client.go | 4 +- .../typed/batch/v2alpha1/fake/fake_cronjob.go | 106 +- .../batch/v2alpha1/generated_expansion.go | 4 +- .../v1beta1/certificates_client.go | 6 +- .../v1beta1/certificatesigningrequest.go | 72 +- .../certificatesigningrequest_expansion.go | 2 +- .../typed/certificates/v1beta1/doc.go | 4 +- .../typed/certificates/v1beta1/fake/doc.go | 4 +- .../v1beta1/fake/fake_certificates_client.go | 4 +- .../fake/fake_certificatesigningrequest.go | 98 +- ...ake_certificatesigningrequest_expansion.go | 2 +- .../v1beta1/generated_expansion.go | 4 +- .../typed/core/v1/componentstatus.go | 70 +- .../kubernetes/typed/core/v1/configmap.go | 76 +- .../kubernetes/typed/core/v1/core_client.go | 6 +- .../client-go/kubernetes/typed/core/v1/doc.go | 4 +- .../kubernetes/typed/core/v1/endpoints.go | 76 +- .../kubernetes/typed/core/v1/event.go | 76 +- .../typed/core/v1/event_expansion.go | 5 +- .../kubernetes/typed/core/v1/fake/doc.go | 4 +- .../core/v1/fake/fake_componentstatus.go | 102 +- .../typed/core/v1/fake/fake_configmap.go | 108 +- .../typed/core/v1/fake/fake_core_client.go | 4 +- .../typed/core/v1/fake/fake_endpoints.go | 108 +- .../typed/core/v1/fake/fake_event.go | 108 +- .../core/v1/fake/fake_event_expansion.go | 8 +- .../typed/core/v1/fake/fake_limitrange.go | 108 +- .../typed/core/v1/fake/fake_namespace.go | 114 +- .../core/v1/fake/fake_namespace_expansion.go | 2 +- .../typed/core/v1/fake/fake_node.go | 122 +- .../typed/core/v1/fake/fake_node_expansion.go | 2 +- .../core/v1/fake/fake_persistentvolume.go | 122 +- .../v1/fake/fake_persistentvolumeclaim.go | 130 +- .../kubernetes/typed/core/v1/fake/fake_pod.go | 130 +- .../typed/core/v1/fake/fake_pod_expansion.go | 4 +- .../typed/core/v1/fake/fake_podtemplate.go | 108 +- .../v1/fake/fake_replicationcontroller.go | 153 +- .../typed/core/v1/fake/fake_resourcequota.go | 130 +- .../typed/core/v1/fake/fake_secret.go | 108 +- .../typed/core/v1/fake/fake_service.go | 122 +- .../typed/core/v1/fake/fake_serviceaccount.go | 108 +- .../v1/fake/fake_serviceaccount_expansion.go} | 24 +- .../typed/core/v1/generated_expansion.go | 6 +- .../kubernetes/typed/core/v1/limitrange.go | 76 +- .../kubernetes/typed/core/v1/namespace.go | 113 +- .../typed/core/v1/namespace_expansion.go | 2 +- .../kubernetes/typed/core/v1/node.go | 72 +- .../typed/core/v1/node_expansion.go | 2 +- .../typed/core/v1/persistentvolume.go | 72 +- .../typed/core/v1/persistentvolumeclaim.go | 78 +- .../client-go/kubernetes/typed/core/v1/pod.go | 78 +- .../kubernetes/typed/core/v1/pod_expansion.go | 8 +- .../kubernetes/typed/core/v1/podtemplate.go | 76 +- .../typed/core/v1/replicationcontroller.go | 164 +- .../kubernetes/typed/core/v1/resourcequota.go | 78 +- .../kubernetes/typed/core/v1/secret.go | 76 +- .../kubernetes/typed/core/v1/service.go | 122 +- .../typed/core/v1/serviceaccount.go | 76 +- 
.../typed/core/v1/serviceaccount_expansion.go | 41 + .../kubernetes/typed/events/v1beta1/doc.go | 20 + .../kubernetes/typed/events/v1beta1/event.go | 157 + .../typed/events/v1beta1/events_client.go | 90 + .../typed/events/v1beta1/fake/doc.go | 20 + .../typed/events/v1beta1/fake/fake_event.go | 128 + .../events/v1beta1/fake/fake_events_client.go | 40 + .../events/v1beta1/generated_expansion.go} | 7 +- .../typed/extensions/v1beta1/daemonset.go | 78 +- .../typed/extensions/v1beta1/deployment.go | 163 +- .../v1beta1/deployment_expansion.go | 2 +- .../typed/extensions/v1beta1/doc.go | 4 +- .../extensions/v1beta1/extensions_client.go | 11 +- .../typed/extensions/v1beta1/fake/doc.go | 4 +- .../extensions/v1beta1/fake/fake_daemonset.go | 106 +- .../v1beta1/fake/fake_deployment.go | 128 +- .../v1beta1/fake/fake_deployment_expansion.go | 2 +- .../v1beta1/fake/fake_extensions_client.go | 8 +- .../extensions/v1beta1/fake/fake_ingress.go | 106 +- .../v1beta1/fake/fake_podsecuritypolicy.go | 78 +- .../v1beta1/fake/fake_replicaset.go | 128 +- .../extensions/v1beta1/fake/fake_scale.go | 4 +- .../v1beta1/fake/fake_scale_expansion.go | 2 +- .../v1beta1/fake/fake_thirdpartyresource.go | 110 - .../extensions/v1beta1/generated_expansion.go | 6 +- .../typed/extensions/v1beta1/ingress.go | 78 +- .../extensions/v1beta1/podsecuritypolicy.go | 70 +- .../typed/extensions/v1beta1/replicaset.go | 163 +- .../typed/extensions/v1beta1/scale.go | 4 +- .../extensions/v1beta1/scale_expansion.go | 6 +- .../extensions/v1beta1/thirdpartyresource.go | 145 - .../kubernetes/typed/networking/v1/doc.go | 20 + .../typed/networking/v1/fake/doc.go | 20 + .../v1/fake/fake_networking_client.go | 40 + .../networking/v1/fake/fake_networkpolicy.go | 128 + .../networking/v1/generated_expansion.go} | 6 +- .../typed/networking/v1/networking_client.go | 90 + .../typed/networking/v1/networkpolicy.go | 157 + .../kubernetes/typed/policy/v1beta1/doc.go | 4 +- .../typed/policy/v1beta1/eviction.go | 4 +- .../policy/v1beta1/eviction_expansion.go | 2 +- .../typed/policy/v1beta1/fake/doc.go | 4 +- .../policy/v1beta1/fake/fake_eviction.go | 4 +- .../v1beta1/fake/fake_eviction_expansion.go | 2 +- .../v1beta1/fake/fake_poddisruptionbudget.go | 106 +- .../v1beta1/fake/fake_podsecuritypolicy.go | 120 + .../policy/v1beta1/fake/fake_policy_client.go | 8 +- .../policy/v1beta1/generated_expansion.go | 6 +- .../policy/v1beta1/poddisruptionbudget.go | 78 +- .../typed/policy/v1beta1/podsecuritypolicy.go | 147 + .../typed/policy/v1beta1/policy_client.go | 11 +- .../kubernetes/typed/rbac/v1/clusterrole.go | 147 + .../typed/rbac/v1/clusterrolebinding.go | 147 + .../client-go/kubernetes/typed/rbac/v1/doc.go | 20 + .../kubernetes/typed/rbac/v1/fake/doc.go | 20 + .../typed/rbac/v1/fake/fake_clusterrole.go | 120 + .../rbac/v1/fake/fake_clusterrolebinding.go | 120 + .../typed/rbac/v1/fake/fake_rbac_client.go | 52 + .../typed/rbac/v1/fake/fake_role.go | 128 + .../typed/rbac/v1/fake/fake_rolebinding.go | 128 + .../typed/rbac/v1/generated_expansion.go} | 17 +- .../kubernetes/typed/rbac/v1/rbac_client.go | 105 + .../kubernetes/typed/rbac/v1/role.go | 157 + .../kubernetes/typed/rbac/v1/rolebinding.go | 157 + .../typed/rbac/v1alpha1/clusterrole.go | 70 +- .../typed/rbac/v1alpha1/clusterrolebinding.go | 70 +- .../kubernetes/typed/rbac/v1alpha1/doc.go | 4 +- .../typed/rbac/v1alpha1/fake/doc.go | 4 +- .../rbac/v1alpha1/fake/fake_clusterrole.go | 78 +- .../v1alpha1/fake/fake_clusterrolebinding.go | 78 +- .../rbac/v1alpha1/fake/fake_rbac_client.go | 4 +- 
.../typed/rbac/v1alpha1/fake/fake_role.go | 84 +- .../rbac/v1alpha1/fake/fake_rolebinding.go | 84 +- .../rbac/v1alpha1/generated_expansion.go | 4 +- .../typed/rbac/v1alpha1/rbac_client.go | 6 +- .../kubernetes/typed/rbac/v1alpha1/role.go | 76 +- .../typed/rbac/v1alpha1/rolebinding.go | 76 +- .../typed/rbac/v1beta1/clusterrole.go | 70 +- .../typed/rbac/v1beta1/clusterrolebinding.go | 70 +- .../kubernetes/typed/rbac/v1beta1/doc.go | 4 +- .../kubernetes/typed/rbac/v1beta1/fake/doc.go | 4 +- .../rbac/v1beta1/fake/fake_clusterrole.go | 78 +- .../v1beta1/fake/fake_clusterrolebinding.go | 78 +- .../rbac/v1beta1/fake/fake_rbac_client.go | 4 +- .../typed/rbac/v1beta1/fake/fake_role.go | 84 +- .../rbac/v1beta1/fake/fake_rolebinding.go | 84 +- .../typed/rbac/v1beta1/generated_expansion.go | 4 +- .../typed/rbac/v1beta1/rbac_client.go | 6 +- .../kubernetes/typed/rbac/v1beta1/role.go | 76 +- .../typed/rbac/v1beta1/rolebinding.go | 76 +- .../typed/scheduling/v1alpha1/doc.go | 20 + .../typed/scheduling/v1alpha1/fake/doc.go | 20 + .../v1alpha1/fake/fake_priorityclass.go | 120 + .../v1alpha1/fake/fake_scheduling_client.go | 40 + .../v1alpha1/generated_expansion.go} | 7 +- .../scheduling/v1alpha1/priorityclass.go | 147 + .../scheduling/v1alpha1/scheduling_client.go | 90 + .../typed/scheduling/v1beta1/doc.go | 20 + .../typed/scheduling/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_priorityclass.go | 120 + .../v1beta1/fake/fake_scheduling_client.go | 40 + .../scheduling/v1beta1/generated_expansion.go | 21 + .../typed/scheduling/v1beta1/priorityclass.go | 147 + .../scheduling/v1beta1/scheduling_client.go | 90 + .../kubernetes/typed/settings/v1alpha1/doc.go | 4 +- .../typed/settings/v1alpha1/fake/doc.go | 4 +- .../settings/v1alpha1/fake/fake_podpreset.go | 84 +- .../v1alpha1/fake/fake_settings_client.go | 4 +- .../settings/v1alpha1/generated_expansion.go | 4 +- .../typed/settings/v1alpha1/podpreset.go | 76 +- .../settings/v1alpha1/settings_client.go | 6 +- .../kubernetes/typed/storage/v1/doc.go | 4 +- .../kubernetes/typed/storage/v1/fake/doc.go | 4 +- .../storage/v1/fake/fake_storage_client.go | 4 +- .../storage/v1/fake/fake_storageclass.go | 102 +- .../typed/storage/v1/generated_expansion.go | 4 +- .../typed/storage/v1/storage_client.go | 6 +- .../typed/storage/v1/storageclass.go | 70 +- .../kubernetes/typed/storage/v1alpha1/doc.go | 20 + .../typed/storage/v1alpha1/fake/doc.go | 20 + .../v1alpha1/fake/fake_storage_client.go | 40 + .../v1alpha1/fake/fake_volumeattachment.go | 131 + .../storage/v1alpha1/generated_expansion.go | 21 + .../typed/storage/v1alpha1/storage_client.go | 90 + .../storage/v1alpha1/volumeattachment.go | 163 + .../kubernetes/typed/storage/v1beta1/doc.go | 4 +- .../typed/storage/v1beta1/fake/doc.go | 4 +- .../v1beta1/fake/fake_storage_client.go | 8 +- .../storage/v1beta1/fake/fake_storageclass.go | 78 +- .../v1beta1/fake/fake_volumeattachment.go | 131 + .../storage/v1beta1/generated_expansion.go | 6 +- .../typed/storage/v1beta1/storage_client.go | 11 +- .../typed/storage/v1beta1/storageclass.go | 70 +- .../typed/storage/v1beta1/volumeattachment.go | 163 + vendor/k8s.io/client-go/pkg/api/OWNERS | 44 - vendor/k8s.io/client-go/pkg/api/defaults.go | 36 - vendor/k8s.io/client-go/pkg/api/doc.go | 22 - .../client-go/pkg/api/field_constants.go | 38 - vendor/k8s.io/client-go/pkg/api/helpers.go | 691 - .../k8s.io/client-go/pkg/api/install/OWNERS | 11 - .../client-go/pkg/api/install/install.go | 70 - vendor/k8s.io/client-go/pkg/api/json.go | 28 - vendor/k8s.io/client-go/pkg/api/ref.go | 132 - 
vendor/k8s.io/client-go/pkg/api/register.go | 135 - .../client-go/pkg/api/resource_helpers.go | 229 - vendor/k8s.io/client-go/pkg/api/types.go | 3822 - vendor/k8s.io/client-go/pkg/api/v1/OWNERS | 41 - .../k8s.io/client-go/pkg/api/v1/conversion.go | 785 - .../k8s.io/client-go/pkg/api/v1/defaults.go | 389 - .../k8s.io/client-go/pkg/api/v1/generate.go | 64 - vendor/k8s.io/client-go/pkg/api/v1/helpers.go | 632 - vendor/k8s.io/client-go/pkg/api/v1/meta.go | 98 - .../client-go/pkg/api/v1/resource_helpers.go | 257 - .../client-go/pkg/api/v1/types.generated.go | 73800 ---------------- .../pkg/api/v1/zz_generated.conversion.go | 4702 - .../pkg/api/v1/zz_generated.deepcopy.go | 3500 - .../pkg/api/v1/zz_generated.defaults.go | 631 - .../pkg/api/zz_generated.deepcopy.go | 3527 - vendor/k8s.io/client-go/pkg/apis/apps/OWNERS | 21 - .../pkg/apis/apps/install/install.go | 49 - .../client-go/pkg/apis/apps/register.go | 58 - .../k8s.io/client-go/pkg/apis/apps/types.go | 103 - .../pkg/apis/apps/v1beta1/conversion.go | 297 - .../pkg/apis/apps/v1beta1/defaults.go | 103 - .../pkg/apis/apps/v1beta1/types.generated.go | 6485 -- .../apps/v1beta1/zz_generated.conversion.go | 166 - .../apps/v1beta1/zz_generated.deepcopy.go | 355 - .../apps/v1beta1/zz_generated.defaults.go | 326 - .../pkg/apis/apps/zz_generated.deepcopy.go | 125 - .../client-go/pkg/apis/authentication/OWNERS | 9 - .../apis/authentication/install/install.go | 53 - .../pkg/apis/authentication/types.go | 89 - .../apis/authentication/v1/generated.pb.go | 1281 - .../pkg/apis/authentication/v1/types.go | 91 - .../v1/zz_generated.conversion.go | 145 - .../v1/zz_generated.deepcopy.go | 106 - .../authentication/v1beta1/types.generated.go | 1568 - .../v1beta1/zz_generated.conversion.go | 145 - .../v1beta1/zz_generated.deepcopy.go | 106 - .../authentication/zz_generated.deepcopy.go | 106 - .../client-go/pkg/apis/authorization/OWNERS | 17 - .../pkg/apis/authorization/install/install.go | 53 - .../client-go/pkg/apis/authorization/types.go | 146 - .../pkg/apis/authorization/v1/generated.pb.go | 2344 - .../apis/authorization/v1/types.generated.go | 3233 - .../v1/zz_generated.conversion.go | 263 - .../authorization/v1/zz_generated.deepcopy.go | 179 - .../authorization/v1beta1/generated.pb.go | 2344 - .../authorization/v1beta1/types.generated.go | 3233 - .../v1beta1/zz_generated.conversion.go | 263 - .../v1beta1/zz_generated.deepcopy.go | 179 - .../authorization/zz_generated.deepcopy.go | 179 - .../client-go/pkg/apis/autoscaling/OWNERS | 20 - .../pkg/apis/autoscaling/annotations.go | 30 - .../client-go/pkg/apis/autoscaling/doc.go | 17 - .../pkg/apis/autoscaling/install/install.go | 51 - .../pkg/apis/autoscaling/register.go | 53 - .../client-go/pkg/apis/autoscaling/types.go | 305 - .../pkg/apis/autoscaling/v1/conversion.go | 244 - .../pkg/apis/autoscaling/v1/defaults.go | 38 - .../apis/autoscaling/v1/types.generated.go | 5216 -- .../autoscaling/v1/zz_generated.conversion.go | 449 - .../autoscaling/v1/zz_generated.deepcopy.go | 312 - .../autoscaling/v1/zz_generated.defaults.go | 47 - .../pkg/apis/autoscaling/v2alpha1/defaults.go | 49 - .../pkg/apis/autoscaling/v2alpha1/doc.go | 17 - .../apis/autoscaling/v2alpha1/generated.pb.go | 3062 - .../pkg/apis/autoscaling/v2alpha1/register.go | 44 - .../autoscaling/v2alpha1/types.generated.go | 4621 - .../pkg/apis/autoscaling/v2alpha1/types.go | 269 - .../v2alpha1/types_swagger_doc_generated.go | 175 - .../v2alpha1/zz_generated.conversion.go | 387 - .../v2alpha1/zz_generated.deepcopy.go | 285 - 
.../apis/autoscaling/zz_generated.deepcopy.go | 320 - vendor/k8s.io/client-go/pkg/apis/batch/OWNERS | 19 - vendor/k8s.io/client-go/pkg/apis/batch/doc.go | 17 - .../pkg/apis/batch/install/install.go | 51 - .../client-go/pkg/apis/batch/register.go | 57 - .../k8s.io/client-go/pkg/apis/batch/types.go | 286 - .../client-go/pkg/apis/batch/v1/conversion.go | 84 - .../client-go/pkg/apis/batch/v1/defaults.go | 47 - .../k8s.io/client-go/pkg/apis/batch/v1/doc.go | 17 - .../pkg/apis/batch/v1/types.generated.go | 2681 - .../batch/v1/types_swagger_doc_generated.go | 93 - .../apis/batch/v1/zz_generated.conversion.go | 202 - .../apis/batch/v1/zz_generated.deepcopy.go | 162 - .../apis/batch/v1/zz_generated.defaults.go | 176 - .../pkg/apis/batch/v2alpha1/conversion.go | 44 - .../client-go/pkg/apis/batch/v2alpha1/doc.go | 17 - .../apis/batch/v2alpha1/types.generated.go | 2525 - .../batch/v2alpha1/zz_generated.conversion.go | 227 - .../batch/v2alpha1/zz_generated.deepcopy.go | 170 - .../batch/v2alpha1/zz_generated.defaults.go | 310 - .../pkg/apis/batch/zz_generated.deepcopy.go | 291 - .../client-go/pkg/apis/certificates/OWNERS | 14 - .../pkg/apis/certificates/helpers.go | 38 - .../pkg/apis/certificates/install/install.go | 51 - .../pkg/apis/certificates/register.go | 55 - .../client-go/pkg/apis/certificates/types.go | 143 - .../apis/certificates/v1beta1/conversion.go | 38 - .../pkg/apis/certificates/v1beta1/helpers.go | 38 - .../certificates/v1beta1/types.generated.go | 2624 - .../v1beta1/zz_generated.conversion.go | 179 - .../v1beta1/zz_generated.deepcopy.go | 150 - .../v1beta1/zz_generated.defaults.go | 47 - .../certificates/zz_generated.deepcopy.go | 150 - .../pkg/apis/clientauthentication/doc.go | 19 + .../register.go | 8 +- .../pkg/apis/clientauthentication/types.go | 77 + .../apis/clientauthentication/v1alpha1/doc.go | 23 + .../clientauthentication/v1alpha1/register.go | 55 + .../clientauthentication/v1alpha1/types.go | 78 + .../v1alpha1/zz_generated.conversion.go | 145 + .../v1alpha1/zz_generated.deepcopy.go | 128 + .../v1alpha1}/zz_generated.defaults.go | 6 +- .../v1beta1/conversion.go | 10 +- .../apis/clientauthentication/v1beta1/doc.go} | 14 +- .../clientauthentication/v1beta1/register.go | 55 + .../clientauthentication/v1beta1/types.go | 59 + .../v1beta1/zz_generated.conversion.go | 114 + .../v1beta1/zz_generated.deepcopy.go | 92 + .../v1beta1/zz_generated.defaults.go | 4 +- .../zz_generated.deepcopy.go | 128 + .../client-go/pkg/apis/extensions/OWNERS | 41 - .../client-go/pkg/apis/extensions/doc.go | 17 - .../client-go/pkg/apis/extensions/helpers.go | 37 - .../pkg/apis/extensions/install/install.go | 51 - .../client-go/pkg/apis/extensions/register.go | 70 - .../client-go/pkg/apis/extensions/types.go | 1124 - .../pkg/apis/extensions/v1beta1/conversion.go | 262 - .../pkg/apis/extensions/v1beta1/defaults.go | 138 - .../pkg/apis/extensions/v1beta1/doc.go | 17 - .../extensions/v1beta1/types.generated.go | 21745 ----- .../v1beta1/zz_generated.conversion.go | 1628 - .../v1beta1/zz_generated.deepcopy.go | 1087 - .../v1beta1/zz_generated.defaults.go | 475 - .../apis/extensions/zz_generated.deepcopy.go | 1059 - .../k8s.io/client-go/pkg/apis/policy/OWNERS | 14 - .../k8s.io/client-go/pkg/apis/policy/doc.go | 17 - .../pkg/apis/policy/install/install.go | 49 - .../client-go/pkg/apis/policy/register.go | 54 - .../k8s.io/client-go/pkg/apis/policy/types.go | 113 - .../pkg/apis/policy/v1beta1/generated.pb.go | 1375 - .../pkg/apis/policy/v1beta1/generated.proto | 109 - .../apis/policy/v1beta1/types.generated.go | 
2203 - .../pkg/apis/policy/v1beta1/types.go | 105 - .../v1beta1/types_swagger_doc_generated.go | 82 - .../policy/v1beta1/zz_generated.conversion.go | 172 - .../policy/v1beta1/zz_generated.deepcopy.go | 137 - .../pkg/apis/policy/zz_generated.deepcopy.go | 137 - vendor/k8s.io/client-go/pkg/apis/rbac/OWNERS | 17 - .../k8s.io/client-go/pkg/apis/rbac/helpers.go | 340 - .../pkg/apis/rbac/install/install.go | 53 - .../pkg/apis/rbac/v1alpha1/conversion.go | 81 - .../pkg/apis/rbac/v1alpha1/defaults.go | 53 - .../pkg/apis/rbac/v1alpha1/helpers.go | 146 - .../pkg/apis/rbac/v1alpha1/types.generated.go | 4879 - .../rbac/v1alpha1/zz_generated.conversion.go | 445 - .../rbac/v1alpha1/zz_generated.deepcopy.go | 258 - .../rbac/v1alpha1/zz_generated.defaults.go | 66 - .../pkg/apis/rbac/v1beta1/defaults.go | 53 - .../pkg/apis/rbac/v1beta1/helpers.go | 146 - .../pkg/apis/rbac/v1beta1/types.generated.go | 4879 - .../rbac/v1beta1/zz_generated.conversion.go | 389 - .../rbac/v1beta1/zz_generated.deepcopy.go | 258 - .../rbac/v1beta1/zz_generated.defaults.go | 66 - .../pkg/apis/rbac/zz_generated.deepcopy.go | 258 - .../k8s.io/client-go/pkg/apis/settings/doc.go | 18 - .../pkg/apis/settings/install/install.go | 49 - .../client-go/pkg/apis/settings/register.go | 52 - .../client-go/pkg/apis/settings/types.go | 63 - .../v1alpha1/zz_generated.conversion.go | 159 - .../v1alpha1/zz_generated.deepcopy.go | 124 - .../v1alpha1/zz_generated.defaults.go | 98 - .../apis/settings/zz_generated.deepcopy.go | 124 - .../k8s.io/client-go/pkg/apis/storage/OWNERS | 3 - .../pkg/apis/storage/install/install.go | 54 - .../client-go/pkg/apis/storage/register.go | 51 - .../client-go/pkg/apis/storage/types.go | 60 - .../pkg/apis/storage/v1/generated.pb.go | 730 - .../client-go/pkg/apis/storage/v1/types.go | 57 - .../storage/v1/types_swagger_doc_generated.go | 51 - .../storage/v1/zz_generated.conversion.go | 89 - .../apis/storage/v1/zz_generated.deepcopy.go | 80 - .../client-go/pkg/apis/storage/v1beta1/doc.go | 18 - .../pkg/apis/storage/v1beta1/generated.pb.go | 731 - .../pkg/apis/storage/v1beta1/generated.proto | 64 - .../apis/storage/v1beta1/types.generated.go | 985 - .../pkg/apis/storage/v1beta1/types.go | 57 - .../v1beta1/types_swagger_doc_generated.go | 51 - .../v1beta1/zz_generated.conversion.go | 89 - .../storage/v1beta1/zz_generated.deepcopy.go | 80 - .../pkg/apis/storage/zz_generated.deepcopy.go | 80 - vendor/k8s.io/client-go/pkg/util/doc.go | 20 - .../client-go/pkg/util/parsers/parsers.go | 54 - vendor/k8s.io/client-go/pkg/util/template.go | 48 - vendor/k8s.io/client-go/pkg/util/util.go | 131 - vendor/k8s.io/client-go/pkg/version/base.go | 16 +- vendor/k8s.io/client-go/pkg/version/def.bzl | 38 + vendor/k8s.io/client-go/pkg/version/doc.go | 1 + .../plugin/pkg/client/auth/exec/exec.go | 370 + vendor/k8s.io/client-go/rest/client.go | 4 +- vendor/k8s.io/client-go/rest/config.go | 114 +- vendor/k8s.io/client-go/rest/request.go | 311 +- vendor/k8s.io/client-go/rest/transport.go | 53 +- vendor/k8s.io/client-go/rest/url_utils.go | 11 +- vendor/k8s.io/client-go/rest/versions.go | 88 - .../client-go/rest/zz_generated.deepcopy.go | 52 + vendor/k8s.io/client-go/testing/actions.go | 192 +- vendor/k8s.io/client-go/testing/fake.go | 58 +- vendor/k8s.io/client-go/testing/fixture.go | 338 +- vendor/k8s.io/client-go/tools/cache/OWNERS | 11 +- .../client-go/tools/cache/controller.go | 53 +- .../client-go/tools/cache/delta_fifo.go | 88 +- .../client-go/tools/cache/expiration_cache.go | 2 +- .../tools/cache/expiration_cache_fakes.go | 2 +- 
vendor/k8s.io/client-go/tools/cache/fifo.go | 4 +- vendor/k8s.io/client-go/tools/cache/heap.go | 323 + vendor/k8s.io/client-go/tools/cache/index.go | 2 + .../k8s.io/client-go/tools/cache/listwatch.go | 31 +- .../client-go/tools/cache/mutation_cache.go | 261 + .../tools/cache/mutation_detector.go | 35 +- .../k8s.io/client-go/tools/cache/reflector.go | 72 +- .../tools/cache/reflector_metrics.go | 119 + .../client-go/tools/cache/shared_informer.go | 224 +- vendor/k8s.io/client-go/tools/cache/store.go | 4 + .../tools/cache/thread_safe_store.go | 28 +- .../api/v1 => tools/clientcmd/api}/doc.go | 4 +- .../client-go/tools/clientcmd/api/types.go | 66 +- .../clientcmd/api/zz_generated.deepcopy.go | 320 + vendor/k8s.io/client-go/tools/pager/pager.go | 117 + .../{pkg/api/v1 => tools/reference}/ref.go | 39 +- vendor/k8s.io/client-go/transport/cache.go | 51 +- vendor/k8s.io/client-go/transport/config.go | 19 +- .../client-go/transport/round_trippers.go | 28 +- .../k8s.io/client-go/transport/transport.go | 34 +- .../client-go/util/buffer/ring_growing.go | 72 + vendor/k8s.io/client-go/util/cert/cert.go | 42 +- vendor/k8s.io/client-go/util/cert/csr.go | 2 +- vendor/k8s.io/client-go/util/cert/io.go | 67 +- vendor/k8s.io/client-go/util/cert/pem.go | 143 +- .../util/connrotation/connrotation.go | 105 + .../client-go/util/flowcontrol/backoff.go | 2 +- .../client-go/util/flowcontrol/throttle.go | 59 +- vendor/k8s.io/client-go/util/retry/OWNERS | 2 + vendor/k8s.io/client-go/util/retry/util.go | 79 + .../util/workqueue/default_rate_limiters.go | 8 +- .../util/workqueue/delaying_queue.go | 167 +- .../k8s.io/client-go/util/workqueue/queue.go | 2 +- .../client-go/util/workqueue/timed_queue.go | 52 - vendor/k8s.io/kube-openapi/LICENSE | 202 + .../k8s.io/kube-openapi/pkg/util/proto/doc.go | 19 + .../kube-openapi/pkg/util/proto/document.go | 299 + .../kube-openapi/pkg/util/proto/openapi.go | 276 + vendor/vendor.json | 1630 +- 1185 files changed, 195218 insertions(+), 231331 deletions(-) delete mode 100644 vendor/github.com/golang/protobuf/proto/Makefile create mode 100644 vendor/github.com/golang/protobuf/proto/discard.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_marshal.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_merge.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_unmarshal.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto create mode 100644 vendor/github.com/google/btree/LICENSE create mode 100644 vendor/github.com/google/btree/README.md create mode 100644 vendor/github.com/google/btree/btree.go create mode 100644 
vendor/github.com/googleapis/gnostic/LICENSE create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json create mode 100644 vendor/github.com/googleapis/gnostic/compiler/README.md create mode 100644 vendor/github.com/googleapis/gnostic/compiler/context.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/error.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/extension-handler.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/helpers.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/main.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/reader.go create mode 100755 vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh create mode 100644 vendor/github.com/googleapis/gnostic/extensions/README.md create mode 100644 vendor/github.com/googleapis/gnostic/extensions/extension.pb.go create mode 100644 vendor/github.com/googleapis/gnostic/extensions/extension.proto create mode 100644 vendor/github.com/googleapis/gnostic/extensions/extensions.go create mode 100644 vendor/github.com/gregjones/httpcache/LICENSE.txt create mode 100644 vendor/github.com/gregjones/httpcache/README.md create mode 100644 vendor/github.com/gregjones/httpcache/diskcache/diskcache.go create mode 100644 vendor/github.com/gregjones/httpcache/httpcache.go create mode 100644 vendor/github.com/hashicorp/golang-lru/2q.go create mode 100644 vendor/github.com/hashicorp/golang-lru/LICENSE create mode 100644 vendor/github.com/hashicorp/golang-lru/README.md create mode 100644 vendor/github.com/hashicorp/golang-lru/arc.go create mode 100644 vendor/github.com/hashicorp/golang-lru/doc.go create mode 100644 vendor/github.com/hashicorp/golang-lru/lru.go create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru.go create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go rename vendor/github.com/json-iterator/go/{feature_adapter.go => adapter.go} (94%) rename vendor/github.com/json-iterator/go/{feature_any.go => any.go} (75%) rename vendor/github.com/json-iterator/go/{feature_any_array.go => any_array.go} (100%) rename vendor/github.com/json-iterator/go/{feature_any_bool.go => any_bool.go} (100%) rename vendor/github.com/json-iterator/go/{feature_any_float.go => any_float.go} (100%) rename vendor/github.com/json-iterator/go/{feature_any_int32.go => any_int32.go} (100%) rename vendor/github.com/json-iterator/go/{feature_any_int64.go => any_int64.go} (100%) rename vendor/github.com/json-iterator/go/{feature_any_invalid.go => any_invalid.go} (100%) rename vendor/github.com/json-iterator/go/{feature_any_nil.go => any_nil.go} (100%) rename vendor/github.com/json-iterator/go/{feature_any_number.go => any_number.go} (100%) rename vendor/github.com/json-iterator/go/{feature_any_object.go => any_object.go} (100%) rename vendor/github.com/json-iterator/go/{feature_any_string.go => any_str.go} (97%) rename vendor/github.com/json-iterator/go/{feature_any_uint32.go => any_uint32.go} (100%) rename vendor/github.com/json-iterator/go/{feature_any_uint64.go => any_uint64.go} (100%) rename vendor/github.com/json-iterator/go/{feature_config.go => config.go} (63%) delete mode 100644 
vendor/github.com/json-iterator/go/feature_config_with_sync_map.go delete mode 100644 vendor/github.com/json-iterator/go/feature_config_without_sync_map.go delete mode 100644 vendor/github.com/json-iterator/go/feature_json_number.go delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect.go delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect_array.go delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect_map.go delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect_native.go delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect_slice.go rename vendor/github.com/json-iterator/go/{feature_iter.go => iter.go} (100%) rename vendor/github.com/json-iterator/go/{feature_iter_array.go => iter_array.go} (100%) rename vendor/github.com/json-iterator/go/{feature_iter_float.go => iter_float.go} (100%) rename vendor/github.com/json-iterator/go/{feature_iter_int.go => iter_int.go} (98%) rename vendor/github.com/json-iterator/go/{feature_iter_object.go => iter_object.go} (95%) rename vendor/github.com/json-iterator/go/{feature_iter_skip.go => iter_skip.go} (100%) rename vendor/github.com/json-iterator/go/{feature_iter_skip_sloppy.go => iter_skip_sloppy.go} (100%) rename vendor/github.com/json-iterator/go/{feature_iter_skip_strict.go => iter_skip_strict.go} (100%) rename vendor/github.com/json-iterator/go/{feature_iter_string.go => iter_str.go} (100%) rename vendor/github.com/json-iterator/go/{feature_pool.go => pool.go} (64%) create mode 100644 vendor/github.com/json-iterator/go/reflect.go create mode 100644 vendor/github.com/json-iterator/go/reflect_array.go create mode 100644 vendor/github.com/json-iterator/go/reflect_dynamic.go rename vendor/github.com/json-iterator/go/{feature_reflect_extension.go => reflect_extension.go} (57%) create mode 100644 vendor/github.com/json-iterator/go/reflect_json_number.go create mode 100644 vendor/github.com/json-iterator/go/reflect_json_raw_message.go create mode 100644 vendor/github.com/json-iterator/go/reflect_map.go create mode 100644 vendor/github.com/json-iterator/go/reflect_marshaler.go create mode 100644 vendor/github.com/json-iterator/go/reflect_native.go rename vendor/github.com/json-iterator/go/{feature_reflect_optional.go => reflect_optional.go} (52%) create mode 100644 vendor/github.com/json-iterator/go/reflect_slice.go rename vendor/github.com/json-iterator/go/{feature_reflect_struct_decoder.go => reflect_struct_decoder.go} (88%) rename vendor/github.com/json-iterator/go/{feature_reflect_object.go => reflect_struct_encoder.go} (53%) rename vendor/github.com/json-iterator/go/{feature_stream.go => stream.go} (97%) rename vendor/github.com/json-iterator/go/{feature_stream_float.go => stream_float.go} (100%) rename vendor/github.com/json-iterator/go/{feature_stream_int.go => stream_int.go} (95%) rename vendor/github.com/json-iterator/go/{feature_stream_string.go => stream_str.go} (100%) create mode 100644 vendor/github.com/modern-go/concurrent/LICENSE create mode 100644 vendor/github.com/modern-go/concurrent/README.md create mode 100644 vendor/github.com/modern-go/concurrent/executor.go create mode 100644 vendor/github.com/modern-go/concurrent/go_above_19.go create mode 100644 vendor/github.com/modern-go/concurrent/go_below_19.go create mode 100644 vendor/github.com/modern-go/concurrent/log.go create mode 100755 vendor/github.com/modern-go/concurrent/test.sh create mode 100644 vendor/github.com/modern-go/concurrent/unbounded_executor.go create mode 100644 
vendor/github.com/modern-go/reflect2/Gopkg.lock create mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.toml create mode 100644 vendor/github.com/modern-go/reflect2/LICENSE create mode 100644 vendor/github.com/modern-go/reflect2/README.md create mode 100644 vendor/github.com/modern-go/reflect2/go_above_17.go create mode 100644 vendor/github.com/modern-go/reflect2/go_above_19.go create mode 100644 vendor/github.com/modern-go/reflect2/go_below_17.go create mode 100644 vendor/github.com/modern-go/reflect2/go_below_19.go create mode 100644 vendor/github.com/modern-go/reflect2/reflect2.go create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_amd64.s create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_kind.go create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_386.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm64.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mips64x.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mipsx.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_s390x.s create mode 100644 vendor/github.com/modern-go/reflect2/safe_field.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_map.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_slice.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_struct.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_type.go create mode 100755 vendor/github.com/modern-go/reflect2/test.sh create mode 100644 vendor/github.com/modern-go/reflect2/type_map.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_array.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_eface.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_field.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_iface.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_link.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_map.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_ptr.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_slice.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_struct.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_type.go create mode 100644 vendor/github.com/peterbourgon/diskv/LICENSE create mode 100644 vendor/github.com/peterbourgon/diskv/README.md create mode 100644 vendor/github.com/peterbourgon/diskv/compression.go create mode 100644 vendor/github.com/peterbourgon/diskv/diskv.go create mode 100644 vendor/github.com/peterbourgon/diskv/index.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_linux.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_windows.go create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_386.s create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_amd64.s create mode 100644 
vendor/golang.org/x/sys/windows/dll_windows.go create mode 100644 vendor/golang.org/x/sys/windows/env_windows.go create mode 100644 vendor/golang.org/x/sys/windows/eventlog.go create mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go create mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go create mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go create mode 100644 vendor/golang.org/x/sys/windows/race.go create mode 100644 vendor/golang.org/x/sys/windows/race0.go create mode 100644 vendor/golang.org/x/sys/windows/security_windows.go create mode 100644 vendor/golang.org/x/sys/windows/service.go create mode 100644 vendor/golang.org/x/sys/windows/str.go create mode 100644 vendor/golang.org/x/sys/windows/syscall.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go create mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go create mode 100644 vendor/k8s.io/api/LICENSE create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/register.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/apis/authentication => api/apps}/v1/doc.go (85%) create mode 100644 vendor/k8s.io/api/apps/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/apps/v1/generated.proto create mode 100644 vendor/k8s.io/api/apps/v1/register.go create mode 100644 vendor/k8s.io/api/apps/v1/types.go create mode 100644 vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/apis/authentication => api/apps/v1beta1}/doc.go (84%) rename vendor/k8s.io/{client-go/pkg/apis => api}/apps/v1beta1/generated.pb.go (53%) rename vendor/k8s.io/{client-go/pkg/apis => api}/apps/v1beta1/generated.proto (56%) rename vendor/k8s.io/{client-go/pkg/apis => api}/apps/v1beta1/register.go (78%) rename vendor/k8s.io/{client-go/pkg/apis => api}/apps/v1beta1/types.go (54%) rename vendor/k8s.io/{client-go/pkg/apis => api}/apps/v1beta1/types_swagger_doc_generated.go (52%) create mode 100644 vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/apis/apps/v1beta1 => api/apps/v1beta2}/doc.go (84%) create mode 100644 vendor/k8s.io/api/apps/v1beta2/generated.pb.go create mode 100644 
vendor/k8s.io/api/apps/v1beta2/generated.proto create mode 100644 vendor/k8s.io/api/apps/v1beta2/register.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/types.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/authentication/v1/doc.go create mode 100644 vendor/k8s.io/api/authentication/v1/generated.pb.go rename vendor/k8s.io/{client-go/pkg/apis/authentication/v1beta1 => api/authentication/v1}/generated.proto (58%) rename vendor/k8s.io/{client-go/pkg/apis => api}/authentication/v1/register.go (78%) create mode 100644 vendor/k8s.io/api/authentication/v1/types.go rename vendor/k8s.io/{client-go/pkg/apis => api}/authentication/v1/types_swagger_doc_generated.go (58%) create mode 100644 vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/authentication/v1beta1/doc.go rename vendor/k8s.io/{client-go/pkg/apis => api}/authentication/v1beta1/generated.pb.go (65%) rename vendor/k8s.io/{client-go/pkg/apis/authentication/v1 => api/authentication/v1beta1}/generated.proto (94%) rename vendor/k8s.io/{client-go/pkg/apis => api}/authentication/v1beta1/register.go (79%) rename vendor/k8s.io/{client-go/pkg/apis => api}/authentication/v1beta1/types.go (95%) rename vendor/k8s.io/{client-go/pkg/apis => api}/authentication/v1beta1/types_swagger_doc_generated.go (97%) create mode 100644 vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/apis => api}/authorization/v1/doc.go (85%) create mode 100644 vendor/k8s.io/api/authorization/v1/generated.pb.go rename vendor/k8s.io/{client-go/pkg/apis => api}/authorization/v1/generated.proto (59%) rename vendor/k8s.io/{client-go/pkg/apis => api}/authorization/v1/register.go (75%) rename vendor/k8s.io/{client-go/pkg/apis => api}/authorization/v1/types.go (56%) rename vendor/k8s.io/{client-go/pkg/apis => api}/authorization/v1/types_swagger_doc_generated.go (58%) create mode 100644 vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/apis/authorization => api/authorization/v1beta1}/doc.go (83%) create mode 100644 vendor/k8s.io/api/authorization/v1beta1/generated.pb.go rename vendor/k8s.io/{client-go/pkg/apis => api}/authorization/v1beta1/generated.proto (59%) rename vendor/k8s.io/{client-go/pkg/apis => api}/authorization/v1beta1/register.go (75%) rename vendor/k8s.io/{client-go/pkg/apis => api}/authorization/v1beta1/types.go (56%) rename vendor/k8s.io/{client-go/pkg/apis => api}/authorization/v1beta1/types_swagger_doc_generated.go (58%) create mode 100644 vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/apis/authentication/v1beta1 => api/autoscaling/v1}/doc.go (84%) rename vendor/k8s.io/{client-go/pkg/apis => api}/autoscaling/v1/generated.pb.go (57%) rename vendor/k8s.io/{client-go/pkg/apis => api}/autoscaling/v1/generated.proto (71%) rename vendor/k8s.io/{client-go/pkg/apis => api}/autoscaling/v1/register.go (80%) rename vendor/k8s.io/{client-go/pkg/apis => api}/autoscaling/v1/types.go (68%) rename vendor/k8s.io/{client-go/pkg/apis => api}/autoscaling/v1/types_swagger_doc_generated.go (70%) create mode 100644 vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/doc.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go rename 
vendor/k8s.io/{client-go/pkg/apis/autoscaling/v2alpha1 => api/autoscaling/v2beta1}/generated.proto (50%) create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/register.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/types.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/apis/apps => api/batch/v1}/doc.go (85%) rename vendor/k8s.io/{client-go/pkg/apis => api}/batch/v1/generated.pb.go (67%) rename vendor/k8s.io/{client-go/pkg/apis => api}/batch/v1/generated.proto (65%) rename vendor/k8s.io/{client-go/pkg/apis => api}/batch/v1/register.go (79%) rename vendor/k8s.io/{client-go/pkg/apis => api}/batch/v1/types.go (67%) create mode 100644 vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/batch/v1beta1/register.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/types.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/batch/v2alpha1/doc.go rename vendor/k8s.io/{client-go/pkg/apis => api}/batch/v2alpha1/generated.pb.go (69%) rename vendor/k8s.io/{client-go/pkg/apis => api}/batch/v2alpha1/generated.proto (62%) rename vendor/k8s.io/{client-go/pkg/apis => api}/batch/v2alpha1/register.go (79%) rename vendor/k8s.io/{client-go/pkg/apis => api}/batch/v2alpha1/types.go (70%) rename vendor/k8s.io/{client-go/pkg/apis => api}/batch/v2alpha1/types_swagger_doc_generated.go (59%) create mode 100644 vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/apis/certificates => api/certificates/v1beta1}/doc.go (84%) rename vendor/k8s.io/{client-go/pkg/apis => api}/certificates/v1beta1/generated.pb.go (68%) rename vendor/k8s.io/{client-go/pkg/apis => api}/certificates/v1beta1/generated.proto (93%) rename vendor/k8s.io/{client-go/pkg/apis => api}/certificates/v1beta1/register.go (80%) rename vendor/k8s.io/{client-go/pkg/apis => api}/certificates/v1beta1/types.go (95%) rename vendor/k8s.io/{client-go/pkg/apis => api}/certificates/v1beta1/types_swagger_doc_generated.go (97%) create mode 100644 vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/core/v1/annotation_key_constants.go rename vendor/k8s.io/{client-go/pkg/apis/authorization/v1beta1/defaults.go => api/core/v1/doc.go} (78%) rename vendor/k8s.io/{client-go/pkg/api => api/core}/v1/generated.pb.go (61%) rename vendor/k8s.io/{client-go/pkg/api => api/core}/v1/generated.proto (66%) create mode 100644 vendor/k8s.io/api/core/v1/objectreference.go rename vendor/k8s.io/{client-go/pkg/api => api/core}/v1/register.go (87%) create mode 100644 vendor/k8s.io/api/core/v1/resource.go create mode 100644 vendor/k8s.io/api/core/v1/taint.go create mode 100644 vendor/k8s.io/api/core/v1/toleration.go rename vendor/k8s.io/{client-go/pkg/api => api/core}/v1/types.go (68%) rename vendor/k8s.io/{client-go/pkg/api => api/core}/v1/types_swagger_doc_generated.go (60%) create mode 100644 vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go create mode 100644 
vendor/k8s.io/api/events/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/events/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/events/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/events/v1beta1/register.go create mode 100644 vendor/k8s.io/api/events/v1beta1/types.go create mode 100644 vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/extensions/v1beta1/doc.go rename vendor/k8s.io/{client-go/pkg/apis => api}/extensions/v1beta1/generated.pb.go (68%) rename vendor/k8s.io/{client-go/pkg/apis => api}/extensions/v1beta1/generated.proto (64%) rename vendor/k8s.io/{client-go/pkg/apis => api}/extensions/v1beta1/register.go (82%) rename vendor/k8s.io/{client-go/pkg/apis => api}/extensions/v1beta1/types.go (65%) rename vendor/k8s.io/{client-go/pkg/apis => api}/extensions/v1beta1/types_swagger_doc_generated.go (55%) create mode 100644 vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/networking/v1/doc.go create mode 100644 vendor/k8s.io/api/networking/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/networking/v1/generated.proto create mode 100644 vendor/k8s.io/api/networking/v1/register.go create mode 100644 vendor/k8s.io/api/networking/v1/types.go create mode 100644 vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/apis => api}/policy/v1beta1/doc.go (87%) create mode 100644 vendor/k8s.io/api/policy/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/generated.proto rename vendor/k8s.io/{client-go/pkg/apis => api}/policy/v1beta1/register.go (78%) create mode 100644 vendor/k8s.io/api/policy/v1beta1/types.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/apis/rbac/v1beta1 => api/rbac/v1}/doc.go (86%) create mode 100644 vendor/k8s.io/api/rbac/v1/generated.pb.go rename vendor/k8s.io/{client-go/pkg/apis/rbac/v1beta1 => api/rbac/v1}/generated.proto (88%) rename vendor/k8s.io/{client-go/pkg/apis/rbac => api/rbac/v1}/register.go (71%) rename vendor/k8s.io/{client-go/pkg/apis/rbac/v1beta1 => api/rbac/v1}/types.go (83%) rename vendor/k8s.io/{client-go/pkg/apis/rbac/v1beta1 => api/rbac/v1}/types_swagger_doc_generated.go (86%) create mode 100644 vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/apis/rbac => api/rbac/v1alpha1}/doc.go (84%) rename vendor/k8s.io/{client-go/pkg/apis => api}/rbac/v1alpha1/generated.pb.go (69%) rename vendor/k8s.io/{client-go/pkg/apis => api}/rbac/v1alpha1/generated.proto (89%) rename vendor/k8s.io/{client-go/pkg/apis => api}/rbac/v1alpha1/register.go (80%) rename vendor/k8s.io/{client-go/pkg/apis => api}/rbac/v1alpha1/types.go (83%) rename vendor/k8s.io/{client-go/pkg/apis => api}/rbac/v1alpha1/types_swagger_doc_generated.go (86%) create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/rbac/v1beta1/doc.go rename vendor/k8s.io/{client-go/pkg/apis => api}/rbac/v1beta1/generated.pb.go (69%) create mode 100644 vendor/k8s.io/api/rbac/v1beta1/generated.proto rename vendor/k8s.io/{client-go/pkg/apis => api}/rbac/v1beta1/register.go (80%) rename vendor/k8s.io/{client-go/pkg/apis/rbac => 
api/rbac/v1beta1}/types.go (51%) create mode 100644 vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/util/umask.go => api/scheduling/v1beta1/doc.go} (74%) create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/register.go create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/types.go create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/apis => api}/settings/v1alpha1/doc.go (84%) rename vendor/k8s.io/{client-go/pkg/apis => api}/settings/v1alpha1/generated.pb.go (65%) rename vendor/k8s.io/{client-go/pkg/apis => api}/settings/v1alpha1/generated.proto (76%) create mode 100644 vendor/k8s.io/api/settings/v1alpha1/register.go rename vendor/k8s.io/{client-go/pkg/apis => api}/settings/v1alpha1/types.go (87%) rename vendor/k8s.io/{client-go/pkg/apis => api}/settings/v1alpha1/types_swagger_doc_generated.go (88%) create mode 100644 vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/apis => api}/storage/v1/doc.go (91%) create mode 100644 vendor/k8s.io/api/storage/v1/generated.pb.go rename vendor/k8s.io/{client-go/pkg/apis => api}/storage/v1/generated.proto (52%) rename vendor/k8s.io/{client-go/pkg/apis => api}/storage/v1/register.go (79%) create mode 100644 vendor/k8s.io/api/storage/v1/types.go create mode 100644 vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.proto rename vendor/k8s.io/{client-go/pkg/apis/settings => api/storage}/v1alpha1/register.go (91%) create mode 100644 vendor/k8s.io/api/storage/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/apis/storage => api/storage/v1beta1}/doc.go (84%) create mode 100644 vendor/k8s.io/api/storage/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/generated.proto rename vendor/k8s.io/{client-go/pkg/apis => api}/storage/v1beta1/register.go (77%) create mode 100644 vendor/k8s.io/api/storage/v1beta1/types.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/default.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go delete mode 100644 
vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go rename vendor/k8s.io/{client-go/pkg/util/umask_windows.go => apimachinery/pkg/api/resource/zz_generated.deepcopy.go} (62%) delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/well_known_labels.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go rename vendor/k8s.io/{client-go/pkg/apis/certificates/v1beta1/defaults.go => apimachinery/pkg/apis/meta/v1beta1/deepcopy.go} (56%) create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto rename vendor/k8s.io/{client-go/pkg/apis/authorization => apimachinery/pkg/apis/meta/v1beta1}/register.go (54%) create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go rename vendor/k8s.io/{client-go/pkg/apis/storage => apimachinery/pkg/apis/meta}/v1beta1/zz_generated.defaults.go (88%) delete mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/cloner.go create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/openapi/common.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/converter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/cache.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go rename vendor/k8s.io/{client-go => apimachinery/pkg}/util/clock/clock.go (90%) create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/version/helpers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go delete mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go create mode 100644 vendor/k8s.io/client-go/CHANGELOG.md create mode 100644 vendor/k8s.io/client-go/CONTRIBUTING.md create mode 100644 vendor/k8s.io/client-go/INSTALL.md create mode 100644 vendor/k8s.io/client-go/OWNERS create mode 100644 vendor/k8s.io/client-go/README.md create mode 100644 vendor/k8s.io/client-go/SECURITY_CONTACTS create mode 100644 vendor/k8s.io/client-go/code-of-conduct.md create mode 100644 vendor/k8s.io/client-go/discovery/cached_discovery.go delete mode 100644 vendor/k8s.io/client-go/discovery/restmapper.go create mode 100644 vendor/k8s.io/client-go/discovery/round_tripper.go create mode 100644 vendor/k8s.io/client-go/kubernetes/import.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/import_known_versions.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go rename vendor/k8s.io/client-go/kubernetes/typed/{autoscaling/v2alpha1 => admissionregistration/v1alpha1}/doc.go (83%) rename vendor/k8s.io/client-go/kubernetes/typed/{autoscaling/v2alpha1 => admissionregistration/v1alpha1}/fake/doc.go (85%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go rename vendor/k8s.io/client-go/{pkg/apis/authentication/v1beta1/defaults.go => kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go} (74%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go rename vendor/k8s.io/client-go/{pkg/apis/authorization/v1/defaults.go => kubernetes/typed/apps/v1/generated_expansion.go} (66%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go rename vendor/k8s.io/{apimachinery/pkg/openapi => client-go/kubernetes/typed/apps/v1beta2}/doc.go (76%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go rename vendor/k8s.io/client-go/{pkg/apis/authentication/v1/defaults.go => kubernetes/typed/apps/v1beta2/fake/fake_scale.go} (73%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go rename vendor/k8s.io/client-go/{pkg/apis/authentication/v1beta1/conversion.go => kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go} (69%) rename vendor/k8s.io/client-go/{pkg/apis/authentication/v1/zz_generated.defaults.go => 
kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go} (54%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go rename vendor/k8s.io/client-go/{pkg/apis/authorization/v1/zz_generated.defaults.go => kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go} (53%) rename vendor/k8s.io/client-go/{pkg/apis/authentication/v1/conversion.go => kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go} (68%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go rename vendor/k8s.io/client-go/{pkg/apis/authorization/v1beta1/zz_generated.defaults.go => kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go} (53%) rename vendor/k8s.io/client-go/kubernetes/typed/autoscaling/{v2alpha1 => v2beta1}/autoscaling_client.go (60%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go rename vendor/k8s.io/client-go/kubernetes/typed/autoscaling/{v2alpha1 => v2beta1}/fake/fake_autoscaling_client.go (69%) rename vendor/k8s.io/client-go/kubernetes/typed/autoscaling/{v2alpha1 => v2beta1}/fake/fake_horizontalpodautoscaler.go (52%) rename vendor/k8s.io/client-go/kubernetes/typed/autoscaling/{v2alpha1 => v2beta1}/generated_expansion.go (85%) rename vendor/k8s.io/client-go/kubernetes/typed/autoscaling/{v2alpha1 => v2beta1}/horizontalpodautoscaler.go (76%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go rename vendor/k8s.io/client-go/{pkg/apis/authorization/v1beta1/doc.go => kubernetes/typed/batch/v1beta1/generated_expansion.go} (82%) rename vendor/k8s.io/client-go/{pkg/apis/batch/v2alpha1/defaults.go => kubernetes/typed/core/v1/fake/fake_serviceaccount_expansion.go} (52%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go rename vendor/k8s.io/client-go/{pkg/apis/certificates/v1beta1/doc.go => kubernetes/typed/events/v1beta1/generated_expansion.go} (82%) delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go rename vendor/k8s.io/client-go/{pkg/apis/autoscaling/v1/doc.go => kubernetes/typed/networking/v1/generated_expansion.go} (81%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go rename vendor/k8s.io/client-go/{pkg/apis/authorization/v1/conversion.go => kubernetes/typed/rbac/v1/generated_expansion.go} (69%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go rename vendor/k8s.io/client-go/{pkg/apis/rbac/v1alpha1/doc.go => kubernetes/typed/scheduling/v1alpha1/generated_expansion.go} (81%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/api/defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/field_constants.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/helpers.go delete mode 100755 vendor/k8s.io/client-go/pkg/api/install/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/api/install/install.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/json.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/ref.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/register.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/resource_helpers.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/types.go delete mode 100755 vendor/k8s.io/client-go/pkg/api/v1/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/api/v1/conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/v1/defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/v1/generate.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/v1/helpers.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/v1/meta.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/v1/resource_helpers.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/v1/types.generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/v1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go delete mode 100755 vendor/k8s.io/client-go/pkg/apis/apps/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/apis/apps/install/install.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/apps/register.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/apps/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/apps/zz_generated.deepcopy.go delete mode 100755 vendor/k8s.io/client-go/pkg/apis/authentication/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/install/install.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/types.go delete mode 100644 
vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.pb.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/zz_generated.deepcopy.go delete mode 100755 vendor/k8s.io/client-go/pkg/apis/authorization/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authorization/install/install.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authorization/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.pb.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authorization/zz_generated.deepcopy.go delete mode 100755 vendor/k8s.io/client-go/pkg/apis/autoscaling/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/annotations.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/install/install.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/register.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/register.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.conversion.go delete mode 100644 
vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/autoscaling/zz_generated.deepcopy.go delete mode 100755 vendor/k8s.io/client-go/pkg/apis/batch/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/install/install.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/register.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/v1/conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/v1/defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/v1/types.generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/batch/zz_generated.deepcopy.go delete mode 100755 vendor/k8s.io/client-go/pkg/apis/certificates/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/apis/certificates/helpers.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/certificates/install/install.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/certificates/register.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/certificates/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/helpers.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/certificates/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go rename vendor/k8s.io/client-go/pkg/apis/{authentication => clientauthentication}/register.go (91%) create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go rename 
vendor/k8s.io/client-go/pkg/apis/{storage/v1 => clientauthentication/v1alpha1}/zz_generated.defaults.go (87%) rename vendor/k8s.io/client-go/pkg/apis/{authorization => clientauthentication}/v1beta1/conversion.go (61%) rename vendor/k8s.io/{apimachinery/pkg/types/unix_user_id.go => client-go/pkg/apis/clientauthentication/v1beta1/doc.go} (62%) create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go rename vendor/k8s.io/client-go/pkg/apis/{authentication => clientauthentication}/v1beta1/zz_generated.defaults.go (88%) create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go delete mode 100755 vendor/k8s.io/client-go/pkg/apis/extensions/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/apis/extensions/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/extensions/helpers.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/extensions/install/install.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/extensions/register.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/extensions/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/extensions/zz_generated.deepcopy.go delete mode 100755 vendor/k8s.io/client-go/pkg/apis/policy/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/apis/policy/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/policy/install/install.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/policy/register.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/policy/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/policy/zz_generated.deepcopy.go delete mode 100755 vendor/k8s.io/client-go/pkg/apis/rbac/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/apis/rbac/helpers.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/rbac/install/install.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/defaults.go delete mode 100644 
vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/helpers.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/helpers.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/rbac/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/settings/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/settings/install/install.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/settings/register.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/settings/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/settings/zz_generated.deepcopy.go delete mode 100755 vendor/k8s.io/client-go/pkg/apis/storage/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/install/install.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/register.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.pb.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/v1/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/storage/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/util/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/util/parsers/parsers.go delete mode 100644 vendor/k8s.io/client-go/pkg/util/template.go delete mode 100644 vendor/k8s.io/client-go/pkg/util/util.go create mode 100644 vendor/k8s.io/client-go/pkg/version/def.bzl create mode 100644 
vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go delete mode 100644 vendor/k8s.io/client-go/rest/versions.go create mode 100644 vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/tools/cache/heap.go create mode 100644 vendor/k8s.io/client-go/tools/cache/mutation_cache.go create mode 100644 vendor/k8s.io/client-go/tools/cache/reflector_metrics.go rename vendor/k8s.io/client-go/{pkg/api/v1 => tools/clientcmd/api}/doc.go (91%) create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/tools/pager/pager.go rename vendor/k8s.io/client-go/{pkg/api/v1 => tools/reference}/ref.go (77%) create mode 100644 vendor/k8s.io/client-go/util/buffer/ring_growing.go create mode 100644 vendor/k8s.io/client-go/util/connrotation/connrotation.go create mode 100755 vendor/k8s.io/client-go/util/retry/OWNERS create mode 100644 vendor/k8s.io/client-go/util/retry/util.go delete mode 100644 vendor/k8s.io/client-go/util/workqueue/timed_queue.go create mode 100644 vendor/k8s.io/kube-openapi/LICENSE create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/document.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index a9b041db4..c0c3146e6 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -23,7 +23,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" - apiv1 "k8s.io/client-go/pkg/api/v1" + apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" ) diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index 35dd5b52a..fdca4eff1 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -18,10 +18,10 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/pkg/api/v1" ) func makeEndpoints() *v1.Endpoints { diff --git a/discovery/kubernetes/ingress.go b/discovery/kubernetes/ingress.go index 139058386..40d922869 100644 --- a/discovery/kubernetes/ingress.go +++ b/discovery/kubernetes/ingress.go @@ -22,7 +22,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" - "k8s.io/client-go/pkg/apis/extensions/v1beta1" + "k8s.io/api/extensions/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" ) diff --git a/discovery/kubernetes/ingress_test.go b/discovery/kubernetes/ingress_test.go index 54ea8caf6..80259c03f 100644 --- a/discovery/kubernetes/ingress_test.go +++ b/discovery/kubernetes/ingress_test.go @@ -19,8 +19,8 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" + "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) type TLSMode int diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index faf4087ee..4f6a074f1 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -27,13 
+27,12 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" + apiv1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" - "k8s.io/client-go/pkg/api" - apiv1 "k8s.io/client-go/pkg/api/v1" - extensionsv1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" ) @@ -161,7 +160,7 @@ type Discovery struct { func (d *Discovery) getNamespaces() []string { namespaces := d.namespaceDiscovery.Names if len(namespaces) == 0 { - namespaces = []string{api.NamespaceAll} + namespaces = []string{apiv1.NamespaceAll} } return namespaces } diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index 07c09d3ae..ec6733734 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -24,8 +24,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" - "k8s.io/client-go/pkg/api" - apiv1 "k8s.io/client-go/pkg/api/v1" + apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" ) @@ -189,7 +188,6 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group { // nodeAddresses returns the provided node's address, based on the priority: // 1. NodeInternalIP // 2. NodeExternalIP -// 3. NodeLegacyHostIP // 3. NodeHostName // // Derived from k8s.io/kubernetes/pkg/util/node/node.go @@ -205,9 +203,6 @@ func nodeAddress(node *apiv1.Node) (string, map[apiv1.NodeAddressType][]string, if addresses, ok := m[apiv1.NodeExternalIP]; ok { return addresses[0], m, nil } - if addresses, ok := m[apiv1.NodeAddressType(api.NodeLegacyHostIP)]; ok { - return addresses[0], m, nil - } if addresses, ok := m[apiv1.NodeHostName]; ok { return addresses[0], m, nil } diff --git a/discovery/kubernetes/node_test.go b/discovery/kubernetes/node_test.go index 8a021757d..cb413a1fc 100644 --- a/discovery/kubernetes/node_test.go +++ b/discovery/kubernetes/node_test.go @@ -19,8 +19,8 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api/v1" ) func makeNode(name, address string, labels map[string]string, annotations map[string]string) *v1.Node { diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go index 5b10f793d..4360993bd 100644 --- a/discovery/kubernetes/pod.go +++ b/discovery/kubernetes/pod.go @@ -23,9 +23,8 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/common/model" + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api" - apiv1 "k8s.io/client-go/pkg/api/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -252,5 +251,5 @@ func podReady(pod *apiv1.Pod) model.LabelValue { return lv(strings.ToLower(string(cond.Status))) } } - return lv(strings.ToLower(string(api.ConditionUnknown))) + return lv(strings.ToLower(string(apiv1.ConditionUnknown))) } diff --git a/discovery/kubernetes/pod_test.go b/discovery/kubernetes/pod_test.go index b921bf9c7..a3d464631 100644 --- a/discovery/kubernetes/pod_test.go +++ b/discovery/kubernetes/pod_test.go @@ -19,9 +19,9 @@ import ( "github.com/prometheus/common/model" 
"github.com/prometheus/prometheus/discovery/targetgroup" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/pkg/api/v1" ) func makeOptionalBool(v bool) *bool { diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index b3d225c05..5fe4c0490 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -22,7 +22,7 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/common/model" - apiv1 "k8s.io/client-go/pkg/api/v1" + apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" diff --git a/discovery/kubernetes/service_test.go b/discovery/kubernetes/service_test.go index e300c850a..581b2e35c 100644 --- a/discovery/kubernetes/service_test.go +++ b/discovery/kubernetes/service_test.go @@ -19,8 +19,8 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api/v1" ) func makeMultiPortService() *v1.Service { diff --git a/pkg/textparse/testdata.txt b/pkg/textparse/testdata.txt index c7f2a7af0..1617202d8 100644 --- a/pkg/textparse/testdata.txt +++ b/pkg/textparse/testdata.txt @@ -525,4 +525,7 @@ prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 prometheus_treecache_watcher_goroutines 0 # HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures. # TYPE prometheus_treecache_zookeeper_failures_total counter -prometheus_treecache_zookeeper_failures_total 0 \ No newline at end of file +prometheus_treecache_zookeeper_failures_total 0 +# HELP redis_up help text +# TYPE redis_up gauge +redis_up{addr="redis://localhost:6379",alias=""} 1 \ No newline at end of file diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE index 1b1b1921e..0f646931a 100644 --- a/vendor/github.com/golang/protobuf/LICENSE +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -1,7 +1,4 @@ -Go support for Protocol Buffers - Google's data interchange format - Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go index 82c61624e..a569810fe 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -44,6 +44,7 @@ import ( "errors" "fmt" "io" + "math" "reflect" "sort" "strconv" @@ -51,8 +52,12 @@ import ( "time" "github.com/golang/protobuf/proto" + + stpb "github.com/golang/protobuf/ptypes/struct" ) +const secondInNanos = int64(time.Second / time.Nanosecond) + // Marshaler is a configurable object for converting between // protocol buffer objects and a JSON representation for them. type Marshaler struct { @@ -70,10 +75,59 @@ type Marshaler struct { // Whether to use the original (.proto) name for fields. OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). 
+ AnyResolver AnyResolver +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize +// the way they are unmarshaled from JSON. Messages that implement this +// should also implement JSONPBMarshaler so that the custom format can be +// produced. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error } // Marshal marshals a protocol buffer into JSON. func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + v := reflect.ValueOf(pb) + if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return errors.New("Marshal called with nil") + } + // Check for unset required fields first. + if err := checkRequiredFields(pb); err != nil { + return err + } writer := &errWriter{writer: out} return m.marshalObject(writer, pb, "", "") } @@ -89,6 +143,12 @@ func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { type int32Slice []int32 +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + // For sorting extensions ids to ensure stable output. func (s int32Slice) Len() int { return len(s) } func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } @@ -100,6 +160,31 @@ type wkt interface { // marshalObject writes a struct to the Writer. func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + s := reflect.ValueOf(v).Elem() // Handle well-known types. @@ -115,30 +200,43 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU // Any is a bit more involved. return m.marshalAny(out, v, indent) case "Duration": - // "Generated output always contains 3, 6, or 9 fractional digits, + // "Generated output always contains 0, 3, 6, or 9 fractional digits, // depending on required precision." 
s, ns := s.Field(0).Int(), s.Field(1).Int() - d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond - x := fmt.Sprintf("%.9f", d.Seconds()) + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + if s < 0 { + ns = -ns + } + x := fmt.Sprintf("%d.%09d", s, ns) x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") out.write(`"`) out.write(x) out.write(`s"`) return out.err - case "Struct": - // Let marshalValue handle the `fields` map. + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. // TODO: pass the correct Properties if needed. return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) case "Timestamp": // "RFC 3339, where generated output will always be Z-normalized - // and uses 3, 6 or 9 fractional digits." + // and uses 0, 3, 6 or 9 fractional digits." s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } t := time.Unix(s, ns).UTC() // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). x := t.Format("2006-01-02T15:04:05.000000000") x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") out.write(`"`) out.write(x) out.write(`Z"`) @@ -180,7 +278,7 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU // IsNil will panic on most value kinds. switch value.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + case reflect.Chan, reflect.Func, reflect.Interface: if value.IsNil() { continue } @@ -208,6 +306,10 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU if value.Len() == 0 { continue } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } } } @@ -290,16 +392,17 @@ func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) turl := v.Field(0).String() val := v.Field(1).Bytes() - // Only the part of type_url after the last slash is relevant. - mname := turl - if slash := strings.LastIndex(mname, "/"); slash >= 0 { - mname = mname[slash+1:] + var msg proto.Message + var err error + if m.AnyResolver != nil { + msg, err = m.AnyResolver.Resolve(turl) + } else { + msg, err = defaultResolveAny(turl) } - mt := proto.MessageType(mname) - if mt == nil { - return fmt.Errorf("unknown message type %q", mname) + if err != nil { + return err } - msg := reflect.New(mt.Elem()).Interface().(proto.Message) + if err := proto.Unmarshal(val, msg); err != nil { return err } @@ -371,10 +474,15 @@ func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v refle // marshalValue writes the value to the Writer. func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { - var err error v = reflect.Indirect(v) + // Handle nil pointer + if v.Kind() == reflect.Invalid { + out.write("null") + return out.err + } + // Handle repeated elements. if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { out.write("[") @@ -404,9 +512,6 @@ func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v refle // Handle well-known types. 
// Most are handled up in marshalObject (because 99% are messages). - type wkt interface { - XXX_WellKnownType() string - } if wkt, ok := v.Interface().(wkt); ok { switch wkt.XXX_WellKnownType() { case "NullValue": @@ -494,6 +599,24 @@ func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v refle return out.err } + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + // Default handling defers to the encoding/json library. b, err := json.Marshal(v.Interface()) if err != nil { @@ -516,6 +639,12 @@ type Unmarshaler struct { // Whether to allow messages to contain unknown fields, as opposed to // failing to unmarshal. AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver } // UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. @@ -526,7 +655,10 @@ func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { if err := dec.Decode(&inputValue); err != nil { return err } - return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil) + if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { + return err + } + return checkRequiredFields(pb) } // Unmarshal unmarshals a JSON object stream into a protocol @@ -565,41 +697,97 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe // Allocate memory for pointer fields. if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler { + return nil + } target.Set(reflect.New(targetType.Elem())) + return u.unmarshalValue(target.Elem(), inputValue, prop) } - // Handle well-known types. - type wkt interface { - XXX_WellKnownType() string + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) } - if wkt, ok := target.Addr().Interface().(wkt); ok { - switch wkt.XXX_WellKnownType() { + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(wkt); ok { + switch w.XXX_WellKnownType() { case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": - // "Wrappers use the same representation in JSON - // as the wrapped primitive type, except that null is allowed." - // encoding/json will turn JSON `null` into Go `nil`, - // so we don't have to do any extra work. 
return u.unmarshalValue(target.Field(0), inputValue, prop) case "Any": - return fmt.Errorf("unmarshaling Any not supported yet") - case "Duration": - ivStr := string(inputValue) - if ivStr == "null" { - target.Field(0).SetInt(0) - target.Field(1).SetInt(0) - return nil + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. + // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err } - unq, err := strconv.Unquote(ivStr) + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } if err != nil { return err } + + if _, ok := m.(wkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, err := json.Marshal(jsonFields) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + d, err := time.ParseDuration(unq) if err != nil { return fmt.Errorf("bad Duration: %v", err) } + ns := d.Nanoseconds() s := ns / 1e9 ns %= 1e9 @@ -607,24 +795,69 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe target.Field(1).SetInt(ns) return nil case "Timestamp": - ivStr := string(inputValue) - if ivStr == "null" { - target.Field(0).SetInt(0) - target.Field(1).SetInt(0) - return nil - } - - unq, err := strconv.Unquote(ivStr) + unq, err := unquote(string(inputValue)) if err != nil { return err } + t, err := time.Parse(time.RFC3339Nano, unq) if err != nil { return fmt.Errorf("bad Timestamp: %v", err) } - target.Field(0).SetInt(int64(t.Unix())) + + target.Field(0).SetInt(t.Unix()) target.Field(1).SetInt(int64(t.Nanosecond())) return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{})) + for k, jv := range m { + pv := &stpb.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + 
target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) + } else if v, err := unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) + } else if v, err := strconv.ParseBool(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &stpb.ListValue{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + sv := &stpb.Struct{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil } } @@ -645,6 +878,9 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe target.Set(reflect.New(targetType.Elem())) target = target.Elem() } + if targetType.Kind() != reflect.Int32 { + return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) + } target.SetInt(int64(n)) return nil } @@ -708,6 +944,26 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe } } } + // Handle proto2 extensions. + if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } if !u.AllowUnknownFields && len(jsonFields) > 0 { // Pick any field to be the scapegoat. var f string @@ -726,11 +982,13 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe if err := json.Unmarshal(inputValue, &slc); err != nil { return err } - len := len(slc) - target.Set(reflect.MakeSlice(targetType, len, len)) - for i := 0; i < len; i++ { - if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { - return err + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } } } return nil @@ -742,40 +1000,48 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe if err := json.Unmarshal(inputValue, &mp); err != nil { return err } - target.Set(reflect.MakeMap(targetType)) - var keyprop, valprop *proto.Properties - if prop != nil { - // These could still be nil if the protobuf metadata is broken somehow. - // TODO: This won't work because the fields are unexported. - // We should probably just reparse them. - //keyprop, valprop = prop.mkeyprop, prop.mvalprop - } - for ks, raw := range mp { - // Unmarshal map key. 
The core json library already decoded the key into a - // string, so we handle that specially. Other types were quoted post-serialization. - var k reflect.Value - if targetType.Key().Kind() == reflect.String { - k = reflect.ValueOf(ks) - } else { - k = reflect.New(targetType.Key()).Elem() - if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. + var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + // TODO: pass the correct Properties if needed. + if err := u.unmarshalValue(k, json.RawMessage(ks), nil); err != nil { + return err + } + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + // TODO: pass the correct Properties if needed. + if err := u.unmarshalValue(v, raw, nil); err != nil { return err } + target.SetMapIndex(k, v) } - - // Unmarshal map value. - v := reflect.New(targetType.Elem()).Elem() - if err := u.unmarshalValue(v, raw, valprop); err != nil { - return err - } - target.SetMapIndex(k, v) } return nil } - // 64-bit integers can be encoded as strings. In this case we drop + // Non-finite numbers can be encoded as strings. + isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + + // integers & floats can be encoded as strings. In this case we drop // the quotes and proceed as normal. - isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || + targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || + targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 if isNum && strings.HasPrefix(string(inputValue), `"`) { inputValue = inputValue[1 : len(inputValue)-1] } @@ -784,6 +1050,12 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe return json.Unmarshal(inputValue, target.Addr().Interface()) } +func unquote(s string) (string, error) { + var ret string + err := json.Unmarshal([]byte(s), &ret) + return ret, err +} + // jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { var prop proto.Properties @@ -841,3 +1113,140 @@ func (s mapKeys) Less(i, j int) bool { } return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) } + +// checkRequiredFields returns an error if any required field in the given proto message is not set. +// This function is used by both Marshal and Unmarshal. While required fields only exist in a +// proto2 message, a proto3 message can contain proto2 message(s). +func checkRequiredFields(pb proto.Message) error { + // Most well-known type messages do not contain required fields. The "Any" type may contain + // a message that has required fields. + // + // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value + // field in order to transform that into JSON, and that should have returned an error if a + // required field is not set in the embedded message. 
+ // + // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the + // embedded message to store the serialized message in Any.Value field, and that should have + // returned an error if a required field is not set. + if _, ok := pb.(wkt); ok { + return nil + } + + v := reflect.ValueOf(pb) + // Skip message if it is not a struct pointer. + if v.Kind() != reflect.Ptr { + return nil + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + sfield := v.Type().Field(i) + + if sfield.PkgPath != "" { + // blank PkgPath means the field is exported; skip if not exported + continue + } + + if strings.HasPrefix(sfield.Name, "XXX_") { + continue + } + + // Oneof field is an interface implemented by wrapper structs containing the actual oneof + // field, i.e. an interface containing &T{real_value}. + if sfield.Tag.Get("protobuf_oneof") != "" { + if field.Kind() != reflect.Interface { + continue + } + v := field.Elem() + if v.Kind() != reflect.Ptr || v.IsNil() { + continue + } + v = v.Elem() + if v.Kind() != reflect.Struct || v.NumField() < 1 { + continue + } + field = v.Field(0) + sfield = v.Type().Field(0) + } + + protoTag := sfield.Tag.Get("protobuf") + if protoTag == "" { + continue + } + var prop proto.Properties + prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) + + switch field.Kind() { + case reflect.Map: + if field.IsNil() { + continue + } + // Check each map value. + keys := field.MapKeys() + for _, k := range keys { + v := field.MapIndex(k) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Slice: + // Handle non-repeated type, e.g. bytes. + if !prop.Repeated { + if prop.Required && field.IsNil() { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + + // Handle repeated type. + if field.IsNil() { + continue + } + // Check each slice item. + for i := 0; i < field.Len(); i++ { + v := field.Index(i) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Ptr: + if field.IsNil() { + if prop.Required { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + if err := checkRequiredFieldsInValue(field); err != nil { + return err + } + } + } + + // Handle proto2 extensions. + for _, ext := range proto.RegisteredExtensions(pb) { + if !proto.HasExtension(pb, ext) { + continue + } + ep, err := proto.GetExtension(pb, ext) + if err != nil { + return err + } + err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) + if err != nil { + return err + } + } + + return nil +} + +func checkRequiredFieldsInValue(v reflect.Value) error { + if pm, ok := v.Interface().(proto.Message); ok { + return checkRequiredFields(pm) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile deleted file mode 100644 index e2e0651a9..000000000 --- a/vendor/github.com/golang/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto - make diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go index e392575b3..3cd3249f7 100644 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -35,22 +35,39 @@ package proto import ( + "fmt" "log" "reflect" "strings" ) // Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) +func Clone(src Message) Message { + in := reflect.ValueOf(src) if in.IsNil() { - return pb + return src } - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) } // Merge merges src into dst. @@ -58,17 +75,24 @@ func Clone(pb Message) Message { // Elements of repeated fields will be appended. // Merge panics if src and dst are not the same type, or if dst is nil. 
func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + in := reflect.ValueOf(src) out := reflect.ValueOf(dst) if out.IsNil() { panic("proto: nil destination") } if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) } if in.IsNil() { - // Merging nil into non-nil is a quiet no-op + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) return } mergeStruct(out.Elem(), in.Elem()) @@ -84,7 +108,7 @@ func mergeStruct(out, in reflect.Value) { mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) } - if emIn, ok := extendable(in.Addr().Interface()); ok { + if emIn, err := extendable(in.Addr().Interface()); err == nil { emOut, _ := extendable(out.Addr().Interface()) mIn, muIn := emIn.extensionsRead() if mIn != nil { diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index aa207298f..d9aa3c42d 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -39,8 +39,6 @@ import ( "errors" "fmt" "io" - "os" - "reflect" ) // errOverflow is returned when an integer is too large to be represented. @@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow") // wire type is encountered. It does not get returned to user code. var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - // DecodeVarint reads a varint-encoded integer from the slice. // It returns the integer and the number of bytes consumed, or // zero if there is not enough. @@ -267,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) { return } -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -311,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) { return string(buf), nil } -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - // Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be +// unmarshal themselves. The argument points to data that may be // overwritten, so implementations should not keep references to the // buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. type Unmarshaler interface { Unmarshal([]byte) error } +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + // Unmarshal parses the protocol buffer representation in buf and places the // decoded result in pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. @@ -395,7 +334,13 @@ type Unmarshaler interface { // to preserve and append to existing data. func Unmarshal(buf []byte, pb Message) error { pb.Reset() - return UnmarshalMerge(buf, pb) + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) } // UnmarshalMerge parses the protocol buffer representation in buf and @@ -405,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error { // UnmarshalMerge merges into existing data in pb. // Most code should use Unmarshal instead. func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 return u.Unmarshal(buf) } return NewBuffer(buf).Unmarshal(pb) @@ -422,12 +375,17 @@ func (p *Buffer) DecodeMessage(pb Message) error { } // DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. 
func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) + err := Unmarshal(b[:x], pb) + p.index += y + return err } // Unmarshal parses the protocol buffer representation in the @@ -438,533 +396,33 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 err := u.Unmarshal(p.buf[p.index:]) p.index = len(p.buf) return err } - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. 
- err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. 
-func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. 
-func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. 
-func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) return err } diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go new file mode 100644 index 000000000..dea2617ce --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + di.discard(sp) + } + } + } + default: // E.g., *pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + di.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go index 2b30f8462..4c35d3373 100644 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -39,23 +39,19 @@ import ( "errors" "fmt" "reflect" - "sort" ) -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. 
type RequiredNotSetError struct { field string } func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } return fmt.Sprintf("proto: required field %q not set", e.field) } @@ -82,10 +78,6 @@ var ( const maxVarintBytes = 10 // maximum length of a varint -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - // EncodeVarint returns the varint encoding of x. // This is the format for the // int32, int64, uint32, uint64, bool, and enum @@ -119,18 +111,27 @@ func (p *Buffer) EncodeVarint(x uint64) error { // SizeVarint returns the varint encoding size of an integer. func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 } - return n + return 10 } // EncodeFixed64 writes a 64-bit integer to the Buffer. @@ -149,10 +150,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error { return nil } -func sizeFixed64(x uint64) int { - return 8 -} - // EncodeFixed32 writes a 32-bit integer to the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. @@ -165,10 +162,6 @@ func (p *Buffer) EncodeFixed32(x uint64) error { return nil } -func sizeFixed32(x uint64) int { - return 4 -} - // EncodeZigzag64 writes a zigzag-encoded 64-bit integer // to the Buffer. // This is the format used for the sint64 protocol buffer type. @@ -177,10 +170,6 @@ func (p *Buffer) EncodeZigzag64(x uint64) error { return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - // EncodeZigzag32 writes a zigzag-encoded 32-bit integer // to the Buffer. // This is the format used for the sint32 protocol buffer type. @@ -189,10 +178,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error { return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - // EncodeRawBytes writes a count-delimited byte buffer to the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -202,11 +187,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error { return nil } -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - // EncodeStringBytes writes an encoded string to the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) EncodeStringBytes(s string) error { @@ -215,319 +195,17 @@ func (p *Buffer) EncodeStringBytes(s string) error { return nil } -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - // Marshaler is the interface representing objects that can marshal themselves. type Marshaler interface { Marshal() ([]byte, error) } -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? 
- if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - // EncodeMessage writes the protocol buffer to the Buffer, // prefixed by a varint-encoded length. func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) } // All protocol buffer fields are nillable, but be careful. @@ -538,825 +216,3 @@ func isNil(v reflect.Value) bool { } return false } - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. 
-func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. 
- - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). 
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go index 2ed1cf596..d4db5a1c1 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -109,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool { // set/unset mismatch return false } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } f1, f2 = f1.Elem(), f2.Elem() } if !equalAny(f1, f2, sprop.Prop[i]) { @@ -146,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool { u1 := uf.Bytes() u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true + return bytes.Equal(u1, u2) } // v1 and v2 are known to have the same type. @@ -261,6 +248,15 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { m1, m2 := e1.value, e2.value + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + if m1 != nil && m2 != nil { // Both are unencoded. if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { @@ -276,8 +272,12 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { desc = m[extNum] } if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. 
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue + return false } var err error if m1 == nil { diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index eaad21831..816a3b9d6 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -38,6 +38,7 @@ package proto import ( "errors" "fmt" + "io" "reflect" "strconv" "sync" @@ -91,14 +92,29 @@ func (n notLocker) Unlock() {} // extendable returns the extendableProto interface for the given generated proto message. // If the proto message has the old extension format, it returns a wrapper that implements // the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok - } - return nil, false + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() } // XXX_InternalExtensions is an internal representation of proto extensions. @@ -143,9 +159,6 @@ func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Loc return e.p.extensionMap, &e.p.mu } -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() - // ExtensionDesc represents an extension specification. // Used in generated code from the protocol compiler. type ExtensionDesc struct { @@ -179,8 +192,8 @@ type Extension struct { // SetRawExtension is for testing only. func SetRawExtension(base Message, id int32, b []byte) { - epb, ok := extendable(base) - if !ok { + epb, err := extendable(base) + if err != nil { return } extmap := epb.extensionsWrite() @@ -205,7 +218,7 @@ func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { pbi = ea.extendableProtoV1 } if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) } // Check the range. if !isExtensionField(pb, extension.Field) { @@ -250,85 +263,11 @@ func extensionProperties(ed *ExtensionDesc) *Properties { return prop } -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
- continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - // HasExtension returns whether the given extension is present in pb. func HasExtension(pb Message, extension *ExtensionDesc) bool { // TODO: Check types, field numbers, etc.? - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return false } extmap, mu := epb.extensionsRead() @@ -336,15 +275,15 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool { return false } mu.Lock() - _, ok = extmap[extension.Field] + _, ok := extmap[extension.Field] mu.Unlock() return ok } // ClearExtension removes the given extension from pb. func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } // TODO: Check types, field numbers, etc.? @@ -352,16 +291,26 @@ func ClearExtension(pb Message, extension *ExtensionDesc) { delete(extmap, extension.Field) } -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return nil, err } - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } } emap, mu := epb.extensionsRead() @@ -388,6 +337,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { return e.value, nil } + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + v, err := decodeExtension(e.enc, extension) if err != nil { return nil, err @@ -405,6 +359,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // defaultExtensionValue returns the default value for extension. // If no default for an extension is defined ErrMissingExtension is returned. func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + t := reflect.TypeOf(extension.ExtensionType) props := extensionProperties(extension) @@ -439,31 +398,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { // decodeExtension decodes an extension encoded in b. func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) + unmarshal := typeUnmarshaler(t, extension.Tag) // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. + // Allocate space to store the pointer/slice. value := reflect.New(t).Elem() + var err error for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { return nil, err } - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if o.index >= len(o.buf) { + if len(b) == 0 { break } } @@ -473,9 +429,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return nil, err } extensions = make([]interface{}, len(es)) for i, e := range es { @@ -494,9 +450,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e // For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing // just the Field field, which defines the extension's field number. 
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + epb, err := extendable(pb) + if err != nil { + return nil, err } registeredExtensions := RegisteredExtensions(pb) @@ -523,9 +479,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return err } if err := checkExtensionTypes(epb, extension); err != nil { return err @@ -550,8 +506,8 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error // ClearAllExtensions clears all extensions from pb. func ClearAllExtensions(pb Message) { - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } m := epb.extensionsWrite() diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index 1c225504a..0e2191b8a 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -265,6 +265,7 @@ package proto import ( "encoding/json" + "errors" "fmt" "log" "reflect" @@ -273,6 +274,8 @@ import ( "sync" ) +var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string") + // Message is implemented by generated protocol buffer messages. type Message interface { Reset() @@ -309,16 +312,7 @@ type Buffer struct { buf []byte // encode/decode byte stream index int // read point - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 + deterministic bool } // NewBuffer allocates a new Buffer and initializes its internal data to @@ -343,6 +337,30 @@ func (p *Buffer) SetBuf(s []byte) { // Bytes returns the contents of the Buffer. func (p *Buffer) Bytes() []byte { return p.buf } +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + /* * Helper routines for simplifying the creation of optional fields of basic type. 
*/ @@ -831,22 +849,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes return sf, false, nil } +// mapKeys returns a sort.Interface to be used for sorting the map keys. // Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } + s := mapKeySorter{vs: vs} - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. if len(vs) == 0 { return s } @@ -855,6 +863,12 @@ func mapKeys(vs []reflect.Value) sort.Interface { s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } case reflect.Uint32, reflect.Uint64: s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) } return s @@ -895,3 +909,13 @@ const ProtoPackageIsVersion2 = true // ProtoPackageIsVersion1 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the proto package. const ProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go index fd982decd..3b6ca41d5 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -42,6 +42,7 @@ import ( "fmt" "reflect" "sort" + "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item { } func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false + return ms.find(pb) != nil } func (ms *messageSet) Unmarshal(pb Message) error { @@ -150,46 +148,42 @@ func skipVarint(buf []byte) []byte { // MarshalMessageSet encodes the extension map represented by m in the message set wire format. // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension + return marshalMessageSet(exts, false) +} + +// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. 
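// Editor's note: the deterministic flag threaded through here is reached by callers via
// Buffer.SetDeterministic (added in lib.go above); a minimal sketch, with msg standing in
// for any generated message value:
//
//	var buf proto.Buffer
//	buf.SetDeterministic(true)
//	if err := buf.Marshal(msg); err != nil {
//		log.Fatal(err)
//	}
//	out := buf.Bytes() // map entries are serialized in sorted key order
//	_ = out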
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { switch exts := exts.(type) { case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err + // This is an old-style extension map. + // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, } - m = exts + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + default: return nil, errors.New("proto: not an extension map") } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) } // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. func UnmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { @@ -235,7 +229,15 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } case map[int32]Extension: m = exts default: @@ -253,15 +255,16 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { for i, id := range ids { ext := m[id] - if i > 0 { - b.WriteByte(',') - } - msd, ok := messageSetMap[id] if !ok { // Unknown type; we can't render it, so skip it. continue } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) x := ext.value diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index fb512e2e1..b6cad9083 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine js +// +build purego appengine js // This file contains an implementation of proto field accesses using package reflect. 
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -38,32 +38,13 @@ package proto import ( - "math" "reflect" + "sync" ) -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} +const unsafeAllowed = false -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by the sequence of field indices // passed to reflect's FieldByIndex. type field []int @@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. var invalidField = field(nil) +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != nil } -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. 
-func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { v reflect.Value } -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} } -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} } -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { return p.v.IsNil() } -// Set sets p to point at a newly allocated word with bits set to x. 
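// Editor's note: a tiny illustration of the helper defined just below (values are
// hypothetical):
//
//	s := reflect.ValueOf(&[]int64{1, 2}).Elem() // addressable slice value
//	grow(s).SetInt(3)                           // s now holds [1 2 3]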
-func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. -type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. 
+func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() if n < m { - p.v.SetLen(n + 1) + s.SetLen(n + 1) } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) } -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) } - panic("unreachable") + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s } -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. 
-type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) return } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) } - panic("unreachable") + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) } -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return 
p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) } -// word64Val is like word32Val but for 64-bit values. -type word64Val struct { - v reflect.Value +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s } -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) return } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) } - panic("unreachable") + p.v.Elem().Set(s) } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct } -func (p word64Slice) Len() int { - return p.v.Len() +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v } -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} +var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index 6b5567d47..d55a335d9 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine,!js +// +build !purego,!appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -37,38 +37,13 @@ package proto import ( "reflect" + "sync/atomic" "unsafe" ) -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. +const unsafeAllowed = true -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by its byte offset from the start of the struct. 
type field uintptr @@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. const invalidField = ^field(0) +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { - return f != ^field(0) + return f != invalidField } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. 
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} } -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} } -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x +func (p pointer) isNil() bool { + return p.p == nil } -// Get gets the value pointed at by p. 
-func word32Val_Get(p word32Val) uint32 { - return *p +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) } -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. -type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v } -func word64_IsNil(p word64) bool { - return *p == nil +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) } -func word64_Get(p word64) uint64 { - return **p +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v } -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) } -// word64Val is like word32Val but for 64-bit values. 
-type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) } -func word64Val_Get(p word64Val) uint64 { - return *p +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v } -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. 
+func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) } diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index ec2289c00..1f272c06a 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -58,42 +58,6 @@ const ( WireFixed32 = 5 ) -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. 
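// Editor's note: the Properties machinery in the hunks below parses the struct tags
// emitted by protoc-gen-go; a hedged illustration of the tag format it consumes
// (message and field names are hypothetical):
//
//	type Example struct {
//		Name *string `protobuf:"bytes,1,opt,name=name,def=hello!"`
//		Id   *int32  `protobuf:"varint,2,req,name=id"`
//	}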
@@ -140,13 +104,6 @@ type StructProperties struct { decoderTags tagMap // map from proto tag to struct field number decoderOrigNames map[string]int // map from original name to struct field number order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type // OneofTypes contains information about the oneof fields in this message. // It is keyed by the original name of a field. @@ -182,41 +139,24 @@ type Properties struct { Repeated bool Packed bool // relevant for repeated primitives only Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only + proto3 bool // whether this is known to be a proto3 field oneof bool // whether this is a oneof field Default string // default value HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only mtype reflect.Type // set for map types only mkeyprop *Properties // set for map types only mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s = "," + s += "," s += strconv.Itoa(p.Tag) if p.Required { s += ",req" @@ -262,29 +202,14 @@ func (p *Properties) Parse(s string) { switch p.Wire { case "varint": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint case "fixed32": p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 case "fixed64": p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 case "zigzag32": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 case "zigzag64": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 case "bytes", "group": p.WireType = WireBytes // no numeric converter for non-numeric types @@ -299,6 +224,7 @@ func (p *Properties) Parse(s string) { return } +outer: for i := 2; i < len(fields); i++ { f := fields[i] switch { @@ -326,229 +252,28 @@ func (p *Properties) Parse(s string) { if i+1 < len(fields) { // Commas aren't escaped, and def is always last. p.Default += "," + strings.Join(fields[i+1:], ",") - break + break outer } } } } -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() -// Initialize the fields for encoding and decoding. 
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - +// setFieldProps initializes the field properties for submessages and maps. +func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: + if t1.Elem().Kind() == reflect.Struct { p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } } case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - 
p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } + if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { + p.stype = t2.Elem() } case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - p.mtype = t1 p.mkeyprop = &Properties{} p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) @@ -562,20 +287,6 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - if p.stype != nil { if lockGetProp { p.sprop = GetProperties(p.stype) @@ -586,32 +297,9 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock } var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + 
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() ) -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - // Init populates the properties from a protocol buffer struct tag. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { p.init(typ, name, tag, f, true) @@ -621,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF // "bytes,49,opt,def=hello!" p.Name = name p.OrigName = name - if f != nil { - p.field = toField(f) - } if tag == "" { return } p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) + p.setFieldProps(typ, f, lockGetProp) } var ( @@ -678,9 +363,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { propertiesMap[t] = prop // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) - prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) @@ -690,17 +372,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } oneof := f.Tag.Get("protobuf_oneof") // special case if oneof != "" { // Oneof fields don't use the traditional protobuf tag. @@ -715,9 +386,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } } // Re-order prop.order. @@ -728,8 +396,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t + _, _, _, oots = om.XXX_OneofFuncs() // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) @@ -779,30 +446,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { return prop } -// Return the Properties object for the x[0]'th field of the structure. 
-func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - // A global registry of enum types. // The generated code will register the generated maps by calling RegisterEnum. @@ -826,20 +469,42 @@ func EnumValueMap(enumType string) map[string]int32 { // A registry of all linked message types. // The string is a fully-qualified proto name ("pkg.Message"). var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) ) // RegisterType is called from generated code and maps from the fully qualified // proto name to the type (pointer to struct) of the protocol buffer. func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { + if _, ok := protoTypedNils[name]; ok { // TODO: Some day, make this a panic. log.Printf("proto: duplicate proto type registered: %s", name) return } t := reflect.TypeOf(x) - protoTypes[name] = t + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t revProtoTypes[t] = name } @@ -855,7 +520,14 @@ func MessageName(x Message) string { } // MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} // A registry of all linked proto files. var ( diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go new file mode 100644 index 000000000..9eda8459d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -0,0 +1,2736 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. 
+type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errreq error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + for _, f := range u.fields { + if f.required && errreq == nil { + if ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + errreq = &RequiredNotSetError{f.name} + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." 
+ f.name + err = fmt.Errorf("proto: string field %q contains invalid UTF-8", fullName) + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errreq +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.sizecache = invalidField + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + // get oneof implementers + var oneofImplementers []interface{} + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + u.v1extensions = toField(&f) + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. 
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" + tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. 
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. +func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + } + validateUTF8 = validateUTF8 && proto3 + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, 
appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 
+ tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s 
{ + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + 
SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
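Before makeMessageSliceMarshaler below, a brief aside on the wire layout the two single-value marshalers above produce: makeMessageMarshaler writes an embedded message as key varint, length varint, payload, while makeGroupMarshaler brackets the payload between a start-group key and an end-group key and needs no length prefix. A minimal standalone sketch of the length-delimited form (appendUvarint and appendEmbeddedField are local names for this sketch, not identifiers from this patch):

package main

import "fmt"

// appendUvarint is a local stand-in for the package's unexported appendVarint:
// base-128 varint, seven bits per byte, least-significant group first, high bit
// set on every byte except the last.
func appendUvarint(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return append(b, byte(v))
}

// appendEmbeddedField lays out a length-delimited submessage field the way
// makeMessageMarshaler above does: key varint, length varint, then the payload.
// A group field would instead be start-group key, payload, end-group key.
func appendEmbeddedField(b []byte, fieldNum int, payload []byte) []byte {
	key := uint64(fieldNum)<<3 | 2 // wire type 2 = length-delimited (WireBytes)
	b = appendUvarint(b, key)
	b = appendUvarint(b, uint64(len(payload)))
	return append(b, payload...)
}

func main() {
	// Field 3 with a 2-byte payload encodes as: 1a 02 de ad.
	fmt.Printf("% x\n", appendEmbeddedField(nil, 3, []byte{0xde, 0xad}))
}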
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if err != nil { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != nil && err != ErrNil { // allow nil value in map + return b, err + } + } + return b, nil + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. 
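Before the oneof marshaler below, a similar aside on makeMapMarshaler above: each map entry goes on the wire as its own length-delimited field under the map's field number, and its payload is a two-field message with the key under field number 1 and the value under field number 2 (the keyWireTag/valWireTag computation in the code above). A standalone sketch for one map<string,int32> entry, assuming a non-negative value (the real encoder sign-extends negative int32s to ten bytes); appendUvarint and appendMapEntry are local names, not part of this patch:

package main

import "fmt"

// appendUvarint: base-128 varint helper, local to this sketch.
func appendUvarint(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return append(b, byte(v))
}

// appendMapEntry encodes one map<string,int32> entry: the entry itself is a
// length-delimited field under mapFieldNum, whose payload holds the key as
// field 1 (wire type 2, string) and the value as field 2 (wire type 0, varint).
func appendMapEntry(b []byte, mapFieldNum int, key string, val int32) []byte {
	var entry []byte
	entry = appendUvarint(entry, 1<<3|2) // key: field 1, length-delimited
	entry = appendUvarint(entry, uint64(len(key)))
	entry = append(entry, key...)
	entry = appendUvarint(entry, 2<<3|0) // value: field 2, varint (non-negative assumed)
	entry = appendUvarint(entry, uint64(val))

	b = appendUvarint(b, uint64(mapFieldNum)<<3|2)
	b = appendUvarint(b, uint64(len(entry)))
	return append(b, entry...)
}

func main() {
	// Map field 4, entry "id" -> 7: 22 06 0a 02 69 64 10 07.
	fmt.Printf("% x\n", appendMapEntry(nil, 4, "id", 7))
}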
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if err != nil { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if err != nil { + return b, err + } + } + return b, nil +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. 
+// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, err := m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go new file mode 100644 index 000000000..5525def6a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_merge.go @@ -0,0 +1,654 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. 
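Stepping back briefly before the table-driven Merge implementation that follows: the Size, Marshal and (*Buffer).Marshal entry points defined above can be exercised directly. A rough usage sketch, using the Duration well-known type that this vendor update also brings in; it is illustrative only and not part of the patch:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	msg := &durpb.Duration{Seconds: 3} // proto3: a zero Nanos field is simply omitted

	// Size and Marshal dispatch to the generated XXX_Size/XXX_Marshal fast path
	// when it is present, falling back to the reflection tables otherwise.
	fmt.Println(proto.Size(msg)) // 2: field-1 key byte plus the varint 3

	b, err := proto.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // 08 03

	// The Buffer entry point appends into a reusable internal slice.
	var buf proto.Buffer
	if err := buf.Marshal(msg); err != nil {
		panic(err)
	}
	fmt.Println(len(buf.Bytes())) // 2
}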
+func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) 
+ } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("message field %s without pointer", tf)) + case isSlice: // E.g., []*pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mi.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mi.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? 
+ mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go new file mode 100644 index 000000000..90ec6c215 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2048 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. 
+// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. + err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1< 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. + rnse = r + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + if err == errInvalidUTF8 { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + err = fmt.Errorf("proto: string field %q contains invalid UTF-8", fullName) + } + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. + if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. + // maybe in extensions, maybe in the unrecognized field. + z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + panic("no extensions field available") + } + } + + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if rnse != nil { + // A required field of a submessage/group is missing. 
Return that error. + return rnse + } + if reqMask != u.reqMask { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + return &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return nil +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + u.oldExtensions = toField(&f) + continue + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? 
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + if fn.IsValid() { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + } + } + + // Get extension ranges, if any. + fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. + u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0, "") + + // Set mask for required field check. + u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1 + + atomic.StoreInt32(&u.initialized, 1) +} + +// setTag stores the unmarshal information for the given tag. +// tag = tag # for field +// field/unmarshal = unmarshal info for that field. +// reqMask = if required, bitmask for field position in required field list. 0 otherwise. +// name = short name of the field. +func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) { + i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name} + n := u.typ.NumField() + if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? + for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. +func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + // We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
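The per-field unmarshalers that follow all lean on two primitives: the base-128 varint decoder (decodeVarint, defined elsewhere in this package) and, for sint32/sint64 fields, the zigzag inverse int64(x>>1) ^ int64(x)<<63>>63. A minimal standalone sketch of both, with simplified truncation/overflow handling; decodeUvarint and unzigzag64 are local names, not this package's API:

package main

import "fmt"

// decodeUvarint is a local stand-in for the package's unexported decodeVarint:
// it returns the decoded value and the number of bytes consumed, with 0 bytes
// signalling truncated input.
func decodeUvarint(b []byte) (uint64, int) {
	var x uint64
	for i := 0; i < len(b) && i < 10; i++ {
		x |= uint64(b[i]&0x7f) << uint(7*i)
		if b[i] < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

// unzigzag64 undoes zigzag encoding with the same expression the sint64
// unmarshalers below use.
func unzigzag64(x uint64) int64 {
	return int64(x>>1) ^ int64(x)<<63>>63
}

func main() {
	// zigzag(-3) == 5, which is the single varint byte 05.
	v, n := decodeUvarint([]byte{0x05})
	fmt.Println(unzigzag64(v), n) // -3 1
}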
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toString() = v + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + 
return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if err == nil { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nil + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if err != nil { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nil + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
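// Editor's note: the comment lines below are an illustrative aside, not part of the
// vendored file. skipField dispatches on the low three bits of a field tag, which is
// the protobuf wire type (0 varint, 1 fixed64, 2 length-delimited, 3/4 start/end
// group, 5 fixed32). A hedged usage sketch, assuming access to the unexported
// helpers in this file and a freshly decoded tag:
//
//	tag, n := decodeVarint(b)                 // field number<<3 | wire type
//	rest, err := skipField(b[n:], int(tag&7)) // drop this field's payload, keep the rest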
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
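// Editor's note: illustrative example, not part of the vendored file. encodeVarint
// and decodeVarint implement the standard base-128 varint encoding: each byte
// carries seven payload bits and the high bit marks continuation. For instance:
//
//	b := encodeVarint(nil, 300) // []byte{0xac, 0x02}: 300&0x7f|0x80, then 300>>7 == 2
//	x, n := decodeVarint(b)     // x == 300, n == 2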
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) <= 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go index 965876bf0..2205fdaad 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -50,7 +50,6 @@ import ( var ( newline = []byte("\n") spaces = []byte(" ") - gtNewline = []byte(">\n") endBraceNewline = []byte("}\n") backslashN = []byte{'\\', 'n'} backslashR = []byte{'\\', 'r'} @@ -170,11 +169,6 @@ func writeName(w *textWriter, props *Properties) error { return nil } -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - func requiresQuotes(u string) bool { // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. for _, ch := range u { @@ -269,6 +263,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { props := sprops.Prop[i] name := st.Field(i).Name + if name == "XXX_NoUnkeyedLiteral" { + continue + } + if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte @@ -436,12 +434,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } // Enums have a String method, so writeAny will work fine. if err := tm.writeAny(w, fv, props); err != nil { @@ -455,7 +447,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { // Extensions (the XXX_extensions field). pv := sv.Addr() - if _, ok := extendable(pv.Interface()); ok { + if _, err := extendable(pv.Interface()); err == nil { if err := tm.writeExtensions(w, pv); err != nil { return err } @@ -464,27 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } -// writeRaw writes an uninterpreted raw message. 
-func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - // writeAny writes an arbitrary field. func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -535,6 +506,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert } } w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. + v = v.Addr() + } if etm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := etm.MarshalText() if err != nil { @@ -543,8 +527,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert if _, err = w.Write(text); err != nil { return err } - } else if err := tm.writeStruct(w, v); err != nil { - return err + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } } w.unindent() if err := w.WriteByte(ket); err != nil { diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 61f83c1e1..0685bae36 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -206,7 +206,6 @@ func (p *textParser) advance() { var ( errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") ) func unquoteC(s string, quote rune) (string, error) { @@ -277,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) { return "?", s, nil // trigraph workaround case '\'', '"', '\\': return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + case '0', '1', '2', '3', '4', '5', '6', '7': if len(s) < 2 { return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) } - base := 8 - ss := s[:2] + ss := string(r) + s[:2] s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) + i, err := strconv.ParseUint(ss, 8, 8) if err != nil { - return "", "", err + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) } return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': n = 8 } if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) } + ss := s[:n] s = s[n:] - return string(bs), s, nil + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal 
digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - // Back off the parser by one token. Can only be done between calls to next(). // It makes the next advance() a no-op. func (p *textParser) back() { p.backed = true } @@ -728,6 +714,9 @@ func (p *textParser) consumeExtName() (string, error) { if tok.err != nil { return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } } return strings.Join(parts, ""), nil } @@ -883,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { // UnmarshalText returns *RequiredNotSetError. func UnmarshalText(s string, pb Message) error { if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err + return um.UnmarshalText([]byte(s)) } pb.Reset() v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil + return newTextParser(s).readStruct(v.Elem(), "") } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile deleted file mode 100644 index 41a2d04d0..000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile +++ /dev/null @@ -1,36 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/ -# at src/google/protobuf/descriptor.proto -regenerate: - @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION - protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go index 63cf2c80a..e855b1f5c 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -1,35 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/descriptor.proto -/* -Package descriptor is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/descriptor.proto - -It has these top-level messages: - FileDescriptorSet - FileDescriptorProto - DescriptorProto - FieldDescriptorProto - OneofDescriptorProto - EnumDescriptorProto - EnumValueDescriptorProto - ServiceDescriptorProto - MethodDescriptorProto - FileOptions - MessageOptions - FieldOptions - OneofOptions - EnumOptions - EnumValueOptions - ServiceOptions - MethodOptions - UninterpretedOption - SourceCodeInfo - GeneratedCodeInfo -*/ -package descriptor +package descriptor // import "github.com/golang/protobuf/protoc-gen-go/descriptor" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -137,7 +109,9 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { *x = FieldDescriptorProto_Type(value) return nil } -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} } +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 0} +} type FieldDescriptorProto_Label int32 @@ -176,7 +150,7 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{3, 1} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 1} } // Generated classes can be optimized for speed or code size. 
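Editor's note: the regenerated descriptor code in the hunks that follow adds the same boilerplate to every message: XXX_NoUnkeyedLiteral and XXX_sizecache fields plus XXX_Unmarshal, XXX_Marshal, XXX_Merge, XXX_Size, and XXX_DiscardUnknown methods that forward to a shared proto.InternalMessageInfo value, which caches the table-driven codec added in table_marshal.go and table_unmarshal.go. A minimal, hedged sketch of that pattern, using a hypothetical Example message in place of the generated types:

	package descriptorsketch // hypothetical package for illustration only

	import "github.com/golang/protobuf/proto"

	var xxx_messageInfo_Example proto.InternalMessageInfo

	type Example struct {
		Name                 *string  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
		XXX_NoUnkeyedLiteral struct{} `json:"-"`
		XXX_unrecognized     []byte   `json:"-"`
		XXX_sizecache        int32    `json:"-"`
	}

	func (m *Example) Reset()         { *m = Example{} }
	func (m *Example) String() string { return proto.CompactTextString(m) }
	func (*Example) ProtoMessage()    {}
	// Wrapper methods delegate to the shared InternalMessageInfo, mirroring the
	// generated code shown in the hunks below.
	func (m *Example) XXX_Unmarshal(b []byte) error {
		return xxx_messageInfo_Example.Unmarshal(m, b)
	}
	func (m *Example) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
		return xxx_messageInfo_Example.Marshal(b, m, deterministic)
	}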
@@ -216,7 +190,9 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { *x = FileOptions_OptimizeMode(value) return nil } -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} } +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10, 0} +} type FieldOptions_CType int32 @@ -254,7 +230,9 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { *x = FieldOptions_CType(value) return nil } -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 0} } +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 0} +} type FieldOptions_JSType int32 @@ -294,7 +272,9 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { *x = FieldOptions_JSType(value) return nil } -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 1} } +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 1} +} // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, // or neither? HTTP based RPC implementation may choose GET verb for safe @@ -335,20 +315,41 @@ func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { return nil } func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{16, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(dst, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { if m != nil { @@ -381,14 +382,35 @@ type FileDescriptorProto struct { SourceCodeInfo *SourceCodeInfo 
`protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` // The syntax of the proto file. // The supported values are "proto2" and "proto3". - Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_unrecognized []byte `json:"-"` + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(dst, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo func (m *FileDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -487,14 +509,35 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. 
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_unrecognized []byte `json:"-"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(dst, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo func (m *DescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -567,17 +610,37 @@ func (m *DescriptorProto) GetReservedName() []string { } type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{2, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 0} } +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(dst, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var 
xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo func (m *DescriptorProto_ExtensionRange) GetStart() int32 { if m != nil && m.Start != nil { @@ -593,21 +656,47 @@ func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { return 0 } +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + // Range of reserved tag numbers. Reserved tag numbers may not be used by // fields or extension ranges in the same message. Reserved ranges may // not overlap. type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{2, 1} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 1} } +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(dst, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo func (m *DescriptorProto_ReservedRange) GetStart() int32 { if m != nil && m.Start != nil { @@ -623,6 +712,54 @@ func (m *DescriptorProto_ReservedRange) GetEnd() int32 { return 0 } +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (dst *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(dst, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + // Describes a field within a message. type FieldDescriptorProto struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -653,15 +790,36 @@ type FieldDescriptorProto struct { // user has set a "json_name" option on this field, that option's value // will be used. Otherwise, it's deduced from the field's name by converting // it to camelCase. 
- JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(dst, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo func (m *FieldDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -735,15 +893,36 @@ func (m *FieldDescriptorProto) GetOptions() *FieldOptions { // Describes a oneof. 
type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(dst, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo func (m *OneofDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -761,16 +940,44 @@ func (m *OneofDescriptorProto) GetOptions() *OneofOptions { // Describes an enum type. type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(dst, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo func (m *EnumDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -793,18 +1000,105 @@ func (m *EnumDescriptorProto) GetOptions() *EnumOptions { return nil } -// Describes a value within an enum. -type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil } -func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } -func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(dst, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
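The hunk above introduces the nested EnumDescriptorProto_EnumReservedRange type together with the new ReservedRange and ReservedName fields on EnumDescriptorProto. A minimal sketch of how a caller might populate them follows; it is illustrative only, not part of the vendored file, and the enum name "Color" and the reserved values are invented for the example.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	enum := &descriptor.EnumDescriptorProto{
		Name: proto.String("Color"),
		Value: []*descriptor.EnumValueDescriptorProto{
			{Name: proto.String("COLOR_UNSPECIFIED"), Number: proto.Int32(0)},
		},
		// Reserve numbers 2 through 5; unlike DescriptorProto.ReservedRange,
		// this range is inclusive at both ends, per the comment above.
		ReservedRange: []*descriptor.EnumDescriptorProto_EnumReservedRange{
			{Start: proto.Int32(2), End: proto.Int32(5)},
		},
		// Reserve a retired value name so it cannot be reused.
		ReservedName: []string{"COLOR_MAGENTA"},
	}
	fmt.Println(enum.GetReservedRange()[0].GetStart(), enum.GetReservedName())
}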
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(dst, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo func (m *EnumValueDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -829,16 +1123,37 @@ func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { // Describes a service. type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(dst, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + 
return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo func (m *ServiceDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -872,14 +1187,35 @@ type MethodDescriptorProto struct { // Identifies if client streams multiple client messages ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_unrecognized []byte `json:"-"` + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(dst, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo const Default_MethodDescriptorProto_ClientStreaming bool = false const Default_MethodDescriptorProto_ServerStreaming bool = false @@ -946,7 +1282,7 @@ type FileOptions struct { // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 // byte sequence to a string field. 
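The regenerated messages in this hunk all gain XXX_Unmarshal, XXX_Marshal, XXX_Merge, XXX_Size and XXX_DiscardUnknown methods backed by a package-level proto.InternalMessageInfo value, which is how the table-driven marshaler in this protobuf release is wired in. Below is a minimal sketch, assuming the vendored package is imported under the alias descriptor and using invented field values, of how callers continue to go through the ordinary proto entry points.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	fd := &descriptor.FieldDescriptorProto{
		Name:     proto.String("sample_field"),
		Number:   proto.Int32(1),
		Label:    descriptor.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
		Type:     descriptor.FieldDescriptorProto_TYPE_STRING.Enum(),
		JsonName: proto.String("sampleField"),
	}

	// proto.Marshal detects the new XXX_Marshal method and routes encoding
	// through the xxx_messageInfo_FieldDescriptorProto table declared above.
	raw, err := proto.Marshal(fd)
	if err != nil {
		log.Fatal(err)
	}

	var decoded descriptor.FieldDescriptorProto
	if err := proto.Unmarshal(raw, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetName(), decoded.GetNumber(), decoded.GetJsonName())
}

Callers should be unaffected by the regeneration: the exported getters and the proto.Marshal/proto.Unmarshal entry points stay the same, only the internal dispatch changes.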
@@ -974,6 +1310,7 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -995,24 +1332,50 @@ type FileOptions struct { // Sets the php class prefix which is prepended to all php generated classes // from this .proto. Default is empty. PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` - // The parser stores options it doesn't recognize here. See above. + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10} +} var extRange_FileOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FileOptions } +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (dst *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(dst, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo const Default_FileOptions_JavaMultipleFiles bool = false const Default_FileOptions_JavaStringCheckUtf8 bool = false @@ -1020,6 +1383,7 @@ const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPE const Default_FileOptions_CcGenericServices bool = false const 
Default_FileOptions_JavaGenericServices bool = false const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false const Default_FileOptions_Deprecated bool = false const Default_FileOptions_CcEnableArenas bool = false @@ -1044,6 +1408,7 @@ func (m *FileOptions) GetJavaMultipleFiles() bool { return Default_FileOptions_JavaMultipleFiles } +// Deprecated: Do not use. func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { if m != nil && m.JavaGenerateEqualsAndHash != nil { return *m.JavaGenerateEqualsAndHash @@ -1093,6 +1458,13 @@ func (m *FileOptions) GetPyGenericServices() bool { return Default_FileOptions_PyGenericServices } +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + func (m *FileOptions) GetDeprecated() bool { if m != nil && m.Deprecated != nil { return *m.Deprecated @@ -1135,6 +1507,13 @@ func (m *FileOptions) GetPhpClassPrefix() string { return "" } +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { return m.UninterpretedOption @@ -1195,22 +1574,43 @@ type MessageOptions struct { MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{11} +} var extRange_MessageOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MessageOptions } +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (dst *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(dst, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo const Default_MessageOptions_MessageSetWireFormat bool = false const Default_MessageOptions_NoStandardDescriptorAccessor bool = false @@ -1265,13 +1665,15 @@ type FieldOptions struct { Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` // The 
jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). By default these types are - // represented as JavaScript strings. This avoids loss of precision that can - // happen when a large value is converted to a floating point JavaScript - // numbers. Specifying JS_NUMBER for the jstype causes the generated - // JavaScript code to use the JavaScript "number" type instead of strings. - // This option is an enum to permit additional types to be added, - // e.g. goog.math.Integer. + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` // Should this field be parsed lazily? Lazy applies only to message-type // fields. It means that when the outer message is initially parsed, the @@ -1311,22 +1713,43 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12} +} var extRange_FieldOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FieldOptions } +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (dst *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(dst, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL @@ -1386,22 +1809,43 @@ func (m *FieldOptions) 
GetUninterpretedOption() []*UninterpretedOption { type OneofOptions struct { // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{13} +} var extRange_OneofOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_OneofOptions } +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (dst *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(dst, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { @@ -1421,22 +1865,43 @@ type EnumOptions struct { Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{14} +} var extRange_EnumOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumOptions } +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (dst *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(dst, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo const Default_EnumOptions_Deprecated bool = false @@ -1469,22 +1934,43 @@ type EnumValueOptions struct { Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{15} +} var extRange_EnumValueOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumValueOptions } +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (dst *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(dst, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo const Default_EnumValueOptions_Deprecated bool = false @@ -1510,22 +1996,43 @@ type ServiceOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{16} +} var extRange_ServiceOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ServiceOptions } +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (dst *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(dst, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo const Default_ServiceOptions_Deprecated bool = false @@ -1552,22 +2059,43 @@ type MethodOptions struct { IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17} +} var extRange_MethodOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MethodOptions } +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (dst *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(dst, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo const Default_MethodOptions_Deprecated bool = false const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN @@ -1603,19 +2131,40 @@ type UninterpretedOption struct { Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. 
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_unrecognized []byte `json:"-"` + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(dst, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { if m != nil { @@ -1672,17 +2221,36 @@ func (m *UninterpretedOption) GetAggregateValue() string { // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents // "foo.(bar.baz).qux". 
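The comment directly above describes how an option name is encoded as NamePart segments. The sketch below (illustrative only, not part of the vendored file, with an arbitrary identifier value) builds the "foo.(bar.baz).qux" example from that comment and reassembles the dotted form.

package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	opt := &descriptor.UninterpretedOption{
		Name: []*descriptor.UninterpretedOption_NamePart{
			{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
			{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
			{NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
		},
		IdentifierValue: proto.String("EXAMPLE"),
	}

	// Extension segments are printed in parentheses, matching .proto syntax.
	var parts []string
	for _, p := range opt.GetName() {
		if p.GetIsExtension() {
			parts = append(parts, "("+p.GetNamePart()+")")
		} else {
			parts = append(parts, p.GetNamePart())
		}
	}
	fmt.Println(strings.Join(parts, ".")) // prints: foo.(bar.baz).qux
}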
type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_unrecognized []byte `json:"-"` + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{17, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18, 0} } +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(dst, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo func (m *UninterpretedOption_NamePart) GetNamePart() string { if m != nil && m.NamePart != nil { @@ -1744,14 +2312,35 @@ type SourceCodeInfo struct { // - Code which tries to interpret locations should probably be designed to // ignore those that it doesn't understand, as more types of locations could // be recorded in the future. 
- Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_unrecognized []byte `json:"-"` + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(dst, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { if m != nil { @@ -1841,13 +2430,34 @@ type SourceCodeInfo_Location struct { LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } -func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18, 0} } +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(dst, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + 
xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo func (m *SourceCodeInfo_Location) GetPath() []int32 { if m != nil { @@ -1890,14 +2500,35 @@ func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { type GeneratedCodeInfo struct { // An Annotation connects some span of text in generated code to an element // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_unrecognized []byte `json:"-"` + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(dst, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { if m != nil { @@ -1918,16 +2549,35 @@ type GeneratedCodeInfo_Annotation struct { // Identifies the ending offset in bytes in the generated code that // relates to the identified offset. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{19, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20, 0} } +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(dst, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { if m != nil { @@ -1963,9 +2613,11 @@ func init() { proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") @@ -1991,162 +2643,170 @@ func init() { proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) } -func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 2460 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x5b, 0x6f, 0xdb, 0xc8, - 0x15, 0x5e, 0x5d, 0x2d, 0x1d, 0xc9, 0xf2, 0x78, 0xec, 0x4d, 0x18, 0xef, 0x25, 0x8e, 0xf6, 0x12, - 0x6f, 0xd2, 0xc8, 0x0b, 0xe7, 0xb2, 0x59, 0xa7, 0x48, 0x21, 0x4b, 0x8c, 0x57, 0xa9, 0x2c, 0xa9, - 0x94, 0xdc, 0x4d, 0xf6, 0x85, 0x18, 0x93, 0x23, 0x99, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, 0xf7, - 0x29, 0x40, 0x9f, 0x0a, 0xf4, 0x07, 0x14, 0x45, 0xd1, 0x87, 0x7d, 0x59, 0xa0, 0x3f, 0xa0, 0xcf, - 0xfd, 0x05, 0x05, 0xf6, 0xb9, 0x2f, 
0x45, 0x51, 0xa0, 0xfd, 0x07, 0x7d, 0x2d, 0x66, 0x86, 0xa4, - 0x48, 0x5d, 0x12, 0x77, 0x81, 0xec, 0x3e, 0xd9, 0x73, 0xce, 0x77, 0x0e, 0xcf, 0x9c, 0xf9, 0x66, - 0xce, 0x99, 0x11, 0x6c, 0x8f, 0x6c, 0x7b, 0x64, 0xd2, 0x5d, 0xc7, 0xb5, 0x7d, 0xfb, 0x64, 0x32, - 0xdc, 0xd5, 0xa9, 0xa7, 0xb9, 0x86, 0xe3, 0xdb, 0x6e, 0x8d, 0xcb, 0xf0, 0x9a, 0x40, 0xd4, 0x42, - 0x44, 0xf5, 0x08, 0xd6, 0x1f, 0x18, 0x26, 0x6d, 0x46, 0xc0, 0x3e, 0xf5, 0xf1, 0x5d, 0xc8, 0x0e, - 0x0d, 0x93, 0x4a, 0xa9, 0xed, 0xcc, 0x4e, 0x69, 0xef, 0xc3, 0xda, 0x8c, 0x51, 0x2d, 0x69, 0xd1, - 0x63, 0x62, 0x85, 0x5b, 0x54, 0xff, 0x95, 0x85, 0x8d, 0x05, 0x5a, 0x8c, 0x21, 0x6b, 0x91, 0x31, - 0xf3, 0x98, 0xda, 0x29, 0x2a, 0xfc, 0x7f, 0x2c, 0xc1, 0x8a, 0x43, 0xb4, 0xa7, 0x64, 0x44, 0xa5, - 0x34, 0x17, 0x87, 0x43, 0xfc, 0x3e, 0x80, 0x4e, 0x1d, 0x6a, 0xe9, 0xd4, 0xd2, 0xce, 0xa4, 0xcc, - 0x76, 0x66, 0xa7, 0xa8, 0xc4, 0x24, 0xf8, 0x3a, 0xac, 0x3b, 0x93, 0x13, 0xd3, 0xd0, 0xd4, 0x18, - 0x0c, 0xb6, 0x33, 0x3b, 0x39, 0x05, 0x09, 0x45, 0x73, 0x0a, 0xbe, 0x0a, 0x6b, 0xcf, 0x29, 0x79, - 0x1a, 0x87, 0x96, 0x38, 0xb4, 0xc2, 0xc4, 0x31, 0x60, 0x03, 0xca, 0x63, 0xea, 0x79, 0x64, 0x44, - 0x55, 0xff, 0xcc, 0xa1, 0x52, 0x96, 0xcf, 0x7e, 0x7b, 0x6e, 0xf6, 0xb3, 0x33, 0x2f, 0x05, 0x56, - 0x83, 0x33, 0x87, 0xe2, 0x3a, 0x14, 0xa9, 0x35, 0x19, 0x0b, 0x0f, 0xb9, 0x25, 0xf9, 0x93, 0xad, - 0xc9, 0x78, 0xd6, 0x4b, 0x81, 0x99, 0x05, 0x2e, 0x56, 0x3c, 0xea, 0x3e, 0x33, 0x34, 0x2a, 0xe5, - 0xb9, 0x83, 0xab, 0x73, 0x0e, 0xfa, 0x42, 0x3f, 0xeb, 0x23, 0xb4, 0xc3, 0x0d, 0x28, 0xd2, 0x17, - 0x3e, 0xb5, 0x3c, 0xc3, 0xb6, 0xa4, 0x15, 0xee, 0xe4, 0xa3, 0x05, 0xab, 0x48, 0x4d, 0x7d, 0xd6, - 0xc5, 0xd4, 0x0e, 0xdf, 0x81, 0x15, 0xdb, 0xf1, 0x0d, 0xdb, 0xf2, 0xa4, 0xc2, 0x76, 0x6a, 0xa7, - 0xb4, 0xf7, 0xee, 0x42, 0x22, 0x74, 0x05, 0x46, 0x09, 0xc1, 0xb8, 0x05, 0xc8, 0xb3, 0x27, 0xae, - 0x46, 0x55, 0xcd, 0xd6, 0xa9, 0x6a, 0x58, 0x43, 0x5b, 0x2a, 0x72, 0x07, 0x97, 0xe7, 0x27, 0xc2, - 0x81, 0x0d, 0x5b, 0xa7, 0x2d, 0x6b, 0x68, 0x2b, 0x15, 0x2f, 0x31, 0xc6, 0x17, 0x20, 0xef, 0x9d, - 0x59, 0x3e, 0x79, 0x21, 0x95, 0x39, 0x43, 0x82, 0x51, 0xf5, 0xbf, 0x39, 0x58, 0x3b, 0x0f, 0xc5, - 0xee, 0x41, 0x6e, 0xc8, 0x66, 0x29, 0xa5, 0xff, 0x9f, 0x1c, 0x08, 0x9b, 0x64, 0x12, 0xf3, 0x3f, - 0x30, 0x89, 0x75, 0x28, 0x59, 0xd4, 0xf3, 0xa9, 0x2e, 0x18, 0x91, 0x39, 0x27, 0xa7, 0x40, 0x18, - 0xcd, 0x53, 0x2a, 0xfb, 0x83, 0x28, 0xf5, 0x08, 0xd6, 0xa2, 0x90, 0x54, 0x97, 0x58, 0xa3, 0x90, - 0x9b, 0xbb, 0xaf, 0x8b, 0xa4, 0x26, 0x87, 0x76, 0x0a, 0x33, 0x53, 0x2a, 0x34, 0x31, 0xc6, 0x4d, - 0x00, 0xdb, 0xa2, 0xf6, 0x50, 0xd5, 0xa9, 0x66, 0x4a, 0x85, 0x25, 0x59, 0xea, 0x32, 0xc8, 0x5c, - 0x96, 0x6c, 0x21, 0xd5, 0x4c, 0xfc, 0xf9, 0x94, 0x6a, 0x2b, 0x4b, 0x98, 0x72, 0x24, 0x36, 0xd9, - 0x1c, 0xdb, 0x8e, 0xa1, 0xe2, 0x52, 0xc6, 0x7b, 0xaa, 0x07, 0x33, 0x2b, 0xf2, 0x20, 0x6a, 0xaf, - 0x9d, 0x99, 0x12, 0x98, 0x89, 0x89, 0xad, 0xba, 0xf1, 0x21, 0xfe, 0x00, 0x22, 0x81, 0xca, 0x69, - 0x05, 0xfc, 0x14, 0x2a, 0x87, 0xc2, 0x0e, 0x19, 0xd3, 0xad, 0xbb, 0x50, 0x49, 0xa6, 0x07, 0x6f, - 0x42, 0xce, 0xf3, 0x89, 0xeb, 0x73, 0x16, 0xe6, 0x14, 0x31, 0xc0, 0x08, 0x32, 0xd4, 0xd2, 0xf9, - 0x29, 0x97, 0x53, 0xd8, 0xbf, 0x5b, 0x9f, 0xc1, 0x6a, 0xe2, 0xf3, 0xe7, 0x35, 0xac, 0xfe, 0x3e, - 0x0f, 0x9b, 0x8b, 0x38, 0xb7, 0x90, 0xfe, 0x17, 0x20, 0x6f, 0x4d, 0xc6, 0x27, 0xd4, 0x95, 0x32, - 0xdc, 0x43, 0x30, 0xc2, 0x75, 0xc8, 0x99, 0xe4, 0x84, 0x9a, 0x52, 0x76, 0x3b, 0xb5, 0x53, 0xd9, - 0xbb, 0x7e, 0x2e, 0x56, 0xd7, 0xda, 0xcc, 0x44, 0x11, 0x96, 0xf8, 0x3e, 0x64, 0x83, 0x23, 0x8e, - 0x79, 0xb8, 0x76, 0x3e, 0x0f, 0x8c, 0x8b, 0x0a, 0xb7, 0xc3, 
0xef, 0x40, 0x91, 0xfd, 0x15, 0xb9, - 0xcd, 0xf3, 0x98, 0x0b, 0x4c, 0xc0, 0xf2, 0x8a, 0xb7, 0xa0, 0xc0, 0x69, 0xa6, 0xd3, 0xb0, 0x34, - 0x44, 0x63, 0xb6, 0x30, 0x3a, 0x1d, 0x92, 0x89, 0xe9, 0xab, 0xcf, 0x88, 0x39, 0xa1, 0x9c, 0x30, - 0x45, 0xa5, 0x1c, 0x08, 0x7f, 0xcd, 0x64, 0xf8, 0x32, 0x94, 0x04, 0x2b, 0x0d, 0x4b, 0xa7, 0x2f, - 0xf8, 0xe9, 0x93, 0x53, 0x04, 0x51, 0x5b, 0x4c, 0xc2, 0x3e, 0xff, 0xc4, 0xb3, 0xad, 0x70, 0x69, - 0xf9, 0x27, 0x98, 0x80, 0x7f, 0xfe, 0xb3, 0xd9, 0x83, 0xef, 0xbd, 0xc5, 0xd3, 0x9b, 0xe5, 0x62, - 0xf5, 0x2f, 0x69, 0xc8, 0xf2, 0xfd, 0xb6, 0x06, 0xa5, 0xc1, 0xe3, 0x9e, 0xac, 0x36, 0xbb, 0xc7, - 0x07, 0x6d, 0x19, 0xa5, 0x70, 0x05, 0x80, 0x0b, 0x1e, 0xb4, 0xbb, 0xf5, 0x01, 0x4a, 0x47, 0xe3, - 0x56, 0x67, 0x70, 0xe7, 0x16, 0xca, 0x44, 0x06, 0xc7, 0x42, 0x90, 0x8d, 0x03, 0x6e, 0xee, 0xa1, - 0x1c, 0x46, 0x50, 0x16, 0x0e, 0x5a, 0x8f, 0xe4, 0xe6, 0x9d, 0x5b, 0x28, 0x9f, 0x94, 0xdc, 0xdc, - 0x43, 0x2b, 0x78, 0x15, 0x8a, 0x5c, 0x72, 0xd0, 0xed, 0xb6, 0x51, 0x21, 0xf2, 0xd9, 0x1f, 0x28, - 0xad, 0xce, 0x21, 0x2a, 0x46, 0x3e, 0x0f, 0x95, 0xee, 0x71, 0x0f, 0x41, 0xe4, 0xe1, 0x48, 0xee, - 0xf7, 0xeb, 0x87, 0x32, 0x2a, 0x45, 0x88, 0x83, 0xc7, 0x03, 0xb9, 0x8f, 0xca, 0x89, 0xb0, 0x6e, - 0xee, 0xa1, 0xd5, 0xe8, 0x13, 0x72, 0xe7, 0xf8, 0x08, 0x55, 0xf0, 0x3a, 0xac, 0x8a, 0x4f, 0x84, - 0x41, 0xac, 0xcd, 0x88, 0xee, 0xdc, 0x42, 0x68, 0x1a, 0x88, 0xf0, 0xb2, 0x9e, 0x10, 0xdc, 0xb9, - 0x85, 0x70, 0xb5, 0x01, 0x39, 0xce, 0x2e, 0x8c, 0xa1, 0xd2, 0xae, 0x1f, 0xc8, 0x6d, 0xb5, 0xdb, - 0x1b, 0xb4, 0xba, 0x9d, 0x7a, 0x1b, 0xa5, 0xa6, 0x32, 0x45, 0xfe, 0xd5, 0x71, 0x4b, 0x91, 0x9b, - 0x28, 0x1d, 0x97, 0xf5, 0xe4, 0xfa, 0x40, 0x6e, 0xa2, 0x4c, 0x55, 0x83, 0xcd, 0x45, 0xe7, 0xcc, - 0xc2, 0x9d, 0x11, 0x5b, 0xe2, 0xf4, 0x92, 0x25, 0xe6, 0xbe, 0xe6, 0x96, 0xf8, 0xdb, 0x14, 0x6c, - 0x2c, 0x38, 0x6b, 0x17, 0x7e, 0xe4, 0x17, 0x90, 0x13, 0x14, 0x15, 0xd5, 0xe7, 0x93, 0x85, 0x87, - 0x36, 0x27, 0xec, 0x5c, 0x05, 0xe2, 0x76, 0xf1, 0x0a, 0x9c, 0x59, 0x52, 0x81, 0x99, 0x8b, 0xb9, - 0x20, 0x7f, 0x93, 0x02, 0x69, 0x99, 0xef, 0xd7, 0x1c, 0x14, 0xe9, 0xc4, 0x41, 0x71, 0x6f, 0x36, - 0x80, 0x2b, 0xcb, 0xe7, 0x30, 0x17, 0xc5, 0x77, 0x29, 0xb8, 0xb0, 0xb8, 0x51, 0x59, 0x18, 0xc3, - 0x7d, 0xc8, 0x8f, 0xa9, 0x7f, 0x6a, 0x87, 0xc5, 0xfa, 0xe3, 0x05, 0x25, 0x80, 0xa9, 0x67, 0x73, - 0x15, 0x58, 0xc5, 0x6b, 0x48, 0x66, 0x59, 0xb7, 0x21, 0xa2, 0x99, 0x8b, 0xf4, 0xb7, 0x69, 0x78, - 0x7b, 0xa1, 0xf3, 0x85, 0x81, 0xbe, 0x07, 0x60, 0x58, 0xce, 0xc4, 0x17, 0x05, 0x59, 0x9c, 0x4f, - 0x45, 0x2e, 0xe1, 0x7b, 0x9f, 0x9d, 0x3d, 0x13, 0x3f, 0xd2, 0x67, 0xb8, 0x1e, 0x84, 0x88, 0x03, - 0xee, 0x4e, 0x03, 0xcd, 0xf2, 0x40, 0xdf, 0x5f, 0x32, 0xd3, 0xb9, 0x5a, 0xf7, 0x29, 0x20, 0xcd, - 0x34, 0xa8, 0xe5, 0xab, 0x9e, 0xef, 0x52, 0x32, 0x36, 0xac, 0x11, 0x3f, 0x80, 0x0b, 0xfb, 0xb9, - 0x21, 0x31, 0x3d, 0xaa, 0xac, 0x09, 0x75, 0x3f, 0xd4, 0x32, 0x0b, 0x5e, 0x65, 0xdc, 0x98, 0x45, - 0x3e, 0x61, 0x21, 0xd4, 0x91, 0x45, 0xf5, 0xef, 0x2b, 0x50, 0x8a, 0xb5, 0x75, 0xf8, 0x0a, 0x94, - 0x9f, 0x90, 0x67, 0x44, 0x0d, 0x5b, 0x75, 0x91, 0x89, 0x12, 0x93, 0xf5, 0x82, 0x76, 0xfd, 0x53, - 0xd8, 0xe4, 0x10, 0x7b, 0xe2, 0x53, 0x57, 0xd5, 0x4c, 0xe2, 0x79, 0x3c, 0x69, 0x05, 0x0e, 0xc5, - 0x4c, 0xd7, 0x65, 0xaa, 0x46, 0xa8, 0xc1, 0xb7, 0x61, 0x83, 0x5b, 0x8c, 0x27, 0xa6, 0x6f, 0x38, - 0x26, 0x55, 0xd9, 0xe5, 0xc1, 0xe3, 0x07, 0x71, 0x14, 0xd9, 0x3a, 0x43, 0x1c, 0x05, 0x00, 0x16, - 0x91, 0x87, 0x9b, 0xf0, 0x1e, 0x37, 0x1b, 0x51, 0x8b, 0xba, 0xc4, 0xa7, 0x2a, 0xfd, 0x7a, 0x42, - 0x4c, 0x4f, 0x25, 0x96, 0xae, 0x9e, 0x12, 0xef, 0x54, 0xda, 0x64, 0x0e, 0x0e, 0xd2, 
0x52, 0x4a, - 0xb9, 0xc4, 0x80, 0x87, 0x01, 0x4e, 0xe6, 0xb0, 0xba, 0xa5, 0x7f, 0x41, 0xbc, 0x53, 0xbc, 0x0f, - 0x17, 0xb8, 0x17, 0xcf, 0x77, 0x0d, 0x6b, 0xa4, 0x6a, 0xa7, 0x54, 0x7b, 0xaa, 0x4e, 0xfc, 0xe1, - 0x5d, 0xe9, 0x9d, 0xf8, 0xf7, 0x79, 0x84, 0x7d, 0x8e, 0x69, 0x30, 0xc8, 0xb1, 0x3f, 0xbc, 0x8b, - 0xfb, 0x50, 0x66, 0x8b, 0x31, 0x36, 0xbe, 0xa1, 0xea, 0xd0, 0x76, 0x79, 0x65, 0xa9, 0x2c, 0xd8, - 0xd9, 0xb1, 0x0c, 0xd6, 0xba, 0x81, 0xc1, 0x91, 0xad, 0xd3, 0xfd, 0x5c, 0xbf, 0x27, 0xcb, 0x4d, - 0xa5, 0x14, 0x7a, 0x79, 0x60, 0xbb, 0x8c, 0x50, 0x23, 0x3b, 0x4a, 0x70, 0x49, 0x10, 0x6a, 0x64, - 0x87, 0xe9, 0xbd, 0x0d, 0x1b, 0x9a, 0x26, 0xe6, 0x6c, 0x68, 0x6a, 0xd0, 0xe2, 0x7b, 0x12, 0x4a, - 0x24, 0x4b, 0xd3, 0x0e, 0x05, 0x20, 0xe0, 0xb8, 0x87, 0x3f, 0x87, 0xb7, 0xa7, 0xc9, 0x8a, 0x1b, - 0xae, 0xcf, 0xcd, 0x72, 0xd6, 0xf4, 0x36, 0x6c, 0x38, 0x67, 0xf3, 0x86, 0x38, 0xf1, 0x45, 0xe7, - 0x6c, 0xd6, 0xec, 0x23, 0x7e, 0x6d, 0x73, 0xa9, 0x46, 0x7c, 0xaa, 0x4b, 0x17, 0xe3, 0xe8, 0x98, - 0x02, 0xef, 0x02, 0xd2, 0x34, 0x95, 0x5a, 0xe4, 0xc4, 0xa4, 0x2a, 0x71, 0xa9, 0x45, 0x3c, 0xe9, - 0x72, 0x1c, 0x5c, 0xd1, 0x34, 0x99, 0x6b, 0xeb, 0x5c, 0x89, 0xaf, 0xc1, 0xba, 0x7d, 0xf2, 0x44, - 0x13, 0xcc, 0x52, 0x1d, 0x97, 0x0e, 0x8d, 0x17, 0xd2, 0x87, 0x3c, 0x4d, 0x6b, 0x4c, 0xc1, 0x79, - 0xd5, 0xe3, 0x62, 0xfc, 0x09, 0x20, 0xcd, 0x3b, 0x25, 0xae, 0xc3, 0x4b, 0xbb, 0xe7, 0x10, 0x8d, - 0x4a, 0x1f, 0x09, 0xa8, 0x90, 0x77, 0x42, 0x31, 0x63, 0xb6, 0xf7, 0xdc, 0x18, 0xfa, 0xa1, 0xc7, - 0xab, 0x82, 0xd9, 0x5c, 0x16, 0x78, 0xdb, 0x01, 0xe4, 0x9c, 0x3a, 0xc9, 0x0f, 0xef, 0x70, 0x58, - 0xc5, 0x39, 0x75, 0xe2, 0xdf, 0x7d, 0x04, 0x9b, 0x13, 0xcb, 0xb0, 0x7c, 0xea, 0x3a, 0x2e, 0x65, - 0xed, 0xbe, 0xd8, 0xb3, 0xd2, 0xbf, 0x57, 0x96, 0x34, 0xec, 0xc7, 0x71, 0xb4, 0xa0, 0x8a, 0xb2, - 0x31, 0x99, 0x17, 0x56, 0xf7, 0xa1, 0x1c, 0x67, 0x10, 0x2e, 0x82, 0xe0, 0x10, 0x4a, 0xb1, 0x6a, - 0xdc, 0xe8, 0x36, 0x59, 0x1d, 0xfd, 0x4a, 0x46, 0x69, 0x56, 0xcf, 0xdb, 0xad, 0x81, 0xac, 0x2a, - 0xc7, 0x9d, 0x41, 0xeb, 0x48, 0x46, 0x99, 0x6b, 0xc5, 0xc2, 0x7f, 0x56, 0xd0, 0xcb, 0x97, 0x2f, - 0x5f, 0xa6, 0x1f, 0x66, 0x0b, 0x1f, 0xa3, 0xab, 0xd5, 0xef, 0xd3, 0x50, 0x49, 0x76, 0xd2, 0xf8, - 0xe7, 0x70, 0x31, 0xbc, 0xf6, 0x7a, 0xd4, 0x57, 0x9f, 0x1b, 0x2e, 0xa7, 0xf6, 0x98, 0x88, 0x5e, - 0x34, 0x5a, 0x95, 0xcd, 0x00, 0xd5, 0xa7, 0xfe, 0x97, 0x86, 0xcb, 0x88, 0x3b, 0x26, 0x3e, 0x6e, - 0xc3, 0x65, 0xcb, 0x56, 0x3d, 0x9f, 0x58, 0x3a, 0x71, 0x75, 0x75, 0xfa, 0xe0, 0xa0, 0x12, 0x4d, - 0xa3, 0x9e, 0x67, 0x8b, 0x92, 0x12, 0x79, 0x79, 0xd7, 0xb2, 0xfb, 0x01, 0x78, 0x7a, 0xd6, 0xd6, - 0x03, 0xe8, 0x0c, 0x83, 0x32, 0xcb, 0x18, 0xf4, 0x0e, 0x14, 0xc7, 0xc4, 0x51, 0xa9, 0xe5, 0xbb, - 0x67, 0xbc, 0xff, 0x2b, 0x28, 0x85, 0x31, 0x71, 0x64, 0x36, 0x7e, 0x73, 0x2b, 0x91, 0xcc, 0x66, - 0x01, 0x15, 0x1f, 0x66, 0x0b, 0x45, 0x04, 0xd5, 0x7f, 0x66, 0xa0, 0x1c, 0xef, 0x07, 0x59, 0x7b, - 0xad, 0xf1, 0xb3, 0x3f, 0xc5, 0x4f, 0x87, 0x0f, 0x5e, 0xd9, 0x3d, 0xd6, 0x1a, 0xac, 0x28, 0xec, - 0xe7, 0x45, 0x97, 0xa6, 0x08, 0x4b, 0x56, 0x90, 0xd9, 0x79, 0x40, 0x45, 0xef, 0x5f, 0x50, 0x82, - 0x11, 0x3e, 0x84, 0xfc, 0x13, 0x8f, 0xfb, 0xce, 0x73, 0xdf, 0x1f, 0xbe, 0xda, 0xf7, 0xc3, 0x3e, - 0x77, 0x5e, 0x7c, 0xd8, 0x57, 0x3b, 0x5d, 0xe5, 0xa8, 0xde, 0x56, 0x02, 0x73, 0x7c, 0x09, 0xb2, - 0x26, 0xf9, 0xe6, 0x2c, 0x59, 0x3e, 0xb8, 0xe8, 0xbc, 0x8b, 0x70, 0x09, 0xb2, 0xcf, 0x29, 0x79, - 0x9a, 0x3c, 0xb4, 0xb9, 0xe8, 0x0d, 0x6e, 0x86, 0x5d, 0xc8, 0xf1, 0x7c, 0x61, 0x80, 0x20, 0x63, - 0xe8, 0x2d, 0x5c, 0x80, 0x6c, 0xa3, 0xab, 0xb0, 0x0d, 0x81, 0xa0, 0x2c, 0xa4, 0x6a, 0xaf, 0x25, - 0x37, 0x64, 
0x94, 0xae, 0xde, 0x86, 0xbc, 0x48, 0x02, 0xdb, 0x2c, 0x51, 0x1a, 0xd0, 0x5b, 0xc1, - 0x30, 0xf0, 0x91, 0x0a, 0xb5, 0xc7, 0x47, 0x07, 0xb2, 0x82, 0xd2, 0xc9, 0xa5, 0xce, 0xa2, 0x5c, - 0xd5, 0x83, 0x72, 0xbc, 0x21, 0xfc, 0x51, 0x58, 0x56, 0xfd, 0x6b, 0x0a, 0x4a, 0xb1, 0x06, 0x8f, - 0xb5, 0x16, 0xc4, 0x34, 0xed, 0xe7, 0x2a, 0x31, 0x0d, 0xe2, 0x05, 0xd4, 0x00, 0x2e, 0xaa, 0x33, - 0xc9, 0x79, 0x97, 0xee, 0x47, 0xda, 0x22, 0x39, 0x94, 0xaf, 0xfe, 0x29, 0x05, 0x68, 0xb6, 0x45, - 0x9c, 0x09, 0x33, 0xf5, 0x53, 0x86, 0x59, 0xfd, 0x63, 0x0a, 0x2a, 0xc9, 0xbe, 0x70, 0x26, 0xbc, - 0x2b, 0x3f, 0x69, 0x78, 0xff, 0x48, 0xc3, 0x6a, 0xa2, 0x1b, 0x3c, 0x6f, 0x74, 0x5f, 0xc3, 0xba, - 0xa1, 0xd3, 0xb1, 0x63, 0xfb, 0xd4, 0xd2, 0xce, 0x54, 0x93, 0x3e, 0xa3, 0xa6, 0x54, 0xe5, 0x87, - 0xc6, 0xee, 0xab, 0xfb, 0xcd, 0x5a, 0x6b, 0x6a, 0xd7, 0x66, 0x66, 0xfb, 0x1b, 0xad, 0xa6, 0x7c, - 0xd4, 0xeb, 0x0e, 0xe4, 0x4e, 0xe3, 0xb1, 0x7a, 0xdc, 0xf9, 0x65, 0xa7, 0xfb, 0x65, 0x47, 0x41, - 0xc6, 0x0c, 0xec, 0x0d, 0x6e, 0xfb, 0x1e, 0xa0, 0xd9, 0xa0, 0xf0, 0x45, 0x58, 0x14, 0x16, 0x7a, - 0x0b, 0x6f, 0xc0, 0x5a, 0xa7, 0xab, 0xf6, 0x5b, 0x4d, 0x59, 0x95, 0x1f, 0x3c, 0x90, 0x1b, 0x83, - 0xbe, 0xb8, 0x80, 0x47, 0xe8, 0x41, 0x62, 0x83, 0x57, 0xff, 0x90, 0x81, 0x8d, 0x05, 0x91, 0xe0, - 0x7a, 0xd0, 0xfb, 0x8b, 0xeb, 0xc8, 0x8d, 0xf3, 0x44, 0x5f, 0x63, 0xdd, 0x45, 0x8f, 0xb8, 0x7e, - 0x70, 0x55, 0xf8, 0x04, 0x58, 0x96, 0x2c, 0xdf, 0x18, 0x1a, 0xd4, 0x0d, 0xde, 0x2b, 0xc4, 0x85, - 0x60, 0x6d, 0x2a, 0x17, 0x4f, 0x16, 0x3f, 0x03, 0xec, 0xd8, 0x9e, 0xe1, 0x1b, 0xcf, 0xa8, 0x6a, - 0x58, 0xe1, 0xe3, 0x06, 0xbb, 0x20, 0x64, 0x15, 0x14, 0x6a, 0x5a, 0x96, 0x1f, 0xa1, 0x2d, 0x3a, - 0x22, 0x33, 0x68, 0x76, 0x98, 0x67, 0x14, 0x14, 0x6a, 0x22, 0xf4, 0x15, 0x28, 0xeb, 0xf6, 0x84, - 0xb5, 0x5b, 0x02, 0xc7, 0x6a, 0x47, 0x4a, 0x29, 0x09, 0x59, 0x04, 0x09, 0xfa, 0xe1, 0xe9, 0xab, - 0x4a, 0x59, 0x29, 0x09, 0x99, 0x80, 0x5c, 0x85, 0x35, 0x32, 0x1a, 0xb9, 0xcc, 0x79, 0xe8, 0x48, - 0x74, 0xf8, 0x95, 0x48, 0xcc, 0x81, 0x5b, 0x0f, 0xa1, 0x10, 0xe6, 0x81, 0x95, 0x6a, 0x96, 0x09, - 0xd5, 0x11, 0x6f, 0x5b, 0xe9, 0x9d, 0xa2, 0x52, 0xb0, 0x42, 0xe5, 0x15, 0x28, 0x1b, 0x9e, 0x3a, - 0x7d, 0x64, 0x4d, 0x6f, 0xa7, 0x77, 0x0a, 0x4a, 0xc9, 0xf0, 0xa2, 0x57, 0xb5, 0xea, 0x77, 0x69, - 0xa8, 0x24, 0x1f, 0x89, 0x71, 0x13, 0x0a, 0xa6, 0xad, 0x11, 0x4e, 0x2d, 0xf1, 0x0b, 0xc5, 0xce, - 0x6b, 0xde, 0x95, 0x6b, 0xed, 0x00, 0xaf, 0x44, 0x96, 0x5b, 0x7f, 0x4b, 0x41, 0x21, 0x14, 0xe3, - 0x0b, 0x90, 0x75, 0x88, 0x7f, 0xca, 0xdd, 0xe5, 0x0e, 0xd2, 0x28, 0xa5, 0xf0, 0x31, 0x93, 0x7b, - 0x0e, 0xb1, 0x38, 0x05, 0x02, 0x39, 0x1b, 0xb3, 0x75, 0x35, 0x29, 0xd1, 0xf9, 0xf5, 0xc1, 0x1e, - 0x8f, 0xa9, 0xe5, 0x7b, 0xe1, 0xba, 0x06, 0xf2, 0x46, 0x20, 0xc6, 0xd7, 0x61, 0xdd, 0x77, 0x89, - 0x61, 0x26, 0xb0, 0x59, 0x8e, 0x45, 0xa1, 0x22, 0x02, 0xef, 0xc3, 0xa5, 0xd0, 0xaf, 0x4e, 0x7d, - 0xa2, 0x9d, 0x52, 0x7d, 0x6a, 0x94, 0xe7, 0x2f, 0x90, 0x17, 0x03, 0x40, 0x33, 0xd0, 0x87, 0xb6, - 0xd5, 0xef, 0x53, 0xb0, 0x1e, 0x5e, 0x78, 0xf4, 0x28, 0x59, 0x47, 0x00, 0xc4, 0xb2, 0x6c, 0x3f, - 0x9e, 0xae, 0x79, 0x2a, 0xcf, 0xd9, 0xd5, 0xea, 0x91, 0x91, 0x12, 0x73, 0xb0, 0x35, 0x06, 0x98, - 0x6a, 0x96, 0xa6, 0xed, 0x32, 0x94, 0x82, 0x5f, 0x00, 0xf8, 0xcf, 0x48, 0xe2, 0x8a, 0x0c, 0x42, - 0xc4, 0x6e, 0x46, 0x78, 0x13, 0x72, 0x27, 0x74, 0x64, 0x58, 0xc1, 0xbb, 0xa4, 0x18, 0x84, 0xaf, - 0x9d, 0xd9, 0xe8, 0xb5, 0xf3, 0xe0, 0x77, 0x29, 0xd8, 0xd0, 0xec, 0xf1, 0x6c, 0xbc, 0x07, 0x68, - 0xe6, 0x9e, 0xee, 0x7d, 0x91, 0xfa, 0xea, 0xfe, 0xc8, 0xf0, 0x4f, 0x27, 0x27, 0x35, 0xcd, 0x1e, - 0xef, 0x8e, 0x6c, 0x93, 0x58, 0xa3, 
0xe9, 0xef, 0x60, 0xfc, 0x1f, 0xed, 0xc6, 0x88, 0x5a, 0x37, - 0x46, 0x76, 0xec, 0x57, 0xb1, 0x7b, 0xd3, 0x7f, 0xbf, 0x4d, 0x67, 0x0e, 0x7b, 0x07, 0x7f, 0x4e, - 0x6f, 0x1d, 0x8a, 0x6f, 0xf5, 0xc2, 0xdc, 0x28, 0x74, 0x68, 0x52, 0x8d, 0xcd, 0xf7, 0x7f, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x8e, 0x54, 0xe7, 0xef, 0x60, 0x1b, 0x00, 0x00, +func init() { + proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_descriptor_4df4cb5f42392df6) +} + +var fileDescriptor_descriptor_4df4cb5f42392df6 = []byte{ + // 2555 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, + 0xf5, 0xcf, 0xf2, 0x4b, 0xe4, 0x21, 0x45, 0x8d, 0x46, 0x8a, 0xbd, 0x56, 0x3e, 0x2c, 0x33, 0x1f, + 0x96, 0x9d, 0x7f, 0xa8, 0xc0, 0xb1, 0x1d, 0x47, 0xfe, 0x23, 0x2d, 0x45, 0xae, 0x15, 0xaa, 0x12, + 0xc9, 0x2e, 0xa9, 0xe6, 0x03, 0x28, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, + 0xb4, 0xad, 0xa0, 0x17, 0x06, 0x7a, 0xd5, 0xab, 0xde, 0x16, 0x45, 0xd1, 0x8b, 0xde, 0x04, 0xe8, + 0x03, 0x14, 0xc8, 0x5d, 0x9f, 0xa0, 0x40, 0xde, 0xa0, 0x68, 0x0b, 0xb4, 0x8f, 0xd0, 0xcb, 0x62, + 0x66, 0x76, 0x97, 0xbb, 0x24, 0x15, 0x2b, 0x01, 0xe2, 0x5c, 0x91, 0xf3, 0x9b, 0xdf, 0x39, 0x73, + 0xe6, 0xcc, 0x99, 0x33, 0x67, 0x66, 0x61, 0x7b, 0xe4, 0x38, 0x23, 0x8b, 0xee, 0xba, 0x9e, 0x13, + 0x38, 0xa7, 0xd3, 0xe1, 0xae, 0x41, 0x7d, 0xdd, 0x33, 0xdd, 0xc0, 0xf1, 0xea, 0x1c, 0xc3, 0x6b, + 0x82, 0x51, 0x8f, 0x18, 0xb5, 0x63, 0x58, 0x7f, 0x60, 0x5a, 0xb4, 0x15, 0x13, 0xfb, 0x34, 0xc0, + 0xf7, 0x20, 0x37, 0x34, 0x2d, 0x2a, 0x4b, 0xdb, 0xd9, 0x9d, 0xf2, 0xad, 0x37, 0xeb, 0x73, 0x42, + 0xf5, 0xb4, 0x44, 0x8f, 0xc1, 0x2a, 0x97, 0xa8, 0xfd, 0x2b, 0x07, 0x1b, 0x4b, 0x7a, 0x31, 0x86, + 0x9c, 0x4d, 0x26, 0x4c, 0xa3, 0xb4, 0x53, 0x52, 0xf9, 0x7f, 0x2c, 0xc3, 0x8a, 0x4b, 0xf4, 0x47, + 0x64, 0x44, 0xe5, 0x0c, 0x87, 0xa3, 0x26, 0x7e, 0x1d, 0xc0, 0xa0, 0x2e, 0xb5, 0x0d, 0x6a, 0xeb, + 0x67, 0x72, 0x76, 0x3b, 0xbb, 0x53, 0x52, 0x13, 0x08, 0x7e, 0x07, 0xd6, 0xdd, 0xe9, 0xa9, 0x65, + 0xea, 0x5a, 0x82, 0x06, 0xdb, 0xd9, 0x9d, 0xbc, 0x8a, 0x44, 0x47, 0x6b, 0x46, 0xbe, 0x0e, 0x6b, + 0x4f, 0x28, 0x79, 0x94, 0xa4, 0x96, 0x39, 0xb5, 0xca, 0xe0, 0x04, 0xb1, 0x09, 0x95, 0x09, 0xf5, + 0x7d, 0x32, 0xa2, 0x5a, 0x70, 0xe6, 0x52, 0x39, 0xc7, 0x67, 0xbf, 0xbd, 0x30, 0xfb, 0xf9, 0x99, + 0x97, 0x43, 0xa9, 0xc1, 0x99, 0x4b, 0x71, 0x03, 0x4a, 0xd4, 0x9e, 0x4e, 0x84, 0x86, 0xfc, 0x39, + 0xfe, 0x53, 0xec, 0xe9, 0x64, 0x5e, 0x4b, 0x91, 0x89, 0x85, 0x2a, 0x56, 0x7c, 0xea, 0x3d, 0x36, + 0x75, 0x2a, 0x17, 0xb8, 0x82, 0xeb, 0x0b, 0x0a, 0xfa, 0xa2, 0x7f, 0x5e, 0x47, 0x24, 0x87, 0x9b, + 0x50, 0xa2, 0x4f, 0x03, 0x6a, 0xfb, 0xa6, 0x63, 0xcb, 0x2b, 0x5c, 0xc9, 0x5b, 0x4b, 0x56, 0x91, + 0x5a, 0xc6, 0xbc, 0x8a, 0x99, 0x1c, 0xbe, 0x0b, 0x2b, 0x8e, 0x1b, 0x98, 0x8e, 0xed, 0xcb, 0xc5, + 0x6d, 0x69, 0xa7, 0x7c, 0xeb, 0xd5, 0xa5, 0x81, 0xd0, 0x15, 0x1c, 0x35, 0x22, 0xe3, 0x36, 0x20, + 0xdf, 0x99, 0x7a, 0x3a, 0xd5, 0x74, 0xc7, 0xa0, 0x9a, 0x69, 0x0f, 0x1d, 0xb9, 0xc4, 0x15, 0x5c, + 0x5d, 0x9c, 0x08, 0x27, 0x36, 0x1d, 0x83, 0xb6, 0xed, 0xa1, 0xa3, 0x56, 0xfd, 0x54, 0x1b, 0x5f, + 0x82, 0x82, 0x7f, 0x66, 0x07, 0xe4, 0xa9, 0x5c, 0xe1, 0x11, 0x12, 0xb6, 0x6a, 0x5f, 0x17, 0x60, + 0xed, 0x22, 0x21, 0x76, 0x1f, 0xf2, 0x43, 0x36, 0x4b, 0x39, 0xf3, 0x5d, 0x7c, 0x20, 0x64, 0xd2, + 0x4e, 0x2c, 0x7c, 0x4f, 0x27, 0x36, 0xa0, 0x6c, 0x53, 0x3f, 0xa0, 0x86, 0x88, 0x88, 0xec, 0x05, + 0x63, 0x0a, 0x84, 0xd0, 0x62, 0x48, 0xe5, 0xbe, 0x57, 0x48, 0x7d, 0x0a, 0x6b, 0xb1, 0x49, 0x9a, + 0x47, 0xec, 0x51, 0x14, 0x9b, 0xbb, 0xcf, 0xb3, 0xa4, 
0xae, 0x44, 0x72, 0x2a, 0x13, 0x53, 0xab, + 0x34, 0xd5, 0xc6, 0x2d, 0x00, 0xc7, 0xa6, 0xce, 0x50, 0x33, 0xa8, 0x6e, 0xc9, 0xc5, 0x73, 0xbc, + 0xd4, 0x65, 0x94, 0x05, 0x2f, 0x39, 0x02, 0xd5, 0x2d, 0xfc, 0xe1, 0x2c, 0xd4, 0x56, 0xce, 0x89, + 0x94, 0x63, 0xb1, 0xc9, 0x16, 0xa2, 0xed, 0x04, 0xaa, 0x1e, 0x65, 0x71, 0x4f, 0x8d, 0x70, 0x66, + 0x25, 0x6e, 0x44, 0xfd, 0xb9, 0x33, 0x53, 0x43, 0x31, 0x31, 0xb1, 0x55, 0x2f, 0xd9, 0xc4, 0x6f, + 0x40, 0x0c, 0x68, 0x3c, 0xac, 0x80, 0x67, 0xa1, 0x4a, 0x04, 0x76, 0xc8, 0x84, 0x6e, 0x7d, 0x09, + 0xd5, 0xb4, 0x7b, 0xf0, 0x26, 0xe4, 0xfd, 0x80, 0x78, 0x01, 0x8f, 0xc2, 0xbc, 0x2a, 0x1a, 0x18, + 0x41, 0x96, 0xda, 0x06, 0xcf, 0x72, 0x79, 0x95, 0xfd, 0xc5, 0x3f, 0x9d, 0x4d, 0x38, 0xcb, 0x27, + 0xfc, 0xf6, 0xe2, 0x8a, 0xa6, 0x34, 0xcf, 0xcf, 0x7b, 0xeb, 0x03, 0x58, 0x4d, 0x4d, 0xe0, 0xa2, + 0x43, 0xd7, 0x7e, 0x05, 0x2f, 0x2f, 0x55, 0x8d, 0x3f, 0x85, 0xcd, 0xa9, 0x6d, 0xda, 0x01, 0xf5, + 0x5c, 0x8f, 0xb2, 0x88, 0x15, 0x43, 0xc9, 0xff, 0x5e, 0x39, 0x27, 0xe6, 0x4e, 0x92, 0x6c, 0xa1, + 0x45, 0xdd, 0x98, 0x2e, 0x82, 0x37, 0x4b, 0xc5, 0xff, 0xac, 0xa0, 0x67, 0xcf, 0x9e, 0x3d, 0xcb, + 0xd4, 0x7e, 0x57, 0x80, 0xcd, 0x65, 0x7b, 0x66, 0xe9, 0xf6, 0xbd, 0x04, 0x05, 0x7b, 0x3a, 0x39, + 0xa5, 0x1e, 0x77, 0x52, 0x5e, 0x0d, 0x5b, 0xb8, 0x01, 0x79, 0x8b, 0x9c, 0x52, 0x4b, 0xce, 0x6d, + 0x4b, 0x3b, 0xd5, 0x5b, 0xef, 0x5c, 0x68, 0x57, 0xd6, 0x8f, 0x98, 0x88, 0x2a, 0x24, 0xf1, 0x47, + 0x90, 0x0b, 0x53, 0x34, 0xd3, 0x70, 0xf3, 0x62, 0x1a, 0xd8, 0x5e, 0x52, 0xb9, 0x1c, 0x7e, 0x05, + 0x4a, 0xec, 0x57, 0xc4, 0x46, 0x81, 0xdb, 0x5c, 0x64, 0x00, 0x8b, 0x0b, 0xbc, 0x05, 0x45, 0xbe, + 0x4d, 0x0c, 0x1a, 0x1d, 0x6d, 0x71, 0x9b, 0x05, 0x96, 0x41, 0x87, 0x64, 0x6a, 0x05, 0xda, 0x63, + 0x62, 0x4d, 0x29, 0x0f, 0xf8, 0x92, 0x5a, 0x09, 0xc1, 0x5f, 0x30, 0x0c, 0x5f, 0x85, 0xb2, 0xd8, + 0x55, 0xa6, 0x6d, 0xd0, 0xa7, 0x3c, 0x7b, 0xe6, 0x55, 0xb1, 0xd1, 0xda, 0x0c, 0x61, 0xc3, 0x3f, + 0xf4, 0x1d, 0x3b, 0x0a, 0x4d, 0x3e, 0x04, 0x03, 0xf8, 0xf0, 0x1f, 0xcc, 0x27, 0xee, 0xd7, 0x96, + 0x4f, 0x6f, 0x3e, 0xa6, 0x6a, 0x7f, 0xc9, 0x40, 0x8e, 0xe7, 0x8b, 0x35, 0x28, 0x0f, 0x3e, 0xeb, + 0x29, 0x5a, 0xab, 0x7b, 0xb2, 0x7f, 0xa4, 0x20, 0x09, 0x57, 0x01, 0x38, 0xf0, 0xe0, 0xa8, 0xdb, + 0x18, 0xa0, 0x4c, 0xdc, 0x6e, 0x77, 0x06, 0x77, 0x6f, 0xa3, 0x6c, 0x2c, 0x70, 0x22, 0x80, 0x5c, + 0x92, 0xf0, 0xfe, 0x2d, 0x94, 0xc7, 0x08, 0x2a, 0x42, 0x41, 0xfb, 0x53, 0xa5, 0x75, 0xf7, 0x36, + 0x2a, 0xa4, 0x91, 0xf7, 0x6f, 0xa1, 0x15, 0xbc, 0x0a, 0x25, 0x8e, 0xec, 0x77, 0xbb, 0x47, 0xa8, + 0x18, 0xeb, 0xec, 0x0f, 0xd4, 0x76, 0xe7, 0x00, 0x95, 0x62, 0x9d, 0x07, 0x6a, 0xf7, 0xa4, 0x87, + 0x20, 0xd6, 0x70, 0xac, 0xf4, 0xfb, 0x8d, 0x03, 0x05, 0x95, 0x63, 0xc6, 0xfe, 0x67, 0x03, 0xa5, + 0x8f, 0x2a, 0x29, 0xb3, 0xde, 0xbf, 0x85, 0x56, 0xe3, 0x21, 0x94, 0xce, 0xc9, 0x31, 0xaa, 0xe2, + 0x75, 0x58, 0x15, 0x43, 0x44, 0x46, 0xac, 0xcd, 0x41, 0x77, 0x6f, 0x23, 0x34, 0x33, 0x44, 0x68, + 0x59, 0x4f, 0x01, 0x77, 0x6f, 0x23, 0x5c, 0x6b, 0x42, 0x9e, 0x47, 0x17, 0xc6, 0x50, 0x3d, 0x6a, + 0xec, 0x2b, 0x47, 0x5a, 0xb7, 0x37, 0x68, 0x77, 0x3b, 0x8d, 0x23, 0x24, 0xcd, 0x30, 0x55, 0xf9, + 0xf9, 0x49, 0x5b, 0x55, 0x5a, 0x28, 0x93, 0xc4, 0x7a, 0x4a, 0x63, 0xa0, 0xb4, 0x50, 0xb6, 0xa6, + 0xc3, 0xe6, 0xb2, 0x3c, 0xb9, 0x74, 0x67, 0x24, 0x96, 0x38, 0x73, 0xce, 0x12, 0x73, 0x5d, 0x0b, + 0x4b, 0xfc, 0xcf, 0x0c, 0x6c, 0x2c, 0x39, 0x2b, 0x96, 0x0e, 0xf2, 0x13, 0xc8, 0x8b, 0x10, 0x15, + 0xa7, 0xe7, 0x8d, 0xa5, 0x87, 0x0e, 0x0f, 0xd8, 0x85, 0x13, 0x94, 0xcb, 0x25, 0x2b, 0x88, 0xec, + 0x39, 0x15, 0x04, 0x53, 0xb1, 0x90, 0xd3, 0x7f, 0xb9, 0x90, 0xd3, 0xc5, 0xb1, 
0x77, 0xf7, 0x22, + 0xc7, 0x1e, 0xc7, 0xbe, 0x5b, 0x6e, 0xcf, 0x2f, 0xc9, 0xed, 0xf7, 0x61, 0x7d, 0x41, 0xd1, 0x85, + 0x73, 0xec, 0xaf, 0x25, 0x90, 0xcf, 0x73, 0xce, 0x73, 0x32, 0x5d, 0x26, 0x95, 0xe9, 0xee, 0xcf, + 0x7b, 0xf0, 0xda, 0xf9, 0x8b, 0xb0, 0xb0, 0xd6, 0x5f, 0x49, 0x70, 0x69, 0x79, 0xa5, 0xb8, 0xd4, + 0x86, 0x8f, 0xa0, 0x30, 0xa1, 0xc1, 0xd8, 0x89, 0xaa, 0xa5, 0xb7, 0x97, 0x9c, 0xc1, 0xac, 0x7b, + 0x7e, 0xb1, 0x43, 0xa9, 0xe4, 0x21, 0x9e, 0x3d, 0xaf, 0xdc, 0x13, 0xd6, 0x2c, 0x58, 0xfa, 0x9b, + 0x0c, 0xbc, 0xbc, 0x54, 0xf9, 0x52, 0x43, 0x5f, 0x03, 0x30, 0x6d, 0x77, 0x1a, 0x88, 0x8a, 0x48, + 0x24, 0xd8, 0x12, 0x47, 0x78, 0xf2, 0x62, 0xc9, 0x73, 0x1a, 0xc4, 0xfd, 0x59, 0xde, 0x0f, 0x02, + 0xe2, 0x84, 0x7b, 0x33, 0x43, 0x73, 0xdc, 0xd0, 0xd7, 0xcf, 0x99, 0xe9, 0x42, 0x60, 0xbe, 0x07, + 0x48, 0xb7, 0x4c, 0x6a, 0x07, 0x9a, 0x1f, 0x78, 0x94, 0x4c, 0x4c, 0x7b, 0xc4, 0x4f, 0x90, 0xe2, + 0x5e, 0x7e, 0x48, 0x2c, 0x9f, 0xaa, 0x6b, 0xa2, 0xbb, 0x1f, 0xf5, 0x32, 0x09, 0x1e, 0x40, 0x5e, + 0x42, 0xa2, 0x90, 0x92, 0x10, 0xdd, 0xb1, 0x44, 0xed, 0xeb, 0x22, 0x94, 0x13, 0x75, 0x35, 0xbe, + 0x06, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0xf0, 0x44, 0x99, 0x61, 0xbd, 0xf0, 0xbe, + 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0xee, 0xb4, 0x22, + 0xa7, 0x62, 0xd6, 0xd7, 0x65, 0x5d, 0xcd, 0xa8, 0x07, 0xdf, 0x81, 0x0d, 0x2e, 0x31, 0x99, 0x5a, + 0x81, 0xe9, 0x5a, 0x54, 0x63, 0xb7, 0x37, 0x9f, 0x9f, 0x24, 0xb1, 0x65, 0xeb, 0x8c, 0x71, 0x1c, + 0x12, 0x98, 0x45, 0x3e, 0x6e, 0xc1, 0x6b, 0x5c, 0x6c, 0x44, 0x6d, 0xea, 0x91, 0x80, 0x6a, 0xf4, + 0x8b, 0x29, 0xb1, 0x7c, 0x8d, 0xd8, 0x86, 0x36, 0x26, 0xfe, 0x58, 0xde, 0x64, 0x0a, 0xf6, 0x33, + 0xb2, 0xa4, 0x5e, 0x61, 0xc4, 0x83, 0x90, 0xa7, 0x70, 0x5a, 0xc3, 0x36, 0x3e, 0x26, 0xfe, 0x18, + 0xef, 0xc1, 0x25, 0xae, 0xc5, 0x0f, 0x3c, 0xd3, 0x1e, 0x69, 0xfa, 0x98, 0xea, 0x8f, 0xb4, 0x69, + 0x30, 0xbc, 0x27, 0xbf, 0x92, 0x1c, 0x9f, 0x5b, 0xd8, 0xe7, 0x9c, 0x26, 0xa3, 0x9c, 0x04, 0xc3, + 0x7b, 0xb8, 0x0f, 0x15, 0xb6, 0x18, 0x13, 0xf3, 0x4b, 0xaa, 0x0d, 0x1d, 0x8f, 0x1f, 0x8d, 0xd5, + 0x25, 0xa9, 0x29, 0xe1, 0xc1, 0x7a, 0x37, 0x14, 0x38, 0x76, 0x0c, 0xba, 0x97, 0xef, 0xf7, 0x14, + 0xa5, 0xa5, 0x96, 0x23, 0x2d, 0x0f, 0x1c, 0x8f, 0x05, 0xd4, 0xc8, 0x89, 0x1d, 0x5c, 0x16, 0x01, + 0x35, 0x72, 0x22, 0xf7, 0xde, 0x81, 0x0d, 0x5d, 0x17, 0x73, 0x36, 0x75, 0x2d, 0xbc, 0x63, 0xf9, + 0x32, 0x4a, 0x39, 0x4b, 0xd7, 0x0f, 0x04, 0x21, 0x8c, 0x71, 0x1f, 0x7f, 0x08, 0x2f, 0xcf, 0x9c, + 0x95, 0x14, 0x5c, 0x5f, 0x98, 0xe5, 0xbc, 0xe8, 0x1d, 0xd8, 0x70, 0xcf, 0x16, 0x05, 0x71, 0x6a, + 0x44, 0xf7, 0x6c, 0x5e, 0xec, 0x03, 0xd8, 0x74, 0xc7, 0xee, 0xa2, 0xdc, 0xcd, 0xa4, 0x1c, 0x76, + 0xc7, 0xee, 0xbc, 0xe0, 0x5b, 0xfc, 0xc2, 0xed, 0x51, 0x9d, 0x04, 0xd4, 0x90, 0x2f, 0x27, 0xe9, + 0x89, 0x0e, 0xbc, 0x0b, 0x48, 0xd7, 0x35, 0x6a, 0x93, 0x53, 0x8b, 0x6a, 0xc4, 0xa3, 0x36, 0xf1, + 0xe5, 0xab, 0x49, 0x72, 0x55, 0xd7, 0x15, 0xde, 0xdb, 0xe0, 0x9d, 0xf8, 0x26, 0xac, 0x3b, 0xa7, + 0x0f, 0x75, 0x11, 0x92, 0x9a, 0xeb, 0xd1, 0xa1, 0xf9, 0x54, 0x7e, 0x93, 0xfb, 0x77, 0x8d, 0x75, + 0xf0, 0x80, 0xec, 0x71, 0x18, 0xdf, 0x00, 0xa4, 0xfb, 0x63, 0xe2, 0xb9, 0x3c, 0x27, 0xfb, 0x2e, + 0xd1, 0xa9, 0xfc, 0x96, 0xa0, 0x0a, 0xbc, 0x13, 0xc1, 0x6c, 0x4b, 0xf8, 0x4f, 0xcc, 0x61, 0x10, + 0x69, 0xbc, 0x2e, 0xb6, 0x04, 0xc7, 0x42, 0x6d, 0x3b, 0x80, 0x98, 0x2b, 0x52, 0x03, 0xef, 0x70, + 0x5a, 0xd5, 0x1d, 0xbb, 0xc9, 0x71, 0xdf, 0x80, 0x55, 0xc6, 0x9c, 0x0d, 0x7a, 0x43, 0x14, 0x64, + 0xee, 0x38, 0x31, 0xe2, 0x0f, 0x56, 0x1b, 0xd7, 0xf6, 0xa0, 0x92, 0x8c, 0x4f, 0x5c, 0x02, 0x11, + 0xa1, 
0x48, 0x62, 0xc5, 0x4a, 0xb3, 0xdb, 0x62, 0x65, 0xc6, 0xe7, 0x0a, 0xca, 0xb0, 0x72, 0xe7, + 0xa8, 0x3d, 0x50, 0x34, 0xf5, 0xa4, 0x33, 0x68, 0x1f, 0x2b, 0x28, 0x9b, 0xa8, 0xab, 0x0f, 0x73, + 0xc5, 0xb7, 0xd1, 0xf5, 0xda, 0x37, 0x19, 0xa8, 0xa6, 0x2f, 0x4a, 0xf8, 0xff, 0xe1, 0x72, 0xf4, + 0xaa, 0xe1, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0xe3, 0x4c, 0x88, 0x38, 0xc4, 0xe2, 0xa5, 0xdb, + 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0x5b, 0x4c, 0x48, 0x80, 0x8f, 0xe0, 0xaa, 0xed, + 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0x4f, 0xd2, 0x88, 0xae, 0x53, 0xdf, 0x77, + 0xc4, 0x81, 0x15, 0x6b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x96, 0xc9, 0x1b, 0x21, 0x75, 0x2e, + 0xcc, 0xb2, 0xe7, 0x85, 0xd9, 0x2b, 0x50, 0x9a, 0x10, 0x57, 0xa3, 0x76, 0xe0, 0x9d, 0xf1, 0xf2, + 0xb8, 0xa8, 0x16, 0x27, 0xc4, 0x55, 0x58, 0xfb, 0x85, 0xdc, 0x52, 0x0e, 0x73, 0xc5, 0x22, 0x2a, + 0x1d, 0xe6, 0x8a, 0x25, 0x04, 0xb5, 0x7f, 0x64, 0xa1, 0x92, 0x2c, 0x97, 0xd9, 0xed, 0x43, 0xe7, + 0x27, 0x8b, 0xc4, 0x73, 0xcf, 0x1b, 0xdf, 0x5a, 0x5c, 0xd7, 0x9b, 0xec, 0xc8, 0xd9, 0x2b, 0x88, + 0x22, 0x56, 0x15, 0x92, 0xec, 0xb8, 0x67, 0xd9, 0x86, 0x8a, 0xa2, 0xa1, 0xa8, 0x86, 0x2d, 0x7c, + 0x00, 0x85, 0x87, 0x3e, 0xd7, 0x5d, 0xe0, 0xba, 0xdf, 0xfc, 0x76, 0xdd, 0x87, 0x7d, 0xae, 0xbc, + 0x74, 0xd8, 0xd7, 0x3a, 0x5d, 0xf5, 0xb8, 0x71, 0xa4, 0x86, 0xe2, 0xf8, 0x0a, 0xe4, 0x2c, 0xf2, + 0xe5, 0x59, 0xfa, 0x70, 0xe2, 0xd0, 0x45, 0x17, 0xe1, 0x0a, 0xe4, 0x9e, 0x50, 0xf2, 0x28, 0x7d, + 0x24, 0x70, 0xe8, 0x07, 0xdc, 0x0c, 0xbb, 0x90, 0xe7, 0xfe, 0xc2, 0x00, 0xa1, 0xc7, 0xd0, 0x4b, + 0xb8, 0x08, 0xb9, 0x66, 0x57, 0x65, 0x1b, 0x02, 0x41, 0x45, 0xa0, 0x5a, 0xaf, 0xad, 0x34, 0x15, + 0x94, 0xa9, 0xdd, 0x81, 0x82, 0x70, 0x02, 0xdb, 0x2c, 0xb1, 0x1b, 0xd0, 0x4b, 0x61, 0x33, 0xd4, + 0x21, 0x45, 0xbd, 0x27, 0xc7, 0xfb, 0x8a, 0x8a, 0x32, 0xe9, 0xa5, 0xce, 0xa1, 0x7c, 0xcd, 0x87, + 0x4a, 0xb2, 0x5e, 0x7e, 0x31, 0x77, 0xe1, 0xbf, 0x4a, 0x50, 0x4e, 0xd4, 0xbf, 0xac, 0x70, 0x21, + 0x96, 0xe5, 0x3c, 0xd1, 0x88, 0x65, 0x12, 0x3f, 0x0c, 0x0d, 0xe0, 0x50, 0x83, 0x21, 0x17, 0x5d, + 0xba, 0x17, 0xb4, 0x45, 0xf2, 0xa8, 0x50, 0xfb, 0xa3, 0x04, 0x68, 0xbe, 0x00, 0x9d, 0x33, 0x53, + 0xfa, 0x31, 0xcd, 0xac, 0xfd, 0x41, 0x82, 0x6a, 0xba, 0xea, 0x9c, 0x33, 0xef, 0xda, 0x8f, 0x6a, + 0xde, 0xdf, 0x33, 0xb0, 0x9a, 0xaa, 0x35, 0x2f, 0x6a, 0xdd, 0x17, 0xb0, 0x6e, 0x1a, 0x74, 0xe2, + 0x3a, 0x01, 0xb5, 0xf5, 0x33, 0xcd, 0xa2, 0x8f, 0xa9, 0x25, 0xd7, 0x78, 0xd2, 0xd8, 0xfd, 0xf6, + 0x6a, 0xb6, 0xde, 0x9e, 0xc9, 0x1d, 0x31, 0xb1, 0xbd, 0x8d, 0x76, 0x4b, 0x39, 0xee, 0x75, 0x07, + 0x4a, 0xa7, 0xf9, 0x99, 0x76, 0xd2, 0xf9, 0x59, 0xa7, 0xfb, 0x49, 0x47, 0x45, 0xe6, 0x1c, 0xed, + 0x07, 0xdc, 0xf6, 0x3d, 0x40, 0xf3, 0x46, 0xe1, 0xcb, 0xb0, 0xcc, 0x2c, 0xf4, 0x12, 0xde, 0x80, + 0xb5, 0x4e, 0x57, 0xeb, 0xb7, 0x5b, 0x8a, 0xa6, 0x3c, 0x78, 0xa0, 0x34, 0x07, 0x7d, 0xf1, 0x3e, + 0x11, 0xb3, 0x07, 0xa9, 0x0d, 0x5e, 0xfb, 0x7d, 0x16, 0x36, 0x96, 0x58, 0x82, 0x1b, 0xe1, 0xcd, + 0x42, 0x5c, 0x76, 0xde, 0xbd, 0x88, 0xf5, 0x75, 0x56, 0x10, 0xf4, 0x88, 0x17, 0x84, 0x17, 0x91, + 0x1b, 0xc0, 0xbc, 0x64, 0x07, 0xe6, 0xd0, 0xa4, 0x5e, 0xf8, 0x9c, 0x23, 0xae, 0x1b, 0x6b, 0x33, + 0x5c, 0xbc, 0xe8, 0xfc, 0x1f, 0x60, 0xd7, 0xf1, 0xcd, 0xc0, 0x7c, 0x4c, 0x35, 0xd3, 0x8e, 0xde, + 0x7e, 0xd8, 0xf5, 0x23, 0xa7, 0xa2, 0xa8, 0xa7, 0x6d, 0x07, 0x31, 0xdb, 0xa6, 0x23, 0x32, 0xc7, + 0x66, 0xc9, 0x3c, 0xab, 0xa2, 0xa8, 0x27, 0x66, 0x5f, 0x83, 0x8a, 0xe1, 0x4c, 0x59, 0x4d, 0x26, + 0x78, 0xec, 0xec, 0x90, 0xd4, 0xb2, 0xc0, 0x62, 0x4a, 0x58, 0x6d, 0xcf, 0x1e, 0x9d, 0x2a, 0x6a, + 0x59, 0x60, 0x82, 0x72, 0x1d, 
0xd6, 0xc8, 0x68, 0xe4, 0x31, 0xe5, 0x91, 0x22, 0x71, 0x7f, 0xa8, + 0xc6, 0x30, 0x27, 0x6e, 0x1d, 0x42, 0x31, 0xf2, 0x03, 0x3b, 0xaa, 0x99, 0x27, 0x34, 0x57, 0x5c, + 0x8a, 0x33, 0x3b, 0x25, 0xb5, 0x68, 0x47, 0x9d, 0xd7, 0xa0, 0x62, 0xfa, 0xda, 0xec, 0x0d, 0x3d, + 0xb3, 0x9d, 0xd9, 0x29, 0xaa, 0x65, 0xd3, 0x8f, 0xdf, 0x1f, 0x6b, 0x5f, 0x65, 0xa0, 0x9a, 0xfe, + 0x06, 0x80, 0x5b, 0x50, 0xb4, 0x1c, 0x9d, 0xf0, 0xd0, 0x12, 0x1f, 0xa0, 0x76, 0x9e, 0xf3, 0xd9, + 0xa0, 0x7e, 0x14, 0xf2, 0xd5, 0x58, 0x72, 0xeb, 0x6f, 0x12, 0x14, 0x23, 0x18, 0x5f, 0x82, 0x9c, + 0x4b, 0x82, 0x31, 0x57, 0x97, 0xdf, 0xcf, 0x20, 0x49, 0xe5, 0x6d, 0x86, 0xfb, 0x2e, 0xb1, 0x79, + 0x08, 0x84, 0x38, 0x6b, 0xb3, 0x75, 0xb5, 0x28, 0x31, 0xf8, 0xe5, 0xc4, 0x99, 0x4c, 0xa8, 0x1d, + 0xf8, 0xd1, 0xba, 0x86, 0x78, 0x33, 0x84, 0xf1, 0x3b, 0xb0, 0x1e, 0x78, 0xc4, 0xb4, 0x52, 0xdc, + 0x1c, 0xe7, 0xa2, 0xa8, 0x23, 0x26, 0xef, 0xc1, 0x95, 0x48, 0xaf, 0x41, 0x03, 0xa2, 0x8f, 0xa9, + 0x31, 0x13, 0x2a, 0xf0, 0x47, 0x88, 0xcb, 0x21, 0xa1, 0x15, 0xf6, 0x47, 0xb2, 0xb5, 0x6f, 0x24, + 0x58, 0x8f, 0xae, 0x53, 0x46, 0xec, 0xac, 0x63, 0x00, 0x62, 0xdb, 0x4e, 0x90, 0x74, 0xd7, 0x62, + 0x28, 0x2f, 0xc8, 0xd5, 0x1b, 0xb1, 0x90, 0x9a, 0x50, 0xb0, 0x35, 0x01, 0x98, 0xf5, 0x9c, 0xeb, + 0xb6, 0xab, 0x50, 0x0e, 0x3f, 0xf0, 0xf0, 0xaf, 0x84, 0xe2, 0x02, 0x0e, 0x02, 0x62, 0xf7, 0x2e, + 0xbc, 0x09, 0xf9, 0x53, 0x3a, 0x32, 0xed, 0xf0, 0xd9, 0x56, 0x34, 0xa2, 0x67, 0x92, 0x5c, 0xfc, + 0x4c, 0xb2, 0xff, 0x5b, 0x09, 0x36, 0x74, 0x67, 0x32, 0x6f, 0xef, 0x3e, 0x9a, 0x7b, 0x05, 0xf0, + 0x3f, 0x96, 0x3e, 0xff, 0x68, 0x64, 0x06, 0xe3, 0xe9, 0x69, 0x5d, 0x77, 0x26, 0xbb, 0x23, 0xc7, + 0x22, 0xf6, 0x68, 0xf6, 0x99, 0x93, 0xff, 0xd1, 0xdf, 0x1d, 0x51, 0xfb, 0xdd, 0x91, 0x93, 0xf8, + 0xe8, 0x79, 0x7f, 0xf6, 0xf7, 0xbf, 0x92, 0xf4, 0xa7, 0x4c, 0xf6, 0xa0, 0xb7, 0xff, 0xe7, 0xcc, + 0xd6, 0x81, 0x18, 0xae, 0x17, 0xb9, 0x47, 0xa5, 0x43, 0x8b, 0xea, 0x6c, 0xca, 0xff, 0x0b, 0x00, + 0x00, 0xff, 0xff, 0x1a, 0x28, 0x25, 0x79, 0x42, 0x1d, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto new file mode 100644 index 000000000..8697a50de --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto @@ -0,0 +1,872 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + + +syntax = "proto2"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. 
Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + }; + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + }; + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. 
+message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default=false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default=false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. 
You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default=false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default=false]; + + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default=SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. 
+ optional bool cc_generic_services = 16 [default=false]; + optional bool java_generic_services = 17 [default=false]; + optional bool py_generic_services = 18 [default=false]; + optional bool php_generic_services = 42 [default=false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default=false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default=false]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default=false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default=false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. 
+ optional bool deprecated = 3 [default=false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. + STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. 
However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default=false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default=false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default=false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default=false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. 
+ extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default=false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = + 34 [default=IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. 
If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed=true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed=true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ optional int32 end = 4; + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 000000000..70276e8f5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,141 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *any.Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... 
} +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *any.Any, pb proto.Message) bool { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. + if any == nil { + return false + } + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index f2c6906b9..e3c56d3ff 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,17 +1,7 @@ -// Code generated by protoc-gen-go. -// source: github.com/golang/protobuf/ptypes/any/any.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto -/* -Package any is a generated protocol buffer package. - -It is generated from these files: - github.com/golang/protobuf/ptypes/any/any.proto - -It has these top-level messages: - Any -*/ -package any +package any // import "github.com/golang/protobuf/ptypes/any" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -63,6 +53,16 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // any.Unpack(foo) // ... // +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack // methods only use the fully qualified type name after the last '/' @@ -121,35 +121,71 @@ type Any struct { // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. 
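The ptypes helpers above (MarshalAny, UnmarshalAny, DynamicAny, Is) are the Go entry points for google.protobuf.Any that this vendor bump pulls in. A minimal round-trip sketch; the Duration payload and the literal values are only illustrative choices:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a concrete message into a google.protobuf.Any. The type URL
	// becomes "type.googleapis.com/google.protobuf.Duration".
	orig := &durpb.Duration{Seconds: 3, Nanos: 1}
	a, err := ptypes.MarshalAny(orig)
	if err != nil {
		log.Fatal(err)
	}

	// Unpack into a known concrete type; the message names must match.
	var d durpb.Duration
	if err := ptypes.UnmarshalAny(a, &d); err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.Seconds, d.Nanos) // 3 1

	// Or let DynamicAny allocate the right type from the type URL,
	// provided that message type is linked into the binary.
	var dyn ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("dynamic: %v\n", dyn.Message)
	fmt.Println(ptypes.Is(a, &durpb.Duration{})) // true
}
```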
// - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_any_744b9ca530f228db, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (dst *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(dst, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } -var fileDescriptor0 = []byte{ - // 187 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, - 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, - 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, - 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, - 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, - 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, - 0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, - 0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, - 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, - 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00, - 0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00, +var fileDescriptor_any_744b9ca530f228db = []byte{ + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 
0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, + 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, + 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, + 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, + 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto index 81dcf46cc..c74866762 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -37,7 +37,6 @@ option go_package = "github.com/golang/protobuf/ptypes/any"; option java_package = "com.google.protobuf"; option java_outer_classname = "AnyProto"; option java_multiple_files = true; -option java_generate_equals_and_hash = true; option objc_class_prefix = "GPB"; // `Any` contains an arbitrary serialized protocol buffer message along with a @@ -75,6 +74,16 @@ option objc_class_prefix = "GPB"; // any.Unpack(foo) // ... // +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack // methods only use the fully qualified type name after the last '/' diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 000000000..c0d595da7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 000000000..65cb0f8eb --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. 
A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 000000000..a7beb2c41 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +package duration // import "github.com/golang/protobuf/ptypes/duration" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
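The Duration and DurationProto helpers above convert in both directions between time.Duration and durpb.Duration. A short sketch of the round trip; the 90s and 3.5s values are only illustrative:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// time.Duration -> durpb.Duration: always succeeds, since every
	// time.Duration fits in the proto range of roughly +-10,000 years.
	pb := ptypes.DurationProto(90 * time.Second)
	fmt.Println(pb.Seconds, pb.Nanos) // 90 0

	// durpb.Duration -> time.Duration: may fail, e.g. for values outside
	// the ~290-year range of time.Duration or for malformed messages.
	d, err := ptypes.Duration(&durpb.Duration{Seconds: 3, Nanos: 500000000})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // 3.5s
}
```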
+// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_duration_e7d612259e3f0613, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Duration.Unmarshal(m, b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) +} +func (dst *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(dst, src) +} +func (m *Duration) XXX_Size() int { + return xxx_messageInfo_Duration.Size(m) +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { + proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) +} + +var fileDescriptor_duration_e7d612259e3f0613 = []byte{ + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, + 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, + 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, + 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, + 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, + 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto new file mode 100644 index 000000000..975fce41a --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -0,0 +1,117 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/duration"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. 
For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go new file mode 100644 index 000000000..2ac3e6586 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -0,0 +1,440 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package structpb // import "github.com/golang/protobuf/ptypes/struct" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (x NullValue) String() string { + return proto.EnumName(NullValue_name, int32(x)) +} +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{0} +} +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. 
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{0} +} +func (*Struct) XXX_WellKnownType() string { return "Struct" } +func (m *Struct) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Struct.Unmarshal(m, b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) +} +func (dst *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(dst, src) +} +func (m *Struct) XXX_Size() int { + return xxx_messageInfo_Struct.Size(m) +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) +} + +var xxx_messageInfo_Struct proto.InternalMessageInfo + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. + // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{1} +} +func (*Value) XXX_WellKnownType() string { return "Value" } +func (m *Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Value.Unmarshal(m, b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) +} +func (dst *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(dst, src) +} +func (m *Value) XXX_Size() int { + return xxx_messageInfo_Value.Size(m) +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} +type Value_StructValue struct { + StructValue *Struct 
`protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + b.EncodeVarint(2<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_StructValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err + case 3: // kind.string_value + if wire != proto.WireBytes { + return 
true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = &Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += 1 // tag and wire + n += 8 + case *Value_StringValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += 1 // tag and wire + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. 
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{2} +} +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListValue.Unmarshal(m, b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) +} +func (dst *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(dst, src) +} +func (m *ListValue) XXX_Size() int { + return xxx_messageInfo_ListValue.Size(m) +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) +} + +func init() { + proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_struct_3a5a94e0c7801b27) +} + +var fileDescriptor_struct_3a5a94e0c7801b27 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, + 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, + 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, + 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, + 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, + 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, + 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, + 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, + 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, + 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, + 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, + 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, + 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, + 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, + 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, + 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, + 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 
0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92, + 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, + 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, + 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, + 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4, + 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, + 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, + 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, + 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto new file mode 100644 index 000000000..7d7808e7f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. 
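The Go types generated above for struct.proto (structpb.Struct, structpb.Value and its oneof wrappers, structpb.ListValue) are usually populated by hand when a JSON-like, dynamically typed value is needed. A small sketch; the field names and values are only illustrative:

```go
package main

import (
	"fmt"

	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	// A google.protobuf.Struct is an unordered map of field name -> Value,
	// where each Value carries exactly one member of the "kind" oneof.
	s := &structpb.Struct{
		Fields: map[string]*structpb.Value{
			"name":     {Kind: &structpb.Value_StringValue{StringValue: "prometheus"}},
			"replicas": {Kind: &structpb.Value_NumberValue{NumberValue: 2}},
			"paused":   {Kind: &structpb.Value_BoolValue{BoolValue: false}},
			"missing":  {Kind: &structpb.Value_NullValue{NullValue: structpb.NullValue_NULL_VALUE}},
		},
	}
	fmt.Println(s.Fields["name"].GetStringValue()) // prometheus
}
```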
The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +message Struct { + // Unordered map of dynamically typed values. + map fields = 1; +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +message Value { + // The kind of value. + oneof kind { + // Represents a null value. + NullValue null_value = 1; + // Represents a double value. + double number_value = 2; + // Represents a string value. + string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value. + Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 000000000..47f10dbc2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,134 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements operations on google.protobuf.Timestamp. 
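A minimal sketch of the conversion helpers defined just below in this file (TimestampProto, Timestamp, TimestampNow, TimestampString); the concrete date is only an illustrative value:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> google.protobuf.Timestamp. Fails only if the time is
	// outside the representable range [0001-01-01, 10000-01-01).
	ts, err := ptypes.TimestampProto(time.Date(2018, 7, 3, 15, 37, 22, 0, time.UTC))
	if err != nil {
		log.Fatal(err)
	}

	// Timestamp -> time.Time; the result is always in UTC.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.Format(time.RFC3339)) // 2018-07-03T15:37:22Z

	// Convenience helpers for "now" and for RFC 3339 formatting.
	fmt.Println(ptypes.TimestampString(ptypes.TimestampNow()))
}
```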
+ +import ( + "errors" + "fmt" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *tspb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func Timestamp(ts *tspb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *tspb.Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &tspb.Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. 
+func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 000000000..8e76ae976 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,175 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +package timestamp // import "github.com/golang/protobuf/ptypes/timestamp" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) +// to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { + proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) +} + +var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ + // 191 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, + 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, + 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, + 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, + 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, + 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, + 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto new file mode 100644 index 000000000..06750ab1f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -0,0 +1,133 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
+// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) +// to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/google/btree/LICENSE b/vendor/github.com/google/btree/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/google/btree/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md new file mode 100644 index 000000000..6062a4dac --- /dev/null +++ b/vendor/github.com/google/btree/README.md @@ -0,0 +1,12 @@ +# BTree implementation for Go + +![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) + +This package provides an in-memory B-Tree implementation for Go, useful as +an ordered, mutable data structure. + +The API is based off of the wonderful +http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to +act as a drop-in replacement for gollrb trees. + +See http://godoc.org/github.com/google/btree for documentation. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go new file mode 100644 index 000000000..7e4551d73 --- /dev/null +++ b/vendor/github.com/google/btree/btree.go @@ -0,0 +1,881 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. 
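The package comment above explains the design tradeoffs; as a point of reference, here is a minimal usage sketch of the exported API defined in the vendored file that follows. It is illustrative only (not part of the upstream file or of this patch) and assumes the package is imported by its vendored path github.com/google/btree.

    package main

    import (
    	"fmt"

    	"github.com/google/btree"
    )

    func main() {
    	// Degree 32 means each node holds at most 2*32-1 items
    	// (see maxItems further down in btree.go).
    	tr := btree.New(32)
    	for i := 0; i < 10; i++ {
    		// btree.Int satisfies the Item interface via its Less method.
    		tr.ReplaceOrInsert(btree.Int(i))
    	}
    	fmt.Println(tr.Len())             // 10
    	fmt.Println(tr.Has(btree.Int(3))) // true
    	fmt.Println(tr.Max())             // 9
    	// Ascend visits items in order; returning false stops the walk.
    	tr.Ascend(func(i btree.Item) bool {
    		fmt.Print(i, " ")
    		return true
    	})
    }

Delete, Min, Get, and the Ascend*/Descend* range variants defined below follow the same pattern of taking and returning Item values.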
+package btree + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +var ( + nilItems = make(items, 16) + nilChildren = make(children, 16) +) + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList struct { + mu sync.Mutex + freelist []*node +} + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeList(size int) *FreeList { + return &FreeList{freelist: make([]*node, 0, size)} +} + +func (f *FreeList) newNode() (n *node) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +// freeNode adds the given node to the list, returning true if it was added +// and false if it was discarded. +func (f *FreeList) freeNode(n *node) (out bool) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + out = true + } + f.mu.Unlock() + return +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator func(i Item) bool + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + cow: ©OnWriteContext{freelist: f}, + } +} + +// items stores items in a node. +type items []Item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, item Item) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items) removeAt(index int) Item { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items) pop() (out Item) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items) truncate(index int) { + var toClear items + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilItems):] + } +} + +// find returns the index where the given item should be inserted into this +// list. 
'found' is true if the item already exists in the list at the given +// index. +func (s items) find(item Item) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return item.Less(s[i]) + }) + if i > 0 && !s[i-1].Less(item) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *children) removeAt(index int) *node { + n := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. +func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index children. index must be less than or equal to length. +func (s *children) truncate(index int) { + var toClear children + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilChildren):] + } +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + cow *copyOnWriteContext +} + +func (n *node) mutableFor(cow *copyOnWriteContext) *node { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items, len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(children, len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node) mutableChild(i int) *node { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (Item, *node) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. 
+func (n *node) insert(item Item, maxItems int) Item { + i, found := n.items.find(item) + if found { + out := n.items[i] + n.items[i] = item + return out + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return nil + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case item.Less(inTree): + // no change, we want first split node + case inTree.Less(item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out + } + } + return n.mutableChild(i).insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node) get(key Item) Item { + i, found := n.items.find(key) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return nil +} + +// min returns the first item in the subtree. +func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(item Item, minItems int, typ toRemove) Item { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax) + return out + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. 
+// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. +func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" queries. 
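The exported Ascend*/Descend* methods further down in this file wrap the iterator described above. As a small, hedged illustration of the inclusive/exclusive bound semantics (a sketch only, not part of the vendored file), again assuming the package is imported as github.com/google/btree:

    package main

    import (
    	"fmt"

    	"github.com/google/btree"
    )

    func main() {
    	tr := btree.New(2) // a 2-3-4 tree, per the New doc comment
    	for i := 0; i < 10; i++ {
    		tr.ReplaceOrInsert(btree.Int(i))
    	}
    	// Range [3, 7): the lower bound is inclusive ("greaterOrEqual"),
    	// the upper bound exclusive ("lessThan"); prints 3 4 5 6.
    	tr.AscendRange(btree.Int(3), btree.Int(7), func(i btree.Item) bool {
    		fmt.Print(i, " ")
    		return true
    	})
    	// Descending from the pivot, inclusive; prints 5 4 3 2 1 0.
    	tr.DescendLessOrEqual(btree.Int(5), func(i btree.Item) bool {
    		fmt.Print(i, " ")
    		return true
    	})
    }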
+func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { + var ok bool + switch dir { + case ascend: + for i := 0; i < len(n.items); i++ { + if start != nil && n.items[i].Less(start) { + continue + } + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { + hit = true + continue + } + hit = true + if stop != nil && !n.items[i].Less(stop) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + for i := len(n.items) - 1; i >= 0; i-- { + if start != nil && !n.items[i].Less(start) { + if !includeStart || hit || start.Less(n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop != nil && !stop.Less(n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// Used for testing/debugging purposes. +func (n *node) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + length int + root *node + cow *copyOnWriteContext +} + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext struct { + freelist *FreeList +} + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. 
Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old b.cow) + // the new b.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext) newNode() (n *node) { + n = c.freelist.newNode() + n.cow = c + return +} + +type freeType int + +const ( + ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) + ftStored // node was stored in the freelist for later use + ftNotOwned // node was ignored by COW, since it's owned by another one +) + +// freeNode frees a node within a given COW context, if it's owned by that +// context. It returns what happened to the node (see freeType const +// documentation). +func (c *copyOnWriteContext) freeNode(n *node) freeType { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + if c.freelist.freeNode(n) { + return ftStored + } else { + return ftFreelistFull + } + } else { + return ftNotOwned + } +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out := t.root.insert(item, t.maxItems()) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. 
+func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax) +} + +func (t *BTree) deleteItem(item Item, typ toRemove) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + t.root = t.root.mutableFor(t.cow) + out := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, pivot, false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, pivot, nil, true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, nil, false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, pivot, nil, true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range (pivot, last], until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, pivot, false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, nil, false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + if t.root == nil { + return nil + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + return min(t.root) +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + return max(t.root) +} + +// Has returns true if the given key is in the tree. 
+func (t *BTree) Has(key Item) bool { + return t.Get(key) != nil +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return t.length +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTree) Clear(addNodesToFreelist bool) { + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. +func (n *node) reset(c *copyOnWriteContext) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} diff --git a/vendor/github.com/googleapis/gnostic/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE new file mode 100644 index 000000000..6b0b1270f --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go new file mode 100644 index 000000000..4fd44c45e --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go @@ -0,0 +1,8847 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. 
+ +package openapi_v2 + +import ( + "fmt" + "github.com/googleapis/gnostic/compiler" + "gopkg.in/yaml.v2" + "regexp" + "strings" +) + +// Version returns the package name (and OpenAPI version). +func Version() string { + return "openapi_v2" +} + +// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. +func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) { + errors := make([]error, 0) + x := &AdditionalPropertiesItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + if matchingError == nil { + x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // bool boolean = 2; + boolValue, ok := in.(bool) + if ok { + x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewAny creates an object of type Any if possible, returning an error if not. +func NewAny(in interface{}, context *compiler.Context) (*Any, error) { + errors := make([]error, 0) + x := &Any{} + bytes, _ := yaml.Marshal(in) + x.Yaml = string(bytes) + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. +func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) { + errors := make([]error, 0) + x := &ApiKeySecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [apiKey] + if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } 
+ } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header query] + if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. +func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) { + errors := make([]error, 0) + x := &BasicAuthenticationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [basic] + if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := 
fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. +func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) { + errors := make([]error, 0) + x := &BodyParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "schema"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "required", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [body] + if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool required = 4; + v4 := compiler.MapValueForKey(m, "required") + if v4 != nil { + x.Required, ok = v4.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema schema = 5; + v5 := 
compiler.MapValueForKey(m, "schema") + if v5 != nil { + var err error + x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewContact creates an object of type Contact if possible, returning an error if not. +func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { + errors := make([]error, 0) + x := &Contact{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"email", "name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string email = 3; + v3 := compiler.MapValueForKey(m, "email") + if v3 != nil { + x.Email, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefault creates an object of type Default if possible, returning an error if not. 
+func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { + errors := make([]error, 0) + x := &Default{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefinitions creates an object of type Definitions if possible, returning an error if not. +func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) { + errors := make([]error, 0) + x := &Definitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDocument creates an object of type Document if possible, returning an error if not. 
+func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { + errors := make([]error, 0) + x := &Document{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"info", "paths", "swagger"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string swagger = 1; + v1 := compiler.MapValueForKey(m, "swagger") + if v1 != nil { + x.Swagger, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [2.0] + if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { + message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Info info = 2; + v2 := compiler.MapValueForKey(m, "info") + if v2 != nil { + var err error + x.Info, err = NewInfo(v2, compiler.NewContext("info", context)) + if err != nil { + errors = append(errors, err) + } + } + // string host = 3; + v3 := compiler.MapValueForKey(m, "host") + if v3 != nil { + x.Host, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string base_path = 4; + v4 := compiler.MapValueForKey(m, "basePath") + if v4 != nil { + x.BasePath, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string schemes = 5; + v5 := compiler.MapValueForKey(m, "schemes") + if v5 != nil { + v, ok := v5.([]interface{}) + if ok { + x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %+v", v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 6; + v6 := compiler.MapValueForKey(m, "consumes") + if v6 != nil { + v, ok := v6.([]interface{}) + if ok { + x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // 
repeated string produces = 7; + v7 := compiler.MapValueForKey(m, "produces") + if v7 != nil { + v, ok := v7.([]interface{}) + if ok { + x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Paths paths = 8; + v8 := compiler.MapValueForKey(m, "paths") + if v8 != nil { + var err error + x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context)) + if err != nil { + errors = append(errors, err) + } + } + // Definitions definitions = 9; + v9 := compiler.MapValueForKey(m, "definitions") + if v9 != nil { + var err error + x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // ParameterDefinitions parameters = 10; + v10 := compiler.MapValueForKey(m, "parameters") + if v10 != nil { + var err error + x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + } + // ResponseDefinitions responses = 11; + v11 := compiler.MapValueForKey(m, "responses") + if v11 != nil { + var err error + x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := v12.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // SecurityDefinitions security_definitions = 13; + v13 := compiler.MapValueForKey(m, "securityDefinitions") + if v13 != nil { + var err error + x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Tag tags = 14; + v14 := compiler.MapValueForKey(m, "tags") + if v14 != nil { + // repeated Tag + x.Tags = make([]*Tag, 0) + a, ok := v14.([]interface{}) + if ok { + for _, item := range a { + y, err := NewTag(item, compiler.NewContext("tags", context)) + if err != nil { + errors = append(errors, err) + } + x.Tags = append(x.Tags, y) + } + } + } + // ExternalDocs external_docs = 15; + v15 := compiler.MapValueForKey(m, "externalDocs") + if v15 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 16; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + 
return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExamples creates an object of type Examples if possible, returning an error if not. +func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { + errors := make([]error, 0) + x := &Examples{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. +func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) { + errors := make([]error, 0) + x := &ExternalDocs{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"url"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, 
compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. +func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) { + errors := make([]error, 0) + x := &FileSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string format = 1; + v1 := compiler.MapValueForKey(m, "format") + if v1 != nil { + x.Format, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 2; + v2 := compiler.MapValueForKey(m, "title") + if v2 != nil { + x.Title, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 4; + v4 := compiler.MapValueForKey(m, "default") + if v4 != nil { + var err error + x.Default, err = NewAny(v4, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string required = 5; + v5 := compiler.MapValueForKey(m, "required") + if v5 != nil { + v, ok := v5.([]interface{}) + if ok { + x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [file] + if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 7; + v7 := compiler.MapValueForKey(m, "readOnly") + if v7 != nil { + x.ReadOnly, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has 
unexpected value for readOnly: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 8; + v8 := compiler.MapValueForKey(m, "externalDocs") + if v8 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 9; + v9 := compiler.MapValueForKey(m, "example") + if v9 != nil { + var err error + x.Example, err = NewAny(v9, compiler.NewContext("example", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. +func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) { + errors := make([]error, 0) + x := &FormDataParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [formData] + if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has 
unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array file] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = v7.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = v9.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + switch v11 := v11.(type) { + case float64: + x.Maximum = v11 + case float32: + x.Maximum = float64(v11) + case uint64: + x.Maximum = float64(v11) + case uint32: + x.Maximum = float64(v11) + case int64: + x.Maximum = float64(v11) + case int32: + x.Maximum = float64(v11) + case int: + x.Maximum = float64(v11) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = v12.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + errors = 
append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + switch v13 := v13.(type) { + case float64: + x.Minimum = v13 + case float32: + x.Minimum = float64(v13) + case uint64: + x.Minimum = float64(v13) + case uint32: + x.Minimum = float64(v13) + case int64: + x.Minimum = float64(v13) + case int32: + x.Minimum = float64(v13) + case int: + x.Minimum = float64(v13) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = v14.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := v16.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; + v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = v17.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := v19.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = v20.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v21.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + switch v22 := v22.(type) { + case float64: + x.MultipleOf = v22 + case float32: + x.MultipleOf = float64(v22) + case uint64: + x.MultipleOf = float64(v22) + case uint32: + x.MultipleOf = float64(v22) + case int64: + x.MultipleOf = float64(v22) + case int32: + x.MultipleOf = float64(v22) + case int: + 
x.MultipleOf = float64(v22) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeader creates an object of type Header if possible, returning an error if not. +func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { + errors := make([]error, 0) + x := &Header{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 
!= nil { + x.CollectionFormat, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.Maximum = v6 + case float32: + x.Maximum = float64(v6) + case uint64: + x.Maximum = float64(v6) + case uint32: + x.Maximum = float64(v6) + case int64: + x.Maximum = float64(v6) + case int32: + x.Maximum = float64(v6) + case int: + x.Maximum = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v7 != nil { + x.ExclusiveMaximum, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + switch v8 := v8.(type) { + case float64: + x.Minimum = v8 + case float32: + x.Minimum = float64(v8) + case uint64: + x.Minimum = float64(v8) + case uint32: + x.Minimum = float64(v8) + case int64: + x.Minimum = float64(v8) + case int32: + x.Minimum = float64(v8) + case int: + x.Minimum = float64(v8) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = v9.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := v10.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = v12.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := v13.(int) + if ok { + x.MaxItems = 
int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = v15.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v16.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + switch v17 := v17.(type) { + case float64: + x.MultipleOf = v17 + case float32: + x.MultipleOf = float64(v17) + case uint64: + x.MultipleOf = float64(v17) + case uint32: + x.MultipleOf = float64(v17) + case int64: + x.MultipleOf = float64(v17) + case int32: + x.MultipleOf = float64(v17) + case int: + x.MultipleOf = float64(v17) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 18; + v18 := compiler.MapValueForKey(m, "description") + if v18 != nil { + x.Description, ok = v18.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 19; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not. 
+func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) { + errors := make([]error, 0) + x := &HeaderParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header] + if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, 
"collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = v8.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + switch v10 := v10.(type) { + case float64: + x.Maximum = v10 + case float32: + x.Maximum = float64(v10) + case uint64: + x.Maximum = float64(v10) + case uint32: + x.Maximum = float64(v10) + case int64: + x.Maximum = float64(v10) + case int32: + x.Maximum = float64(v10) + case int: + x.Maximum = float64(v10) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + switch v12 := v12.(type) { + case float64: + x.Minimum = v12 + case float32: + x.Minimum = float64(v12) + case uint64: + x.Minimum = float64(v12) + case uint32: + x.Minimum = float64(v12) + case int64: + x.Minimum = float64(v12) + case int32: + x.Minimum = float64(v12) + case int: + x.Minimum = float64(v12) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = v13.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = v16.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + 
if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = v19.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + switch v21 := v21.(type) { + case float64: + x.MultipleOf = v21 + case float32: + x.MultipleOf = float64(v21) + case uint64: + x.MultipleOf = float64(v21) + case uint32: + x.MultipleOf = float64(v21) + case int64: + x.MultipleOf = float64(v21) + case int32: + x.MultipleOf = float64(v21) + case int: + x.MultipleOf = float64(v21) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaders creates an object of type Headers if possible, returning an error if not. 
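The switch statements above that fill Maximum, Minimum and MultipleOf widen whatever numeric type the YAML decoder happens to produce into float64. A minimal standalone sketch of that coercion pattern, illustrative only and not part of the vendored file (the toFloat64 helper is a hypothetical name):

package main

import "fmt"

// toFloat64 mirrors the coercion used for "maximum", "minimum" and
// "multipleOf": accept the numeric types a YAML decoder may emit, widen
// them all to float64, and report anything else as an error.
func toFloat64(v interface{}) (float64, error) {
	switch v := v.(type) {
	case float64:
		return v, nil
	case float32:
		return float64(v), nil
	case uint64:
		return float64(v), nil
	case uint32:
		return float64(v), nil
	case int64:
		return float64(v), nil
	case int32:
		return float64(v), nil
	case int:
		return float64(v), nil
	default:
		return 0, fmt.Errorf("unexpected numeric value: %+v (%T)", v, v)
	}
}

func main() {
	for _, v := range []interface{}{3, int64(7), 2.5, "not a number"} {
		f, err := toFloat64(v)
		fmt.Println(f, err)
	}
}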
+func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) { + errors := make([]error, 0) + x := &Headers{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedHeader additional_properties = 1; + // MAP: Header + x.AdditionalProperties = make([]*NamedHeader, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedHeader{} + pair.Name = k + var err error + pair.Value, err = NewHeader(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewInfo creates an object of type Info if possible, returning an error if not. +func NewInfo(in interface{}, context *compiler.Context) (*Info, error) { + errors := make([]error, 0) + x := &Info{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"title", "version"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string title = 1; + v1 := compiler.MapValueForKey(m, "title") + if v1 != nil { + x.Title, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string version = 2; + v2 := compiler.MapValueForKey(m, "version") + if v2 != nil { + x.Version, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string terms_of_service = 4; + v4 := compiler.MapValueForKey(m, "termsOfService") + if v4 != nil { + x.TermsOfService, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Contact contact = 5; + v5 := compiler.MapValueForKey(m, "contact") + if v5 != nil { + var err error + x.Contact, err = NewContact(v5, compiler.NewContext("contact", context)) + if err != nil { + errors = append(errors, err) + } + } + // License license = 6; + v6 := compiler.MapValueForKey(m, "license") + if v6 != nil { + var err error + x.License, err = NewLicense(v6, 
compiler.NewContext("license", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. +func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) { + errors := make([]error, 0) + x := &ItemsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.Schema = make([]*Schema, 0) + y, err := NewSchema(m, compiler.NewContext("", context)) + if err != nil { + return nil, err + } + x.Schema = append(x.Schema, y) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewJsonReference creates an object of type JsonReference if possible, returning an error if not. +func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) { + errors := make([]error, 0) + x := &JsonReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"$ref"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"$ref", "description"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLicense creates an object of type License if possible, returning an error if not. 
+func NewLicense(in interface{}, context *compiler.Context) (*License, error) { + errors := make([]error, 0) + x := &License{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedAny creates an object of type NamedAny if possible, returning an error if not. 
+func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) { + errors := make([]error, 0) + x := &NamedAny{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewAny(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not. +func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) { + errors := make([]error, 0) + x := &NamedHeader{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Header value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewHeader(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not. 
+func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) { + errors := make([]error, 0) + x := &NamedParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Parameter value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewParameter(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. +func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) { + errors := make([]error, 0) + x := &NamedPathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PathItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewPathItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not. 
+func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) { + errors := make([]error, 0) + x := &NamedResponse{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Response value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponse(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not. +func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) { + errors := make([]error, 0) + x := &NamedResponseValue{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ResponseValue value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not. 
+func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) { + errors := make([]error, 0) + x := &NamedSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSchema(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not. +func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &NamedSecurityDefinitionsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SecurityDefinitionsItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedString creates an object of type NamedString if possible, returning an error if not. 
+func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) { + errors := make([]error, 0) + x := &NamedString{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + x.Value, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. +func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) { + errors := make([]error, 0) + x := &NamedStringArray{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // StringArray value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewStringArray(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not. 
+func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) { + errors := make([]error, 0) + x := &NonBodyParameter{} + matched := false + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // HeaderParameterSubSchema header_parameter_sub_schema = 1; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // QueryParameterSubSchema query_parameter_sub_schema = 3; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // PathParameterSubSchema path_parameter_sub_schema = 4; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not. 
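NewNonBodyParameter resolves its oneof by trying every candidate sub-schema constructor against the same map and keeping whichever parses; mismatch errors are only surfaced when nothing matched. A standalone sketch of that trial-parse pattern, illustrative only and not gnostic API (decoder, decodeOneof and the sample candidates are hypothetical names):

package main

import (
	"fmt"
	"strings"
)

// decoder is one candidate interpretation of the same generic map.
type decoder struct {
	name string
	fn   func(m map[string]interface{}) (interface{}, error)
}

// decodeOneof tries each candidate in turn. A failure only means "not this
// subtype"; once any candidate matches, the accumulated mismatch errors are
// discarded, just as the generated oneof constructors do above.
func decodeOneof(m map[string]interface{}, candidates []decoder) (interface{}, error) {
	var mismatches []string
	for _, c := range candidates {
		v, err := c.fn(m)
		if err == nil {
			return v, nil
		}
		mismatches = append(mismatches, c.name+": "+err.Error())
	}
	return nil, fmt.Errorf("no candidate matched:\n%s", strings.Join(mismatches, "\n"))
}

func main() {
	requireIn := func(want string) func(map[string]interface{}) (interface{}, error) {
		return func(m map[string]interface{}) (interface{}, error) {
			if m["in"] != want {
				return nil, fmt.Errorf("expected in=%q, got %v", want, m["in"])
			}
			return want + "Parameter", nil
		}
	}
	candidates := []decoder{
		{"header", requireIn("header")},
		{"query", requireIn("query")},
		{"path", requireIn("path")},
	}
	v, err := decodeOneof(map[string]interface{}{"in": "query", "name": "limit"}, candidates)
	fmt.Println(v, err)
}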
+func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { + errors := make([]error, 0) + x := &Oauth2AccessCodeSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [accessCode] + if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string token_url = 5; + v5 := compiler.MapValueForKey(m, "tokenUrl") + if v5 != nil { + x.TokenUrl, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 6; + v6 := compiler.MapValueForKey(m, "description") + if v6 != nil { + x.Description, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := 
compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not. +func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ApplicationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [application] + if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string 
description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not. +func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ImplicitSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [implicit] + if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = 
NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not. +func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) { + errors := make([]error, 0) + x := &Oauth2PasswordSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, 
compiler.NewError(context, message)) + } + // check for valid enum values + // [password] + if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. +func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) { + errors := make([]error, 0) + x := &Oauth2Scopes{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedString additional_properties = 1; + // MAP: string + x.AdditionalProperties = make([]*NamedString, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedString{} + pair.Name = k + pair.Value = v.(string) + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOperation creates an object of type Operation if possible, returning an error if not. 
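The "MAP: Any ^x-" loops that recur above collect every key carrying the "x-" prefix into name/value pairs and leave the fixed fields to the explicit per-key handling. A standalone sketch of that filtering step, illustrative only (namedAny and collectVendorExtensions are hypothetical, and a plain map stands in for the ordered YAML node the vendored code iterates):

package main

import (
	"fmt"
	"strings"
)

type namedAny struct {
	Name  string
	Value interface{}
}

// collectVendorExtensions keeps every key with the "x-" prefix as a
// name/value pair, mirroring the vendor-extension loops in the generated
// constructors; all other keys are ignored here.
func collectVendorExtensions(m map[string]interface{}) []*namedAny {
	out := []*namedAny{}
	for k, v := range m {
		if strings.HasPrefix(k, "x-") {
			out = append(out, &namedAny{Name: k, Value: v})
		}
	}
	return out
}

func main() {
	m := map[string]interface{}{"type": "oauth2", "x-internal-id": 42}
	for _, p := range collectVendorExtensions(m) {
		fmt.Printf("%s = %v\n", p.Name, p.Value)
	}
}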
+func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) { + errors := make([]error, 0) + x := &Operation{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"responses"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated string tags = 1; + v1 := compiler.MapValueForKey(m, "tags") + if v1 != nil { + v, ok := v1.([]interface{}) + if ok { + x.Tags = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 4; + v4 := compiler.MapValueForKey(m, "externalDocs") + if v4 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // string operation_id = 5; + v5 := compiler.MapValueForKey(m, "operationId") + if v5 != nil { + x.OperationId, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string produces = 6; + v6 := compiler.MapValueForKey(m, "produces") + if v6 != nil { + v, ok := v6.([]interface{}) + if ok { + x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 7; + v7 := compiler.MapValueForKey(m, "consumes") + if v7 != nil { + v, ok := v7.([]interface{}) + if ok { + x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated ParametersItem parameters = 8; + v8 := compiler.MapValueForKey(m, "parameters") + if v8 != nil { + // repeated ParametersItem + x.Parameters = 
make([]*ParametersItem, 0) + a, ok := v8.([]interface{}) + if ok { + for _, item := range a { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // Responses responses = 9; + v9 := compiler.MapValueForKey(m, "responses") + if v9 != nil { + var err error + x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string schemes = 10; + v10 := compiler.MapValueForKey(m, "schemes") + if v10 != nil { + v, ok := v10.([]interface{}) + if ok { + x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %+v", v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool deprecated = 11; + v11 := compiler.MapValueForKey(m, "deprecated") + if v11 != nil { + x.Deprecated, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := v12.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // repeated NamedAny vendor_extension = 13; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameter creates an object of type Parameter if possible, returning an error if not. 
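Like most constructors in this file, NewOperation first checks the map twice: every required key must be present, and every remaining key must either appear in the allowed list or match an allowed pattern (pattern0, which the x- vendor-extension handling suggests is the "^x-" regexp). A standalone sketch of that validation step, illustrative only (checkKeys is a hypothetical helper, not part of the compiler package):

package main

import (
	"fmt"
	"regexp"
	"sort"
	"strings"
)

// checkKeys reports required keys that are missing and keys that are neither
// explicitly allowed nor matched by an allowed pattern, mirroring
// compiler.MissingKeysInMap and compiler.InvalidKeysInMap above.
func checkKeys(m map[string]interface{}, required, allowed []string, patterns []*regexp.Regexp) (missing, invalid []string) {
	for _, k := range required {
		if _, ok := m[k]; !ok {
			missing = append(missing, k)
		}
	}
	allowedSet := map[string]bool{}
	for _, k := range allowed {
		allowedSet[k] = true
	}
	for k := range m {
		if allowedSet[k] {
			continue
		}
		matched := false
		for _, p := range patterns {
			if p.MatchString(k) {
				matched = true
				break
			}
		}
		if !matched {
			invalid = append(invalid, k)
		}
	}
	sort.Strings(invalid)
	return missing, invalid
}

func main() {
	op := map[string]interface{}{"summary": "list pets", "x-audit": true, "bogus": 1}
	missing, invalid := checkKeys(op,
		[]string{"responses"},
		[]string{"summary", "description", "responses", "tags"},
		[]*regexp.Regexp{regexp.MustCompile("^x-")})
	fmt.Println("missing:", strings.Join(missing, ", "))
	fmt.Println("invalid:", strings.Join(invalid, ", "))
}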
+func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) { + errors := make([]error, 0) + x := &Parameter{} + matched := false + // BodyParameter body_parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context)) + if matchingError == nil { + x.Oneof = &Parameter_BodyParameter{BodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // NonBodyParameter non_body_parameter = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context)) + if matchingError == nil { + x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not. +func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) { + errors := make([]error, 0) + x := &ParameterDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedParameter additional_properties = 1; + // MAP: Parameter + x.AdditionalProperties = make([]*NamedParameter, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedParameter{} + pair.Name = k + var err error + pair.Value, err = NewParameter(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not. 
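NewParameterDefinitions treats the whole map as additionalProperties: each key becomes a NamedParameter whose value is parsed by NewParameter under a child context named after the key, and a failed value still produces a (partially filled) pair while the error is accumulated. A standalone sketch of that map-of-named-values pattern, illustrative only (namedValue and parseNamed are hypothetical, with a trivial int parser standing in for the typed constructor):

package main

import (
	"fmt"
	"strconv"
)

type namedValue struct {
	Name  string
	Value int
}

// parseNamed keeps every key/value pair as a named entry, runs the value
// through a per-type parser, and accumulates individual failures instead of
// aborting the whole map, as the additionalProperties loops above do.
func parseNamed(m map[string]string) ([]*namedValue, []error) {
	out := []*namedValue{}
	var errs []error
	for k, v := range m {
		n, err := strconv.Atoi(v)
		if err != nil {
			errs = append(errs, fmt.Errorf("%s: %v", k, err))
		}
		out = append(out, &namedValue{Name: k, Value: n})
	}
	return out, errs
}

func main() {
	pairs, errs := parseNamed(map[string]string{"limit": "10", "offset": "zero"})
	for _, p := range pairs {
		fmt.Println(p.Name, p.Value)
	}
	fmt.Println("errors:", errs)
}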
+func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) { + errors := make([]error, 0) + x := &ParametersItem{} + matched := false + // Parameter parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewParameter(m, compiler.NewContext("parameter", context)) + if matchingError == nil { + x.Oneof = &ParametersItem_Parameter{Parameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) + if matchingError == nil { + x.Oneof = &ParametersItem_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathItem creates an object of type PathItem if possible, returning an error if not. +func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) { + errors := make([]error, 0) + x := &PathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Operation get = 2; + v2 := compiler.MapValueForKey(m, "get") + if v2 != nil { + var err error + x.Get, err = NewOperation(v2, compiler.NewContext("get", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation put = 3; + v3 := compiler.MapValueForKey(m, "put") + if v3 != nil { + var err error + x.Put, err = NewOperation(v3, compiler.NewContext("put", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation post = 4; + v4 := compiler.MapValueForKey(m, "post") + if v4 != nil { + var err error + x.Post, err = NewOperation(v4, compiler.NewContext("post", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation delete = 5; + v5 := compiler.MapValueForKey(m, "delete") + if v5 != nil { + var err error + x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation options = 6; + v6 := compiler.MapValueForKey(m, "options") + if v6 != nil { + var err error + x.Options, err = NewOperation(v6, compiler.NewContext("options", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation head = 7; + v7 := compiler.MapValueForKey(m, "head") + if v7 != nil { + var err error 
+ x.Head, err = NewOperation(v7, compiler.NewContext("head", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation patch = 8; + v8 := compiler.MapValueForKey(m, "patch") + if v8 != nil { + var err error + x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated ParametersItem parameters = 9; + v9 := compiler.MapValueForKey(m, "parameters") + if v9 != nil { + // repeated ParametersItem + x.Parameters = make([]*ParametersItem, 0) + a, ok := v9.([]interface{}) + if ok { + for _, item := range a { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not. +func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) { + errors := make([]error, 0) + x := &PathParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"required"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, 
compiler.NewError(context, message)) + } + // check for valid enum values + // [path] + if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, "collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = v8.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + switch v10 := v10.(type) { + case float64: + x.Maximum = v10 + case float32: + x.Maximum = float64(v10) + case uint64: + x.Maximum = float64(v10) + case uint32: + x.Maximum = float64(v10) + case int64: + x.Maximum = float64(v10) + case int32: + x.Maximum = float64(v10) + case int: + x.Maximum = float64(v10) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = 
v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + switch v12 := v12.(type) { + case float64: + x.Minimum = v12 + case float32: + x.Minimum = float64(v12) + case uint64: + x.Minimum = float64(v12) + case uint32: + x.Minimum = float64(v12) + case int64: + x.Minimum = float64(v12) + case int32: + x.Minimum = float64(v12) + case int: + x.Minimum = float64(v12) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = v13.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = v16.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = v19.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + switch v21 := v21.(type) { + case float64: + x.MultipleOf = v21 + case float32: + x.MultipleOf = float64(v21) + case uint64: + x.MultipleOf = float64(v21) + case uint32: + x.MultipleOf = 
float64(v21) + case int64: + x.MultipleOf = float64(v21) + case int32: + x.MultipleOf = float64(v21) + case int: + x.MultipleOf = float64(v21) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPaths creates an object of type Paths if possible, returning an error if not. +func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) { + errors := make([]error, 0) + x := &Paths{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern0, pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedAny vendor_extension = 1; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + // repeated NamedPathItem path = 2; + // MAP: PathItem ^/ + x.Path = make([]*NamedPathItem, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "/") { + pair := &NamedPathItem{} + pair.Name = k + var err error + pair.Value, err = NewPathItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.Path = append(x.Path, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
+func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) { + errors := make([]error, 0) + x := &PrimitivesItems{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 != nil { + x.CollectionFormat, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.Maximum = v6 + case float32: + x.Maximum = float64(v6) + case uint64: + x.Maximum = float64(v6) + case uint32: + x.Maximum = float64(v6) + case int64: + x.Maximum = float64(v6) + case int32: + x.Maximum = float64(v6) + case int: + x.Maximum = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + 
if v7 != nil { + x.ExclusiveMaximum, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + switch v8 := v8.(type) { + case float64: + x.Minimum = v8 + case float32: + x.Minimum = float64(v8) + case uint64: + x.Minimum = float64(v8) + case uint32: + x.Minimum = float64(v8) + case int64: + x.Minimum = float64(v8) + case int32: + x.Minimum = float64(v8) + case int: + x.Minimum = float64(v8) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = v9.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := v10.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = v12.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := v13.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = v15.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v16.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + switch v17 := v17.(type) { + case float64: + x.MultipleOf = v17 + case float32: + x.MultipleOf = float64(v17) + case uint64: + x.MultipleOf = float64(v17) + case uint32: + 
x.MultipleOf = float64(v17) + case int64: + x.MultipleOf = float64(v17) + case int32: + x.MultipleOf = float64(v17) + case int: + x.MultipleOf = float64(v17) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 18; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewProperties creates an object of type Properties if possible, returning an error if not. +func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) { + errors := make([]error, 0) + x := &Properties{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
+func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) { + errors := make([]error, 0) + x := &QueryParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [query] + if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = v7.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + 
} + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = v9.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + switch v11 := v11.(type) { + case float64: + x.Maximum = v11 + case float32: + x.Maximum = float64(v11) + case uint64: + x.Maximum = float64(v11) + case uint32: + x.Maximum = float64(v11) + case int64: + x.Maximum = float64(v11) + case int32: + x.Maximum = float64(v11) + case int: + x.Maximum = float64(v11) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = v12.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + switch v13 := v13.(type) { + case float64: + x.Minimum = v13 + case float32: + x.Minimum = float64(v13) + case uint64: + x.Minimum = float64(v13) + case uint32: + x.Minimum = float64(v13) + case int64: + x.Minimum = float64(v13) + case int32: + x.Minimum = float64(v13) + case int: + x.Minimum = float64(v13) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = v14.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := v16.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; 
+ v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = v17.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := v19.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = v20.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v21.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + switch v22 := v22.(type) { + case float64: + x.MultipleOf = v22 + case float32: + x.MultipleOf = float64(v22) + case uint64: + x.MultipleOf = float64(v22) + case uint32: + x.MultipleOf = float64(v22) + case int64: + x.MultipleOf = float64(v22) + case int32: + x.MultipleOf = float64(v22) + case int: + x.MultipleOf = float64(v22) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponse creates an object of type Response if possible, returning an error if not. 
+func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { + errors := make([]error, 0) + x := &Response{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"description"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "examples", "headers", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SchemaItem schema = 2; + v2 := compiler.MapValueForKey(m, "schema") + if v2 != nil { + var err error + x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // Headers headers = 3; + v3 := compiler.MapValueForKey(m, "headers") + if v3 != nil { + var err error + x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context)) + if err != nil { + errors = append(errors, err) + } + } + // Examples examples = 4; + v4 := compiler.MapValueForKey(m, "examples") + if v4 != nil { + var err error + x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. 
+func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) { + errors := make([]error, 0) + x := &ResponseDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedResponse additional_properties = 1; + // MAP: Response + x.AdditionalProperties = make([]*NamedResponse, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedResponse{} + pair.Name = k + var err error + pair.Value, err = NewResponse(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not. +func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) { + errors := make([]error, 0) + x := &ResponseValue{} + matched := false + // Response response = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewResponse(m, compiler.NewContext("response", context)) + if matchingError == nil { + x.Oneof = &ResponseValue_Response{Response: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) + if matchingError == nil { + x.Oneof = &ResponseValue_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponses creates an object of type Responses if possible, returning an error if not. 
+func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) { + errors := make([]error, 0) + x := &Responses{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern2, pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedResponseValue response_code = 1; + // MAP: ResponseValue ^([0-9]{3})$|^(default)$ + x.ResponseCode = make([]*NamedResponseValue, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if pattern2.MatchString(k) { + pair := &NamedResponseValue{} + pair.Name = k + var err error + pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.ResponseCode = append(x.ResponseCode, pair) + } + } + } + // repeated NamedAny vendor_extension = 2; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchema creates an object of type Schema if possible, returning an error if not. 
+func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { + errors := make([]error, 0) + x := &Schema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 3; + v3 := compiler.MapValueForKey(m, "title") + if v3 != nil { + x.Title, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float multiple_of = 6; + v6 := compiler.MapValueForKey(m, "multipleOf") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.MultipleOf = v6 + case float32: + x.MultipleOf = float64(v6) + case uint64: + x.MultipleOf = float64(v6) + case uint32: + x.MultipleOf = float64(v6) + case int64: + x.MultipleOf = float64(v6) + case int32: + x.MultipleOf = float64(v6) + case int: + x.MultipleOf = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float maximum = 7; + v7 := compiler.MapValueForKey(m, "maximum") + if v7 != nil { + switch v7 := v7.(type) { + case float64: + x.Maximum = v7 + case float32: + x.Maximum = float64(v7) + case uint64: + x.Maximum = float64(v7) + case uint32: + x.Maximum = float64(v7) + case int64: + x.Maximum = float64(v7) + case int32: + x.Maximum = float64(v7) + case int: + x.Maximum = float64(v7) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool 
exclusive_maximum = 8; + v8 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v8 != nil { + x.ExclusiveMaximum, ok = v8.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 9; + v9 := compiler.MapValueForKey(m, "minimum") + if v9 != nil { + switch v9 := v9.(type) { + case float64: + x.Minimum = v9 + case float32: + x.Minimum = float64(v9) + case uint64: + x.Minimum = float64(v9) + case uint32: + x.Minimum = float64(v9) + case int64: + x.Minimum = float64(v9) + case int32: + x.Minimum = float64(v9) + case int: + x.Minimum = float64(v9) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 10; + v10 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v10 != nil { + x.ExclusiveMinimum, ok = v10.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 11; + v11 := compiler.MapValueForKey(m, "maxLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 12; + v12 := compiler.MapValueForKey(m, "minLength") + if v12 != nil { + t, ok := v12.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 13; + v13 := compiler.MapValueForKey(m, "pattern") + if v13 != nil { + x.Pattern, ok = v13.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 14; + v14 := compiler.MapValueForKey(m, "maxItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 15; + v15 := compiler.MapValueForKey(m, "minItems") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 16; + v16 := compiler.MapValueForKey(m, "uniqueItems") + if v16 != nil { + x.UniqueItems, ok = v16.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_properties = 17; + v17 := compiler.MapValueForKey(m, "maxProperties") + if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_properties = 18; + v18 := compiler.MapValueForKey(m, "minProperties") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinProperties = int64(t) + } else { + message := fmt.Sprintf("has 
unexpected value for minProperties: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string required = 19; + v19 := compiler.MapValueForKey(m, "required") + if v19 != nil { + v, ok := v19.([]interface{}) + if ok { + x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // AdditionalPropertiesItem additional_properties = 21; + v21 := compiler.MapValueForKey(m, "additionalProperties") + if v21 != nil { + var err error + x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context)) + if err != nil { + errors = append(errors, err) + } + } + // TypeItem type = 22; + v22 := compiler.MapValueForKey(m, "type") + if v22 != nil { + var err error + x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context)) + if err != nil { + errors = append(errors, err) + } + } + // ItemsItem items = 23; + v23 := compiler.MapValueForKey(m, "items") + if v23 != nil { + var err error + x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Schema all_of = 24; + v24 := compiler.MapValueForKey(m, "allOf") + if v24 != nil { + // repeated Schema + x.AllOf = make([]*Schema, 0) + a, ok := v24.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSchema(item, compiler.NewContext("allOf", context)) + if err != nil { + errors = append(errors, err) + } + x.AllOf = append(x.AllOf, y) + } + } + } + // Properties properties = 25; + v25 := compiler.MapValueForKey(m, "properties") + if v25 != nil { + var err error + x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context)) + if err != nil { + errors = append(errors, err) + } + } + // string discriminator = 26; + v26 := compiler.MapValueForKey(m, "discriminator") + if v26 != nil { + x.Discriminator, ok = v26.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 27; + v27 := compiler.MapValueForKey(m, "readOnly") + if v27 != nil { + x.ReadOnly, ok = v27.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Xml xml = 28; + v28 := compiler.MapValueForKey(m, "xml") + if v28 != nil { + var err error + x.Xml, err = NewXml(v28, compiler.NewContext("xml", context)) + if err != nil { + errors = append(errors, err) + } + } + // ExternalDocs external_docs = 29; + v29 := compiler.MapValueForKey(m, "externalDocs") + if v29 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 30; + v30 := compiler.MapValueForKey(m, "example") + if v30 != nil { + var err error + x.Example, err = NewAny(v30, compiler.NewContext("example", context)) + 
if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 31; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. +func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) { + errors := make([]error, 0) + x := &SchemaItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + if matchingError == nil { + x.Oneof = &SchemaItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // FileSchema file_schema = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context)) + if matchingError == nil { + x.Oneof = &SchemaItem_FileSchema{FileSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. +func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) { + errors := make([]error, 0) + x := &SecurityDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSecurityDefinitionsItem additional_properties = 1; + // MAP: SecurityDefinitionsItem + x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSecurityDefinitionsItem{} + pair.Name = k + var err error + pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. 
+func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &SecurityDefinitionsItem{} + matched := false + // BasicAuthenticationSecurity basic_authentication_security = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // ApiKeySecurity api_key_security = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2ImplicitSecurity oauth2_implicit_security = 3; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2PasswordSecurity oauth2_password_security = 4; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2ApplicationSecurity oauth2_application_security = 5; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. 
+func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) { + errors := make([]error, 0) + x := &SecurityRequirement{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedStringArray additional_properties = 1; + // MAP: StringArray + x.AdditionalProperties = make([]*NamedStringArray, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedStringArray{} + pair.Name = k + var err error + pair.Value, err = NewStringArray(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewStringArray creates an object of type StringArray if possible, returning an error if not. +func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) { + errors := make([]error, 0) + x := &StringArray{} + a, ok := in.([]interface{}) + if !ok { + message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.Value = make([]string, 0) + for _, s := range a { + x.Value = append(x.Value, s.(string)) + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewTag creates an object of type Tag if possible, returning an error if not. +func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { + errors := make([]error, 0) + x := &Tag{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "externalDocs", "name"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 3; + v3 := compiler.MapValueForKey(m, "externalDocs") + if v3 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := 
range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewTypeItem creates an object of type TypeItem if possible, returning an error if not. +func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) { + errors := make([]error, 0) + x := &TypeItem{} + switch in := in.(type) { + case string: + x.Value = make([]string, 0) + x.Value = append(x.Value, in) + case []interface{}: + x.Value = make([]string, 0) + for _, v := range in { + value, ok := v.(string) + if ok { + x.Value = append(x.Value, value) + } else { + message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value) + errors = append(errors, compiler.NewError(context, message)) + } + } + default: + message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not. +func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) { + errors := make([]error, 0) + x := &VendorExtension{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewXml creates an object of type Xml if possible, returning an error if not. 
+func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { + errors := make([]error, 0) + x := &Xml{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string namespace = 2; + v2 := compiler.MapValueForKey(m, "namespace") + if v2 != nil { + x.Namespace, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string prefix = 3; + v3 := compiler.MapValueForKey(m, "prefix") + if v3 != nil { + x.Prefix, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool attribute = 4; + v4 := compiler.MapValueForKey(m, "attribute") + if v4 != nil { + x.Attribute, ok = v4.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool wrapped = 5; + v5 := compiler.MapValueForKey(m, "wrapped") + if v5 != nil { + x.Wrapped, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. +func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Any objects. 
+func (m *Any) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ApiKeySecurity objects. +func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. +func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BodyParameter objects. +func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Contact objects. +func (m *Contact) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Default objects. +func (m *Default) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Definitions objects. +func (m *Definitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Document objects. 
+func (m *Document) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Info != nil { + _, err := m.Info.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Paths != nil { + _, err := m.Paths.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Definitions != nil { + _, err := m.Definitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Parameters != nil { + _, err := m.Parameters.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.SecurityDefinitions != nil { + _, err := m.SecurityDefinitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Tags { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Examples objects. +func (m *Examples) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ExternalDocs objects. +func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FileSchema objects. +func (m *FileSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FormDataParameterSubSchema objects. 
+func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Header objects. +func (m *Header) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside HeaderParameterSubSchema objects. +func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Headers objects. +func (m *Headers) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Info objects. +func (m *Info) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Contact != nil { + _, err := m.Contact.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.License != nil { + _, err := m.License.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ItemsItem objects. 
+func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.Schema { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside JsonReference objects. +func (m *JsonReference) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewJsonReference(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside License objects. +func (m *License) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedAny objects. +func (m *NamedAny) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedHeader objects. +func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedParameter objects. +func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedPathItem objects. +func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponse objects. +func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponseValue objects. +func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSchema objects. 
+func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. +func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedString objects. +func (m *NamedString) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedStringArray objects. +func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NonBodyParameter objects. +func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) + if ok { + _, err := p.HeaderParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema) + if ok { + _, err := p.FormDataParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema) + if ok { + _, err := p.QueryParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema) + if ok { + _, err := p.PathParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. +func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. +func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. 
+func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects. +func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2Scopes objects. +func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Operation objects. +func (m *Operation) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Parameter objects. +func (m *Parameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*Parameter_BodyParameter) + if ok { + _, err := p.BodyParameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*Parameter_NonBodyParameter) + if ok { + _, err := p.NonBodyParameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParameterDefinitions objects. +func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParametersItem objects. 
+func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ParametersItem_Parameter) + if ok { + _, err := p.Parameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ParametersItem_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewParametersItem(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathItem objects. +func (m *PathItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewPathItem(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Get != nil { + _, err := m.Get.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Put != nil { + _, err := m.Put.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Post != nil { + _, err := m.Post.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Delete != nil { + _, err := m.Delete.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Options != nil { + _, err := m.Options.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Head != nil { + _, err := m.Head.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Patch != nil { + _, err := m.Patch.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathParameterSubSchema objects. +func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Paths objects. 
+func (m *Paths) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.Path { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PrimitivesItems objects. +func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Properties objects. +func (m *Properties) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside QueryParameterSubSchema objects. +func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Response objects. +func (m *Response) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Headers != nil { + _, err := m.Headers.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseDefinitions objects. 
+func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseValue objects. +func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ResponseValue_Response) + if ok { + _, err := p.Response.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ResponseValue_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewResponseValue(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Responses objects. +func (m *Responses) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.ResponseCode { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Schema objects. +func (m *Schema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewSchema(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.AdditionalProperties != nil { + _, err := m.AdditionalProperties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Type != nil { + _, err := m.Type.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.AllOf { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Properties != nil { + _, err := m.Properties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Xml != nil { + _, err := m.Xml.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = 
append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SchemaItem objects. +func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SchemaItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SchemaItem_FileSchema) + if ok { + _, err := p.FileSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityDefinitions objects. +func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityDefinitionsItem objects. +func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity) + if ok { + _, err := p.BasicAuthenticationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity) + if ok { + _, err := p.ApiKeySecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity) + if ok { + _, err := p.Oauth2ImplicitSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity) + if ok { + _, err := p.Oauth2PasswordSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity) + if ok { + _, err := p.Oauth2ApplicationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) + if ok { + _, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityRequirement objects. +func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside StringArray objects. +func (m *StringArray) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Tag objects. 
+func (m *Tag) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside TypeItem objects. +func (m *TypeItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside VendorExtension objects. +func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Xml objects. +func (m *Xml) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export. +func (m *AdditionalPropertiesItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // AdditionalPropertiesItem + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return v1.Boolean + } + return nil +} + +// ToRawInfo returns a description of Any suitable for JSON or YAML export. +func (m *Any) ToRawInfo() interface{} { + var err error + var info1 []yaml.MapSlice + err = yaml.Unmarshal([]byte(m.Yaml), &info1) + if err == nil { + return info1 + } + var info2 yaml.MapSlice + err = yaml.Unmarshal([]byte(m.Yaml), &info2) + if err == nil { + return info2 + } + var info3 interface{} + err = yaml.Unmarshal([]byte(m.Yaml), &info3) + if err == nil { + return info3 + } + return nil +} + +// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export. +func (m *ApiKeySecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. +func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. +func (m *BodyParameter) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + if m.Required != false { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) + // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Contact suitable for JSON or YAML export. +func (m *Contact) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.Url != "" { + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + } + if m.Email != "" { + info = append(info, yaml.MapItem{Key: "email", Value: m.Email}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Default suitable for JSON or YAML export. 
+func (m *Default) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:} + return info +} + +// ToRawInfo returns a description of Definitions suitable for JSON or YAML export. +func (m *Definitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Document suitable for JSON or YAML export. +func (m *Document) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()}) + // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Host != "" { + info = append(info, yaml.MapItem{Key: "host", Value: m.Host}) + } + if m.BasePath != "" { + info = append(info, yaml.MapItem{Key: "basePath", Value: m.BasePath}) + } + if len(m.Schemes) != 0 { + info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) + } + if len(m.Consumes) != 0 { + info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) + } + if len(m.Produces) != 0 { + info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) + } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()}) + // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Definitions != nil { + info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()}) + } + // &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Parameters != nil { + info = append(info, yaml.MapItem{Key: "parameters", Value: m.Parameters.ToRawInfo()}) + } + // &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Responses != nil { + info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) + } + // &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Security) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Security { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "security", Value: items}) + } + // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.SecurityDefinitions != nil { + info = append(info, yaml.MapItem{Key: "securityDefinitions", Value: m.SecurityDefinitions.ToRawInfo()}) + } + // &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Tags) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Tags { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "tags", Value: items}) + } + // &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Examples suitable for JSON or YAML export. +func (m *Examples) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. +func (m *ExternalDocs) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. +func (m *FileSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Title != "" { + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Required) != 0 { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + if m.ReadOnly != false { + info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Example != nil { + info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()}) + } + // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export. 
+func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Required != false { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.AllowEmptyValue != false { + info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Header suitable for JSON or YAML export. +func (m *Header) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. 
+func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Required != false { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Headers suitable for JSON or YAML export. 
+func (m *Headers) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Info suitable for JSON or YAML export. +func (m *Info) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "version", Value: m.Version}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.TermsOfService != "" { + info = append(info, yaml.MapItem{Key: "termsOfService", Value: m.TermsOfService}) + } + if m.Contact != nil { + info = append(info, yaml.MapItem{Key: "contact", Value: m.Contact.ToRawInfo()}) + } + // &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.License != nil { + info = append(info, yaml.MapItem{Key: "license", Value: m.License.ToRawInfo()}) + } + // &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. +func (m *ItemsItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if len(m.Schema) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Schema { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "schema", Value: items}) + } + // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + return info +} + +// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. +func (m *JsonReference) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + return info +} + +// ToRawInfo returns a description of License suitable for JSON or YAML export. +func (m *License) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + if m.Url != "" { + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. +func (m *NamedAny) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. +func (m *NamedHeader) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. +func (m *NamedParameter) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. +func (m *NamedPathItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export. +func (m *NamedResponse) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export. +func (m *NamedResponseValue) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. 
+func (m *NamedSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. +func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedString suitable for JSON or YAML export. +func (m *NamedString) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.Value != "" { + info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) + } + return info +} + +// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. +func (m *NamedStringArray) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export. +func (m *NonBodyParameter) ToRawInfo() interface{} { + // ONE OF WRAPPER + // NonBodyParameter + // {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetHeaderParameterSubSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetFormDataParameterSubSchema() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetQueryParameterSubSchema() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetPathParameterSubSchema() + if v3 != nil { + return v3.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. +func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m.Scopes != nil { + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. +func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m.Scopes != nil { + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + // always include this required field. + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. +func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m.Scopes != nil { + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + // always include this required field. + info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. +func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m.Scopes != nil { + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + // always include this required field. + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. +func (m *Oauth2Scopes) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Operation suitable for JSON or YAML export. +func (m *Operation) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if len(m.Tags) != 0 { + info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags}) + } + if m.Summary != "" { + info = append(info, yaml.MapItem{Key: "summary", Value: m.Summary}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.OperationId != "" { + info = append(info, yaml.MapItem{Key: "operationId", Value: m.OperationId}) + } + if len(m.Produces) != 0 { + info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) + } + if len(m.Consumes) != 0 { + info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) + } + if len(m.Parameters) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Parameters { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "parameters", Value: items}) + } + // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) + // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Schemes) != 0 { + info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) + } + if m.Deprecated != false { + info = append(info, yaml.MapItem{Key: "deprecated", Value: m.Deprecated}) + } + if len(m.Security) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Security { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "security", Value: items}) + } + // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Parameter suitable for JSON or YAML export. +func (m *Parameter) ToRawInfo() interface{} { + // ONE OF WRAPPER + // Parameter + // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBodyParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetNonBodyParameter() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. +func (m *ParameterDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. +func (m *ParametersItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // ParametersItem + // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. 
+func (m *PathItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.XRef != "" { + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) + } + if m.Get != nil { + info = append(info, yaml.MapItem{Key: "get", Value: m.Get.ToRawInfo()}) + } + // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Put != nil { + info = append(info, yaml.MapItem{Key: "put", Value: m.Put.ToRawInfo()}) + } + // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Post != nil { + info = append(info, yaml.MapItem{Key: "post", Value: m.Post.ToRawInfo()}) + } + // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Delete != nil { + info = append(info, yaml.MapItem{Key: "delete", Value: m.Delete.ToRawInfo()}) + } + // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Options != nil { + info = append(info, yaml.MapItem{Key: "options", Value: m.Options.ToRawInfo()}) + } + // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Head != nil { + info = append(info, yaml.MapItem{Key: "head", Value: m.Head.ToRawInfo()}) + } + // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Patch != nil { + info = append(info, yaml.MapItem{Key: "patch", Value: m.Patch.ToRawInfo()}) + } + // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Parameters) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Parameters { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "parameters", Value: items}) + } + // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. +func (m *PathParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + if m.In != "" { + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Paths suitable for JSON or YAML export. 
+func (m *Paths) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + if m.Path != nil { + for _, item := range m.Path { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. +func (m *PrimitivesItems) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Properties suitable for JSON or YAML export. 
+func (m *Properties) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. +func (m *QueryParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Required != false { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.AllowEmptyValue != false { + info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } 
+ } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Response suitable for JSON or YAML export. +func (m *Response) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + if m.Schema != nil { + info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) + } + // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Headers != nil { + info = append(info, yaml.MapItem{Key: "headers", Value: m.Headers.ToRawInfo()}) + } + // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Examples != nil { + info = append(info, yaml.MapItem{Key: "examples", Value: m.Examples.ToRawInfo()}) + } + // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. +func (m *ResponseDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. +func (m *ResponseValue) ToRawInfo() interface{} { + // ONE OF WRAPPER + // ResponseValue + // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetResponse() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of Responses suitable for JSON or YAML export. +func (m *Responses) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.ResponseCode != nil { + for _, item := range m.ResponseCode { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Schema suitable for JSON or YAML export. 
+func (m *Schema) ToRawInfo() interface{} {
+ info := yaml.MapSlice{}
+ if m == nil {
+ return info
+ }
+ if m.XRef != "" {
+ info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
+ }
+ if m.Format != "" {
+ info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+ }
+ if m.Title != "" {
+ info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
+ }
+ if m.Description != "" {
+ info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+ }
+ if m.Default != nil {
+ info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+ }
+ // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+ if m.MultipleOf != 0.0 {
+ info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+ }
+ if m.Maximum != 0.0 {
+ info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+ }
+ if m.ExclusiveMaximum != false {
+ info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+ }
+ if m.Minimum != 0.0 {
+ info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+ }
+ if m.ExclusiveMinimum != false {
+ info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+ }
+ if m.MaxLength != 0 {
+ info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+ }
+ if m.MinLength != 0 {
+ info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+ }
+ if m.Pattern != "" {
+ info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+ }
+ if m.MaxItems != 0 {
+ info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+ }
+ if m.MinItems != 0 {
+ info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+ }
+ if m.UniqueItems != false {
+ info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+ }
+ if m.MaxProperties != 0 {
+ info = append(info, yaml.MapItem{Key: "maxProperties", Value: m.MaxProperties})
+ }
+ if m.MinProperties != 0 {
+ info = append(info, yaml.MapItem{Key: "minProperties", Value: m.MinProperties})
+ }
+ if len(m.Required) != 0 {
+ info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+ }
+ if len(m.Enum) != 0 {
+ items := make([]interface{}, 0)
+ for _, item := range m.Enum {
+ items = append(items, item.ToRawInfo())
+ }
+ info = append(info, yaml.MapItem{Key: "enum", Value: items})
+ }
+ // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+ if m.AdditionalProperties != nil {
+ info = append(info, yaml.MapItem{Key: "additionalProperties", Value: m.AdditionalProperties.ToRawInfo()})
+ }
+ // &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+ if m.Type != nil {
+ if len(m.Type.Value) == 1 {
+ info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value[0]})
+ } else {
+ info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value})
+ }
+ }
+ // &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+ if m.Items != nil {
+ items := make([]interface{}, 0)
+ for _, item := range m.Items.Schema {
+ items = append(items, item.ToRawInfo())
+ }
+ info = append(info, yaml.MapItem{Key: "items", Value: items[0]})
+ }
+ // &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+ if len(m.AllOf) != 0 {
+ items := make([]interface{}, 0)
+ for _, item := range m.AllOf {
+ items = append(items, item.ToRawInfo())
+ }
+ info = append(info, yaml.MapItem{Key: "allOf", Value: items})
+ }
+ // &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+ if m.Properties != nil {
+ info = append(info, yaml.MapItem{Key: "properties", Value: m.Properties.ToRawInfo()})
+ }
+ // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+ if m.Discriminator != "" {
+ info = append(info, yaml.MapItem{Key: "discriminator", Value: m.Discriminator})
+ }
+ if m.ReadOnly != false {
+ info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly})
+ }
+ if m.Xml != nil {
+ info = append(info, yaml.MapItem{Key: "xml", Value: m.Xml.ToRawInfo()})
+ }
+ // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+ if m.ExternalDocs != nil {
+ info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+ }
+ // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+ if m.Example != nil {
+ info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()})
+ }
+ // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+ if m.VendorExtension != nil {
+ for _, item := range m.VendorExtension {
+ info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ }
+ }
+ // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+ return info
+}
+
+// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export.
+func (m *SchemaItem) ToRawInfo() interface{} {
+ // ONE OF WRAPPER
+ // SchemaItem
+ // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+ v0 := m.GetSchema()
+ if v0 != nil {
+ return v0.ToRawInfo()
+ }
+ // {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+ v1 := m.GetFileSchema()
+ if v1 != nil {
+ return v1.ToRawInfo()
+ }
+ return nil
+}
+
+// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export.
+func (m *SecurityDefinitions) ToRawInfo() interface{} {
+ info := yaml.MapSlice{}
+ if m == nil {
+ return info
+ }
+ if m.AdditionalProperties != nil {
+ for _, item := range m.AdditionalProperties {
+ info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+ }
+ }
+ // &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:}
+ return info
+}
+
+// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export.
+func (m *SecurityDefinitionsItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // SecurityDefinitionsItem + // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBasicAuthenticationSecurity() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetApiKeySecurity() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetOauth2ImplicitSecurity() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetOauth2PasswordSecurity() + if v3 != nil { + return v3.ToRawInfo() + } + // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v4 := m.GetOauth2ApplicationSecurity() + if v4 != nil { + return v4.ToRawInfo() + } + // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v5 := m.GetOauth2AccessCodeSecurity() + if v5 != nil { + return v5.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. +func (m *SecurityRequirement) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. +func (m *StringArray) ToRawInfo() interface{} { + return m.Value +} + +// ToRawInfo returns a description of Tag suitable for JSON or YAML export. +func (m *Tag) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. +func (m *TypeItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if len(m.Value) != 0 { + info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) + } + return info +} + +// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. 
+func (m *VendorExtension) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Xml suitable for JSON or YAML export. +func (m *Xml) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.Namespace != "" { + info = append(info, yaml.MapItem{Key: "namespace", Value: m.Namespace}) + } + if m.Prefix != "" { + info = append(info, yaml.MapItem{Key: "prefix", Value: m.Prefix}) + } + if m.Attribute != false { + info = append(info, yaml.MapItem{Key: "attribute", Value: m.Attribute}) + } + if m.Wrapped != false { + info = append(info, yaml.MapItem{Key: "wrapped", Value: m.Wrapped}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +var ( + pattern0 = regexp.MustCompile("^x-") + pattern1 = regexp.MustCompile("^/") + pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") +) diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go new file mode 100644 index 000000000..a030fa676 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go @@ -0,0 +1,4455 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: OpenAPIv2/OpenAPIv2.proto + +/* +Package openapi_v2 is a generated protocol buffer package. + +It is generated from these files: + OpenAPIv2/OpenAPIv2.proto + +It has these top-level messages: + AdditionalPropertiesItem + Any + ApiKeySecurity + BasicAuthenticationSecurity + BodyParameter + Contact + Default + Definitions + Document + Examples + ExternalDocs + FileSchema + FormDataParameterSubSchema + Header + HeaderParameterSubSchema + Headers + Info + ItemsItem + JsonReference + License + NamedAny + NamedHeader + NamedParameter + NamedPathItem + NamedResponse + NamedResponseValue + NamedSchema + NamedSecurityDefinitionsItem + NamedString + NamedStringArray + NonBodyParameter + Oauth2AccessCodeSecurity + Oauth2ApplicationSecurity + Oauth2ImplicitSecurity + Oauth2PasswordSecurity + Oauth2Scopes + Operation + Parameter + ParameterDefinitions + ParametersItem + PathItem + PathParameterSubSchema + Paths + PrimitivesItems + Properties + QueryParameterSubSchema + Response + ResponseDefinitions + ResponseValue + Responses + Schema + SchemaItem + SecurityDefinitions + SecurityDefinitionsItem + SecurityRequirement + StringArray + Tag + TypeItem + VendorExtension + Xml +*/ +package openapi_v2 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type AdditionalPropertiesItem struct { + // Types that are valid to be assigned to Oneof: + // *AdditionalPropertiesItem_Schema + // *AdditionalPropertiesItem_Boolean + Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } +func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } +func (*AdditionalPropertiesItem) ProtoMessage() {} +func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isAdditionalPropertiesItem_Oneof interface { + isAdditionalPropertiesItem_Oneof() +} + +type AdditionalPropertiesItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` +} +type AdditionalPropertiesItem_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,oneof"` +} + +func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} +func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} + +func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *AdditionalPropertiesItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *AdditionalPropertiesItem) GetBoolean() bool { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return x.Boolean + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*AdditionalPropertiesItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AdditionalPropertiesItem_OneofMarshaler, _AdditionalPropertiesItem_OneofUnmarshaler, _AdditionalPropertiesItem_OneofSizer, []interface{}{ + (*AdditionalPropertiesItem_Schema)(nil), + (*AdditionalPropertiesItem_Boolean)(nil), + } +} + +func _AdditionalPropertiesItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AdditionalPropertiesItem) + // oneof + switch x := m.Oneof.(type) { + case *AdditionalPropertiesItem_Schema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schema); err != nil { + return err + } + case *AdditionalPropertiesItem_Boolean: + t := uint64(0) + if x.Boolean { + t = 1 + } + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("AdditionalPropertiesItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _AdditionalPropertiesItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AdditionalPropertiesItem) + switch tag { + case 1: // oneof.schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schema) + err := b.DecodeMessage(msg) + m.Oneof = &AdditionalPropertiesItem_Schema{msg} + return true, err + case 2: // oneof.boolean + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Oneof = &AdditionalPropertiesItem_Boolean{x != 0} + return true, err + default: + return false, nil + } +} + +func _AdditionalPropertiesItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AdditionalPropertiesItem) + // oneof + switch x := m.Oneof.(type) { + case *AdditionalPropertiesItem_Schema: + s := proto.Size(x.Schema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AdditionalPropertiesItem_Boolean: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Any struct { + Value *google_protobuf.Any `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` + Yaml string `protobuf:"bytes,2,opt,name=yaml" json:"yaml,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Any) GetValue() *google_protobuf.Any { + if m != nil { + return m.Value + } + return nil +} + +func (m *Any) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +type ApiKeySecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } +func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } +func (*ApiKeySecurity) ProtoMessage() {} +func (*ApiKeySecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} 
} + +func (m *ApiKeySecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ApiKeySecurity) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ApiKeySecurity) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *ApiKeySecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BasicAuthenticationSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } +func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } +func (*BasicAuthenticationSecurity) ProtoMessage() {} +func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *BasicAuthenticationSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BodyParameter struct { + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,4,opt,name=required" json:"required,omitempty"` + Schema *Schema `protobuf:"bytes,5,opt,name=schema" json:"schema,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *BodyParameter) Reset() { *m = BodyParameter{} } +func (m *BodyParameter) String() string { return proto.CompactTextString(m) } +func (*BodyParameter) ProtoMessage() {} +func (*BodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *BodyParameter) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BodyParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BodyParameter) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *BodyParameter) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *BodyParameter) GetSchema() *Schema { + if m != nil { + return m.Schema + } + return nil +} + +func (m *BodyParameter) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Contact information for the owners of the API. +type Contact struct { + // The identifying name of the contact person/organization. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The URL pointing to the contact information. + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + // The email address of the contact person/organization. + Email string `protobuf:"bytes,3,opt,name=email" json:"email,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Contact) Reset() { *m = Contact{} } +func (m *Contact) String() string { return proto.CompactTextString(m) } +func (*Contact) ProtoMessage() {} +func (*Contact) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Contact) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Contact) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *Contact) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +func (m *Contact) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Default struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Default) Reset() { *m = Default{} } +func (m *Default) String() string { return proto.CompactTextString(m) } +func (*Default) ProtoMessage() {} +func (*Default) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *Default) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. +type Definitions struct { + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Definitions) Reset() { *m = Definitions{} } +func (m *Definitions) String() string { return proto.CompactTextString(m) } +func (*Definitions) ProtoMessage() {} +func (*Definitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *Definitions) GetAdditionalProperties() []*NamedSchema { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Document struct { + // The Swagger version of this document. + Swagger string `protobuf:"bytes,1,opt,name=swagger" json:"swagger,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + // The host (name or ip) of the API. Example: 'swagger.io' + Host string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"` + // The base path to the API. Example: '/api'. + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath" json:"base_path,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,5,rep,name=schemes" json:"schemes,omitempty"` + // A list of MIME types accepted by the API. + Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"` + // A list of MIME types the API can produce. 
+ Produces []string `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"` + Paths *Paths `protobuf:"bytes,8,opt,name=paths" json:"paths,omitempty"` + Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions" json:"definitions,omitempty"` + Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters" json:"parameters,omitempty"` + Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses" json:"responses,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions" json:"security_definitions,omitempty"` + Tags []*Tag `protobuf:"bytes,14,rep,name=tags" json:"tags,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *Document) GetSwagger() string { + if m != nil { + return m.Swagger + } + return "" +} + +func (m *Document) GetInfo() *Info { + if m != nil { + return m.Info + } + return nil +} + +func (m *Document) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *Document) GetBasePath() string { + if m != nil { + return m.BasePath + } + return "" +} + +func (m *Document) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Document) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Document) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Document) GetPaths() *Paths { + if m != nil { + return m.Paths + } + return nil +} + +func (m *Document) GetDefinitions() *Definitions { + if m != nil { + return m.Definitions + } + return nil +} + +func (m *Document) GetParameters() *ParameterDefinitions { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Document) GetResponses() *ResponseDefinitions { + if m != nil { + return m.Responses + } + return nil +} + +func (m *Document) GetSecurity() []*SecurityRequirement { + if m != nil { + return m.Security + } + return nil +} + +func (m *Document) GetSecurityDefinitions() *SecurityDefinitions { + if m != nil { + return m.SecurityDefinitions + } + return nil +} + +func (m *Document) GetTags() []*Tag { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Document) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Document) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Examples struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Examples) Reset() { *m = Examples{} } +func (m *Examples) String() string { return proto.CompactTextString(m) } +func (*Examples) ProtoMessage() {} +func (*Examples) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *Examples) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + 
return nil +} + +// information about external documentation +type ExternalDocs struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *ExternalDocs) Reset() { *m = ExternalDocs{} } +func (m *ExternalDocs) String() string { return proto.CompactTextString(m) } +func (*ExternalDocs) ProtoMessage() {} +func (*ExternalDocs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *ExternalDocs) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ExternalDocs) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *ExternalDocs) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. +type FileSchema struct { + Format string `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + Default *Any `protobuf:"bytes,4,opt,name=default" json:"default,omitempty"` + Required []string `protobuf:"bytes,5,rep,name=required" json:"required,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,9,opt,name=example" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *FileSchema) Reset() { *m = FileSchema{} } +func (m *FileSchema) String() string { return proto.CompactTextString(m) } +func (*FileSchema) ProtoMessage() {} +func (*FileSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *FileSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FileSchema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *FileSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FileSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FileSchema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *FileSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FileSchema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + +func (m *FileSchema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *FileSchema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *FileSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type FormDataParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. 
+ In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } +func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*FormDataParameterSubSchema) ProtoMessage() {} +func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *FormDataParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *FormDataParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FormDataParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *FormDataParameterSubSchema) GetAllowEmptyValue() bool { + if m != nil { + return m.AllowEmptyValue + } + return false +} + +func (m *FormDataParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FormDataParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FormDataParameterSubSchema) 
GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *FormDataParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *FormDataParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *FormDataParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Header struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 
`protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + Description string `protobuf:"bytes,18,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *Header) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Header) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Header) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *Header) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *Header) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Header) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Header) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Header) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Header) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Header) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Header) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Header) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Header) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Header) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Header) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Header) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Header) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Header) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Header) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type HeaderParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } +func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*HeaderParameterSubSchema) ProtoMessage() {} +func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *HeaderParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *HeaderParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *HeaderParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *HeaderParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *HeaderParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *HeaderParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMinimum() float64 { + if m != nil { + 
return m.Minimum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *HeaderParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *HeaderParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Headers struct { + AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Headers) Reset() { *m = Headers{} } +func (m *Headers) String() string { return proto.CompactTextString(m) } +func (*Headers) ProtoMessage() {} +func (*Headers) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *Headers) GetAdditionalProperties() []*NamedHeader { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// General information about the API. +type Info struct { + // A unique and precise title of the API. + Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"` + // A semantic version number of the API. + Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The terms of service for the API. 
+ TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,5,opt,name=contact" json:"contact,omitempty"` + License *License `protobuf:"bytes,6,opt,name=license" json:"license,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *Info) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Info) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Info) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Info) GetTermsOfService() string { + if m != nil { + return m.TermsOfService + } + return "" +} + +func (m *Info) GetContact() *Contact { + if m != nil { + return m.Contact + } + return nil +} + +func (m *Info) GetLicense() *License { + if m != nil { + return m.License + } + return nil +} + +func (m *Info) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type ItemsItem struct { + Schema []*Schema `protobuf:"bytes,1,rep,name=schema" json:"schema,omitempty"` +} + +func (m *ItemsItem) Reset() { *m = ItemsItem{} } +func (m *ItemsItem) String() string { return proto.CompactTextString(m) } +func (*ItemsItem) ProtoMessage() {} +func (*ItemsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *ItemsItem) GetSchema() []*Schema { + if m != nil { + return m.Schema + } + return nil +} + +type JsonReference struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` +} + +func (m *JsonReference) Reset() { *m = JsonReference{} } +func (m *JsonReference) String() string { return proto.CompactTextString(m) } +func (*JsonReference) ProtoMessage() {} +func (*JsonReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *JsonReference) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *JsonReference) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +type License struct { + // The name of the license type. It's encouraged to use an OSI compatible license. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The URL pointing to the license. 
+ Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *License) Reset() { *m = License{} } +func (m *License) String() string { return proto.CompactTextString(m) } +func (*License) ProtoMessage() {} +func (*License) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *License) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *License) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *License) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +type NamedAny struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedAny) Reset() { *m = NamedAny{} } +func (m *NamedAny) String() string { return proto.CompactTextString(m) } +func (*NamedAny) ProtoMessage() {} +func (*NamedAny) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *NamedAny) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedAny) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +type NamedHeader struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Header `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedHeader) Reset() { *m = NamedHeader{} } +func (m *NamedHeader) String() string { return proto.CompactTextString(m) } +func (*NamedHeader) ProtoMessage() {} +func (*NamedHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *NamedHeader) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedHeader) GetValue() *Header { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +type NamedParameter struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Parameter `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedParameter) Reset() { *m = NamedParameter{} } +func (m *NamedParameter) String() string { return proto.CompactTextString(m) } +func (*NamedParameter) ProtoMessage() {} +func (*NamedParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *NamedParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedParameter) GetValue() *Parameter { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+type NamedPathItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *PathItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedPathItem) Reset() { *m = NamedPathItem{} } +func (m *NamedPathItem) String() string { return proto.CompactTextString(m) } +func (*NamedPathItem) ProtoMessage() {} +func (*NamedPathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *NamedPathItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedPathItem) GetValue() *PathItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +type NamedResponse struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Response `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedResponse) Reset() { *m = NamedResponse{} } +func (m *NamedResponse) String() string { return proto.CompactTextString(m) } +func (*NamedResponse) ProtoMessage() {} +func (*NamedResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *NamedResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponse) GetValue() *Response { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +type NamedResponseValue struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *ResponseValue `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} } +func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) } +func (*NamedResponseValue) ProtoMessage() {} +func (*NamedResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *NamedResponseValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponseValue) GetValue() *ResponseValue { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +type NamedSchema struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Schema `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedSchema) Reset() { *m = NamedSchema{} } +func (m *NamedSchema) String() string { return proto.CompactTextString(m) } +func (*NamedSchema) ProtoMessage() {} +func (*NamedSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *NamedSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSchema) GetValue() *Schema { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
+type NamedSecurityDefinitionsItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} } +func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*NamedSecurityDefinitionsItem) ProtoMessage() {} +func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *NamedSecurityDefinitionsItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +type NamedString struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedString) Reset() { *m = NamedString{} } +func (m *NamedString) String() string { return proto.CompactTextString(m) } +func (*NamedString) ProtoMessage() {} +func (*NamedString) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *NamedString) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +type NamedStringArray struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *StringArray `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedStringArray) Reset() { *m = NamedStringArray{} } +func (m *NamedStringArray) String() string { return proto.CompactTextString(m) } +func (*NamedStringArray) ProtoMessage() {} +func (*NamedStringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *NamedStringArray) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedStringArray) GetValue() *StringArray { + if m != nil { + return m.Value + } + return nil +} + +type NonBodyParameter struct { + // Types that are valid to be assigned to Oneof: + // *NonBodyParameter_HeaderParameterSubSchema + // *NonBodyParameter_FormDataParameterSubSchema + // *NonBodyParameter_QueryParameterSubSchema + // *NonBodyParameter_PathParameterSubSchema + Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} } +func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) } +func (*NonBodyParameter) ProtoMessage() {} +func (*NonBodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type isNonBodyParameter_Oneof interface { + isNonBodyParameter_Oneof() +} + +type NonBodyParameter_HeaderParameterSubSchema struct { + HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,oneof"` +} +type NonBodyParameter_FormDataParameterSubSchema struct { + FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,oneof"` +} +type 
NonBodyParameter_QueryParameterSubSchema struct { + QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,oneof"` +} +type NonBodyParameter_PathParameterSubSchema struct { + PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,oneof"` +} + +func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { + return x.HeaderParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { + return x.FormDataParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { + return x.QueryParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { + return x.PathParameterSubSchema + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NonBodyParameter_OneofMarshaler, _NonBodyParameter_OneofUnmarshaler, _NonBodyParameter_OneofSizer, []interface{}{ + (*NonBodyParameter_HeaderParameterSubSchema)(nil), + (*NonBodyParameter_FormDataParameterSubSchema)(nil), + (*NonBodyParameter_QueryParameterSubSchema)(nil), + (*NonBodyParameter_PathParameterSubSchema)(nil), + } +} + +func _NonBodyParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NonBodyParameter) + // oneof + switch x := m.Oneof.(type) { + case *NonBodyParameter_HeaderParameterSubSchema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HeaderParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_FormDataParameterSubSchema: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FormDataParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_QueryParameterSubSchema: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.QueryParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_PathParameterSubSchema: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PathParameterSubSchema); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NonBodyParameter.Oneof has unexpected type %T", x) + } + return nil +} + +func _NonBodyParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NonBodyParameter) + switch tag { + case 1: // oneof.header_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(HeaderParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_HeaderParameterSubSchema{msg} + return true, err + case 2: // oneof.form_data_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FormDataParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_FormDataParameterSubSchema{msg} + return true, err + case 3: // oneof.query_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_QueryParameterSubSchema{msg} + return true, err + case 4: // oneof.path_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PathParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_PathParameterSubSchema{msg} + return true, err + default: + return false, nil + } +} + +func _NonBodyParameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NonBodyParameter) + // oneof + switch x := m.Oneof.(type) { + case *NonBodyParameter_HeaderParameterSubSchema: + s := proto.Size(x.HeaderParameterSubSchema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NonBodyParameter_FormDataParameterSubSchema: + s := proto.Size(x.FormDataParameterSubSchema) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NonBodyParameter_QueryParameterSubSchema: + s := proto.Size(x.QueryParameterSubSchema) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*NonBodyParameter_PathParameterSubSchema: + s := proto.Size(x.PathParameterSubSchema) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Oauth2AccessCodeSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` + TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} } +func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2AccessCodeSecurity) ProtoMessage() {} +func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *Oauth2AccessCodeSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { + if m != nil { + return m.AuthorizationUrl + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2ApplicationSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} } +func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ApplicationSecurity) ProtoMessage() {} +func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *Oauth2ApplicationSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2ApplicationSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} 
+ +func (m *Oauth2ApplicationSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2ImplicitSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} } +func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ImplicitSecurity) ProtoMessage() {} +func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *Oauth2ImplicitSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { + if m != nil { + return m.AuthorizationUrl + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2PasswordSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } +func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2PasswordSecurity) ProtoMessage() {} +func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +func (m *Oauth2PasswordSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2PasswordSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2Scopes struct { + 
AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } +func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } +func (*Oauth2Scopes) ProtoMessage() {} +func (*Oauth2Scopes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Operation struct { + Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` + // A brief summary of the operation. + Summary string `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + // A longer description of the operation, GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + // A unique identifier of the operation. + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + // A list of MIME types the API can produce. + Produces []string `protobuf:"bytes,6,rep,name=produces" json:"produces,omitempty"` + // A list of MIME types the API can consume. + Consumes []string `protobuf:"bytes,7,rep,name=consumes" json:"consumes,omitempty"` + // The parameters needed to send a valid API call. + Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters" json:"parameters,omitempty"` + Responses *Responses `protobuf:"bytes,9,opt,name=responses" json:"responses,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,10,rep,name=schemes" json:"schemes,omitempty"` + Deprecated bool `protobuf:"varint,11,opt,name=deprecated" json:"deprecated,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *Operation) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Operation) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *Operation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Operation) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Operation) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *Operation) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Operation) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Operation) GetParameters() []*ParametersItem { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Operation) GetResponses() *Responses { + if m != nil { + return m.Responses + } + return nil +} + +func (m *Operation) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Operation) GetDeprecated() bool { + if m != nil { + 
return m.Deprecated + } + return false +} + +func (m *Operation) GetSecurity() []*SecurityRequirement { + if m != nil { + return m.Security + } + return nil +} + +func (m *Operation) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Parameter struct { + // Types that are valid to be assigned to Oneof: + // *Parameter_BodyParameter + // *Parameter_NonBodyParameter + Oneof isParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (m *Parameter) Reset() { *m = Parameter{} } +func (m *Parameter) String() string { return proto.CompactTextString(m) } +func (*Parameter) ProtoMessage() {} +func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +type isParameter_Oneof interface { + isParameter_Oneof() +} + +type Parameter_BodyParameter struct { + BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,oneof"` +} +type Parameter_NonBodyParameter struct { + NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,oneof"` +} + +func (*Parameter_BodyParameter) isParameter_Oneof() {} +func (*Parameter_NonBodyParameter) isParameter_Oneof() {} + +func (m *Parameter) GetOneof() isParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *Parameter) GetBodyParameter() *BodyParameter { + if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok { + return x.BodyParameter + } + return nil +} + +func (m *Parameter) GetNonBodyParameter() *NonBodyParameter { + if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok { + return x.NonBodyParameter + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Parameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Parameter_OneofMarshaler, _Parameter_OneofUnmarshaler, _Parameter_OneofSizer, []interface{}{ + (*Parameter_BodyParameter)(nil), + (*Parameter_NonBodyParameter)(nil), + } +} + +func _Parameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Parameter) + // oneof + switch x := m.Oneof.(type) { + case *Parameter_BodyParameter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BodyParameter); err != nil { + return err + } + case *Parameter_NonBodyParameter: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NonBodyParameter); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Parameter.Oneof has unexpected type %T", x) + } + return nil +} + +func _Parameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Parameter) + switch tag { + case 1: // oneof.body_parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BodyParameter) + err := b.DecodeMessage(msg) + m.Oneof = &Parameter_BodyParameter{msg} + return true, err + case 2: // oneof.non_body_parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NonBodyParameter) + err := b.DecodeMessage(msg) + m.Oneof = &Parameter_NonBodyParameter{msg} + return true, err + default: + return false, nil + } +} + +func _Parameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Parameter) + // oneof + switch x := m.Oneof.(type) { + case *Parameter_BodyParameter: + s := proto.Size(x.BodyParameter) + n += proto.SizeVarint(1<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Parameter_NonBodyParameter: + s := proto.Size(x.NonBodyParameter) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// One or more JSON representations for parameters +type ParameterDefinitions struct { + AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } +func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } +func (*ParameterDefinitions) ProtoMessage() {} +func (*ParameterDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type ParametersItem struct { + // Types that are valid to be assigned to Oneof: + // *ParametersItem_Parameter + // *ParametersItem_JsonReference + Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *ParametersItem) Reset() { *m = ParametersItem{} } +func (m *ParametersItem) String() string { return proto.CompactTextString(m) } +func (*ParametersItem) ProtoMessage() {} +func (*ParametersItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +type isParametersItem_Oneof interface { + isParametersItem_Oneof() +} + +type ParametersItem_Parameter struct { + Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,oneof"` +} +type ParametersItem_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` +} + +func (*ParametersItem_Parameter) isParametersItem_Oneof() {} +func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} + +func (m *ParametersItem) GetOneof() isParametersItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *ParametersItem) GetParameter() *Parameter { + if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok { + return x.Parameter + } + return nil +} + +func (m *ParametersItem) GetJsonReference() *JsonReference { + if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok { + return x.JsonReference + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ParametersItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ParametersItem_OneofMarshaler, _ParametersItem_OneofUnmarshaler, _ParametersItem_OneofSizer, []interface{}{ + (*ParametersItem_Parameter)(nil), + (*ParametersItem_JsonReference)(nil), + } +} + +func _ParametersItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ParametersItem) + // oneof + switch x := m.Oneof.(type) { + case *ParametersItem_Parameter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Parameter); err != nil { + return err + } + case *ParametersItem_JsonReference: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JsonReference); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ParametersItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _ParametersItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ParametersItem) + switch tag { + case 1: // oneof.parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Parameter) + err := b.DecodeMessage(msg) + m.Oneof = &ParametersItem_Parameter{msg} + return true, err + case 2: // oneof.json_reference + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JsonReference) + err := b.DecodeMessage(msg) + m.Oneof = &ParametersItem_JsonReference{msg} + return true, err + default: + return false, nil + } +} + +func _ParametersItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ParametersItem) + // oneof + switch x := m.Oneof.(type) { + case *ParametersItem_Parameter: + s := proto.Size(x.Parameter) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ParametersItem_JsonReference: + s := proto.Size(x.JsonReference) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type PathItem struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Get *Operation `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"` + Put *Operation `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"` + Post *Operation `protobuf:"bytes,4,opt,name=post" json:"post,omitempty"` + Delete *Operation `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` + Options *Operation `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"` + Head *Operation `protobuf:"bytes,7,opt,name=head" json:"head,omitempty"` + Patch *Operation `protobuf:"bytes,8,opt,name=patch" json:"patch,omitempty"` + // The parameters needed to send a valid API call. 
+ Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters" json:"parameters,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PathItem) Reset() { *m = PathItem{} } +func (m *PathItem) String() string { return proto.CompactTextString(m) } +func (*PathItem) ProtoMessage() {} +func (*PathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *PathItem) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *PathItem) GetGet() *Operation { + if m != nil { + return m.Get + } + return nil +} + +func (m *PathItem) GetPut() *Operation { + if m != nil { + return m.Put + } + return nil +} + +func (m *PathItem) GetPost() *Operation { + if m != nil { + return m.Post + } + return nil +} + +func (m *PathItem) GetDelete() *Operation { + if m != nil { + return m.Delete + } + return nil +} + +func (m *PathItem) GetOptions() *Operation { + if m != nil { + return m.Options + } + return nil +} + +func (m *PathItem) GetHead() *Operation { + if m != nil { + return m.Head + } + return nil +} + +func (m *PathItem) GetPatch() *Operation { + if m != nil { + return m.Patch + } + return nil +} + +func (m *PathItem) GetParameters() []*ParametersItem { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *PathItem) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type PathParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } +func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*PathParameterSubSchema) ProtoMessage() {} +func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +func (m *PathParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *PathParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *PathParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PathParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PathParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PathParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *PathParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *PathParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *PathParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *PathParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *PathParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *PathParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + 
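Illustrative usage, not part of the vendored file: a short sketch of the oneof pattern used by Parameter (and the other *Item wrappers in this file). Assigning one of the generated wrapper structs selects the set arm; the Get* accessors return nil for the arm that is not set. Import path and package alias are assumed as above, and BodyParameter's Name and Required fields are assumed from the message defined earlier in this file.

package main

import (
	"fmt"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
)

func main() {
	p := &openapi_v2.Parameter{
		Oneof: &openapi_v2.Parameter_BodyParameter{
			BodyParameter: &openapi_v2.BodyParameter{Name: "body", Required: true},
		},
	}

	fmt.Println(p.GetBodyParameter().GetName()) // "body"
	fmt.Println(p.GetNonBodyParameter() == nil) // true: the other arm is unset

	// A type switch over the oneof interface distinguishes the arms explicitly.
	switch x := p.GetOneof().(type) {
	case *openapi_v2.Parameter_BodyParameter:
		fmt.Println("body parameter:", x.BodyParameter.GetName())
	case *openapi_v2.Parameter_NonBodyParameter:
		fmt.Println("non-body parameter")
	}
}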
+func (m *PathParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *PathParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *PathParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *PathParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *PathParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *PathParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *PathParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *PathParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *PathParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +type Paths struct { + VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` +} + +func (m *Paths) Reset() { *m = Paths{} } +func (m *Paths) String() string { return proto.CompactTextString(m) } +func (*Paths) ProtoMessage() {} +func (*Paths) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } + +func (m *Paths) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +func (m *Paths) GetPath() []*NamedPathItem { + if m != nil { + return m.Path + } + return nil +} + +type PrimitivesItems struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 
`protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } +func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } +func (*PrimitivesItems) ProtoMessage() {} +func (*PrimitivesItems) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } + +func (m *PrimitivesItems) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PrimitivesItems) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *PrimitivesItems) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *PrimitivesItems) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *PrimitivesItems) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *PrimitivesItems) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *PrimitivesItems) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *PrimitivesItems) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *PrimitivesItems) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *PrimitivesItems) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *PrimitivesItems) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *PrimitivesItems) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *PrimitivesItems) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *PrimitivesItems) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *PrimitivesItems) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *PrimitivesItems) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *PrimitivesItems) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *PrimitivesItems) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Properties struct { + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Properties) Reset() { *m = Properties{} } +func (m *Properties) String() string { return proto.CompactTextString(m) } +func (*Properties) ProtoMessage() {} +func (*Properties) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } + +func (m *Properties) GetAdditionalProperties() []*NamedSchema { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type QueryParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. 
+ Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } +func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*QueryParameterSubSchema) ProtoMessage() {} +func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } + +func (m *QueryParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *QueryParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *QueryParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *QueryParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool { + if m != nil { + return m.AllowEmptyValue + } + return false +} + +func (m *QueryParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *QueryParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *QueryParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + 
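Illustrative usage, not part of the vendored file: a sketch of assembling the Paths message above from a PathItem and an Operation. NamedPathItem is defined earlier in this file; its Name and Value fields are assumed here, along with the import path used in the earlier sketches.

package main

import (
	"fmt"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
)

func main() {
	paths := &openapi_v2.Paths{
		Path: []*openapi_v2.NamedPathItem{
			{
				Name: "/pets",
				Value: &openapi_v2.PathItem{
					Get: &openapi_v2.Operation{
						OperationId: "listPets",
						Produces:    []string{"application/json"},
					},
				},
			},
		},
	}

	// Walk the named path entries and read the GET operation through the getters.
	for _, p := range paths.GetPath() {
		fmt.Println(p.GetName(), "->", p.GetValue().GetGet().GetOperationId())
	}
}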
+func (m *QueryParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *QueryParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *QueryParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *QueryParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *QueryParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *QueryParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *QueryParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *QueryParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *QueryParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *QueryParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *QueryParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Response struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"` + Headers *Headers `protobuf:"bytes,3,opt,name=headers" json:"headers,omitempty"` + Examples *Examples `protobuf:"bytes,4,opt,name=examples" json:"examples,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } + +func (m *Response) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Response) GetSchema() *SchemaItem { + if m != nil { + return m.Schema + } + return nil +} + +func (m *Response) GetHeaders() *Headers { + if m != nil { + return m.Headers + } + return nil +} + +func (m *Response) GetExamples() *Examples { + if m != nil { + return m.Examples + } + return nil +} + +func (m *Response) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// One or more JSON representations for parameters +type ResponseDefinitions struct { + AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } +func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } +func (*ResponseDefinitions) ProtoMessage() {} +func (*ResponseDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } + +func (m *ResponseDefinitions) GetAdditionalProperties() 
[]*NamedResponse { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type ResponseValue struct { + // Types that are valid to be assigned to Oneof: + // *ResponseValue_Response + // *ResponseValue_JsonReference + Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` +} + +func (m *ResponseValue) Reset() { *m = ResponseValue{} } +func (m *ResponseValue) String() string { return proto.CompactTextString(m) } +func (*ResponseValue) ProtoMessage() {} +func (*ResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } + +type isResponseValue_Oneof interface { + isResponseValue_Oneof() +} + +type ResponseValue_Response struct { + Response *Response `protobuf:"bytes,1,opt,name=response,oneof"` +} +type ResponseValue_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` +} + +func (*ResponseValue_Response) isResponseValue_Oneof() {} +func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} + +func (m *ResponseValue) GetOneof() isResponseValue_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *ResponseValue) GetResponse() *Response { + if x, ok := m.GetOneof().(*ResponseValue_Response); ok { + return x.Response + } + return nil +} + +func (m *ResponseValue) GetJsonReference() *JsonReference { + if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok { + return x.JsonReference + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ResponseValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ResponseValue_OneofMarshaler, _ResponseValue_OneofUnmarshaler, _ResponseValue_OneofSizer, []interface{}{ + (*ResponseValue_Response)(nil), + (*ResponseValue_JsonReference)(nil), + } +} + +func _ResponseValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ResponseValue) + // oneof + switch x := m.Oneof.(type) { + case *ResponseValue_Response: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Response); err != nil { + return err + } + case *ResponseValue_JsonReference: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JsonReference); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ResponseValue.Oneof has unexpected type %T", x) + } + return nil +} + +func _ResponseValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ResponseValue) + switch tag { + case 1: // oneof.response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Response) + err := b.DecodeMessage(msg) + m.Oneof = &ResponseValue_Response{msg} + return true, err + case 2: // oneof.json_reference + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JsonReference) + err := b.DecodeMessage(msg) + m.Oneof = &ResponseValue_JsonReference{msg} + return true, err + default: + return false, nil + } +} + +func _ResponseValue_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ResponseValue) + // oneof + switch x := m.Oneof.(type) { + case *ResponseValue_Response: + s := proto.Size(x.Response) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseValue_JsonReference: + s := proto.Size(x.JsonReference) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Response objects names can either be any valid HTTP status code or 'default'. +type Responses struct { + ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode" json:"response_code,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Responses) Reset() { *m = Responses{} } +func (m *Responses) String() string { return proto.CompactTextString(m) } +func (*Responses) ProtoMessage() {} +func (*Responses) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } + +func (m *Responses) GetResponseCode() []*NamedResponseValue { + if m != nil { + return m.ResponseCode + } + return nil +} + +func (m *Responses) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. +type Schema struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Title string `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + Maximum float64 `protobuf:"fixed64,7,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,9,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,13,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties" json:"max_properties,omitempty"` + MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties" json:"min_properties,omitempty"` + Required []string `protobuf:"bytes,19,rep,name=required" json:"required,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + Type *TypeItem `protobuf:"bytes,22,opt,name=type" json:"type,omitempty"` + Items *ItemsItem `protobuf:"bytes,23,opt,name=items" json:"items,omitempty"` + AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf" json:"all_of,omitempty"` + Properties *Properties `protobuf:"bytes,25,opt,name=properties" json:"properties,omitempty"` + Discriminator string 
`protobuf:"bytes,26,opt,name=discriminator" json:"discriminator,omitempty"` + ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + Xml *Xml `protobuf:"bytes,28,opt,name=xml" json:"xml,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,30,opt,name=example" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } + +func (m *Schema) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *Schema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Schema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Schema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Schema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Schema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Schema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Schema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Schema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Schema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Schema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Schema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Schema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Schema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Schema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Schema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Schema) GetMaxProperties() int64 { + if m != nil { + return m.MaxProperties + } + return 0 +} + +func (m *Schema) GetMinProperties() int64 { + if m != nil { + return m.MinProperties + } + return 0 +} + +func (m *Schema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *Schema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +func (m *Schema) GetType() *TypeItem { + if m != nil { + return m.Type + } + return nil +} + +func (m *Schema) GetItems() *ItemsItem { + if m != nil { + return m.Items + } + return nil +} + +func (m *Schema) GetAllOf() []*Schema { + if m != nil { + return m.AllOf + } + return nil +} + +func (m *Schema) GetProperties() *Properties { + if m != nil { + return m.Properties + } + return nil +} + +func (m *Schema) GetDiscriminator() string { + if m != nil { + return m.Discriminator + } + return "" +} + +func (m *Schema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + 
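Illustrative usage, not part of the vendored file: a sketch of a wire round-trip for the ResponseValue oneof above, using the github.com/golang/protobuf/proto package carried in this vendor tree. The generated oneof support is what lets the set arm survive Marshal and Unmarshal; import path and alias are assumed as before.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
)

func main() {
	in := &openapi_v2.ResponseValue{
		Oneof: &openapi_v2.ResponseValue_Response{
			Response: &openapi_v2.Response{Description: "OK"},
		},
	}

	// Marshal encodes whichever arm of the oneof is currently set.
	data, err := proto.Marshal(in)
	if err != nil {
		log.Fatal(err)
	}

	// Unmarshal restores the same arm into a fresh message.
	out := &openapi_v2.ResponseValue{}
	if err := proto.Unmarshal(data, out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetResponse().GetDescription()) // "OK"
}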
+func (m *Schema) GetXml() *Xml { + if m != nil { + return m.Xml + } + return nil +} + +func (m *Schema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Schema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *Schema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type SchemaItem struct { + // Types that are valid to be assigned to Oneof: + // *SchemaItem_Schema + // *SchemaItem_FileSchema + Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *SchemaItem) Reset() { *m = SchemaItem{} } +func (m *SchemaItem) String() string { return proto.CompactTextString(m) } +func (*SchemaItem) ProtoMessage() {} +func (*SchemaItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } + +type isSchemaItem_Oneof interface { + isSchemaItem_Oneof() +} + +type SchemaItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` +} +type SchemaItem_FileSchema struct { + FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,oneof"` +} + +func (*SchemaItem_Schema) isSchemaItem_Oneof() {} +func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} + +func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *SchemaItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*SchemaItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *SchemaItem) GetFileSchema() *FileSchema { + if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok { + return x.FileSchema + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SchemaItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SchemaItem_OneofMarshaler, _SchemaItem_OneofUnmarshaler, _SchemaItem_OneofSizer, []interface{}{ + (*SchemaItem_Schema)(nil), + (*SchemaItem_FileSchema)(nil), + } +} + +func _SchemaItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SchemaItem) + // oneof + switch x := m.Oneof.(type) { + case *SchemaItem_Schema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schema); err != nil { + return err + } + case *SchemaItem_FileSchema: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FileSchema); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SchemaItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _SchemaItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SchemaItem) + switch tag { + case 1: // oneof.schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schema) + err := b.DecodeMessage(msg) + m.Oneof = &SchemaItem_Schema{msg} + return true, err + case 2: // oneof.file_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileSchema) + err := b.DecodeMessage(msg) + m.Oneof = &SchemaItem_FileSchema{msg} + return true, err + default: + return false, nil + } +} + +func _SchemaItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SchemaItem) + // oneof + switch x := m.Oneof.(type) { + case *SchemaItem_Schema: + s := proto.Size(x.Schema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*SchemaItem_FileSchema: + s := proto.Size(x.FileSchema) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SecurityDefinitions struct { + AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } +func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitions) ProtoMessage() {} +func (*SecurityDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } + +func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type SecurityDefinitionsItem struct { + // Types that are valid to be assigned to Oneof: + // *SecurityDefinitionsItem_BasicAuthenticationSecurity + // *SecurityDefinitionsItem_ApiKeySecurity + // *SecurityDefinitionsItem_Oauth2ImplicitSecurity + // *SecurityDefinitionsItem_Oauth2PasswordSecurity + // *SecurityDefinitionsItem_Oauth2ApplicationSecurity + // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity + Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } +func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitionsItem) ProtoMessage() {} +func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } + +type isSecurityDefinitionsItem_Oneof interface { + isSecurityDefinitionsItem_Oneof() +} + +type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { + BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,oneof"` +} +type SecurityDefinitionsItem_ApiKeySecurity struct { + ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { + Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { + Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { + Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { + Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,oneof"` +} + +func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} +func 
(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { + return x.BasicAuthenticationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { + return x.ApiKeySecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { + return x.Oauth2ImplicitSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { + return x.Oauth2PasswordSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { + return x.Oauth2ApplicationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { + return x.Oauth2AccessCodeSecurity + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SecurityDefinitionsItem_OneofMarshaler, _SecurityDefinitionsItem_OneofUnmarshaler, _SecurityDefinitionsItem_OneofSizer, []interface{}{ + (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), + (*SecurityDefinitionsItem_ApiKeySecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), + } +} + +func _SecurityDefinitionsItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SecurityDefinitionsItem) + // oneof + switch x := m.Oneof.(type) { + case *SecurityDefinitionsItem_BasicAuthenticationSecurity: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BasicAuthenticationSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_ApiKeySecurity: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ApiKeySecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2ImplicitSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2PasswordSecurity: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2PasswordSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2ApplicationSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + b.EncodeVarint(6<<3 | 
proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2AccessCodeSecurity); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SecurityDefinitionsItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _SecurityDefinitionsItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SecurityDefinitionsItem) + switch tag { + case 1: // oneof.basic_authentication_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BasicAuthenticationSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{msg} + return true, err + case 2: // oneof.api_key_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ApiKeySecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{msg} + return true, err + case 3: // oneof.oauth2_implicit_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2ImplicitSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{msg} + return true, err + case 4: // oneof.oauth2_password_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2PasswordSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{msg} + return true, err + case 5: // oneof.oauth2_application_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2ApplicationSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{msg} + return true, err + case 6: // oneof.oauth2_access_code_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2AccessCodeSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{msg} + return true, err + default: + return false, nil + } +} + +func _SecurityDefinitionsItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SecurityDefinitionsItem) + // oneof + switch x := m.Oneof.(type) { + case *SecurityDefinitionsItem_BasicAuthenticationSecurity: + s := proto.Size(x.BasicAuthenticationSecurity) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_ApiKeySecurity: + s := proto.Size(x.ApiKeySecurity) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: + s := proto.Size(x.Oauth2ImplicitSecurity) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2PasswordSecurity: + s := proto.Size(x.Oauth2PasswordSecurity) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: + s := proto.Size(x.Oauth2ApplicationSecurity) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + s := proto.Size(x.Oauth2AccessCodeSecurity) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SecurityRequirement struct { + 
AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } +func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } +func (*SecurityRequirement) ProtoMessage() {} +func (*SecurityRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } + +func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type StringArray struct { + Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` +} + +func (m *StringArray) Reset() { *m = StringArray{} } +func (m *StringArray) String() string { return proto.CompactTextString(m) } +func (*StringArray) ProtoMessage() {} +func (*StringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } + +func (m *StringArray) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +type Tag struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} +func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } + +func (m *Tag) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Tag) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Tag) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Tag) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type TypeItem struct { + Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` +} + +func (m *TypeItem) Reset() { *m = TypeItem{} } +func (m *TypeItem) String() string { return proto.CompactTextString(m) } +func (*TypeItem) ProtoMessage() {} +func (*TypeItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } + +func (m *TypeItem) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +// Any property starting with x- is valid. 
+type VendorExtension struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *VendorExtension) Reset() { *m = VendorExtension{} } +func (m *VendorExtension) String() string { return proto.CompactTextString(m) } +func (*VendorExtension) ProtoMessage() {} +func (*VendorExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } + +func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Xml struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"` + Attribute bool `protobuf:"varint,4,opt,name=attribute" json:"attribute,omitempty"` + Wrapped bool `protobuf:"varint,5,opt,name=wrapped" json:"wrapped,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Xml) Reset() { *m = Xml{} } +func (m *Xml) String() string { return proto.CompactTextString(m) } +func (*Xml) ProtoMessage() {} +func (*Xml) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } + +func (m *Xml) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Xml) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *Xml) GetPrefix() string { + if m != nil { + return m.Prefix + } + return "" +} + +func (m *Xml) GetAttribute() bool { + if m != nil { + return m.Attribute + } + return false +} + +func (m *Xml) GetWrapped() bool { + if m != nil { + return m.Wrapped + } + return false +} + +func (m *Xml) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +func init() { + proto.RegisterType((*AdditionalPropertiesItem)(nil), "openapi.v2.AdditionalPropertiesItem") + proto.RegisterType((*Any)(nil), "openapi.v2.Any") + proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity") + proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity") + proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter") + proto.RegisterType((*Contact)(nil), "openapi.v2.Contact") + proto.RegisterType((*Default)(nil), "openapi.v2.Default") + proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions") + proto.RegisterType((*Document)(nil), "openapi.v2.Document") + proto.RegisterType((*Examples)(nil), "openapi.v2.Examples") + proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs") + proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema") + proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema") + proto.RegisterType((*Header)(nil), "openapi.v2.Header") + proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema") + proto.RegisterType((*Headers)(nil), "openapi.v2.Headers") + proto.RegisterType((*Info)(nil), "openapi.v2.Info") + proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem") + proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference") + proto.RegisterType((*License)(nil), "openapi.v2.License") + proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny") + proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader") + proto.RegisterType((*NamedParameter)(nil), 
"openapi.v2.NamedParameter") + proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem") + proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse") + proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue") + proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema") + proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem") + proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString") + proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray") + proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter") + proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity") + proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity") + proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity") + proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity") + proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes") + proto.RegisterType((*Operation)(nil), "openapi.v2.Operation") + proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter") + proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions") + proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem") + proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem") + proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema") + proto.RegisterType((*Paths)(nil), "openapi.v2.Paths") + proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems") + proto.RegisterType((*Properties)(nil), "openapi.v2.Properties") + proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema") + proto.RegisterType((*Response)(nil), "openapi.v2.Response") + proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions") + proto.RegisterType((*ResponseValue)(nil), "openapi.v2.ResponseValue") + proto.RegisterType((*Responses)(nil), "openapi.v2.Responses") + proto.RegisterType((*Schema)(nil), "openapi.v2.Schema") + proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem") + proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions") + proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem") + proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement") + proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray") + proto.RegisterType((*Tag)(nil), "openapi.v2.Tag") + proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem") + proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension") + proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") +} + +func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 3129 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, + 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, + 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, + 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, + 0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5, + 0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xfa, 0x31, 0x7d, + 
0x7b, 0x1e, 0x96, 0x0b, 0x28, 0xd0, 0x6a, 0xe6, 0xde, 0x73, 0xee, 0xb9, 0xa7, 0x4f, 0x9f, 0xd7, + 0x3d, 0xe7, 0x36, 0xac, 0xef, 0x79, 0xd8, 0xdd, 0xdd, 0x7f, 0x70, 0xb2, 0x73, 0x2b, 0xfa, 0xb7, + 0xed, 0xf9, 0x24, 0x24, 0x1a, 0x10, 0x0f, 0xbb, 0xc8, 0xb3, 0xb6, 0x4f, 0x76, 0x36, 0xd6, 0x8f, + 0x08, 0x39, 0xb2, 0xf1, 0x2d, 0x06, 0x39, 0x1c, 0x0e, 0x6e, 0x21, 0x77, 0xc4, 0xd1, 0xb6, 0x1c, + 0xd0, 0x77, 0xfb, 0x7d, 0x2b, 0xb4, 0x88, 0x8b, 0xec, 0x7d, 0x9f, 0x78, 0xd8, 0x0f, 0x2d, 0x1c, + 0x3c, 0x08, 0xb1, 0xa3, 0xfd, 0x1f, 0xd4, 0x82, 0xde, 0x31, 0x76, 0x90, 0x5e, 0xbc, 0x52, 0xbc, + 0xb6, 0xb0, 0xa3, 0x6d, 0xc7, 0x34, 0xb7, 0x0f, 0x18, 0xa4, 0x5b, 0x30, 0x04, 0x8e, 0xb6, 0x01, + 0xf5, 0x43, 0x42, 0x6c, 0x8c, 0x5c, 0xbd, 0x74, 0xa5, 0x78, 0xad, 0xd1, 0x2d, 0x18, 0x72, 0xe2, + 0x76, 0x1d, 0xaa, 0xc4, 0xc5, 0x64, 0xb0, 0x75, 0x0f, 0xca, 0xbb, 0xee, 0x48, 0xbb, 0x01, 0xd5, + 0x13, 0x64, 0x0f, 0xb1, 0x20, 0xbc, 0xba, 0xcd, 0x19, 0xdc, 0x96, 0x0c, 0x6e, 0xef, 0xba, 0x23, + 0x83, 0xa3, 0x68, 0x1a, 0x54, 0x46, 0xc8, 0xb1, 0x19, 0xd1, 0xa6, 0xc1, 0xfe, 0x6f, 0x7d, 0x51, + 0x84, 0xf6, 0xae, 0x67, 0xbd, 0x8b, 0x47, 0x07, 0xb8, 0x37, 0xf4, 0xad, 0x70, 0x44, 0xd1, 0xc2, + 0x91, 0xc7, 0x29, 0x36, 0x0d, 0xf6, 0x9f, 0xce, 0xb9, 0xc8, 0xc1, 0x72, 0x29, 0xfd, 0xaf, 0xb5, + 0xa1, 0x64, 0xb9, 0x7a, 0x99, 0xcd, 0x94, 0x2c, 0x57, 0xbb, 0x02, 0x0b, 0x7d, 0x1c, 0xf4, 0x7c, + 0xcb, 0xa3, 0x32, 0xd0, 0x2b, 0x0c, 0x90, 0x9c, 0xd2, 0xbe, 0x06, 0x9d, 0x13, 0xec, 0xf6, 0x89, + 0x6f, 0xe2, 0xd3, 0x10, 0xbb, 0x01, 0x45, 0xab, 0x5e, 0x29, 0x33, 0xbe, 0x13, 0x02, 0x79, 0x0f, + 0x39, 0xb8, 0x4f, 0xf9, 0x5e, 0xe2, 0xd8, 0xf7, 0x24, 0xf2, 0xd6, 0x67, 0x45, 0xd8, 0xbc, 0x8d, + 0x02, 0xab, 0xb7, 0x3b, 0x0c, 0x8f, 0xb1, 0x1b, 0x5a, 0x3d, 0x44, 0x09, 0x4f, 0x64, 0x7d, 0x8c, + 0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1, 0x3e, + 0xf2, 0x91, 0x83, 0x43, 0xec, 0x8f, 0x6f, 0x5a, 0xcc, 0x6e, 0x3a, 0x8b, 0x44, 0x37, 0xa0, 0xe1, + 0xe3, 0x27, 0x43, 0xcb, 0xc7, 0x7d, 0x26, 0xce, 0x86, 0x11, 0x8d, 0xb5, 0x1b, 0x91, 0x4a, 0x55, + 0xf3, 0x54, 0x2a, 0x52, 0x28, 0xd5, 0x03, 0xd6, 0xe6, 0x79, 0xc0, 0x1f, 0x17, 0xa1, 0x7e, 0x87, + 0xb8, 0x21, 0xea, 0x85, 0x11, 0xe3, 0xc5, 0x04, 0xe3, 0x1d, 0x28, 0x0f, 0x7d, 0xa9, 0x58, 0xf4, + 0xaf, 0xb6, 0x0a, 0x55, 0xec, 0x20, 0xcb, 0x16, 0x4f, 0xc3, 0x07, 0x4a, 0x46, 0x2a, 0xf3, 0x30, + 0xf2, 0x08, 0xea, 0x77, 0xf1, 0x00, 0x0d, 0xed, 0x50, 0x7b, 0x00, 0x17, 0x50, 0x64, 0x6f, 0xa6, + 0x17, 0x19, 0x9c, 0x5e, 0x9c, 0x40, 0x70, 0x15, 0x29, 0x4c, 0x74, 0xeb, 0x3b, 0xb0, 0x70, 0x17, + 0x0f, 0x2c, 0x97, 0x41, 0x02, 0xed, 0xe1, 0x64, 0xca, 0x17, 0x33, 0x94, 0x85, 0xb8, 0xd5, 0xc4, + 0xff, 0x58, 0x85, 0xc6, 0x5d, 0xd2, 0x1b, 0x3a, 0xd8, 0x0d, 0x35, 0x1d, 0xea, 0xc1, 0x53, 0x74, + 0x74, 0x84, 0x7d, 0x21, 0x3f, 0x39, 0xd4, 0x5e, 0x86, 0x8a, 0xe5, 0x0e, 0x08, 0x93, 0xe1, 0xc2, + 0x4e, 0x27, 0xb9, 0xc7, 0x03, 0x77, 0x40, 0x0c, 0x06, 0xa5, 0xc2, 0x3f, 0x26, 0x41, 0x28, 0xa4, + 0xca, 0xfe, 0x6b, 0x9b, 0xd0, 0x3c, 0x44, 0x01, 0x36, 0x3d, 0x14, 0x1e, 0x0b, 0xab, 0x6b, 0xd0, + 0x89, 0x7d, 0x14, 0x1e, 0xb3, 0x0d, 0x29, 0x77, 0x38, 0x60, 0x96, 0x46, 0x37, 0xe4, 0x43, 0xaa, + 0x5c, 0x3d, 0xe2, 0x06, 0x43, 0x0a, 0xaa, 0x31, 0x50, 0x34, 0xa6, 0x30, 0xcf, 0x27, 0xfd, 0x61, + 0x0f, 0x07, 0x7a, 0x9d, 0xc3, 0xe4, 0x58, 0x7b, 0x0d, 0xaa, 0x74, 0xa7, 0x40, 0x6f, 0x30, 0x4e, + 0x97, 0x93, 0x9c, 0xd2, 0x2d, 0x03, 0x83, 0xc3, 0xb5, 0xb7, 0xa9, 0x0d, 0x44, 0x52, 0xd5, 0x9b, + 0x0c, 0x3d, 0x25, 0xbc, 0x84, 0xd0, 0x8d, 0x24, 0xae, 0xf6, 0x75, 0x00, 0x4f, 0xda, 0x52, 0xa0, + 0x03, 0x5b, 0x79, 0x25, 
0xbd, 0x91, 0x80, 0x26, 0x49, 0x24, 0xd6, 0x68, 0xef, 0x40, 0xd3, 0xc7, + 0x81, 0x47, 0xdc, 0x00, 0x07, 0xfa, 0x02, 0x23, 0xf0, 0x62, 0x92, 0x80, 0x21, 0x80, 0xc9, 0xf5, + 0xf1, 0x0a, 0xed, 0xab, 0xd0, 0x08, 0x84, 0x53, 0xd1, 0x17, 0xd9, 0x5b, 0x4f, 0xad, 0x96, 0x0e, + 0xc7, 0xe0, 0xd6, 0x48, 0x5f, 0xad, 0x11, 0x2d, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd, 0xa4, 0x04, + 0x5a, 0x59, 0x36, 0x24, 0xa1, 0x24, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95, 0x10, 0x1d, + 0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x94, 0xa4, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd, 0x03, 0x2d, + 0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x4f, 0x62, 0xdf, 0x13, + 0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x62, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6, 0xf9, 0x18, + 0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c, 0x26, 0xd9, + 0x9e, 0xc1, 0xbb, 0x66, 0x1d, 0xd2, 0x99, 0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d, 0x1b, 0x73, + 0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7, 0x17, 0x5a, + 0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0x8c, 0x73, 0x5c, 0xce, 0x72, 0x7c, 0x1d, 0xea, 0x7d, 0xee, 0xd9, + 0x98, 0x0d, 0x8f, 0xbd, 0x63, 0xca, 0x91, 0x84, 0xa7, 0xc2, 0x02, 0x37, 0xea, 0x38, 0x2c, 0xc8, + 0x08, 0x58, 0x4b, 0x44, 0xc0, 0x4d, 0x6a, 0x0b, 0xa8, 0x6f, 0x12, 0xd7, 0x1e, 0xe9, 0x75, 0x19, + 0x47, 0x50, 0x7f, 0xcf, 0xb5, 0x47, 0x59, 0x9d, 0x69, 0xcc, 0xa5, 0x33, 0xd7, 0xa1, 0x8e, 0xf9, + 0x2b, 0x17, 0x06, 0x9e, 0x65, 0x5b, 0xc0, 0x95, 0x6f, 0x00, 0xe6, 0x79, 0x03, 0x5f, 0xd4, 0x60, + 0xe3, 0x3e, 0xf1, 0x9d, 0xbb, 0x28, 0x44, 0x91, 0x03, 0x38, 0x18, 0x1e, 0x1e, 0xc8, 0xb4, 0x29, + 0x16, 0x4b, 0x71, 0x2c, 0x5a, 0xf2, 0xc8, 0x5a, 0xca, 0xcb, 0x55, 0xca, 0xf9, 0xf1, 0xb9, 0x92, + 0x08, 0x73, 0x37, 0x60, 0x19, 0xd9, 0x36, 0x79, 0x6a, 0x62, 0xc7, 0x0b, 0x47, 0x26, 0x4f, 0xbc, + 0xaa, 0x6c, 0xab, 0x25, 0x06, 0xb8, 0x47, 0xe7, 0x3f, 0x90, 0xc9, 0x56, 0xe6, 0x45, 0xc4, 0x3a, + 0x53, 0x4f, 0xe9, 0xcc, 0xff, 0x43, 0xd5, 0x0a, 0xb1, 0x23, 0x65, 0xbf, 0x99, 0xf2, 0x74, 0xbe, + 0xe5, 0x58, 0xa1, 0x75, 0xc2, 0x33, 0xc9, 0xc0, 0xe0, 0x98, 0xda, 0xeb, 0xb0, 0xdc, 0x23, 0xb6, + 0x8d, 0x7b, 0x94, 0x59, 0x53, 0x50, 0x6d, 0x32, 0xaa, 0x9d, 0x18, 0x70, 0x9f, 0xd3, 0x4f, 0xe8, + 0x16, 0x4c, 0xd1, 0x2d, 0x1d, 0xea, 0x0e, 0x3a, 0xb5, 0x9c, 0xa1, 0xc3, 0xbc, 0x66, 0xd1, 0x90, + 0x43, 0xba, 0x23, 0x3e, 0xed, 0xd9, 0xc3, 0xc0, 0x3a, 0xc1, 0xa6, 0xc4, 0x59, 0x64, 0x0f, 0xdf, + 0x89, 0x00, 0xdf, 0x14, 0xc8, 0x94, 0x8c, 0xe5, 0x32, 0x94, 0x96, 0x20, 0xc3, 0x87, 0x63, 0x64, + 0x04, 0x4e, 0x7b, 0x9c, 0x8c, 0x40, 0x7e, 0x01, 0xc0, 0x41, 0xa7, 0xa6, 0x8d, 0xdd, 0xa3, 0xf0, + 0x98, 0x79, 0xb3, 0xb2, 0xd1, 0x74, 0xd0, 0xe9, 0x43, 0x36, 0xc1, 0xc0, 0x96, 0x2b, 0xc1, 0x1d, + 0x01, 0xb6, 0x5c, 0x01, 0xd6, 0xa1, 0xee, 0xa1, 0x90, 0x2a, 0xab, 0xbe, 0xcc, 0x83, 0xad, 0x18, + 0x52, 0x8b, 0xa0, 0x74, 0xb9, 0xd0, 0x35, 0xb6, 0xae, 0xe1, 0xa0, 0x53, 0x26, 0x61, 0x06, 0xb4, + 0x5c, 0x01, 0x5c, 0x11, 0x40, 0xcb, 0xe5, 0xc0, 0x97, 0x60, 0x71, 0xe8, 0x5a, 0x4f, 0x86, 0x58, + 0xc0, 0x57, 0x19, 0xe7, 0x0b, 0x7c, 0x8e, 0xa3, 0x5c, 0x85, 0x0a, 0x76, 0x87, 0x8e, 0x7e, 0x21, + 0xeb, 0xaa, 0xa9, 0xa8, 0x19, 0x50, 0x7b, 0x11, 0x16, 0x9c, 0xa1, 0x1d, 0x5a, 0x9e, 0x8d, 0x4d, + 0x32, 0xd0, 0xd7, 0x98, 0x90, 0x40, 0x4e, 0xed, 0x0d, 0x94, 0xd6, 0x72, 0x71, 0x2e, 0x6b, 0xa9, + 0x42, 0xad, 0x8b, 0x51, 0x1f, 0xfb, 0xca, 0xb4, 0x38, 0xd6, 0xc5, 0x92, 0x5a, 0x17, 0xcb, 0x67, + 0xd3, 0xc5, 0xca, 0x74, 0x5d, 0xac, 0xce, 0xae, 0x8b, 0xb5, 0x19, 0x74, 0xb1, 0x3e, 0x5d, 0x17, + 0x1b, 0x33, 0xe8, 0x62, 0x73, 0x26, 0x5d, 0x84, 
0xc9, 0xba, 0xb8, 0x30, 0x41, 0x17, 0x17, 0x27, + 0xe8, 0x62, 0x6b, 0x92, 0x2e, 0xb6, 0xa7, 0xe8, 0xe2, 0x52, 0xbe, 0x2e, 0x76, 0xe6, 0xd0, 0xc5, + 0xe5, 0x8c, 0x2e, 0x8e, 0x79, 0x4b, 0x6d, 0xb6, 0x23, 0xd4, 0xca, 0x3c, 0xda, 0xfa, 0xb7, 0x2a, + 0xe8, 0x5c, 0x5b, 0xff, 0x2d, 0x9e, 0x5d, 0x5a, 0x48, 0x55, 0x69, 0x21, 0x35, 0xb5, 0x85, 0xd4, + 0xcf, 0x66, 0x21, 0x8d, 0xe9, 0x16, 0xd2, 0x9c, 0xdd, 0x42, 0x60, 0x06, 0x0b, 0x59, 0x98, 0x6e, + 0x21, 0x8b, 0x33, 0x58, 0x48, 0x6b, 0x26, 0x0b, 0x69, 0x4f, 0xb6, 0x90, 0xa5, 0x09, 0x16, 0xd2, + 0x99, 0x60, 0x21, 0xcb, 0x93, 0x2c, 0x44, 0x9b, 0x62, 0x21, 0x2b, 0xf9, 0x16, 0xb2, 0x3a, 0x87, + 0x85, 0x5c, 0x98, 0xc9, 0x5b, 0xaf, 0xcd, 0xa3, 0xff, 0xdf, 0x82, 0x3a, 0x57, 0xff, 0x67, 0x38, + 0x7e, 0xf2, 0x85, 0x39, 0xc9, 0xf3, 0xe7, 0x25, 0xa8, 0xd0, 0x03, 0x64, 0x9c, 0x98, 0x16, 0x93, + 0x89, 0xa9, 0x0e, 0xf5, 0x13, 0xec, 0x07, 0x71, 0x65, 0x44, 0x0e, 0x67, 0x30, 0xa4, 0x6b, 0xd0, + 0x09, 0xb1, 0xef, 0x04, 0x26, 0x19, 0x98, 0x01, 0xf6, 0x4f, 0xac, 0x9e, 0x34, 0xaa, 0x36, 0x9b, + 0xdf, 0x1b, 0x1c, 0xf0, 0x59, 0xed, 0x26, 0xd4, 0x7b, 0xbc, 0x7c, 0x20, 0x9c, 0xfe, 0x4a, 0xf2, + 0x21, 0x44, 0x65, 0xc1, 0x90, 0x38, 0x14, 0xdd, 0xb6, 0x7a, 0xd8, 0x0d, 0x78, 0xfa, 0x34, 0x86, + 0xfe, 0x90, 0x83, 0x0c, 0x89, 0xa3, 0x14, 0x7e, 0x7d, 0x1e, 0xe1, 0xbf, 0x05, 0x4d, 0xa6, 0x0c, + 0xac, 0x56, 0x77, 0x23, 0x51, 0xab, 0x2b, 0x4f, 0x2e, 0xac, 0x6c, 0xdd, 0x85, 0xd6, 0x37, 0x02, + 0xe2, 0x1a, 0x78, 0x80, 0x7d, 0xec, 0xf6, 0xb0, 0xb6, 0x0c, 0x15, 0xd3, 0xc7, 0x03, 0x21, 0xe3, + 0xb2, 0x81, 0x07, 0xd3, 0xeb, 0x4f, 0x5b, 0x1e, 0xd4, 0xc5, 0x33, 0xcd, 0x58, 0x5c, 0x39, 0xf3, + 0x59, 0xe6, 0x1e, 0x34, 0x24, 0x50, 0xb9, 0xe5, 0x2b, 0xb2, 0xaa, 0x58, 0x52, 0x3b, 0x20, 0x0e, + 0xdd, 0x7a, 0x17, 0x16, 0x12, 0x0a, 0xa8, 0xa4, 0x74, 0x2d, 0x4d, 0x29, 0x25, 0x4c, 0xa1, 0xb7, + 0x82, 0xd8, 0xfb, 0xd0, 0x66, 0xc4, 0xe2, 0x22, 0x9a, 0x8a, 0xde, 0xeb, 0x69, 0x7a, 0x17, 0x94, + 0x45, 0x01, 0x49, 0x72, 0x0f, 0x5a, 0x82, 0x64, 0x78, 0xcc, 0xde, 0xad, 0x8a, 0xe2, 0x8d, 0x34, + 0xc5, 0xd5, 0xf1, 0x7a, 0x06, 0x5d, 0x38, 0x4e, 0x50, 0x56, 0x0f, 0xe6, 0x26, 0x28, 0x17, 0x4a, + 0x82, 0x1f, 0x81, 0x96, 0x22, 0x18, 0x9d, 0x1d, 0x32, 0x54, 0x6f, 0xa5, 0xa9, 0xae, 0xab, 0xa8, + 0xb2, 0xd5, 0xe3, 0x2f, 0x47, 0xc4, 0xd0, 0x79, 0x5f, 0x8e, 0xd0, 0x74, 0x41, 0xcc, 0x81, 0x4b, + 0x9c, 0x58, 0xb6, 0x34, 0x91, 0x2b, 0xd8, 0xb7, 0xd3, 0xd4, 0xaf, 0x4e, 0xa9, 0x7b, 0x24, 0xe5, + 0xfc, 0x96, 0xe4, 0x3d, 0xf4, 0x2d, 0xf7, 0x48, 0x49, 0x7d, 0x35, 0x49, 0xbd, 0x29, 0x17, 0x3e, + 0x86, 0x4e, 0x62, 0xe1, 0xae, 0xef, 0x23, 0xb5, 0x82, 0xdf, 0x4c, 0xf3, 0x96, 0xf2, 0xa9, 0x89, + 0xb5, 0x92, 0xec, 0x6f, 0xca, 0xd0, 0x79, 0x8f, 0xb8, 0xe9, 0x1a, 0x2f, 0x86, 0xcd, 0x63, 0xa6, + 0xc1, 0x66, 0x54, 0x77, 0x32, 0x83, 0xe1, 0xa1, 0x99, 0xaa, 0xf4, 0xbf, 0x9c, 0x55, 0xf8, 0x6c, + 0x82, 0xd3, 0x2d, 0x18, 0xfa, 0x71, 0x5e, 0xf2, 0x63, 0xc3, 0x65, 0x9a, 0x30, 0x98, 0x7d, 0x14, + 0x22, 0xf5, 0x4e, 0xfc, 0x19, 0x5e, 0x4d, 0xee, 0x94, 0x7f, 0x4c, 0xee, 0x16, 0x8c, 0x8d, 0x41, + 0xfe, 0x21, 0xfa, 0x10, 0x36, 0x9e, 0x0c, 0xb1, 0x3f, 0x52, 0xef, 0x54, 0xce, 0xbe, 0xc9, 0xf7, + 0x29, 0xb6, 0x72, 0x9b, 0x8b, 0x4f, 0xd4, 0x20, 0xcd, 0x84, 0x75, 0x0f, 0x85, 0xc7, 0xea, 0x2d, + 0x78, 0xf1, 0x63, 0x6b, 0xdc, 0x0a, 0x95, 0x3b, 0xac, 0x79, 0x4a, 0x48, 0xdc, 0x24, 0xf9, 0xbc, + 0x04, 0xfa, 0x1e, 0x1a, 0x86, 0xc7, 0x3b, 0xbb, 0xbd, 0x1e, 0x0e, 0x82, 0x3b, 0xa4, 0x8f, 0xa7, + 0xf5, 0x39, 0x06, 0x36, 0x79, 0x2a, 0xab, 0xf2, 0xf4, 0xbf, 0xf6, 0x06, 0x0d, 0x08, 0xc4, 0xc3, + 0xf2, 0x48, 0x94, 0x2a, 0x8d, 0x70, 0xea, 0x07, 0x0c, 0x6e, 0x08, 0x3c, 
0x9a, 0x35, 0xd1, 0x69, + 0xe2, 0x5b, 0xdf, 0x67, 0xfd, 0x09, 0x93, 0xfa, 0x6f, 0x71, 0x20, 0x4a, 0x01, 0x1e, 0xfb, 0x36, + 0x4d, 0x60, 0x42, 0xf2, 0x29, 0xe6, 0x48, 0x3c, 0xff, 0x6c, 0xb0, 0x09, 0x0a, 0x1c, 0x0b, 0x1e, + 0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6, 0x2c, + 0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xd4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e, 0xab, + 0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d, 0x33, + 0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff, 0x07, + 0xde, 0xfc, 0xc7, 0xb0, 0x98, 0xe4, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc, 0x3f, + 0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d, 0x2f, + 0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 0x9c, 0x3b, + 0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52, 0xbc, + 0xd1, 0xdc, 0x83, 0x7e, 0xaa, 0xf7, 0x53, 0x1b, 0xeb, 0xfd, 0x24, 0x7b, 0x46, 0xf5, 0xb1, 0x9e, + 0xd1, 0x57, 0x52, 0x3d, 0x9b, 0x06, 0x13, 0xdd, 0x86, 0x32, 0x3d, 0xe3, 0xa1, 0x3e, 0xd9, 0xad, + 0x79, 0x33, 0xd9, 0xad, 0x69, 0x66, 0x33, 0x3b, 0x99, 0xe0, 0xa4, 0x7a, 0x34, 0x89, 0xd6, 0x16, + 0xa4, 0x5b, 0x5b, 0x97, 0x01, 0xfa, 0xd8, 0xf3, 0x71, 0x0f, 0x85, 0xb8, 0x2f, 0x4e, 0xbd, 0x89, + 0x99, 0xb3, 0x75, 0x77, 0x54, 0xea, 0xd7, 0x9a, 0x47, 0xfd, 0x7e, 0x59, 0x84, 0x66, 0x9c, 0x45, + 0xdc, 0x86, 0xf6, 0x21, 0xe9, 0x27, 0xe2, 0xad, 0x48, 0x1c, 0x52, 0x09, 0x5e, 0x2a, 0xf1, 0xe8, + 0x16, 0x8c, 0xd6, 0x61, 0x2a, 0x13, 0x79, 0x08, 0x9a, 0x4b, 0x5c, 0x73, 0x8c, 0x0e, 0x4f, 0x0b, + 0x2e, 0xa5, 0x98, 0x1a, 0xcb, 0x61, 0xba, 0x05, 0xa3, 0xe3, 0x8e, 0xcd, 0xc5, 0xd1, 0xf3, 0x08, + 0x56, 0x55, 0x7d, 0x36, 0x6d, 0x6f, 0xb2, 0xbd, 0x6c, 0x64, 0xc4, 0x10, 0x27, 0xe6, 0x6a, 0x93, + 0xf9, 0xac, 0x08, 0xed, 0xb4, 0x76, 0x68, 0x5f, 0x82, 0xe6, 0xb8, 0x44, 0xd4, 0xb9, 0x7e, 0xb7, + 0x60, 0xc4, 0x98, 0x54, 0x9a, 0x9f, 0x04, 0xc4, 0xa5, 0x67, 0x30, 0x7e, 0x22, 0x53, 0xa5, 0xcb, + 0xa9, 0x23, 0x1b, 0x95, 0xe6, 0x27, 0xc9, 0x89, 0xf8, 0xf9, 0x7f, 0x5f, 0x86, 0x46, 0x74, 0x74, + 0x50, 0x9c, 0xec, 0x5e, 0x83, 0xf2, 0x11, 0x0e, 0x55, 0x27, 0x91, 0xc8, 0xfe, 0x0d, 0x8a, 0x41, + 0x11, 0xbd, 0x61, 0x28, 0xfc, 0x63, 0x1e, 0xa2, 0x37, 0x0c, 0xb5, 0xeb, 0x50, 0xf1, 0x48, 0x20, + 0x3b, 0x40, 0x39, 0x98, 0x0c, 0x45, 0xbb, 0x09, 0xb5, 0x3e, 0xb6, 0x71, 0x88, 0xc5, 0x89, 0x3a, + 0x07, 0x59, 0x20, 0x69, 0xb7, 0xa0, 0x4e, 0x3c, 0xde, 0x86, 0xac, 0x4d, 0xc2, 0x97, 0x58, 0x94, + 0x15, 0x9a, 0x92, 0x8a, 0x22, 0x57, 0x1e, 0x2b, 0x14, 0x85, 0x9e, 0xc9, 0x3c, 0x14, 0xf6, 0x8e, + 0x45, 0xfb, 0x22, 0x07, 0x97, 0xe3, 0x8c, 0xb9, 0x89, 0xe6, 0x5c, 0x6e, 0xe2, 0xcc, 0x1d, 0xa4, + 0xbf, 0x56, 0x61, 0x4d, 0x9d, 0x4d, 0x9e, 0xd7, 0x18, 0xcf, 0x6b, 0x8c, 0xff, 0xed, 0x35, 0xc6, + 0xa7, 0x50, 0x65, 0x17, 0x34, 0x94, 0x94, 0x8a, 0x73, 0x50, 0xd2, 0x6e, 0x42, 0x85, 0xdd, 0x36, + 0x29, 0xb1, 0x45, 0xeb, 0x0a, 0x87, 0x2f, 0xea, 0x26, 0x0c, 0x6d, 0xeb, 0x67, 0x55, 0x58, 0x1a, + 0xd3, 0xda, 0xf3, 0x9e, 0xd4, 0x79, 0x4f, 0xea, 0x4c, 0x3d, 0x29, 0x95, 0x0e, 0x6b, 0xf3, 0x58, + 0xc3, 0xb7, 0x01, 0xe2, 0x14, 0xe4, 0x39, 0xdf, 0xf9, 0xfa, 0x55, 0x0d, 0x2e, 0xe6, 0x14, 0x46, + 0xce, 0xaf, 0x29, 0x9c, 0x5f, 0x53, 0x38, 0xbf, 0xa6, 0x10, 0x9b, 0xe1, 0xdf, 0x8b, 0xd0, 0x88, + 0xca, 0xe9, 0xd3, 0x2f, 0x76, 0x6d, 0x47, 0xdd, 0x19, 0x9e, 0x76, 0xaf, 0x65, 0x6b, 0xd6, 0x2c, + 0xf0, 0xc8, 0xab, 0xaf, 0x37, 0xa1, 0xce, 0x2b, 0xab, 0x32, 0x78, 0xac, 0x64, 0x0b, 0xb2, 0x81, + 
0x21, 0x71, 0xb4, 0x37, 0xa0, 0x21, 0xae, 0x2b, 0xc9, 0x93, 0xf5, 0x6a, 0xfa, 0x64, 0xcd, 0x61, + 0x46, 0x84, 0x75, 0xf6, 0x3b, 0xcd, 0x18, 0x56, 0x14, 0x97, 0x11, 0xb5, 0xf7, 0x26, 0x3b, 0xa4, + 0x6c, 0xcc, 0x8d, 0x5a, 0x0b, 0x6a, 0x97, 0xf4, 0x93, 0x22, 0xb4, 0xd2, 0x5d, 0x86, 0x1d, 0xea, + 0x88, 0xf8, 0x44, 0x74, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x08, 0xef, 0xf9, 0x9e, 0xaf, + 0x7e, 0x5a, 0x84, 0x66, 0x74, 0xb2, 0xd7, 0xee, 0x40, 0x4b, 0x6e, 0x63, 0xf6, 0x48, 0x1f, 0x8b, + 0x07, 0xbd, 0x9c, 0xfb, 0xa0, 0xbc, 0xdb, 0xb1, 0x28, 0x17, 0xdd, 0x21, 0x7d, 0x75, 0x2b, 0xb0, + 0x34, 0xcf, 0xdb, 0xf8, 0x75, 0x13, 0x6a, 0xc2, 0x51, 0x2b, 0x4e, 0x7c, 0x79, 0x09, 0x4a, 0xd4, + 0x5b, 0x2d, 0x4f, 0xb8, 0xf4, 0x57, 0x99, 0x78, 0xe9, 0x6f, 0x5a, 0xe2, 0x31, 0x66, 0x89, 0xb5, + 0x8c, 0x25, 0x26, 0x5c, 0x62, 0x7d, 0x06, 0x97, 0xd8, 0x98, 0xee, 0x12, 0x9b, 0x33, 0xb8, 0x44, + 0x98, 0xc9, 0x25, 0x2e, 0x4c, 0x76, 0x89, 0x8b, 0x13, 0x5c, 0x62, 0x6b, 0x82, 0x4b, 0x6c, 0x4f, + 0x72, 0x89, 0x4b, 0x53, 0x5c, 0x62, 0x27, 0xeb, 0x12, 0x5f, 0x81, 0x36, 0x25, 0x9e, 0x30, 0x36, + 0x7e, 0x12, 0x68, 0x39, 0xe8, 0x34, 0x91, 0x2b, 0x50, 0x34, 0xcb, 0x4d, 0xa2, 0x69, 0x02, 0xcd, + 0x72, 0x13, 0x68, 0xc9, 0x40, 0xbf, 0x32, 0x76, 0x4d, 0x73, 0xa6, 0x13, 0xc1, 0x47, 0x79, 0x2e, + 0xe0, 0x42, 0xb6, 0xb5, 0x94, 0xf7, 0xe9, 0x89, 0xda, 0x1b, 0x68, 0xd7, 0x44, 0xd8, 0x5f, 0xcb, + 0xda, 0xfd, 0xa3, 0x91, 0x87, 0x79, 0xee, 0xce, 0x92, 0x81, 0xd7, 0x65, 0xd0, 0xbf, 0x98, 0x3d, + 0xdc, 0x47, 0x4d, 0x73, 0x19, 0xee, 0xaf, 0x43, 0x0d, 0xd9, 0x36, 0xd5, 0x4f, 0x3d, 0xb7, 0x77, + 0x5e, 0x45, 0xb6, 0xbd, 0x37, 0xd0, 0xbe, 0x0c, 0x90, 0x78, 0xa2, 0xf5, 0xac, 0x33, 0x8f, 0xb9, + 0x35, 0x12, 0x98, 0xda, 0xcb, 0xd0, 0xea, 0x5b, 0xd4, 0x82, 0x1c, 0xcb, 0x45, 0x21, 0xf1, 0xf5, + 0x0d, 0xa6, 0x20, 0xe9, 0xc9, 0xf4, 0x95, 0xd7, 0xcd, 0xb1, 0x2b, 0xaf, 0x2f, 0x41, 0xf9, 0xd4, + 0xb1, 0xf5, 0x4b, 0x59, 0x8b, 0xfb, 0xd0, 0xb1, 0x0d, 0x0a, 0xcb, 0x96, 0x59, 0x5f, 0x78, 0xd6, + 0x5b, 0xb1, 0x97, 0x9f, 0xe1, 0x56, 0xec, 0x8b, 0xf3, 0x78, 0xac, 0x1f, 0x00, 0xc4, 0x71, 0x6f, + 0xce, 0x2f, 0x8d, 0xde, 0x86, 0x85, 0x81, 0x65, 0x63, 0x33, 0x3f, 0xa4, 0xc6, 0x37, 0x9e, 0xbb, + 0x05, 0x03, 0x06, 0xd1, 0x28, 0xf6, 0xe2, 0x21, 0xac, 0x28, 0xba, 0xb9, 0xda, 0x77, 0x27, 0xc7, + 0xaf, 0x6b, 0xd9, 0x84, 0x3a, 0xa7, 0x25, 0xac, 0x0e, 0x67, 0x7f, 0xaa, 0xc0, 0xc5, 0xbc, 0x66, + 0xb4, 0x03, 0x2f, 0x1c, 0xa2, 0xc0, 0xea, 0x99, 0x28, 0xf5, 0x95, 0x90, 0x19, 0xd5, 0x7c, 0xb9, + 0x68, 0x5e, 0x4b, 0x55, 0x58, 0xf3, 0xbf, 0x2a, 0xea, 0x16, 0x8c, 0xcd, 0xc3, 0x09, 0x1f, 0x1d, + 0xdd, 0x87, 0x0e, 0xf2, 0x2c, 0xf3, 0x53, 0x3c, 0x8a, 0x77, 0xe0, 0x92, 0x4c, 0xd5, 0xb5, 0xd2, + 0x5f, 0x59, 0x75, 0x0b, 0x46, 0x1b, 0xa5, 0xbf, 0xbb, 0xfa, 0x1e, 0xe8, 0x84, 0xb5, 0x25, 0x4c, + 0x4b, 0x34, 0xa4, 0x62, 0x7a, 0xe5, 0x6c, 0x57, 0x54, 0xdd, 0xbb, 0xea, 0x16, 0x8c, 0x35, 0xa2, + 0xee, 0x6a, 0xc5, 0xf4, 0x3d, 0xd1, 0xeb, 0x89, 0xe9, 0x57, 0xf2, 0xe8, 0x8f, 0xb7, 0x85, 0x62, + 0xfa, 0x99, 0x86, 0xd1, 0x11, 0x6c, 0x0a, 0xfa, 0x28, 0x6e, 0x24, 0xc6, 0x5b, 0xf0, 0x00, 0xf7, + 0x4a, 0x76, 0x0b, 0x45, 0xdb, 0xb1, 0x5b, 0x30, 0xd6, 0x49, 0x6e, 0x4f, 0x12, 0xc7, 0x1b, 0xb1, + 0xae, 0x2e, 0x4b, 0x17, 0xe2, 0x8d, 0x6a, 0x59, 0xef, 0x98, 0xd7, 0x03, 0xee, 0x16, 0x0c, 0x21, + 0x93, 0x2c, 0x2c, 0xd6, 0xf0, 0xe3, 0x58, 0xc3, 0x13, 0x2d, 0x01, 0xed, 0xfd, 0xc9, 0x1a, 0x7e, + 0x29, 0xa7, 0x6d, 0xc4, 0x2f, 0x16, 0xa8, 0xb5, 0xfa, 0x2a, 0x2c, 0x24, 0x6f, 0x2e, 0xac, 0xc6, + 0x1f, 0xf7, 0x95, 0xe3, 0x3b, 0x0e, 0xbf, 0x2d, 0x42, 0xf9, 0x11, 0x52, 0xdf, 0x8a, 0x98, 0xfe, + 0xb1, 0x5b, 0xc6, 0xb3, 
0x95, 0xcf, 0xfc, 0x8d, 0xc8, 0x5c, 0x5f, 0x70, 0x5d, 0x81, 0x86, 0x8c, + 0x30, 0x39, 0xcf, 0xf7, 0x31, 0x2c, 0x7d, 0x30, 0x56, 0x6f, 0x7a, 0x8e, 0x1f, 0x93, 0xfc, 0xae, + 0x08, 0xe5, 0x0f, 0x1d, 0x5b, 0x29, 0xbd, 0x4b, 0xd0, 0xa4, 0xbf, 0x81, 0x87, 0x7a, 0xf2, 0x5e, + 0x49, 0x3c, 0x41, 0x93, 0x3f, 0xcf, 0xc7, 0x03, 0xeb, 0x54, 0x64, 0x79, 0x62, 0x44, 0x57, 0xa1, + 0x30, 0xf4, 0xad, 0xc3, 0x61, 0x88, 0xc5, 0x67, 0x7a, 0xf1, 0x04, 0x4d, 0x65, 0x9e, 0xfa, 0xc8, + 0xf3, 0x70, 0x5f, 0x1c, 0xc1, 0xe5, 0xf0, 0xcc, 0x7d, 0xcc, 0xdb, 0xaf, 0x42, 0x9b, 0xf8, 0x47, + 0x12, 0xd7, 0x3c, 0xd9, 0xb9, 0xbd, 0x28, 0xbe, 0x5d, 0xdd, 0xf7, 0x49, 0x48, 0xf6, 0x8b, 0xbf, + 0x28, 0x95, 0xf7, 0x76, 0x0f, 0x0e, 0x6b, 0xec, 0x63, 0xd0, 0x37, 0xff, 0x19, 0x00, 0x00, 0xff, + 0xff, 0xd4, 0x0a, 0xef, 0xca, 0xe4, 0x3a, 0x00, 0x00, +} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto new file mode 100644 index 000000000..557c88072 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto @@ -0,0 +1,663 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +syntax = "proto3"; + +package openapi.v2; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v2"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +message AdditionalPropertiesItem { + oneof oneof { + Schema schema = 1; + bool boolean = 2; + } +} + +message Any { + google.protobuf.Any value = 1; + string yaml = 2; +} + +message ApiKeySecurity { + string type = 1; + string name = 2; + string in = 3; + string description = 4; + repeated NamedAny vendor_extension = 5; +} + +message BasicAuthenticationSecurity { + string type = 1; + string description = 2; + repeated NamedAny vendor_extension = 3; +} + +message BodyParameter { + // A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed. + string description = 1; + // The name of the parameter. + string name = 2; + // Determines the location of the parameter. + string in = 3; + // Determines whether or not this parameter is required or optional. + bool required = 4; + Schema schema = 5; + repeated NamedAny vendor_extension = 6; +} + +// Contact information for the owners of the API. +message Contact { + // The identifying name of the contact person/organization. + string name = 1; + // The URL pointing to the contact information. + string url = 2; + // The email address of the contact person/organization. + string email = 3; + repeated NamedAny vendor_extension = 4; +} + +message Default { + repeated NamedAny additional_properties = 1; +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. +message Definitions { + repeated NamedSchema additional_properties = 1; +} + +message Document { + // The Swagger version of this document. + string swagger = 1; + Info info = 2; + // The host (name or ip) of the API. Example: 'swagger.io' + string host = 3; + // The base path to the API. Example: '/api'. + string base_path = 4; + // The transfer protocol of the API. + repeated string schemes = 5; + // A list of MIME types accepted by the API. + repeated string consumes = 6; + // A list of MIME types the API can produce. + repeated string produces = 7; + Paths paths = 8; + Definitions definitions = 9; + ParameterDefinitions parameters = 10; + ResponseDefinitions responses = 11; + repeated SecurityRequirement security = 12; + SecurityDefinitions security_definitions = 13; + repeated Tag tags = 14; + ExternalDocs external_docs = 15; + repeated NamedAny vendor_extension = 16; +} + +message Examples { + repeated NamedAny additional_properties = 1; +} + +// information about external documentation +message ExternalDocs { + string description = 1; + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// A deterministic version of a JSON Schema object. +message FileSchema { + string format = 1; + string title = 2; + string description = 3; + Any default = 4; + repeated string required = 5; + string type = 6; + bool read_only = 7; + ExternalDocs external_docs = 8; + Any example = 9; + repeated NamedAny vendor_extension = 10; +} + +message FormDataParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Header { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + string description = 18; + repeated NamedAny vendor_extension = 19; +} + +message HeaderParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +message Headers { + repeated NamedHeader additional_properties = 1; +} + +// General information about the API. +message Info { + // A unique and precise title of the API. + string title = 1; + // A semantic version number of the API. + string version = 2; + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + string description = 3; + // The terms of service for the API. + string terms_of_service = 4; + Contact contact = 5; + License license = 6; + repeated NamedAny vendor_extension = 7; +} + +message ItemsItem { + repeated Schema schema = 1; +} + +message JsonReference { + string _ref = 1; + string description = 2; +} + +message License { + // The name of the license type. It's encouraged to use an OSI compatible license. + string name = 1; + // The URL pointing to the license. + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +message NamedAny { + // Map key + string name = 1; + // Mapped value + Any value = 2; +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +message NamedHeader { + // Map key + string name = 1; + // Mapped value + Header value = 2; +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +message NamedParameter { + // Map key + string name = 1; + // Mapped value + Parameter value = 2; +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+message NamedPathItem { + // Map key + string name = 1; + // Mapped value + PathItem value = 2; +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +message NamedResponse { + // Map key + string name = 1; + // Mapped value + Response value = 2; +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +message NamedResponseValue { + // Map key + string name = 1; + // Mapped value + ResponseValue value = 2; +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +message NamedSchema { + // Map key + string name = 1; + // Mapped value + Schema value = 2; +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. +message NamedSecurityDefinitionsItem { + // Map key + string name = 1; + // Mapped value + SecurityDefinitionsItem value = 2; +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +message NamedString { + // Map key + string name = 1; + // Mapped value + string value = 2; +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +message NamedStringArray { + // Map key + string name = 1; + // Mapped value + StringArray value = 2; +} + +message NonBodyParameter { + oneof oneof { + HeaderParameterSubSchema header_parameter_sub_schema = 1; + FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + QueryParameterSubSchema query_parameter_sub_schema = 3; + PathParameterSubSchema path_parameter_sub_schema = 4; + } +} + +message Oauth2AccessCodeSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string token_url = 5; + string description = 6; + repeated NamedAny vendor_extension = 7; +} + +message Oauth2ApplicationSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2ImplicitSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2PasswordSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2Scopes { + repeated NamedString additional_properties = 1; +} + +message Operation { + repeated string tags = 1; + // A brief summary of the operation. + string summary = 2; + // A longer description of the operation, GitHub Flavored Markdown is allowed. + string description = 3; + ExternalDocs external_docs = 4; + // A unique identifier of the operation. + string operation_id = 5; + // A list of MIME types the API can produce. + repeated string produces = 6; + // A list of MIME types the API can consume. + repeated string consumes = 7; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 8; + Responses responses = 9; + // The transfer protocol of the API. 
+ repeated string schemes = 10; + bool deprecated = 11; + repeated SecurityRequirement security = 12; + repeated NamedAny vendor_extension = 13; +} + +message Parameter { + oneof oneof { + BodyParameter body_parameter = 1; + NonBodyParameter non_body_parameter = 2; + } +} + +// One or more JSON representations for parameters +message ParameterDefinitions { + repeated NamedParameter additional_properties = 1; +} + +message ParametersItem { + oneof oneof { + Parameter parameter = 1; + JsonReference json_reference = 2; + } +} + +message PathItem { + string _ref = 1; + Operation get = 2; + Operation put = 3; + Operation post = 4; + Operation delete = 5; + Operation options = 6; + Operation head = 7; + Operation patch = 8; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 9; + repeated NamedAny vendor_extension = 10; +} + +message PathParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +message Paths { + repeated NamedAny vendor_extension = 1; + repeated NamedPathItem path = 2; +} + +message PrimitivesItems { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + repeated NamedAny vendor_extension = 18; +} + +message Properties { + repeated NamedSchema additional_properties = 1; +} + +message QueryParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Response { + string description = 1; + SchemaItem schema = 2; + Headers headers = 3; + Examples examples = 4; + repeated NamedAny vendor_extension = 5; +} + +// One or more JSON representations for parameters +message ResponseDefinitions { + repeated NamedResponse additional_properties = 1; +} + +message ResponseValue { + oneof oneof { + Response response = 1; + JsonReference json_reference = 2; + } +} + +// Response objects names can either be any valid HTTP status code or 'default'. +message Responses { + repeated NamedResponseValue response_code = 1; + repeated NamedAny vendor_extension = 2; +} + +// A deterministic version of a JSON Schema object. +message Schema { + string _ref = 1; + string format = 2; + string title = 3; + string description = 4; + Any default = 5; + double multiple_of = 6; + double maximum = 7; + bool exclusive_maximum = 8; + double minimum = 9; + bool exclusive_minimum = 10; + int64 max_length = 11; + int64 min_length = 12; + string pattern = 13; + int64 max_items = 14; + int64 min_items = 15; + bool unique_items = 16; + int64 max_properties = 17; + int64 min_properties = 18; + repeated string required = 19; + repeated Any enum = 20; + AdditionalPropertiesItem additional_properties = 21; + TypeItem type = 22; + ItemsItem items = 23; + repeated Schema all_of = 24; + Properties properties = 25; + string discriminator = 26; + bool read_only = 27; + Xml xml = 28; + ExternalDocs external_docs = 29; + Any example = 30; + repeated NamedAny vendor_extension = 31; +} + +message SchemaItem { + oneof oneof { + Schema schema = 1; + FileSchema file_schema = 2; + } +} + +message SecurityDefinitions { + repeated NamedSecurityDefinitionsItem additional_properties = 1; +} + +message SecurityDefinitionsItem { + oneof oneof { + BasicAuthenticationSecurity basic_authentication_security = 1; + ApiKeySecurity api_key_security = 2; + Oauth2ImplicitSecurity oauth2_implicit_security = 3; + Oauth2PasswordSecurity oauth2_password_security = 4; + Oauth2ApplicationSecurity oauth2_application_security = 5; + Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + } +} + +message SecurityRequirement { + repeated NamedStringArray additional_properties = 1; +} + +message StringArray { + repeated string value = 1; +} + +message Tag { + string name = 1; + string description = 2; + ExternalDocs external_docs = 3; + repeated NamedAny vendor_extension = 4; +} + +message TypeItem { + repeated string value = 1; +} + +// Any property starting with x- is valid. 
+message VendorExtension { + repeated NamedAny additional_properties = 1; +} + +message Xml { + string name = 1; + string namespace = 2; + string prefix = 3; + bool attribute = 4; + bool wrapped = 5; + repeated NamedAny vendor_extension = 6; +} + diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md new file mode 100644 index 000000000..836fb32a7 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md @@ -0,0 +1,16 @@ +# OpenAPI v2 Protocol Buffer Models + +This directory contains a Protocol Buffer-language model +and related code for supporting OpenAPI v2. + +Gnostic applications and plugins can use OpenAPIv2.proto +to generate Protocol Buffer support code for their preferred languages. + +OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI +descriptions into the Protocol Buffer-based datastructures +generated from OpenAPIv2.proto. + +OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic +compiler generator, and OpenAPIv2.pb.go is generated by +protoc, the Protocol Buffer compiler, and protoc-gen-go, the +Protocol Buffer Go code generation plugin. diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json new file mode 100644 index 000000000..2815a26ea --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json @@ -0,0 +1,1610 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." 
+ }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." 
+ }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for parameters" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + 
"type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + }, + "description": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md new file mode 100644 index 000000000..848b16c69 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/README.md @@ -0,0 +1,3 @@ +# Compiler support code + +This 
directory contains compiler support code used by Gnostic and Gnostic extensions. \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go new file mode 100644 index 000000000..a64c1b75d --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/context.go @@ -0,0 +1,43 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Context contains state of the compiler as it traverses a document. +type Context struct { + Parent *Context + Name string + ExtensionHandlers *[]ExtensionHandler +} + +// NewContextWithExtensions returns a new object representing the compiler state +func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { + return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} +} + +// NewContext returns a new object representing the compiler state +func NewContext(name string, parent *Context) *Context { + if parent != nil { + return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} + } + return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} +} + +// Description returns a text description of the compiler state +func (context *Context) Description() string { + if context.Parent != nil { + return context.Parent.Description() + "." + context.Name + } + return context.Name +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go new file mode 100644 index 000000000..d8672c100 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/error.go @@ -0,0 +1,61 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Error represents compiler errors and their location in the document. +type Error struct { + Context *Context + Message string +} + +// NewError creates an Error. +func NewError(context *Context, message string) *Error { + return &Error{Context: context, Message: message} +} + +// Error returns the string value of an Error. +func (err *Error) Error() string { + if err.Context == nil { + return "ERROR " + err.Message + } + return "ERROR " + err.Context.Description() + " " + err.Message +} + +// ErrorGroup is a container for groups of Error values. 
+type ErrorGroup struct { + Errors []error +} + +// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. +func NewErrorGroupOrNil(errors []error) error { + if len(errors) == 0 { + return nil + } else if len(errors) == 1 { + return errors[0] + } else { + return &ErrorGroup{Errors: errors} + } +} + +func (group *ErrorGroup) Error() string { + result := "" + for i, err := range group.Errors { + if i > 0 { + result += "\n" + } + result += err.Error() + } + return result +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go new file mode 100644 index 000000000..1f85b650e --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go @@ -0,0 +1,101 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "bytes" + "fmt" + "os/exec" + + "strings" + + "errors" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + ext_plugin "github.com/googleapis/gnostic/extensions" + yaml "gopkg.in/yaml.v2" +) + +// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. +type ExtensionHandler struct { + Name string +} + +// HandleExtension calls a binary extension handler. 
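The Context and Error types above are small but are used throughout the generated compilers; a minimal sketch of how they nest, assuming the package is imported from its vendored path:

```go
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Contexts chain parent-to-child; Description() joins the names with dots.
	root := compiler.NewContext("document", nil)
	paths := compiler.NewContext("paths", root)

	err := compiler.NewError(paths, "unexpected key")
	fmt.Println(err.Error()) // ERROR document.paths unexpected key

	// NewErrorGroupOrNil collapses empty or single-element slices.
	fmt.Println(compiler.NewErrorGroupOrNil(nil))          // <nil>
	fmt.Println(compiler.NewErrorGroupOrNil([]error{err})) // the single error itself
}
```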
+func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) { + handled := false + var errFromPlugin error + var outFromPlugin *any.Any + + if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 { + for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) { + outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName) + if outFromPlugin == nil { + continue + } else { + handled = true + break + } + } + } + return handled, outFromPlugin, errFromPlugin +} + +func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) { + if extensionHandlers.Name != "" { + binary, _ := yaml.Marshal(in) + + request := &ext_plugin.ExtensionHandlerRequest{} + + version := &ext_plugin.Version{} + version.Major = 0 + version.Minor = 1 + version.Patch = 0 + request.CompilerVersion = version + + request.Wrapper = &ext_plugin.Wrapper{} + + request.Wrapper.Version = "v2" + request.Wrapper.Yaml = string(binary) + request.Wrapper.ExtensionName = extensionName + + requestBytes, _ := proto.Marshal(request) + cmd := exec.Command(extensionHandlers.Name) + cmd.Stdin = bytes.NewReader(requestBytes) + output, err := cmd.Output() + + if err != nil { + fmt.Printf("Error: %+v\n", err) + return nil, err + } + response := &ext_plugin.ExtensionHandlerResponse{} + err = proto.Unmarshal(output, response) + if err != nil { + fmt.Printf("Error: %+v\n", err) + fmt.Printf("%s\n", string(output)) + return nil, err + } + if !response.Handled { + return nil, nil + } + if len(response.Error) != 0 { + message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ",")) + return nil, errors.New(message) + } + return response.Value, nil + } + return nil, nil +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go new file mode 100644 index 000000000..76df635ff --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/helpers.go @@ -0,0 +1,197 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "fmt" + "gopkg.in/yaml.v2" + "regexp" + "sort" + "strconv" +) + +// compiler helper functions, usually called from generated code + +// UnpackMap gets a yaml.MapSlice if possible. +func UnpackMap(in interface{}) (yaml.MapSlice, bool) { + m, ok := in.(yaml.MapSlice) + if ok { + return m, true + } + // do we have an empty array? + a, ok := in.([]interface{}) + if ok && len(a) == 0 { + // if so, return an empty map + return yaml.MapSlice{}, true + } + return nil, false +} + +// SortedKeysForMap returns the sorted keys of a yaml.MapSlice. 
+func SortedKeysForMap(m yaml.MapSlice) []string { + keys := make([]string, 0) + for _, item := range m { + keys = append(keys, item.Key.(string)) + } + sort.Strings(keys) + return keys +} + +// MapHasKey returns true if a yaml.MapSlice contains a specified key. +func MapHasKey(m yaml.MapSlice, key string) bool { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return true + } + } + return false +} + +// MapValueForKey gets the value of a map value for a specified key. +func MapValueForKey(m yaml.MapSlice, key string) interface{} { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return item.Value + } + } + return nil +} + +// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. +func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { + stringArray := make([]string, 0) + for _, item := range interfaceArray { + v, ok := item.(string) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + +// MissingKeysInMap identifies which keys from a list of required keys are not in a map. +func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { + missingKeys := make([]string, 0) + for _, k := range requiredKeys { + if !MapHasKey(m, k) { + missingKeys = append(missingKeys, k) + } + } + return missingKeys +} + +// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. +func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { + invalidKeys := make([]string, 0) + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok { + key := itemKey + found := false + // does the key match an allowed key? + for _, allowedKey := range allowedKeys { + if key == allowedKey { + found = true + break + } + } + if !found { + // does the key match an allowed pattern? + for _, allowedPattern := range allowedPatterns { + if allowedPattern.MatchString(key) { + found = true + break + } + } + if !found { + invalidKeys = append(invalidKeys, key) + } + } + } + } + return invalidKeys +} + +// DescribeMap describes a map (for debugging purposes). +func DescribeMap(in interface{}, indent string) string { + description := "" + m, ok := in.(map[string]interface{}) + if ok { + keys := make([]string, 0) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := m[k] + description += fmt.Sprintf("%s%s:\n", indent, k) + description += DescribeMap(v, indent+" ") + } + return description + } + a, ok := in.([]interface{}) + if ok { + for i, v := range a { + description += fmt.Sprintf("%s%d:\n", indent, i) + description += DescribeMap(v, indent+" ") + } + return description + } + description += fmt.Sprintf("%s%+v\n", indent, in) + return description +} + +// PluralProperties returns the string "properties" pluralized. +func PluralProperties(count int) string { + if count == 1 { + return "property" + } + return "properties" +} + +// StringArrayContainsValue returns true if a string array contains a specified value. +func StringArrayContainsValue(array []string, value string) bool { + for _, item := range array { + if item == value { + return true + } + } + return false +} + +// StringArrayContainsValues returns true if a string array contains all of a list of specified values. 
+func StringArrayContainsValues(array []string, values []string) bool { + for _, value := range values { + if !StringArrayContainsValue(array, value) { + return false + } + } + return true +} + +// StringValue returns the string value of an item. +func StringValue(item interface{}) (value string, ok bool) { + value, ok = item.(string) + if ok { + return value, ok + } + intValue, ok := item.(int) + if ok { + return strconv.Itoa(intValue), true + } + return "", false +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go new file mode 100644 index 000000000..9713a21cc --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/main.go @@ -0,0 +1,16 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compiler provides support functions to generated compiler code. +package compiler diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go new file mode 100644 index 000000000..c954a2d9b --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -0,0 +1,175 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "errors" + "fmt" + "gopkg.in/yaml.v2" + "io/ioutil" + "log" + "net/http" + "net/url" + "path/filepath" + "strings" +) + +var fileCache map[string][]byte +var infoCache map[string]interface{} +var count int64 + +var verboseReader = false + +func initializeFileCache() { + if fileCache == nil { + fileCache = make(map[string][]byte, 0) + } +} + +func initializeInfoCache() { + if infoCache == nil { + infoCache = make(map[string]interface{}, 0) + } +} + +// FetchFile gets a specified file from the local filesystem or a remote location. 
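The helpers above are normally driven by generated compiler code, but they can be exercised directly. A small sketch, using a hand-written YAML snippet rather than anything from this patch:

```go
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	var node yaml.MapSlice
	if err := yaml.Unmarshal([]byte("name: id\nin: query\n"), &node); err != nil {
		panic(err)
	}

	m, ok := compiler.UnpackMap(node)
	if !ok {
		panic("expected a YAML mapping")
	}
	fmt.Println(compiler.SortedKeysForMap(m))                               // [in name]
	fmt.Println(compiler.MapValueForKey(m, "in"))                           // query
	fmt.Println(compiler.MissingKeysInMap(m, []string{"name", "required"})) // [required]
}
```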
+func FetchFile(fileurl string) ([]byte, error) { + initializeFileCache() + bytes, ok := fileCache[fileurl] + if ok { + if verboseReader { + log.Printf("Cache hit %s", fileurl) + } + return bytes, nil + } + if verboseReader { + log.Printf("Fetching %s", fileurl) + } + response, err := http.Get(fileurl) + if err != nil { + return nil, err + } + if response.StatusCode != 200 { + return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status)) + } + defer response.Body.Close() + bytes, err = ioutil.ReadAll(response.Body) + if err == nil { + fileCache[fileurl] = bytes + } + return bytes, err +} + +// ReadBytesForFile reads the bytes of a file. +func ReadBytesForFile(filename string) ([]byte, error) { + // is the filename a url? + fileurl, _ := url.Parse(filename) + if fileurl.Scheme != "" { + // yes, fetch it + bytes, err := FetchFile(filename) + if err != nil { + return nil, err + } + return bytes, nil + } + // no, it's a local filename + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return bytes, nil +} + +// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. +func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { + initializeInfoCache() + cachedInfo, ok := infoCache[filename] + if ok { + if verboseReader { + log.Printf("Cache hit info for file %s", filename) + } + return cachedInfo, nil + } + if verboseReader { + log.Printf("Reading info for file %s", filename) + } + var info yaml.MapSlice + err := yaml.Unmarshal(bytes, &info) + if err != nil { + return nil, err + } + if len(filename) > 0 { + infoCache[filename] = info + } + return info, nil +} + +// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. +func ReadInfoForRef(basefile string, ref string) (interface{}, error) { + initializeInfoCache() + { + info, ok := infoCache[ref] + if ok { + if verboseReader { + log.Printf("Cache hit for ref %s#%s", basefile, ref) + } + return info, nil + } + } + if verboseReader { + log.Printf("Reading info for ref %s#%s", basefile, ref) + } + count = count + 1 + basedir, _ := filepath.Split(basefile) + parts := strings.Split(ref, "#") + var filename string + if parts[0] != "" { + filename = basedir + parts[0] + } else { + filename = basefile + } + bytes, err := ReadBytesForFile(filename) + if err != nil { + return nil, err + } + info, err := ReadInfoFromBytes(filename, bytes) + if err != nil { + log.Printf("File error: %v\n", err) + } else { + if len(parts) > 1 { + path := strings.Split(parts[1], "/") + for i, key := range path { + if i > 0 { + m, ok := info.(yaml.MapSlice) + if ok { + found := false + for _, section := range m { + if section.Key == key { + info = section.Value + found = true + } + } + if !found { + infoCache[ref] = nil + return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) + } + } + } + } + } + } + infoCache[ref] = info + return info, nil +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh new file mode 100755 index 000000000..68d02a02a --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh @@ -0,0 +1,5 @@ +go get github.com/golang/protobuf/protoc-gen-go + +protoc \ +--go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. 
*.proto + diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md new file mode 100644 index 000000000..ff1c2eb1e --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/README.md @@ -0,0 +1,5 @@ +# Extensions + +This directory contains support code for building Gnostic extensions and associated examples. + +Extensions are used to compile vendor or specification extensions into protocol buffer structures. diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go new file mode 100644 index 000000000..749ff7841 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -0,0 +1,218 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: extension.proto + +/* +Package openapiextension_v1 is a generated protocol buffer package. + +It is generated from these files: + extension.proto + +It has these top-level messages: + Version + ExtensionHandlerRequest + ExtensionHandlerResponse + Wrapper +*/ +package openapiextension_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The version number of OpenAPI compiler. +type Version struct { + Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Version) GetMajor() int32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil { + return m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil { + return m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil { + return m.Suffix + } + return "" +} + +// An encoded Request is written to the ExtensionHandler's stdin. +type ExtensionHandlerRequest struct { + // The OpenAPI descriptions that were explicitly listed on the command line. + // The specifications will appear in the order they are specified to gnostic. + Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` + // The version number of openapi compiler. 
+ CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` +} + +func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } +func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerRequest) ProtoMessage() {} +func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { + if m != nil { + return m.Wrapper + } + return nil +} + +func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +type ExtensionHandlerResponse struct { + // true if the extension is handled by the extension handler; false otherwise + Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"` + // Error message. If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"` + // text output + Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` +} + +func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } +func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerResponse) ProtoMessage() {} +func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ExtensionHandlerResponse) GetHandled() bool { + if m != nil { + return m.Handled + } + return false +} + +func (m *ExtensionHandlerResponse) GetError() []string { + if m != nil { + return m.Error + } + return nil +} + +func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any { + if m != nil { + return m.Value + } + return nil +} + +type Wrapper struct { + // version of the OpenAPI specification in which this extension was written. 
+ Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // Name of the extension + ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"` + // Must be a valid yaml for the proto + Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"` +} + +func (m *Wrapper) Reset() { *m = Wrapper{} } +func (m *Wrapper) String() string { return proto.CompactTextString(m) } +func (*Wrapper) ProtoMessage() {} +func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Wrapper) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Wrapper) GetExtensionName() string { + if m != nil { + return m.ExtensionName + } + return "" +} + +func (m *Wrapper) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +func init() { + proto.RegisterType((*Version)(nil), "openapiextension.v1.Version") + proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") + proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") + proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") +} + +func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 357 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xc3, 0x40, + 0x18, 0x84, 0x49, 0xbf, 0x62, 0x56, 0x6c, 0x65, 0x2d, 0x1a, 0xc5, 0x43, 0x09, 0x08, 0x45, 0x64, + 0x4b, 0x15, 0xbc, 0xb7, 0x50, 0xd4, 0x8b, 0x2d, 0x7b, 0xa8, 0x37, 0xcb, 0x36, 0x7d, 0x9b, 0x46, + 0x92, 0xdd, 0x75, 0xf3, 0x61, 0xfb, 0x57, 0x3c, 0xfa, 0x4b, 0x25, 0xbb, 0x49, 0x3d, 0xa8, 0xb7, + 0xcc, 0xc3, 0x24, 0xef, 0xcc, 0x04, 0x75, 0x60, 0x9b, 0x02, 0x4f, 0x42, 0xc1, 0x89, 0x54, 0x22, + 0x15, 0xf8, 0x44, 0x48, 0xe0, 0x4c, 0x86, 0x3f, 0x3c, 0x1f, 0x5e, 0x9c, 0x07, 0x42, 0x04, 0x11, + 0x0c, 0xb4, 0x65, 0x99, 0xad, 0x07, 0x8c, 0xef, 0x8c, 0xdf, 0xf3, 0x91, 0x3d, 0x07, 0x55, 0x18, + 0x71, 0x17, 0x35, 0x63, 0xf6, 0x26, 0x94, 0x6b, 0xf5, 0xac, 0x7e, 0x93, 0x1a, 0xa1, 0x69, 0xc8, + 0x85, 0x72, 0x6b, 0x25, 0x2d, 0x44, 0x41, 0x25, 0x4b, 0xfd, 0x8d, 0x5b, 0x37, 0x54, 0x0b, 0x7c, + 0x8a, 0x5a, 0x49, 0xb6, 0x5e, 0x87, 0x5b, 0xb7, 0xd1, 0xb3, 0xfa, 0x0e, 0x2d, 0x95, 0xf7, 0x69, + 0xa1, 0xb3, 0x49, 0x15, 0xe8, 0x91, 0xf1, 0x55, 0x04, 0x8a, 0xc2, 0x7b, 0x06, 0x49, 0x8a, 0xef, + 0x91, 0xfd, 0xa1, 0x98, 0x94, 0x60, 0xee, 0x1e, 0xde, 0x5e, 0x92, 0x3f, 0x2a, 0x90, 0x17, 0xe3, + 0xa1, 0x95, 0x19, 0x3f, 0xa0, 0x63, 0x5f, 0xc4, 0x32, 0x8c, 0x40, 0x2d, 0x72, 0xd3, 0x40, 0x87, + 0xf9, 0xef, 0x03, 0x65, 0x4b, 0xda, 0xa9, 0xde, 0x2a, 0x81, 0x97, 0x23, 0xf7, 0x77, 0xb6, 0x44, + 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd1, 0x68, 0xa5, 0xc3, 0x1d, 0xd0, 0x4a, 0x16, 0x03, 0x80, + 0x52, 0x7a, 0x96, 0x7a, 0xdf, 0xa1, 0x46, 0xe0, 0x6b, 0xd4, 0xcc, 0x59, 0x94, 0x41, 0x99, 0xa4, + 0x4b, 0xcc, 0xf0, 0xa4, 0x1a, 0x9e, 0x8c, 0xf8, 0x8e, 0x1a, 0x8b, 0xf7, 0x8a, 0xec, 0xb2, 0x54, + 0x71, 0xa6, 0xaa, 0x60, 0xe9, 0xe1, 0x2a, 0x89, 0xaf, 0x50, 0x7b, 0xdf, 0x62, 0xc1, 0x59, 0x0c, + 0xfa, 0x37, 0x38, 0xf4, 0x68, 0x4f, 0x9f, 0x59, 0x0c, 0x18, 0xa3, 0xc6, 0x8e, 0xc5, 0x91, 0x3e, + 0xeb, 0x50, 0xfd, 0x3c, 0xbe, 0x41, 0x6d, 0xa1, 0x02, 0x12, 0x70, 0x91, 0xa4, 0xa1, 0x4f, 0xf2, + 0xe1, 0x18, 0x4f, 0x25, 0xf0, 0xd1, 0xec, 0x69, 0x5f, 0x77, 0x3e, 0x9c, 0x59, 0x5f, 0xb5, 0xfa, + 0x74, 0x34, 0x59, 0xb6, 0x74, 0xc4, 0xbb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x5c, 0x6b, + 
0x80, 0x51, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto new file mode 100644 index 000000000..04856f913 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto @@ -0,0 +1,93 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +import "google/protobuf/any.proto"; +package openapiextension.v1; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIExtensionV1"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.gnostic.v1"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +// +option objc_class_prefix = "OAE"; // "OpenAPI Extension" + +// The version number of OpenAPI compiler. +message Version { + int32 major = 1; + int32 minor = 2; + int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + string suffix = 4; +} + +// An encoded Request is written to the ExtensionHandler's stdin. +message ExtensionHandlerRequest { + + // The OpenAPI descriptions that were explicitly listed on the command line. + // The specifications will appear in the order they are specified to gnostic. + Wrapper wrapper = 1; + + // The version number of openapi compiler. + Version compiler_version = 3; +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +message ExtensionHandlerResponse { + + // true if the extension is handled by the extension handler; false otherwise + bool handled = 1; + + // Error message. If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. 
+ repeated string error = 2; + + // text output + google.protobuf.Any value = 3; +} + +message Wrapper { + // version of the OpenAPI specification in which this extension was written. + string version = 1; + + // Name of the extension + string extension_name = 2; + + // Must be a valid yaml for the proto + string yaml = 3; +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go new file mode 100644 index 000000000..94a8e62a7 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extensions.go @@ -0,0 +1,82 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openapiextension_v1 + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" +) + +type documentHandler func(version string, extensionName string, document string) +type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) + +func forInputYamlFromOpenapic(handler documentHandler) { + data, err := ioutil.ReadAll(os.Stdin) + if err != nil { + fmt.Println("File error:", err.Error()) + os.Exit(1) + } + if len(data) == 0 { + fmt.Println("No input data.") + os.Exit(1) + } + request := &ExtensionHandlerRequest{} + err = proto.Unmarshal(data, request) + if err != nil { + fmt.Println("Input error:", err.Error()) + os.Exit(1) + } + handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml) +} + +// ProcessExtension calles the handler for a specified extension. 
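ProcessExtension, defined just below, is the entry point an extension binary wires its handler into. A minimal do-nothing handler might look like the following sketch; the import alias is an assumption, since the package name openapiextension_v1 differs from its directory name:

```go
package main

import (
	"github.com/golang/protobuf/proto"

	openapiextension_v1 "github.com/googleapis/gnostic/extensions"
)

func main() {
	// Read an ExtensionHandlerRequest from stdin, report every extension as
	// unhandled, and write an ExtensionHandlerResponse to stdout.
	openapiextension_v1.ProcessExtension(
		func(extensionName string, yamlInput string) (bool, proto.Message, error) {
			return false, nil, nil
		})
}
```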
+func ProcessExtension(handleExtension extensionHandler) { + response := &ExtensionHandlerResponse{} + forInputYamlFromOpenapic( + func(version string, extensionName string, yamlInput string) { + var newObject proto.Message + var err error + + handled, newObject, err := handleExtension(extensionName, yamlInput) + if !handled { + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + + // If we reach here, then the extension is handled + response.Handled = true + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + response.Value, err = ptypes.MarshalAny(newObject) + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + }) + + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) +} diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt new file mode 100644 index 000000000..81316beb0 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/LICENSE.txt @@ -0,0 +1,7 @@ +Copyright © 2012 Greg Jones (greg.jones@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md new file mode 100644 index 000000000..09c9e7c17 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/README.md @@ -0,0 +1,25 @@ +httpcache +========= + +[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) + +Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses. + +It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). + +Cache Backends +-------------- + +- The built-in 'memory' cache stores responses in an in-memory map. +- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. 
+- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. +- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. +- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). +- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. +- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. +- [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork). + +License +------- + +- [MIT License](LICENSE.txt) diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go new file mode 100644 index 000000000..42e3129d8 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go @@ -0,0 +1,61 @@ +// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package +// to supplement an in-memory map with persistent storage +// +package diskcache + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "github.com/peterbourgon/diskv" + "io" +) + +// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage +type Cache struct { + d *diskv.Diskv +} + +// Get returns the response corresponding to key if present +func (c *Cache) Get(key string) (resp []byte, ok bool) { + key = keyToFilename(key) + resp, err := c.d.Read(key) + if err != nil { + return []byte{}, false + } + return resp, true +} + +// Set saves a response to the cache as key +func (c *Cache) Set(key string, resp []byte) { + key = keyToFilename(key) + c.d.WriteStream(key, bytes.NewReader(resp), true) +} + +// Delete removes the response with key from the cache +func (c *Cache) Delete(key string) { + key = keyToFilename(key) + c.d.Erase(key) +} + +func keyToFilename(key string) string { + h := md5.New() + io.WriteString(h, key) + return hex.EncodeToString(h.Sum(nil)) +} + +// New returns a new Cache that will store files in basePath +func New(basePath string) *Cache { + return &Cache{ + d: diskv.New(diskv.Options{ + BasePath: basePath, + CacheSizeMax: 100 * 1024 * 1024, // 100MB + }), + } +} + +// NewWithDiskv returns a new Cache using the provided Diskv as underlying +// storage. +func NewWithDiskv(d *diskv.Diskv) *Cache { + return &Cache{d} +} diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go new file mode 100644 index 000000000..f6a2ec4a5 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/httpcache.go @@ -0,0 +1,551 @@ +// Package httpcache provides a http.RoundTripper implementation that works as a +// mostly RFC-compliant cache for http responses. +// +// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client +// and not for a shared proxy). 
+// +package httpcache + +import ( + "bufio" + "bytes" + "errors" + "io" + "io/ioutil" + "net/http" + "net/http/httputil" + "strings" + "sync" + "time" +) + +const ( + stale = iota + fresh + transparent + // XFromCache is the header added to responses that are returned from the cache + XFromCache = "X-From-Cache" +) + +// A Cache interface is used by the Transport to store and retrieve responses. +type Cache interface { + // Get returns the []byte representation of a cached response and a bool + // set to true if the value isn't empty + Get(key string) (responseBytes []byte, ok bool) + // Set stores the []byte representation of a response against a key + Set(key string, responseBytes []byte) + // Delete removes the value associated with the key + Delete(key string) +} + +// cacheKey returns the cache key for req. +func cacheKey(req *http.Request) string { + if req.Method == http.MethodGet { + return req.URL.String() + } else { + return req.Method + " " + req.URL.String() + } +} + +// CachedResponse returns the cached http.Response for req if present, and nil +// otherwise. +func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) { + cachedVal, ok := c.Get(cacheKey(req)) + if !ok { + return + } + + b := bytes.NewBuffer(cachedVal) + return http.ReadResponse(bufio.NewReader(b), req) +} + +// MemoryCache is an implemtation of Cache that stores responses in an in-memory map. +type MemoryCache struct { + mu sync.RWMutex + items map[string][]byte +} + +// Get returns the []byte representation of the response and true if present, false if not +func (c *MemoryCache) Get(key string) (resp []byte, ok bool) { + c.mu.RLock() + resp, ok = c.items[key] + c.mu.RUnlock() + return resp, ok +} + +// Set saves response resp to the cache with key +func (c *MemoryCache) Set(key string, resp []byte) { + c.mu.Lock() + c.items[key] = resp + c.mu.Unlock() +} + +// Delete removes key from the cache +func (c *MemoryCache) Delete(key string) { + c.mu.Lock() + delete(c.items, key) + c.mu.Unlock() +} + +// NewMemoryCache returns a new Cache that will store items in an in-memory map +func NewMemoryCache() *MemoryCache { + c := &MemoryCache{items: map[string][]byte{}} + return c +} + +// Transport is an implementation of http.RoundTripper that will return values from a cache +// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since) +// to repeated requests allowing servers to return 304 / Not Modified +type Transport struct { + // The RoundTripper interface actually used to make requests + // If nil, http.DefaultTransport is used + Transport http.RoundTripper + Cache Cache + // If true, responses returned from the cache will be given an extra header, X-From-Cache + MarkCachedResponses bool +} + +// NewTransport returns a new Transport with the +// provided Cache implementation and MarkCachedResponses set to true +func NewTransport(c Cache) *Transport { + return &Transport{Cache: c, MarkCachedResponses: true} +} + +// Client returns an *http.Client that caches responses. 
+func (t *Transport) Client() *http.Client { + return &http.Client{Transport: t} +} + +// varyMatches will return false unless all of the cached values for the headers listed in Vary +// match the new request +func varyMatches(cachedResp *http.Response, req *http.Request) bool { + for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") { + header = http.CanonicalHeaderKey(header) + if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) { + return false + } + } + return true +} + +// RoundTrip takes a Request and returns a Response +// +// If there is a fresh Response already in cache, then it will be returned without connecting to +// the server. +// +// If there is a stale Response, then any validators it contains will be set on the new request +// to give the server a chance to respond with NotModified. If this happens, then the cached Response +// will be returned. +func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + cacheKey := cacheKey(req) + cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" + var cachedResp *http.Response + if cacheable { + cachedResp, err = CachedResponse(t.Cache, req) + } else { + // Need to invalidate an existing value + t.Cache.Delete(cacheKey) + } + + transport := t.Transport + if transport == nil { + transport = http.DefaultTransport + } + + if cacheable && cachedResp != nil && err == nil { + if t.MarkCachedResponses { + cachedResp.Header.Set(XFromCache, "1") + } + + if varyMatches(cachedResp, req) { + // Can only use cached value if the new request doesn't Vary significantly + freshness := getFreshness(cachedResp.Header, req.Header) + if freshness == fresh { + return cachedResp, nil + } + + if freshness == stale { + var req2 *http.Request + // Add validators if caller hasn't already done so + etag := cachedResp.Header.Get("etag") + if etag != "" && req.Header.Get("etag") == "" { + req2 = cloneRequest(req) + req2.Header.Set("if-none-match", etag) + } + lastModified := cachedResp.Header.Get("last-modified") + if lastModified != "" && req.Header.Get("last-modified") == "" { + if req2 == nil { + req2 = cloneRequest(req) + } + req2.Header.Set("if-modified-since", lastModified) + } + if req2 != nil { + req = req2 + } + } + } + + resp, err = transport.RoundTrip(req) + if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { + // Replace the 304 response with the one from cache, but update with some new headers + endToEndHeaders := getEndToEndHeaders(resp.Header) + for _, header := range endToEndHeaders { + cachedResp.Header[header] = resp.Header[header] + } + resp = cachedResp + } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && + req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { + // In case of transport failure and stale-if-error activated, returns cached content + // when available + return cachedResp, nil + } else { + if err != nil || resp.StatusCode != http.StatusOK { + t.Cache.Delete(cacheKey) + } + if err != nil { + return nil, err + } + } + } else { + reqCacheControl := parseCacheControl(req.Header) + if _, ok := reqCacheControl["only-if-cached"]; ok { + resp = newGatewayTimeoutResponse(req) + } else { + resp, err = transport.RoundTrip(req) + if err != nil { + return nil, err + } + } + } + + if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { + for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { + varyKey = 
http.CanonicalHeaderKey(varyKey) + fakeHeader := "X-Varied-" + varyKey + reqValue := req.Header.Get(varyKey) + if reqValue != "" { + resp.Header.Set(fakeHeader, reqValue) + } + } + switch req.Method { + case "GET": + // Delay caching until EOF is reached. + resp.Body = &cachingReadCloser{ + R: resp.Body, + OnEOF: func(r io.Reader) { + resp := *resp + resp.Body = ioutil.NopCloser(r) + respBytes, err := httputil.DumpResponse(&resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + }, + } + default: + respBytes, err := httputil.DumpResponse(resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + } + } else { + t.Cache.Delete(cacheKey) + } + return resp, nil +} + +// ErrNoDateHeader indicates that the HTTP headers contained no Date header. +var ErrNoDateHeader = errors.New("no Date header") + +// Date parses and returns the value of the Date header. +func Date(respHeaders http.Header) (date time.Time, err error) { + dateHeader := respHeaders.Get("date") + if dateHeader == "" { + err = ErrNoDateHeader + return + } + + return time.Parse(time.RFC1123, dateHeader) +} + +type realClock struct{} + +func (c *realClock) since(d time.Time) time.Duration { + return time.Since(d) +} + +type timer interface { + since(d time.Time) time.Duration +} + +var clock timer = &realClock{} + +// getFreshness will return one of fresh/stale/transparent based on the cache-control +// values of the request and the response +// +// fresh indicates the response can be returned +// stale indicates that the response needs validating before it is returned +// transparent indicates the response should not be used to fulfil the request +// +// Because this is only a private cache, 'public' and 'private' in cache-control aren't +// signficant. Similarly, smax-age isn't used. +func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + if _, ok := reqCacheControl["no-cache"]; ok { + return transparent + } + if _, ok := respCacheControl["no-cache"]; ok { + return stale + } + if _, ok := reqCacheControl["only-if-cached"]; ok { + return fresh + } + + date, err := Date(respHeaders) + if err != nil { + return stale + } + currentAge := clock.since(date) + + var lifetime time.Duration + var zeroDuration time.Duration + + // If a response includes both an Expires header and a max-age directive, + // the max-age directive overrides the Expires header, even if the Expires header is more restrictive. + if maxAge, ok := respCacheControl["max-age"]; ok { + lifetime, err = time.ParseDuration(maxAge + "s") + if err != nil { + lifetime = zeroDuration + } + } else { + expiresHeader := respHeaders.Get("Expires") + if expiresHeader != "" { + expires, err := time.Parse(time.RFC1123, expiresHeader) + if err != nil { + lifetime = zeroDuration + } else { + lifetime = expires.Sub(date) + } + } + } + + if maxAge, ok := reqCacheControl["max-age"]; ok { + // the client is willing to accept a response whose age is no greater than the specified time in seconds + lifetime, err = time.ParseDuration(maxAge + "s") + if err != nil { + lifetime = zeroDuration + } + } + if minfresh, ok := reqCacheControl["min-fresh"]; ok { + // the client wants a response that will still be fresh for at least the specified number of seconds. 
+ minfreshDuration, err := time.ParseDuration(minfresh + "s") + if err == nil { + currentAge = time.Duration(currentAge + minfreshDuration) + } + } + + if maxstale, ok := reqCacheControl["max-stale"]; ok { + // Indicates that the client is willing to accept a response that has exceeded its expiration time. + // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded + // its expiration time by no more than the specified number of seconds. + // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age. + // + // Responses served only because of a max-stale value are supposed to have a Warning header added to them, + // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different + // return-value available here. + if maxstale == "" { + return fresh + } + maxstaleDuration, err := time.ParseDuration(maxstale + "s") + if err == nil { + currentAge = time.Duration(currentAge - maxstaleDuration) + } + } + + if lifetime > currentAge { + return fresh + } + + return stale +} + +// Returns true if either the request or the response includes the stale-if-error +// cache control extension: https://tools.ietf.org/html/rfc5861 +func canStaleOnError(respHeaders, reqHeaders http.Header) bool { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + + var err error + lifetime := time.Duration(-1) + + if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + + if lifetime >= 0 { + date, err := Date(respHeaders) + if err != nil { + return false + } + currentAge := clock.since(date) + if lifetime > currentAge { + return true + } + } + + return false +} + +func getEndToEndHeaders(respHeaders http.Header) []string { + // These headers are always hop-by-hop + hopByHopHeaders := map[string]struct{}{ + "Connection": struct{}{}, + "Keep-Alive": struct{}{}, + "Proxy-Authenticate": struct{}{}, + "Proxy-Authorization": struct{}{}, + "Te": struct{}{}, + "Trailers": struct{}{}, + "Transfer-Encoding": struct{}{}, + "Upgrade": struct{}{}, + } + + for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { + // any header listed in connection, if present, is also considered hop-by-hop + if strings.Trim(extra, " ") != "" { + hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{} + } + } + endToEndHeaders := []string{} + for respHeader, _ := range respHeaders { + if _, ok := hopByHopHeaders[respHeader]; !ok { + endToEndHeaders = append(endToEndHeaders, respHeader) + } + } + return endToEndHeaders +} + +func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { + if _, ok := respCacheControl["no-store"]; ok { + return false + } + if _, ok := reqCacheControl["no-store"]; ok { + return false + } + return true +} + +func newGatewayTimeoutResponse(req *http.Request) *http.Response { + var braw bytes.Buffer + braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") + resp, err := http.ReadResponse(bufio.NewReader(&braw), req) + if err != nil { + panic(err) + } + return resp +} + +// cloneRequest returns a clone of the provided *http.Request. 
+// The clone is a shallow copy of the struct and its Header map. +// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +type cacheControl map[string]string + +func parseCacheControl(headers http.Header) cacheControl { + cc := cacheControl{} + ccHeader := headers.Get("Cache-Control") + for _, part := range strings.Split(ccHeader, ",") { + part = strings.Trim(part, " ") + if part == "" { + continue + } + if strings.ContainsRune(part, '=') { + keyval := strings.Split(part, "=") + cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") + } else { + cc[part] = "" + } + } + return cc +} + +// headerAllCommaSepValues returns all comma-separated values (each +// with whitespace trimmed) for header name in headers. According to +// Section 4.2 of the HTTP/1.1 spec +// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), +// values from multiple occurrences of a header should be concatenated, if +// the header's value is a comma-separated list. +func headerAllCommaSepValues(headers http.Header, name string) []string { + var vals []string + for _, val := range headers[http.CanonicalHeaderKey(name)] { + fields := strings.Split(val, ",") + for i, f := range fields { + fields[i] = strings.TrimSpace(f) + } + vals = append(vals, fields...) + } + return vals +} + +// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF +// handler with a full copy of the content read from R when EOF is +// reached. +type cachingReadCloser struct { + // Underlying ReadCloser. + R io.ReadCloser + // OnEOF is called with a copy of the content of R when EOF is reached. + OnEOF func(io.Reader) + + buf bytes.Buffer // buf stores a copy of the content of R. +} + +// Read reads the next len(p) bytes from R or until R is drained. The +// return value n is the number of bytes read. If R has no data to +// return, err is io.EOF and OnEOF is called with a full copy of what +// has been read so far. +func (r *cachingReadCloser) Read(p []byte) (n int, err error) { + n, err = r.R.Read(p) + r.buf.Write(p[:n]) + if err == io.EOF { + r.OnEOF(bytes.NewReader(r.buf.Bytes())) + } + return n, err +} + +func (r *cachingReadCloser) Close() error { + return r.R.Close() +} + +// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation +func NewMemoryCacheTransport() *Transport { + c := NewMemoryCache() + t := NewTransport(c) + return t +} diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go new file mode 100644 index 000000000..e474cd075 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/2q.go @@ -0,0 +1,223 @@ +package lru + +import ( + "fmt" + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // Default2QRecentRatio is the ratio of the 2Q cache dedicated + // to recently added entries that have only been accessed once. + Default2QRecentRatio = 0.25 + + // Default2QGhostEntries is the default ratio of ghost + // entries kept to track entries recently evicted + Default2QGhostEntries = 0.50 +) + +// TwoQueueCache is a thread-safe fixed size 2Q cache. +// 2Q is an enhancement over the standard LRU cache +// in that it tracks both frequently and recently used +// entries separately. 
This avoids a burst in access to new +// entries from evicting frequently used entries. It adds some +// additional tracking overhead to the standard LRU cache, and is +// computationally about 2x the cost, and adds some metadata over +// head. The ARCCache is similar, but does not require setting any +// parameters. +type TwoQueueCache struct { + size int + recentSize int + + recent simplelru.LRUCache + frequent simplelru.LRUCache + recentEvict simplelru.LRUCache + lock sync.RWMutex +} + +// New2Q creates a new TwoQueueCache using the default +// values for the parameters. +func New2Q(size int) (*TwoQueueCache, error) { + return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) +} + +// New2QParams creates a new TwoQueueCache using the provided +// parameter values. +func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { + if size <= 0 { + return nil, fmt.Errorf("invalid size") + } + if recentRatio < 0.0 || recentRatio > 1.0 { + return nil, fmt.Errorf("invalid recent ratio") + } + if ghostRatio < 0.0 || ghostRatio > 1.0 { + return nil, fmt.Errorf("invalid ghost ratio") + } + + // Determine the sub-sizes + recentSize := int(float64(size) * recentRatio) + evictSize := int(float64(size) * ghostRatio) + + // Allocate the LRUs + recent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + frequent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + recentEvict, err := simplelru.NewLRU(evictSize, nil) + if err != nil { + return nil, err + } + + // Initialize the cache + c := &TwoQueueCache{ + size: size, + recentSize: recentSize, + recent: recent, + frequent: frequent, + recentEvict: recentEvict, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if this is a frequent value + if val, ok := c.frequent.Get(key); ok { + return val, ok + } + + // If the value is contained in recent, then we + // promote it to frequent + if val, ok := c.recent.Peek(key); ok { + c.recent.Remove(key) + c.frequent.Add(key, val) + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. 
+func (c *TwoQueueCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is frequently used already, + // and just update the value + if c.frequent.Contains(key) { + c.frequent.Add(key, value) + return + } + + // Check if the value is recently used, and promote + // the value into the frequent list + if c.recent.Contains(key) { + c.recent.Remove(key) + c.frequent.Add(key, value) + return + } + + // If the value was recently evicted, add it to the + // frequently used list + if c.recentEvict.Contains(key) { + c.ensureSpace(true) + c.recentEvict.Remove(key) + c.frequent.Add(key, value) + return + } + + // Add to the recently seen list + c.ensureSpace(false) + c.recent.Add(key, value) + return +} + +// ensureSpace is used to ensure we have space in the cache +func (c *TwoQueueCache) ensureSpace(recentEvict bool) { + // If we have space, nothing to do + recentLen := c.recent.Len() + freqLen := c.frequent.Len() + if recentLen+freqLen < c.size { + return + } + + // If the recent buffer is larger than + // the target, evict from there + if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { + k, _, _ := c.recent.RemoveOldest() + c.recentEvict.Add(k, nil) + return + } + + // Remove from the frequent list otherwise + c.frequent.RemoveOldest() +} + +// Len returns the number of items in the cache. +func (c *TwoQueueCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.recent.Len() + c.frequent.Len() +} + +// Keys returns a slice of the keys in the cache. +// The frequently used keys are first in the returned slice. +func (c *TwoQueueCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.frequent.Keys() + k2 := c.recent.Keys() + return append(k1, k2...) +} + +// Remove removes the provided key from the cache. +func (c *TwoQueueCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.frequent.Remove(key) { + return + } + if c.recent.Remove(key) { + return + } + if c.recentEvict.Remove(key) { + return + } +} + +// Purge is used to completely clear the cache. +func (c *TwoQueueCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.recent.Purge() + c.frequent.Purge() + c.recentEvict.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *TwoQueueCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.frequent.Contains(key) || c.recent.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.frequent.Peek(key); ok { + return val, ok + } + return c.recent.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 000000000..be2cc4dfb --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md new file mode 100644 index 000000000..33e58cfaf --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/README.md @@ -0,0 +1,25 @@ +golang-lru +========== + +This provides the `lru` package which implements a fixed-size +thread safe LRU cache. It is based on the cache in Groupcache. + +Documentation +============= + +Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru) + +Example +======= + +Using the LRU is very simple: + +```go +l, _ := New(128) +for i := 0; i < 256; i++ { + l.Add(i, nil) +} +if l.Len() != 128 { + panic(fmt.Sprintf("bad len: %v", l.Len())) +} +``` diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go new file mode 100644 index 000000000..555225a21 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/arc.go @@ -0,0 +1,257 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). +// ARC is an enhancement over the standard LRU cache in that tracks both +// frequency and recency of use. This avoids a burst in access to new +// entries from evicting the frequently used older entries. It adds some +// additional tracking overhead to a standard LRU cache, computationally +// it is roughly 2x the cost, and the extra memory overhead is linear +// with the size of the cache. ARC has been patented by IBM, but is +// similar to the TwoQueueCache (2Q) which requires setting parameters. 
+type ARCCache struct { + size int // Size is the total capacity of the cache + p int // P is the dynamic preference towards T1 or T2 + + t1 simplelru.LRUCache // T1 is the LRU for recently accessed items + b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 + + t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items + b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 + + lock sync.RWMutex +} + +// NewARC creates an ARC of the given size +func NewARC(size int) (*ARCCache, error) { + // Create the sub LRUs + b1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + b2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + + // Initialize the ARC + c := &ARCCache{ + size: size, + p: 0, + t1: t1, + b1: b1, + t2: t2, + b2: b2, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // If the value is contained in T1 (recent), then + // promote it to T2 (frequent) + if val, ok := c.t1.Peek(key); ok { + c.t1.Remove(key) + c.t2.Add(key, val) + return val, ok + } + + // Check if the value is contained in T2 (frequent) + if val, ok := c.t2.Get(key); ok { + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. +func (c *ARCCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is contained in T1 (recent), and potentially + // promote it to frequent T2 + if c.t1.Contains(key) { + c.t1.Remove(key) + c.t2.Add(key, value) + return + } + + // Check if the value is already in T2 (frequent) and update it + if c.t2.Contains(key) { + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // recently used list + if c.b1.Contains(key) { + // T1 set is too small, increase P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b2Len > b1Len { + delta = b2Len / b1Len + } + if c.p+delta >= c.size { + c.p = c.size + } else { + c.p += delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Remove from B1 + c.b1.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // frequently used list + if c.b2.Contains(key) { + // T2 set is too small, decrease P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b1Len > b2Len { + delta = b1Len / b2Len + } + if delta >= c.p { + c.p = 0 + } else { + c.p -= delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(true) + } + + // Remove from B2 + c.b2.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Keep the size of the ghost buffers trim + if c.b1.Len() > c.size-c.p { + c.b1.RemoveOldest() + } + if c.b2.Len() > c.p { + c.b2.RemoveOldest() + } + + // Add to the recently seen list + c.t1.Add(key, value) + return +} + +// replace is used to adaptively evict from either T1 or T2 +// based on the current learned value of P +func (c *ARCCache) 
replace(b2ContainsKey bool) { + t1Len := c.t1.Len() + if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { + k, _, ok := c.t1.RemoveOldest() + if ok { + c.b1.Add(k, nil) + } + } else { + k, _, ok := c.t2.RemoveOldest() + if ok { + c.b2.Add(k, nil) + } + } +} + +// Len returns the number of cached entries +func (c *ARCCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Len() + c.t2.Len() +} + +// Keys returns all the cached keys +func (c *ARCCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.t1.Keys() + k2 := c.t2.Keys() + return append(k1, k2...) +} + +// Remove is used to purge a key from the cache +func (c *ARCCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.t1.Remove(key) { + return + } + if c.t2.Remove(key) { + return + } + if c.b1.Remove(key) { + return + } + if c.b2.Remove(key) { + return + } +} + +// Purge is used to clear the cache +func (c *ARCCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.t1.Purge() + c.t2.Purge() + c.b1.Purge() + c.b2.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *ARCCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Contains(key) || c.t2.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.t1.Peek(key); ok { + return val, ok + } + return c.t2.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go new file mode 100644 index 000000000..2547df979 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/doc.go @@ -0,0 +1,21 @@ +// Package lru provides three different LRU caches of varying sophistication. +// +// Cache is a simple LRU cache. It is based on the +// LRU implementation in groupcache: +// https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, +// at the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as +// well as recent usage in both the frequent and recent caches. Its +// computational overhead is comparable to TwoQueueCache, but the memory +// overhead is linear with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go new file mode 100644 index 000000000..c8d9b0a23 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -0,0 +1,110 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// Cache is a thread-safe fixed size LRU cache. +type Cache struct { + lru simplelru.LRUCache + lock sync.RWMutex +} + +// New creates an LRU of the given size. +func New(size int) (*Cache, error) { + return NewWithEvict(size, nil) +} + +// NewWithEvict constructs a fixed size cache with the given eviction +// callback. 
+func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { + lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) + if err != nil { + return nil, err + } + c := &Cache{ + lru: lru, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *Cache) Purge() { + c.lock.Lock() + c.lru.Purge() + c.lock.Unlock() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *Cache) Add(key, value interface{}) (evicted bool) { + c.lock.Lock() + defer c.lock.Unlock() + return c.lru.Add(key, value) +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + return c.lru.Get(key) +} + +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. +func (c *Cache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Contains(key) +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Peek(key) +} + +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.lru.Contains(key) { + return true, false + } + evicted = c.lru.Add(key, value) + return false, evicted +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key interface{}) { + c.lock.Lock() + c.lru.Remove(key) + c.lock.Unlock() +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + c.lock.Lock() + c.lru.RemoveOldest() + c.lock.Unlock() +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *Cache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Keys() +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Len() +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go new file mode 100644 index 000000000..5673773b2 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -0,0 +1,161 @@ +package simplelru + +import ( + "container/list" + "errors" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback func(key interface{}, value interface{}) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU struct { + size int + evictList *list.List + items map[interface{}]*list.Element + onEvict EvictCallback +} + +// entry is used to hold a value in the evictList +type entry struct { + key interface{} + value interface{} +} + +// NewLRU constructs an LRU of the given size +func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { + if size <= 0 { + return nil, errors.New("Must provide a positive size") + } + c := &LRU{ + size: size, + evictList: list.New(), + items: make(map[interface{}]*list.Element), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. 
+func (c *LRU) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value.(*entry).value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU) Add(key, value interface{}) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return false + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + evict := c.evictList.Len() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + return ent.Value.(*entry).value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU) Contains(key interface{}) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { + var ent *list.Element + if ent, ok = c.items[key]; ok { + return ent.Value.(*entry).value, true + } + return nil, ok +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU) Remove(key interface{}) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. +func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// GetOldest returns the oldest entry +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU) Keys() []interface{} { + keys := make([]interface{}, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { + keys[i] = ent.Value.(*entry).key + i++ + } + return keys +} + +// Len returns the number of items in the cache. +func (c *LRU) Len() int { + return c.evictList.Len() +} + +// removeOldest removes the oldest item from the cache. +func (c *LRU) removeOldest() { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU) removeElement(e *list.Element) { + c.evictList.Remove(e) + kv := e.Value.(*entry) + delete(c.items, kv.key) + if c.onEvict != nil { + c.onEvict(kv.key, kv.value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go new file mode 100644 index 000000000..744cac01c --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -0,0 +1,37 @@ +package simplelru + + +// LRUCache is the interface for simple LRU cache. 
+type LRUCache interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key, value interface{}) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. #value, isFound + Get(key interface{}) (value interface{}, ok bool) + + // Check if a key exsists in cache without updating the recent-ness. + Contains(key interface{}) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. + Peek(key interface{}) (value interface{}, ok bool) + + // Removes a key from the cache. + Remove(key interface{}) bool + + // Removes the oldest entry from cache. + RemoveOldest() (interface{}, interface{}, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (interface{}, interface{}, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []interface{} + + // Returns the number of items in the cache. + Len() int + + // Clear all cache entries + Purge() +} diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock index f34f5b4ad..3719afe8e 100644 --- a/vendor/github.com/json-iterator/go/Gopkg.lock +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -2,32 +2,26 @@ [[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - branch = "master" - name = "github.com/google/gofuzz" + name = "github.com/json-iterator/go" packages = ["."] - revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" + revision = "ca39e5af3ece67bbcda3d0f4f56a8e24d9f2dad4" + version = "1.1.3" [[projects]] - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" + version = "1.0.0" [[projects]] - name = "github.com/stretchr/testify" - packages = ["assert","require"] - revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0" - version = "v1.1.4" + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f" + version = "1.0.0" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "f8b7cf3941d3792cbbd570bb53c093adaf774334d1162c651565c97a58dc9d09" + inputs-digest = "56a0b9e9e61d2bc8af5e1b68537401b7f4d60805eda3d107058f3171aa5cf793" solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml index 0ac55ef87..5801ffa1e 100644 --- a/vendor/github.com/json-iterator/go/Gopkg.toml +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -19,15 +19,8 @@ # name = "github.com/x/y" # version = "2.4.0" +ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] [[constraint]] - name = "github.com/davecgh/go-spew" - version = "1.1.0" - -[[constraint]] - branch = "master" - name = "github.com/google/gofuzz" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "1.1.4" + name = "github.com/modern-go/reflect2" + version = "1.0.0" diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md index 9f404aaa3..54d5afe95 100644 --- a/vendor/github.com/json-iterator/go/README.md +++ b/vendor/github.com/json-iterator/go/README.md @@ -31,6 
+31,9 @@ Raw Result (easyjson requires static code generation) | easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | | jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | +Always benchmark with your own workload. +The result depends heavily on the data input. + # Usage 100% compatibility with standard lib diff --git a/vendor/github.com/json-iterator/go/feature_adapter.go b/vendor/github.com/json-iterator/go/adapter.go similarity index 94% rename from vendor/github.com/json-iterator/go/feature_adapter.go rename to vendor/github.com/json-iterator/go/adapter.go index 1ba32a40b..f371bfed7 100644 --- a/vendor/github.com/json-iterator/go/feature_adapter.go +++ b/vendor/github.com/json-iterator/go/adapter.go @@ -16,15 +16,6 @@ func Unmarshal(data []byte, v interface{}) error { return ConfigDefault.Unmarshal(data, v) } -func lastNotSpacePos(data []byte) int { - for i := len(data) - 1; i >= 0; i-- { - if data[i] != ' ' && data[i] != '\t' && data[i] != '\r' && data[i] != '\n' { - return i + 1 - } - } - return 0 -} - // UnmarshalFromString convenient method to read from string instead of []byte func UnmarshalFromString(str string, v interface{}) error { return ConfigDefault.UnmarshalFromString(str, v) @@ -86,7 +77,14 @@ func (adapter *Decoder) Decode(obj interface{}) error { // More is there more? func (adapter *Decoder) More() bool { - return adapter.iter.head != adapter.iter.tail + iter := adapter.iter + if iter.Error != nil { + return false + } + if iter.head != iter.tail { + return true + } + return iter.loadMore() } // Buffered remaining buffer diff --git a/vendor/github.com/json-iterator/go/feature_any.go b/vendor/github.com/json-iterator/go/any.go similarity index 75% rename from vendor/github.com/json-iterator/go/feature_any.go rename to vendor/github.com/json-iterator/go/any.go index 87716d1fc..daecfed61 100644 --- a/vendor/github.com/json-iterator/go/feature_any.go +++ b/vendor/github.com/json-iterator/go/any.go @@ -3,8 +3,11 @@ package jsoniter import ( "errors" "fmt" + "github.com/modern-go/reflect2" "io" "reflect" + "strconv" + "unsafe" ) // Any generic object representation. 
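The adapter.go hunk above changes Decoder.More so that it reports false once the iterator has recorded an error and, when the buffered window is exhausted, asks the underlying reader for more bytes instead of only comparing head and tail. A minimal usage sketch of that json.Decoder-compatible surface (not part of this patch; the input string is illustrative):

package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Two JSON documents back to back: after the first Decode, More reports
	// whether another value is buffered or can still be read from the reader.
	dec := jsoniter.NewDecoder(strings.NewReader(`{"a":1} {"b":2}`))
	for dec.More() {
		var v map[string]int
		if err := dec.Decode(&v); err != nil {
			fmt.Println("decode error:", err)
			return
		}
		fmt.Println(v)
	}
}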
@@ -25,7 +28,6 @@ type Any interface { ToString() string ToVal(val interface{}) Get(path ...interface{}) Any - // TODO: add Set Size() int Keys() []string GetInterface() interface{} @@ -35,7 +37,7 @@ type Any interface { type baseAny struct{} func (any *baseAny) Get(path ...interface{}) Any { - return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} } func (any *baseAny) Size() int { @@ -89,7 +91,7 @@ func Wrap(val interface{}) Any { if isAny { return asAny } - typ := reflect.TypeOf(val) + typ := reflect2.TypeOf(val) switch typ.Kind() { case reflect.Slice: return wrapArray(val) @@ -100,6 +102,9 @@ func Wrap(val interface{}) Any { case reflect.String: return WrapString(val.(string)) case reflect.Int: + if strconv.IntSize == 32 { + return WrapInt32(int32(val.(int))) + } return WrapInt64(int64(val.(int))) case reflect.Int8: return WrapInt32(int32(val.(int8))) @@ -110,7 +115,15 @@ func Wrap(val interface{}) Any { case reflect.Int64: return WrapInt64(val.(int64)) case reflect.Uint: + if strconv.IntSize == 32 { + return WrapUint32(uint32(val.(uint))) + } return WrapUint64(uint64(val.(uint))) + case reflect.Uintptr: + if ptrSize == 32 { + return WrapUint32(uint32(val.(uintptr))) + } + return WrapUint64(uint64(val.(uintptr))) case reflect.Uint8: return WrapUint32(uint32(val.(uint8))) case reflect.Uint16: @@ -243,3 +256,66 @@ func locatePath(iter *Iterator, path []interface{}) Any { } return iter.readAny() } + +var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() + +func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +type anyCodec struct { + valType reflect2.Type +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + panic("not implemented") +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + any.WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + return any.Size() == 0 +} + +type directAnyCodec struct { +} + +func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *(*Any)(ptr) = iter.readAny() +} + +func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + any := *(*Any)(ptr) + any.WriteTo(stream) +} + +func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { + any := *(*Any)(ptr) + return any.Size() == 0 +} diff --git a/vendor/github.com/json-iterator/go/feature_any_array.go b/vendor/github.com/json-iterator/go/any_array.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_array.go rename to vendor/github.com/json-iterator/go/any_array.go diff --git a/vendor/github.com/json-iterator/go/feature_any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_bool.go rename to vendor/github.com/json-iterator/go/any_bool.go diff --git a/vendor/github.com/json-iterator/go/feature_any_float.go b/vendor/github.com/json-iterator/go/any_float.go similarity index 100% rename from 
vendor/github.com/json-iterator/go/feature_any_float.go rename to vendor/github.com/json-iterator/go/any_float.go diff --git a/vendor/github.com/json-iterator/go/feature_any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_int32.go rename to vendor/github.com/json-iterator/go/any_int32.go diff --git a/vendor/github.com/json-iterator/go/feature_any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_int64.go rename to vendor/github.com/json-iterator/go/any_int64.go diff --git a/vendor/github.com/json-iterator/go/feature_any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_invalid.go rename to vendor/github.com/json-iterator/go/any_invalid.go diff --git a/vendor/github.com/json-iterator/go/feature_any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_nil.go rename to vendor/github.com/json-iterator/go/any_nil.go diff --git a/vendor/github.com/json-iterator/go/feature_any_number.go b/vendor/github.com/json-iterator/go/any_number.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_number.go rename to vendor/github.com/json-iterator/go/any_number.go diff --git a/vendor/github.com/json-iterator/go/feature_any_object.go b/vendor/github.com/json-iterator/go/any_object.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_object.go rename to vendor/github.com/json-iterator/go/any_object.go diff --git a/vendor/github.com/json-iterator/go/feature_any_string.go b/vendor/github.com/json-iterator/go/any_str.go similarity index 97% rename from vendor/github.com/json-iterator/go/feature_any_string.go rename to vendor/github.com/json-iterator/go/any_str.go index abf060bd5..a4b93c78c 100644 --- a/vendor/github.com/json-iterator/go/feature_any_string.go +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -14,7 +14,7 @@ func (any *stringAny) Get(path ...interface{}) Any { if len(path) == 0 { return any } - return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} } func (any *stringAny) Parse() *Iterator { diff --git a/vendor/github.com/json-iterator/go/feature_any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_uint32.go rename to vendor/github.com/json-iterator/go/any_uint32.go diff --git a/vendor/github.com/json-iterator/go/feature_any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_uint64.go rename to vendor/github.com/json-iterator/go/any_uint64.go diff --git a/vendor/github.com/json-iterator/go/feature_config.go b/vendor/github.com/json-iterator/go/config.go similarity index 63% rename from vendor/github.com/json-iterator/go/feature_config.go rename to vendor/github.com/json-iterator/go/config.go index c5604ea44..835819129 100644 --- a/vendor/github.com/json-iterator/go/feature_config.go +++ b/vendor/github.com/json-iterator/go/config.go @@ -2,10 +2,13 @@ package jsoniter import ( "encoding/json" - "errors" "io" "reflect" + "sync" "unsafe" + + "github.com/modern-go/concurrent" + 
"github.com/modern-go/reflect2" ) // Config customize how the API should behave. @@ -21,6 +24,7 @@ type Config struct { OnlyTaggedField bool ValidateJsonRawMessage bool ObjectFieldMustBeSimpleString bool + CaseSensitive bool } // API the public interface of this package. @@ -38,6 +42,8 @@ type API interface { NewDecoder(reader io.Reader) *Decoder Valid(data []byte) bool RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder } // ConfigDefault the default API @@ -59,6 +65,64 @@ var ConfigFastest = Config{ ObjectFieldMustBeSimpleString: true, // do not unescape object field }.Froze() +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + disallowUnknownFields bool + decoderCache *concurrent.Map + encoderCache *concurrent.Map + extensions []Extension + streamPool *sync.Pool + iteratorPool *sync.Pool + caseSensitive bool +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = concurrent.NewMap() + cfg.encoderCache = concurrent.NewMap() +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { + cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { + decoder, found := cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} + +var cfgCache = concurrent.NewMap() + +func getFrozenConfigFromCache(cfg Config) *frozenConfig { + obj, found := cfgCache.Load(cfg) + if found { + return obj.(*frozenConfig) + } + return nil +} + +func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { + cfgCache.Store(cfg, frozenConfig) +} + // Froze forge API from config func (cfg Config) Froze() API { api := &frozenConfig{ @@ -67,21 +131,38 @@ func (cfg Config) Froze() API { objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, onlyTaggedField: cfg.OnlyTaggedField, disallowUnknownFields: cfg.DisallowUnknownFields, - streamPool: make(chan *Stream, 16), - iteratorPool: make(chan *Iterator, 16), + caseSensitive: cfg.CaseSensitive, + } + api.streamPool = &sync.Pool{ + New: func() interface{} { + return NewStream(api, nil, 512) + }, + } + api.iteratorPool = &sync.Pool{ + New: func() interface{} { + return NewIterator(api) + }, } api.initCache() + encoderExtension := EncoderExtension{} + decoderExtension := DecoderExtension{} if cfg.MarshalFloatWith6Digits { - api.marshalFloatWith6Digits() + api.marshalFloatWith6Digits(encoderExtension) } if cfg.EscapeHTML { - api.escapeHTML() + api.escapeHTML(encoderExtension) } if cfg.UseNumber { - api.useNumber() + api.useNumber(decoderExtension) } if cfg.ValidateJsonRawMessage { - api.validateJsonRawMessage() + api.validateJsonRawMessage(encoderExtension) + } + if len(encoderExtension) > 0 { + api.extensions = append(api.extensions, encoderExtension) + } + if len(decoderExtension) > 0 { + api.extensions = append(api.extensions, decoderExtension) } api.configBeforeFrozen = cfg return api @@ -97,7 +178,7 @@ func (cfg Config) frozeWithCacheReuse() *frozenConfig { return api } -func (cfg *frozenConfig) validateJsonRawMessage() { 
+func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { rawMessage := *(*json.RawMessage)(ptr) iter := cfg.BorrowIterator([]byte(rawMessage)) @@ -111,18 +192,23 @@ func (cfg *frozenConfig) validateJsonRawMessage() { }, func(ptr unsafe.Pointer) bool { return false }} - cfg.addEncoderToCache(reflect.TypeOf((*json.RawMessage)(nil)).Elem(), encoder) - cfg.addEncoderToCache(reflect.TypeOf((*RawMessage)(nil)).Elem(), encoder) + extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder + extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder } -func (cfg *frozenConfig) useNumber() { - cfg.addDecoderToCache(reflect.TypeOf((*interface{})(nil)).Elem(), &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { +func (cfg *frozenConfig) useNumber(extension DecoderExtension) { + extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } if iter.WhatIsNext() == NumberValue { *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) } else { *((*interface{})(ptr)) = iter.Read() } - }}) + }} } func (cfg *frozenConfig) getTagKey() string { tagKey := cfg.configBeforeFrozen.TagKey @@ -143,10 +229,6 @@ func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { stream.WriteFloat32Lossy(*((*float32)(ptr))) } -func (encoder *lossyFloat32Encoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { return *((*float32)(ptr)) == 0 } @@ -158,20 +240,16 @@ func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { stream.WriteFloat64Lossy(*((*float64)(ptr))) } -func (encoder *lossyFloat64Encoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { return *((*float64)(ptr)) == 0 } // EnableLossyFloatMarshalling keeps 10**(-6) precision // for float variables for better performance. 
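The lossyFloat32Encoder/lossyFloat64Encoder kept above back the MarshalFloatWith6Digits option; with this upgrade they are installed through an EncoderExtension rather than written directly into the encoder cache. A small illustrative sketch of turning the option on (not part of the patch):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Froze builds the frozen configuration; MarshalFloatWith6Digits swaps in
	// the lossy float encoders, keeping roughly 10^-6 precision for speed.
	api := jsoniter.Config{MarshalFloatWith6Digits: true}.Froze()

	out, err := api.Marshal(3.14159265358979)
	if err != nil {
		panic(err)
	}
	// Prints a shortened, ~6-digit representation of the float.
	fmt.Println(string(out))
}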
-func (cfg *frozenConfig) marshalFloatWith6Digits() { +func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { // for better performance - cfg.addEncoderToCache(reflect.TypeOf((*float32)(nil)).Elem(), &lossyFloat32Encoder{}) - cfg.addEncoderToCache(reflect.TypeOf((*float64)(nil)).Elem(), &lossyFloat64Encoder{}) + extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} + extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} } type htmlEscapedStringEncoder struct { @@ -182,16 +260,12 @@ func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stre stream.WriteStringWithHTMLEscaped(str) } -func (encoder *htmlEscapedStringEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { return *((*string)(ptr)) == "" } -func (cfg *frozenConfig) escapeHTML() { - cfg.addEncoderToCache(reflect.TypeOf((*string)(nil)).Elem(), &htmlEscapedStringEncoder{}) +func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { + encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} } func (cfg *frozenConfig) cleanDecoders() { @@ -245,19 +319,17 @@ func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([] func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { data := []byte(str) - data = data[:lastNotSpacePos(data)] iter := cfg.BorrowIterator(data) defer cfg.ReturnIterator(iter) iter.ReadVal(v) - if iter.head == iter.tail { - iter.loadMore() - } - if iter.Error == io.EOF { - return nil - } - if iter.Error == nil { - iter.ReportError("UnmarshalFromString", "there are bytes left after unmarshal") + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") return iter.Error } @@ -268,24 +340,17 @@ func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { } func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { - data = data[:lastNotSpacePos(data)] iter := cfg.BorrowIterator(data) defer cfg.ReturnIterator(iter) - typ := reflect.TypeOf(v) - if typ.Kind() != reflect.Ptr { - // return non-pointer error - return errors.New("the second param must be ptr type") - } iter.ReadVal(v) - if iter.head == iter.tail { - iter.loadMore() - } - if iter.Error == io.EOF { - return nil - } - if iter.Error == nil { - iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") return iter.Error } diff --git a/vendor/github.com/json-iterator/go/feature_config_with_sync_map.go b/vendor/github.com/json-iterator/go/feature_config_with_sync_map.go deleted file mode 100644 index 02dfaee74..000000000 --- a/vendor/github.com/json-iterator/go/feature_config_with_sync_map.go +++ /dev/null @@ -1,65 +0,0 @@ -//+build go1.9 - -package jsoniter - -import ( - "reflect" - "sync" -) - -type frozenConfig struct { - configBeforeFrozen Config - sortMapKeys bool - indentionStep int - objectFieldMustBeSimpleString bool - onlyTaggedField bool - disallowUnknownFields bool - decoderCache sync.Map - encoderCache sync.Map - extensions []Extension - streamPool chan *Stream - iteratorPool chan *Iterator -} - -func (cfg *frozenConfig) initCache() { 
- cfg.decoderCache = sync.Map{} - cfg.encoderCache = sync.Map{} -} - -func (cfg *frozenConfig) addDecoderToCache(cacheKey reflect.Type, decoder ValDecoder) { - cfg.decoderCache.Store(cacheKey, decoder) -} - -func (cfg *frozenConfig) addEncoderToCache(cacheKey reflect.Type, encoder ValEncoder) { - cfg.encoderCache.Store(cacheKey, encoder) -} - -func (cfg *frozenConfig) getDecoderFromCache(cacheKey reflect.Type) ValDecoder { - decoder, found := cfg.decoderCache.Load(cacheKey) - if found { - return decoder.(ValDecoder) - } - return nil -} - -func (cfg *frozenConfig) getEncoderFromCache(cacheKey reflect.Type) ValEncoder { - encoder, found := cfg.encoderCache.Load(cacheKey) - if found { - return encoder.(ValEncoder) - } - return nil -} - -var cfgCache = &sync.Map{} - -func getFrozenConfigFromCache(cfg Config) *frozenConfig { - obj, found := cfgCache.Load(cfg) - if found { - return obj.(*frozenConfig) - } - return nil -} - -func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { - cfgCache.Store(cfg, frozenConfig) -} diff --git a/vendor/github.com/json-iterator/go/feature_config_without_sync_map.go b/vendor/github.com/json-iterator/go/feature_config_without_sync_map.go deleted file mode 100644 index 23e557558..000000000 --- a/vendor/github.com/json-iterator/go/feature_config_without_sync_map.go +++ /dev/null @@ -1,71 +0,0 @@ -//+build !go1.9 - -package jsoniter - -import ( - "reflect" - "sync" -) - -type frozenConfig struct { - configBeforeFrozen Config - sortMapKeys bool - indentionStep int - objectFieldMustBeSimpleString bool - onlyTaggedField bool - disallowUnknownFields bool - cacheLock *sync.RWMutex - decoderCache map[reflect.Type]ValDecoder - encoderCache map[reflect.Type]ValEncoder - extensions []Extension - streamPool chan *Stream - iteratorPool chan *Iterator -} - -func (cfg *frozenConfig) initCache() { - cfg.cacheLock = &sync.RWMutex{} - cfg.decoderCache = map[reflect.Type]ValDecoder{} - cfg.encoderCache = map[reflect.Type]ValEncoder{} -} - -func (cfg *frozenConfig) addDecoderToCache(cacheKey reflect.Type, decoder ValDecoder) { - cfg.cacheLock.Lock() - cfg.decoderCache[cacheKey] = decoder - cfg.cacheLock.Unlock() -} - -func (cfg *frozenConfig) addEncoderToCache(cacheKey reflect.Type, encoder ValEncoder) { - cfg.cacheLock.Lock() - cfg.encoderCache[cacheKey] = encoder - cfg.cacheLock.Unlock() -} - -func (cfg *frozenConfig) getDecoderFromCache(cacheKey reflect.Type) ValDecoder { - cfg.cacheLock.RLock() - decoder, _ := cfg.decoderCache[cacheKey].(ValDecoder) - cfg.cacheLock.RUnlock() - return decoder -} - -func (cfg *frozenConfig) getEncoderFromCache(cacheKey reflect.Type) ValEncoder { - cfg.cacheLock.RLock() - encoder, _ := cfg.encoderCache[cacheKey].(ValEncoder) - cfg.cacheLock.RUnlock() - return encoder -} - -var cfgCacheLock = &sync.RWMutex{} -var cfgCache = map[Config]*frozenConfig{} - -func getFrozenConfigFromCache(cfg Config) *frozenConfig { - cfgCacheLock.RLock() - frozenConfig := cfgCache[cfg] - cfgCacheLock.RUnlock() - return frozenConfig -} - -func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { - cfgCacheLock.Lock() - cfgCache[cfg] = frozenConfig - cfgCacheLock.Unlock() -} diff --git a/vendor/github.com/json-iterator/go/feature_json_number.go b/vendor/github.com/json-iterator/go/feature_json_number.go deleted file mode 100644 index e187b200a..000000000 --- a/vendor/github.com/json-iterator/go/feature_json_number.go +++ /dev/null @@ -1,31 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "strconv" -) - -type Number string - -// String 
returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -func CastJsonNumber(val interface{}) (string, bool) { - switch typedVal := val.(type) { - case json.Number: - return string(typedVal), true - case Number: - return string(typedVal), true - } - return "", false -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect.go b/vendor/github.com/json-iterator/go/feature_reflect.go deleted file mode 100644 index 75d533b07..000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect.go +++ /dev/null @@ -1,607 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/json" - "fmt" - "reflect" - "time" - "unsafe" -) - -// ValDecoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValDecoder with json.Decoder. -// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). -// -// Reflection on type to create decoders, which is then cached -// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions -// 1. create instance of new value, for example *int will need a int to be allocated -// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New -// 3. assignment to map, both key and value will be reflect.Value -// For a simple struct binding, it will be reflect.Value free and allocation free -type ValDecoder interface { - Decode(ptr unsafe.Pointer, iter *Iterator) -} - -// ValEncoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValEncoder with json.Encoder. -// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
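The ValDecoder/ValEncoder comments above describe the old reflection layer being deleted; after this upgrade an encoder only needs Encode and IsEmpty over an unsafe.Pointer (the EncodeInterface method disappears, as the removals elsewhere in this patch show). A hedged sketch of plugging a custom encoder into jsoniter through the existing RegisterTypeEncoder hook (the Celsius type and its formatting are illustrative only):

package main

import (
	"fmt"
	"unsafe"

	jsoniter "github.com/json-iterator/go"
)

// Celsius is an illustrative type encoded as a string with a unit suffix.
type Celsius float64

type celsiusEncoder struct{}

// Encode writes the value the pointer refers to onto the stream.
func (e *celsiusEncoder) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	stream.WriteString(fmt.Sprintf("%.1f C", *(*Celsius)(ptr)))
}

// IsEmpty controls omitempty handling for struct fields of this type.
func (e *celsiusEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	return *(*Celsius)(ptr) == 0
}

func main() {
	// Register before the type's encoder is first resolved and cached.
	jsoniter.RegisterTypeEncoder("main.Celsius", &celsiusEncoder{})

	out, _ := jsoniter.Marshal(struct{ T Celsius }{T: 21.5})
	fmt.Println(string(out)) // {"T":"21.5 C"}
}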
-type ValEncoder interface { - IsEmpty(ptr unsafe.Pointer) bool - Encode(ptr unsafe.Pointer, stream *Stream) - EncodeInterface(val interface{}, stream *Stream) -} - -type checkIsEmpty interface { - IsEmpty(ptr unsafe.Pointer) bool -} - -// WriteToStream the default implementation for TypeEncoder method EncodeInterface -func WriteToStream(val interface{}, stream *Stream, encoder ValEncoder) { - e := (*emptyInterface)(unsafe.Pointer(&val)) - if e.word == nil { - stream.WriteNil() - return - } - if reflect.TypeOf(val).Kind() == reflect.Ptr { - encoder.Encode(unsafe.Pointer(&e.word), stream) - } else { - encoder.Encode(e.word, stream) - } -} - -var jsonNumberType reflect.Type -var jsoniterNumberType reflect.Type -var jsonRawMessageType reflect.Type -var jsoniterRawMessageType reflect.Type -var anyType reflect.Type -var marshalerType reflect.Type -var unmarshalerType reflect.Type -var textMarshalerType reflect.Type -var textUnmarshalerType reflect.Type - -func init() { - jsonNumberType = reflect.TypeOf((*json.Number)(nil)).Elem() - jsoniterNumberType = reflect.TypeOf((*Number)(nil)).Elem() - jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem() - jsoniterRawMessageType = reflect.TypeOf((*RawMessage)(nil)).Elem() - anyType = reflect.TypeOf((*Any)(nil)).Elem() - marshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() - textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() -} - -// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal -func (iter *Iterator) ReadVal(obj interface{}) { - typ := reflect.TypeOf(obj) - cacheKey := typ.Elem() - decoder := decoderOfType(iter.cfg, "", cacheKey) - e := (*emptyInterface)(unsafe.Pointer(&obj)) - if e.word == nil { - iter.ReportError("ReadVal", "can not read into nil pointer") - return - } - decoder.Decode(e.word, iter) -} - -// WriteVal copy the go interface into underlying JSON, same as json.Marshal -func (stream *Stream) WriteVal(val interface{}) { - if nil == val { - stream.WriteNil() - return - } - typ := reflect.TypeOf(val) - cacheKey := typ - encoder := encoderOfType(stream.cfg, "", cacheKey) - encoder.EncodeInterface(val, stream) -} - -func decoderOfType(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { - cacheKey := typ - decoder := cfg.getDecoderFromCache(cacheKey) - if decoder != nil { - return decoder - } - decoder = getTypeDecoderFromExtension(cfg, typ) - if decoder != nil { - cfg.addDecoderToCache(cacheKey, decoder) - return decoder - } - decoder = &placeholderDecoder{cfg: cfg, cacheKey: cacheKey} - cfg.addDecoderToCache(cacheKey, decoder) - decoder = createDecoderOfType(cfg, prefix, typ) - for _, extension := range extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - for _, extension := range cfg.extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - cfg.addDecoderToCache(cacheKey, decoder) - return decoder -} - -func createDecoderOfType(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { - typeName := typ.String() - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{} - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{} - } - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{} - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{} - } - if typ.Implements(unmarshalerType) { - templateInterface := 
reflect.New(typ).Elem().Interface() - var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} - if typ.Kind() == reflect.Ptr { - decoder = &OptionalDecoder{typ.Elem(), decoder} - } - return decoder - } - if reflect.PtrTo(typ).Implements(unmarshalerType) { - templateInterface := reflect.New(typ).Interface() - var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} - return decoder - } - if typ.Implements(textUnmarshalerType) { - templateInterface := reflect.New(typ).Elem().Interface() - var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} - if typ.Kind() == reflect.Ptr { - decoder = &OptionalDecoder{typ.Elem(), decoder} - } - return decoder - } - if reflect.PtrTo(typ).Implements(textUnmarshalerType) { - templateInterface := reflect.New(typ).Interface() - var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} - return decoder - } - if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { - sliceDecoder := decoderOfSlice(cfg, prefix, typ) - return &base64Codec{sliceDecoder: sliceDecoder} - } - if typ.Implements(anyType) { - return &anyCodec{} - } - switch typ.Kind() { - case reflect.String: - if typeName != "string" { - return decoderOfType(cfg, prefix, reflect.TypeOf((*string)(nil)).Elem()) - } - return &stringCodec{} - case reflect.Int: - if typeName != "int" { - return decoderOfType(cfg, prefix, reflect.TypeOf((*int)(nil)).Elem()) - } - return &intCodec{} - case reflect.Int8: - if typeName != "int8" { - return decoderOfType(cfg, prefix, reflect.TypeOf((*int8)(nil)).Elem()) - } - return &int8Codec{} - case reflect.Int16: - if typeName != "int16" { - return decoderOfType(cfg, prefix, reflect.TypeOf((*int16)(nil)).Elem()) - } - return &int16Codec{} - case reflect.Int32: - if typeName != "int32" { - return decoderOfType(cfg, prefix, reflect.TypeOf((*int32)(nil)).Elem()) - } - return &int32Codec{} - case reflect.Int64: - if typeName != "int64" { - return decoderOfType(cfg, prefix, reflect.TypeOf((*int64)(nil)).Elem()) - } - return &int64Codec{} - case reflect.Uint: - if typeName != "uint" { - return decoderOfType(cfg, prefix, reflect.TypeOf((*uint)(nil)).Elem()) - } - return &uintCodec{} - case reflect.Uint8: - if typeName != "uint8" { - return decoderOfType(cfg, prefix, reflect.TypeOf((*uint8)(nil)).Elem()) - } - return &uint8Codec{} - case reflect.Uint16: - if typeName != "uint16" { - return decoderOfType(cfg, prefix, reflect.TypeOf((*uint16)(nil)).Elem()) - } - return &uint16Codec{} - case reflect.Uint32: - if typeName != "uint32" { - return decoderOfType(cfg, prefix, reflect.TypeOf((*uint32)(nil)).Elem()) - } - return &uint32Codec{} - case reflect.Uintptr: - if typeName != "uintptr" { - return decoderOfType(cfg, prefix, reflect.TypeOf((*uintptr)(nil)).Elem()) - } - return &uintptrCodec{} - case reflect.Uint64: - if typeName != "uint64" { - return decoderOfType(cfg, prefix, reflect.TypeOf((*uint64)(nil)).Elem()) - } - return &uint64Codec{} - case reflect.Float32: - if typeName != "float32" { - return decoderOfType(cfg, prefix, reflect.TypeOf((*float32)(nil)).Elem()) - } - return &float32Codec{} - case reflect.Float64: - if typeName != "float64" { - return decoderOfType(cfg, prefix, reflect.TypeOf((*float64)(nil)).Elem()) - } - return &float64Codec{} - case reflect.Bool: - if typeName != "bool" { - return decoderOfType(cfg, prefix, reflect.TypeOf((*bool)(nil)).Elem()) - } - return &boolCodec{} - case reflect.Interface: - if typ.NumMethod() == 0 { - return 
&emptyInterfaceCodec{} - } - return &nonEmptyInterfaceCodec{} - case reflect.Struct: - return decoderOfStruct(cfg, prefix, typ) - case reflect.Array: - return decoderOfArray(cfg, prefix, typ) - case reflect.Slice: - return decoderOfSlice(cfg, prefix, typ) - case reflect.Map: - return decoderOfMap(cfg, prefix, typ) - case reflect.Ptr: - return decoderOfOptional(cfg, prefix, typ) - default: - return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", prefix, typ.String())} - } -} - -func encoderOfType(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { - cacheKey := typ - encoder := cfg.getEncoderFromCache(cacheKey) - if encoder != nil { - return encoder - } - encoder = getTypeEncoderFromExtension(cfg, typ) - if encoder != nil { - cfg.addEncoderToCache(cacheKey, encoder) - return encoder - } - encoder = &placeholderEncoder{cfg: cfg, cacheKey: cacheKey} - cfg.addEncoderToCache(cacheKey, encoder) - encoder = createEncoderOfType(cfg, prefix, typ) - for _, extension := range extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - for _, extension := range cfg.extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - cfg.addEncoderToCache(cacheKey, encoder) - return encoder -} - -func createEncoderOfType(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{} - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{} - } - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{} - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{} - } - if typ.Implements(marshalerType) { - checkIsEmpty := createCheckIsEmpty(cfg, typ) - templateInterface := reflect.New(typ).Elem().Interface() - var encoder ValEncoder = &marshalerEncoder{ - templateInterface: extractInterface(templateInterface), - checkIsEmpty: checkIsEmpty, - } - if typ.Kind() == reflect.Ptr { - encoder = &OptionalEncoder{encoder} - } - return encoder - } - if reflect.PtrTo(typ).Implements(marshalerType) { - checkIsEmpty := createCheckIsEmpty(cfg, reflect.PtrTo(typ)) - templateInterface := reflect.New(typ).Interface() - var encoder ValEncoder = &marshalerEncoder{ - templateInterface: extractInterface(templateInterface), - checkIsEmpty: checkIsEmpty, - } - return encoder - } - if typ.Implements(textMarshalerType) { - checkIsEmpty := createCheckIsEmpty(cfg, typ) - templateInterface := reflect.New(typ).Elem().Interface() - var encoder ValEncoder = &textMarshalerEncoder{ - templateInterface: extractInterface(templateInterface), - checkIsEmpty: checkIsEmpty, - } - if typ.Kind() == reflect.Ptr { - encoder = &OptionalEncoder{encoder} - } - return encoder - } - if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { - return &base64Codec{} - } - if typ.Implements(anyType) { - return &anyCodec{} - } - return createEncoderOfSimpleType(cfg, prefix, typ) -} - -func createCheckIsEmpty(cfg *frozenConfig, typ reflect.Type) checkIsEmpty { - kind := typ.Kind() - switch kind { - case reflect.String: - return &stringCodec{} - case reflect.Int: - return &intCodec{} - case reflect.Int8: - return &int8Codec{} - case reflect.Int16: - return &int16Codec{} - case reflect.Int32: - return &int32Codec{} - case reflect.Int64: - return &int64Codec{} - case reflect.Uint: - return &uintCodec{} - case reflect.Uint8: - return &uint8Codec{} - case reflect.Uint16: - return &uint16Codec{} - case reflect.Uint32: - return &uint32Codec{} - case reflect.Uintptr: - return &uintptrCodec{} - case 
reflect.Uint64: - return &uint64Codec{} - case reflect.Float32: - return &float32Codec{} - case reflect.Float64: - return &float64Codec{} - case reflect.Bool: - return &boolCodec{} - case reflect.Interface: - if typ.NumMethod() == 0 { - return &emptyInterfaceCodec{} - } - return &nonEmptyInterfaceCodec{} - case reflect.Struct: - return &structEncoder{typ: typ} - case reflect.Array: - return &arrayEncoder{} - case reflect.Slice: - return &sliceEncoder{} - case reflect.Map: - return encoderOfMap(cfg, "", typ) - case reflect.Ptr: - return &OptionalEncoder{} - default: - return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} - } -} - -func createEncoderOfSimpleType(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { - typeName := typ.String() - kind := typ.Kind() - switch kind { - case reflect.String: - if typeName != "string" { - return encoderOfType(cfg, prefix, reflect.TypeOf((*string)(nil)).Elem()) - } - return &stringCodec{} - case reflect.Int: - if typeName != "int" { - return encoderOfType(cfg, prefix, reflect.TypeOf((*int)(nil)).Elem()) - } - return &intCodec{} - case reflect.Int8: - if typeName != "int8" { - return encoderOfType(cfg, prefix, reflect.TypeOf((*int8)(nil)).Elem()) - } - return &int8Codec{} - case reflect.Int16: - if typeName != "int16" { - return encoderOfType(cfg, prefix, reflect.TypeOf((*int16)(nil)).Elem()) - } - return &int16Codec{} - case reflect.Int32: - if typeName != "int32" { - return encoderOfType(cfg, prefix, reflect.TypeOf((*int32)(nil)).Elem()) - } - return &int32Codec{} - case reflect.Int64: - if typeName != "int64" { - return encoderOfType(cfg, prefix, reflect.TypeOf((*int64)(nil)).Elem()) - } - return &int64Codec{} - case reflect.Uint: - if typeName != "uint" { - return encoderOfType(cfg, prefix, reflect.TypeOf((*uint)(nil)).Elem()) - } - return &uintCodec{} - case reflect.Uint8: - if typeName != "uint8" { - return encoderOfType(cfg, prefix, reflect.TypeOf((*uint8)(nil)).Elem()) - } - return &uint8Codec{} - case reflect.Uint16: - if typeName != "uint16" { - return encoderOfType(cfg, prefix, reflect.TypeOf((*uint16)(nil)).Elem()) - } - return &uint16Codec{} - case reflect.Uint32: - if typeName != "uint32" { - return encoderOfType(cfg, prefix, reflect.TypeOf((*uint32)(nil)).Elem()) - } - return &uint32Codec{} - case reflect.Uintptr: - if typeName != "uintptr" { - return encoderOfType(cfg, prefix, reflect.TypeOf((*uintptr)(nil)).Elem()) - } - return &uintptrCodec{} - case reflect.Uint64: - if typeName != "uint64" { - return encoderOfType(cfg, prefix, reflect.TypeOf((*uint64)(nil)).Elem()) - } - return &uint64Codec{} - case reflect.Float32: - if typeName != "float32" { - return encoderOfType(cfg, prefix, reflect.TypeOf((*float32)(nil)).Elem()) - } - return &float32Codec{} - case reflect.Float64: - if typeName != "float64" { - return encoderOfType(cfg, prefix, reflect.TypeOf((*float64)(nil)).Elem()) - } - return &float64Codec{} - case reflect.Bool: - if typeName != "bool" { - return encoderOfType(cfg, prefix, reflect.TypeOf((*bool)(nil)).Elem()) - } - return &boolCodec{} - case reflect.Interface: - if typ.NumMethod() == 0 { - return &emptyInterfaceCodec{} - } - return &nonEmptyInterfaceCodec{} - case reflect.Struct: - return encoderOfStruct(cfg, prefix, typ) - case reflect.Array: - return encoderOfArray(cfg, prefix, typ) - case reflect.Slice: - return encoderOfSlice(cfg, prefix, typ) - case reflect.Map: - return encoderOfMap(cfg, prefix, typ) - case reflect.Ptr: - return encoderOfOptional(cfg, prefix, typ) - default: - return 
&lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", prefix, typ.String())} - } -} - -type placeholderEncoder struct { - cfg *frozenConfig - cacheKey reflect.Type -} - -func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.getRealEncoder().Encode(ptr, stream) -} - -func (encoder *placeholderEncoder) EncodeInterface(val interface{}, stream *Stream) { - encoder.getRealEncoder().EncodeInterface(val, stream) -} - -func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.getRealEncoder().IsEmpty(ptr) -} - -func (encoder *placeholderEncoder) getRealEncoder() ValEncoder { - for i := 0; i < 500; i++ { - realDecoder := encoder.cfg.getEncoderFromCache(encoder.cacheKey) - _, isPlaceholder := realDecoder.(*placeholderEncoder) - if isPlaceholder { - time.Sleep(10 * time.Millisecond) - } else { - return realDecoder - } - } - panic(fmt.Sprintf("real encoder not found for cache key: %v", encoder.cacheKey)) -} - -type placeholderDecoder struct { - cfg *frozenConfig - cacheKey reflect.Type -} - -func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - for i := 0; i < 500; i++ { - realDecoder := decoder.cfg.getDecoderFromCache(decoder.cacheKey) - _, isPlaceholder := realDecoder.(*placeholderDecoder) - if isPlaceholder { - time.Sleep(10 * time.Millisecond) - } else { - realDecoder.Decode(ptr, iter) - return - } - } - panic(fmt.Sprintf("real decoder not found for cache key: %v", decoder.cacheKey)) -} - -type lazyErrorDecoder struct { - err error -} - -func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.WhatIsNext() != NilValue { - if iter.Error == nil { - iter.Error = decoder.err - } - } else { - iter.Skip() - } -} - -type lazyErrorEncoder struct { - err error -} - -func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if ptr == nil { - stream.WriteNil() - } else if stream.Error == nil { - stream.Error = encoder.err - } -} - -func (encoder *lazyErrorEncoder) EncodeInterface(val interface{}, stream *Stream) { - if val == nil { - stream.WriteNil() - } else if stream.Error == nil { - stream.Error = encoder.err - } -} - -func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -func extractInterface(val interface{}) emptyInterface { - return *((*emptyInterface)(unsafe.Pointer(&val))) -} - -// emptyInterface is the header for an interface{} value. 
-type emptyInterface struct { - typ unsafe.Pointer - word unsafe.Pointer -} - -// emptyInterface is the header for an interface with method (not interface{}) -type nonEmptyInterface struct { - // see ../runtime/iface.go:/Itab - itab *struct { - ityp unsafe.Pointer // static interface type - typ unsafe.Pointer // dynamic concrete type - link unsafe.Pointer - bad int32 - unused int32 - fun [100000]unsafe.Pointer // method table - } - word unsafe.Pointer -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_array.go b/vendor/github.com/json-iterator/go/feature_reflect_array.go deleted file mode 100644 index a6dd91cab..000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_array.go +++ /dev/null @@ -1,110 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "unsafe" -) - -func decoderOfArray(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { - decoder := decoderOfType(cfg, prefix+"[array]->", typ.Elem()) - return &arrayDecoder{typ, typ.Elem(), decoder} -} - -func encoderOfArray(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { - if typ.Len() == 0 { - return emptyArrayEncoder{} - } - encoder := encoderOfType(cfg, prefix+"[array]->", typ.Elem()) - if typ.Elem().Kind() == reflect.Map { - encoder = &OptionalEncoder{encoder} - } - return &arrayEncoder{typ, typ.Elem(), encoder} -} - -type emptyArrayEncoder struct{} - -func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteEmptyArray() -} - -func (encoder emptyArrayEncoder) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteEmptyArray() -} - -func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return true -} - -type arrayEncoder struct { - arrayType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder -} - -func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteArrayStart() - elemPtr := unsafe.Pointer(ptr) - encoder.elemEncoder.Encode(elemPtr, stream) - for i := 1; i < encoder.arrayType.Len(); i++ { - stream.WriteMore() - elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) - encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) - } -} - -func (encoder *arrayEncoder) EncodeInterface(val interface{}, stream *Stream) { - // special optimization for interface{} - e := (*emptyInterface)(unsafe.Pointer(&val)) - if e.word == nil { - stream.WriteArrayStart() - stream.WriteNil() - stream.WriteArrayEnd() - return - } - elemType := encoder.arrayType.Elem() - if encoder.arrayType.Len() == 1 && (elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map) { - ptr := uintptr(e.word) - e.word = unsafe.Pointer(&ptr) - } - if reflect.TypeOf(val).Kind() == reflect.Ptr { - encoder.Encode(unsafe.Pointer(&e.word), stream) - } else { - encoder.Encode(e.word, stream) - } -} - -func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type arrayDecoder struct { - arrayType reflect.Type - elemType reflect.Type - elemDecoder ValDecoder -} - -func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) - } -} - -func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - offset := uintptr(0) - 
iter.ReadArrayCB(func(iter *Iterator) bool { - if offset < decoder.arrayType.Size() { - decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(ptr)+offset), iter) - offset += decoder.elemType.Size() - } else { - iter.Skip() - } - return true - }) -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_map.go b/vendor/github.com/json-iterator/go/feature_reflect_map.go deleted file mode 100644 index f537d026a..000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_map.go +++ /dev/null @@ -1,260 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/json" - "reflect" - "sort" - "strconv" - "unsafe" -) - -func decoderOfMap(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { - decoder := decoderOfType(cfg, prefix+"[map]->", typ.Elem()) - mapInterface := reflect.New(typ).Interface() - return &mapDecoder{typ, typ.Key(), typ.Elem(), decoder, extractInterface(mapInterface)} -} - -func encoderOfMap(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { - elemType := typ.Elem() - encoder := encoderOfType(cfg, prefix+"[map]->", elemType) - mapInterface := reflect.New(typ).Elem().Interface() - if cfg.sortMapKeys { - return &sortKeysMapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))} - } - return &mapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))} -} - -type mapDecoder struct { - mapType reflect.Type - keyType reflect.Type - elemType reflect.Type - elemDecoder ValDecoder - mapInterface emptyInterface -} - -func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - // dark magic to cast unsafe.Pointer back to interface{} using reflect.Type - mapInterface := decoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface).Elem() - if iter.ReadNil() { - realVal.Set(reflect.Zero(decoder.mapType)) - return - } - if realVal.IsNil() { - realVal.Set(reflect.MakeMap(realVal.Type())) - } - iter.ReadMapCB(func(iter *Iterator, keyStr string) bool { - elem := reflect.New(decoder.elemType) - decoder.elemDecoder.Decode(extractInterface(elem.Interface()).word, iter) - // to put into map, we have to use reflection - keyType := decoder.keyType - // TODO: remove this from loop - switch { - case keyType.Kind() == reflect.String: - realVal.SetMapIndex(reflect.ValueOf(keyStr).Convert(keyType), elem.Elem()) - return true - case keyType.Implements(textUnmarshalerType): - textUnmarshaler := reflect.New(keyType.Elem()).Interface().(encoding.TextUnmarshaler) - err := textUnmarshaler.UnmarshalText([]byte(keyStr)) - if err != nil { - iter.ReportError("read map key as TextUnmarshaler", err.Error()) - return false - } - realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler), elem.Elem()) - return true - case reflect.PtrTo(keyType).Implements(textUnmarshalerType): - textUnmarshaler := reflect.New(keyType).Interface().(encoding.TextUnmarshaler) - err := textUnmarshaler.UnmarshalText([]byte(keyStr)) - if err != nil { - iter.ReportError("read map key as TextUnmarshaler", err.Error()) - return false - } - realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler).Elem(), elem.Elem()) - return true - default: - switch keyType.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(keyStr, 10, 64) - if err != nil || reflect.Zero(keyType).OverflowInt(n) { - iter.ReportError("read map key as int64", "read int64 failed") - return false - } - 
realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(keyStr, 10, 64) - if err != nil || reflect.Zero(keyType).OverflowUint(n) { - iter.ReportError("read map key as uint64", "read uint64 failed") - return false - } - realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) - return true - } - } - iter.ReportError("read map key", "unexpected map key type "+keyType.String()) - return true - }) -} - -type mapEncoder struct { - mapType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder - mapInterface emptyInterface -} - -func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - stream.WriteObjectStart() - for i, key := range realVal.MapKeys() { - if i != 0 { - stream.WriteMore() - } - encodeMapKey(key, stream) - if stream.indention > 0 { - stream.writeTwoBytes(byte(':'), byte(' ')) - } else { - stream.writeByte(':') - } - val := realVal.MapIndex(key).Interface() - encoder.elemEncoder.EncodeInterface(val, stream) - } - stream.WriteObjectEnd() -} - -func encodeMapKey(key reflect.Value, stream *Stream) { - if key.Kind() == reflect.String { - stream.WriteString(key.String()) - return - } - if tm, ok := key.Interface().(encoding.TextMarshaler); ok { - buf, err := tm.MarshalText() - if err != nil { - stream.Error = err - return - } - stream.writeByte('"') - stream.Write(buf) - stream.writeByte('"') - return - } - switch key.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - stream.writeByte('"') - stream.WriteInt64(key.Int()) - stream.writeByte('"') - return - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - stream.writeByte('"') - stream.WriteUint64(key.Uint()) - stream.writeByte('"') - return - } - stream.Error = &json.UnsupportedTypeError{Type: key.Type()} -} - -func (encoder *mapEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - return realVal.Len() == 0 -} - -type sortKeysMapEncoder struct { - mapType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder - mapInterface emptyInterface -} - -func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - - // Extract and sort the keys. 
- keys := realVal.MapKeys() - sv := stringValues(make([]reflectWithString, len(keys))) - for i, v := range keys { - sv[i].v = v - if err := sv[i].resolve(); err != nil { - stream.Error = err - return - } - } - sort.Sort(sv) - - stream.WriteObjectStart() - for i, key := range sv { - if i != 0 { - stream.WriteMore() - } - stream.WriteVal(key.s) // might need html escape, so can not WriteString directly - if stream.indention > 0 { - stream.writeTwoBytes(byte(':'), byte(' ')) - } else { - stream.writeByte(':') - } - val := realVal.MapIndex(key.v).Interface() - encoder.elemEncoder.EncodeInterface(val, stream) - } - stream.WriteObjectEnd() -} - -// stringValues is a slice of reflect.Value holding *reflect.StringValue. -// It implements the methods to sort by string. -type stringValues []reflectWithString - -type reflectWithString struct { - v reflect.Value - s string -} - -func (w *reflectWithString) resolve() error { - if w.v.Kind() == reflect.String { - w.s = w.v.String() - return nil - } - if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { - buf, err := tm.MarshalText() - w.s = string(buf) - return err - } - switch w.v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - w.s = strconv.FormatInt(w.v.Int(), 10) - return nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - w.s = strconv.FormatUint(w.v.Uint(), 10) - return nil - } - return &json.UnsupportedTypeError{Type: w.v.Type()} -} - -func (sv stringValues) Len() int { return len(sv) } -func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv stringValues) Less(i, j int) bool { return sv[i].s < sv[j].s } - -func (encoder *sortKeysMapEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - return realVal.Len() == 0 -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_native.go b/vendor/github.com/json-iterator/go/feature_reflect_native.go deleted file mode 100644 index e4e956aa2..000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_native.go +++ /dev/null @@ -1,789 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/base64" - "encoding/json" - "reflect" - "unsafe" -) - -type stringCodec struct { -} - -func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*string)(ptr)) = iter.ReadString() -} - -func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - str := *((*string)(ptr)) - stream.WriteString(str) -} - -func (codec *stringCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*string)(ptr)) == "" -} - -type intCodec struct { -} - -func (codec *intCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int)(ptr)) = iter.ReadInt() - } -} - -func (codec *intCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt(*((*int)(ptr))) -} - -func (codec *intCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *intCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int)(ptr)) == 0 -} - -type uintptrCodec struct { -} - -func (codec *uintptrCodec) 
Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uintptr)(ptr)) = uintptr(iter.ReadUint64()) - } -} - -func (codec *uintptrCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint64(uint64(*((*uintptr)(ptr)))) -} - -func (codec *uintptrCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uintptrCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uintptr)(ptr)) == 0 -} - -type int8Codec struct { -} - -func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int8)(ptr)) = iter.ReadInt8() - } -} - -func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt8(*((*int8)(ptr))) -} - -func (codec *int8Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int8)(ptr)) == 0 -} - -type int16Codec struct { -} - -func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int16)(ptr)) = iter.ReadInt16() - } -} - -func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt16(*((*int16)(ptr))) -} - -func (codec *int16Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int16)(ptr)) == 0 -} - -type int32Codec struct { -} - -func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int32)(ptr)) = iter.ReadInt32() - } -} - -func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt32(*((*int32)(ptr))) -} - -func (codec *int32Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int32)(ptr)) == 0 -} - -type int64Codec struct { -} - -func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int64)(ptr)) = iter.ReadInt64() - } -} - -func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt64(*((*int64)(ptr))) -} - -func (codec *int64Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int64)(ptr)) == 0 -} - -type uintCodec struct { -} - -func (codec *uintCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint)(ptr)) = iter.ReadUint() - return - } -} - -func (codec *uintCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint(*((*uint)(ptr))) -} - -func (codec *uintCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uintCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint)(ptr)) == 0 -} - -type uint8Codec struct { -} - -func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint8)(ptr)) = iter.ReadUint8() - } -} - -func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint8(*((*uint8)(ptr))) -} - -func (codec *uint8Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint8)(ptr)) == 0 -} - -type uint16Codec struct { -} - -func (codec *uint16Codec) Decode(ptr 
unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint16)(ptr)) = iter.ReadUint16() - } -} - -func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint16(*((*uint16)(ptr))) -} - -func (codec *uint16Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint16)(ptr)) == 0 -} - -type uint32Codec struct { -} - -func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint32)(ptr)) = iter.ReadUint32() - } -} - -func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint32(*((*uint32)(ptr))) -} - -func (codec *uint32Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint32)(ptr)) == 0 -} - -type uint64Codec struct { -} - -func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint64)(ptr)) = iter.ReadUint64() - } -} - -func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint64(*((*uint64)(ptr))) -} - -func (codec *uint64Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint64)(ptr)) == 0 -} - -type float32Codec struct { -} - -func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float32)(ptr)) = iter.ReadFloat32() - } -} - -func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat32(*((*float32)(ptr))) -} - -func (codec *float32Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float32)(ptr)) == 0 -} - -type float64Codec struct { -} - -func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float64)(ptr)) = iter.ReadFloat64() - } -} - -func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat64(*((*float64)(ptr))) -} - -func (codec *float64Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float64)(ptr)) == 0 -} - -type boolCodec struct { -} - -func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*bool)(ptr)) = iter.ReadBool() - } -} - -func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteBool(*((*bool)(ptr))) -} - -func (codec *boolCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { - return !(*((*bool)(ptr))) -} - -type emptyInterfaceCodec struct { -} - -func (codec *emptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - existing := *((*interface{})(ptr)) - - // Checking for both typed and untyped nil pointers. 
- if existing != nil && - reflect.TypeOf(existing).Kind() == reflect.Ptr && - !reflect.ValueOf(existing).IsNil() { - - var ptrToExisting interface{} - for { - elem := reflect.ValueOf(existing).Elem() - if elem.Kind() != reflect.Ptr || elem.IsNil() { - break - } - ptrToExisting = existing - existing = elem.Interface() - } - - if iter.ReadNil() { - if ptrToExisting != nil { - nilPtr := reflect.Zero(reflect.TypeOf(ptrToExisting).Elem()) - reflect.ValueOf(ptrToExisting).Elem().Set(nilPtr) - } else { - *((*interface{})(ptr)) = nil - } - } else { - iter.ReadVal(existing) - } - - return - } - - if iter.ReadNil() { - *((*interface{})(ptr)) = nil - } else { - *((*interface{})(ptr)) = iter.Read() - } -} - -func (codec *emptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteVal(*((*interface{})(ptr))) -} - -func (codec *emptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteVal(val) -} - -func (codec *emptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { - emptyInterface := (*emptyInterface)(ptr) - return emptyInterface.typ == nil -} - -type nonEmptyInterfaceCodec struct { -} - -func (codec *nonEmptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.WhatIsNext() == NilValue { - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*interface{})(ptr)) = nil - return - } - nonEmptyInterface := (*nonEmptyInterface)(ptr) - if nonEmptyInterface.itab == nil { - iter.ReportError("read non-empty interface", "do not know which concrete type to decode to") - return - } - var i interface{} - e := (*emptyInterface)(unsafe.Pointer(&i)) - e.typ = nonEmptyInterface.itab.typ - e.word = nonEmptyInterface.word - iter.ReadVal(&i) - if e.word == nil { - nonEmptyInterface.itab = nil - } - nonEmptyInterface.word = e.word -} - -func (codec *nonEmptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - nonEmptyInterface := (*nonEmptyInterface)(ptr) - var i interface{} - if nonEmptyInterface.itab != nil { - e := (*emptyInterface)(unsafe.Pointer(&i)) - e.typ = nonEmptyInterface.itab.typ - e.word = nonEmptyInterface.word - } - stream.WriteVal(i) -} - -func (codec *nonEmptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteVal(val) -} - -func (codec *nonEmptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { - nonEmptyInterface := (*nonEmptyInterface)(ptr) - return nonEmptyInterface.word == nil -} - -type anyCodec struct { -} - -func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*Any)(ptr)) = iter.ReadAny() -} - -func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - (*((*Any)(ptr))).WriteTo(stream) -} - -func (codec *anyCodec) EncodeInterface(val interface{}, stream *Stream) { - (val.(Any)).WriteTo(stream) -} - -func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { - return (*((*Any)(ptr))).Size() == 0 -} - -type jsonNumberCodec struct { -} - -func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*json.Number)(ptr)) = json.Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*json.Number)(ptr)) = "" - default: - *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - number := *((*json.Number)(ptr)) - if len(number) == 0 { - stream.WriteRaw("0") - } else { - stream.WriteRaw(string(number)) - } -} - -func (codec *jsonNumberCodec) EncodeInterface(val interface{}, stream 
*Stream) { - number := val.(json.Number) - if len(number) == 0 { - stream.WriteRaw("0") - } else { - stream.WriteRaw(string(number)) - } -} - -func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.Number)(ptr))) == 0 -} - -type jsoniterNumberCodec struct { -} - -func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*Number)(ptr)) = Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*Number)(ptr)) = "" - default: - *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - number := *((*Number)(ptr)) - if len(number) == 0 { - stream.WriteRaw("0") - } else { - stream.WriteRaw(string(number)) - } -} - -func (codec *jsoniterNumberCodec) EncodeInterface(val interface{}, stream *Stream) { - number := val.(Number) - if len(number) == 0 { - stream.WriteRaw("0") - } else { - stream.WriteRaw(string(number)) - } -} - -func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*Number)(ptr))) == 0 -} - -type jsonRawMessageCodec struct { -} - -func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) -} - -func (codec *jsonRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(json.RawMessage))) -} - -func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.RawMessage)(ptr))) == 0 -} - -type jsoniterRawMessageCodec struct { -} - -func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*RawMessage)(ptr)))) -} - -func (codec *jsoniterRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(RawMessage))) -} - -func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*RawMessage)(ptr))) == 0 -} - -type base64Codec struct { - sliceDecoder ValDecoder -} - -func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - ptrSlice := (*sliceHeader)(ptr) - ptrSlice.Len = 0 - ptrSlice.Cap = 0 - ptrSlice.Data = nil - return - } - switch iter.WhatIsNext() { - case StringValue: - encoding := base64.StdEncoding - src := iter.SkipAndReturnBytes() - src = src[1 : len(src)-1] - decodedLen := encoding.DecodedLen(len(src)) - dst := make([]byte, decodedLen) - len, err := encoding.Decode(dst, src) - if err != nil { - iter.ReportError("decode base64", err.Error()) - } else { - dst = dst[:len] - dstSlice := (*sliceHeader)(unsafe.Pointer(&dst)) - ptrSlice := (*sliceHeader)(ptr) - ptrSlice.Data = dstSlice.Data - ptrSlice.Cap = dstSlice.Cap - ptrSlice.Len = dstSlice.Len - } - case ArrayValue: - codec.sliceDecoder.Decode(ptr, iter) - default: - iter.ReportError("base64Codec", "invalid input") - } -} - -func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - src := *((*[]byte)(ptr)) - if len(src) == 0 { - stream.WriteNil() - return - } - encoding := base64.StdEncoding - stream.writeByte('"') - size := encoding.EncodedLen(len(src)) - buf 
:= make([]byte, size) - encoding.Encode(buf, src) - stream.buf = append(stream.buf, buf...) - stream.writeByte('"') -} - -func (codec *base64Codec) EncodeInterface(val interface{}, stream *Stream) { - ptr := extractInterface(val).word - src := *((*[]byte)(ptr)) - if len(src) == 0 { - stream.WriteNil() - return - } - encoding := base64.StdEncoding - stream.writeByte('"') - size := encoding.EncodedLen(len(src)) - buf := make([]byte, size) - encoding.Encode(buf, src) - stream.buf = append(stream.buf, buf...) - stream.writeByte('"') -} - -func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*[]byte)(ptr))) == 0 -} - -type stringModeNumberDecoder struct { - elemDecoder ValDecoder -} - -func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } - decoder.elemDecoder.Decode(ptr, iter) - if iter.Error != nil { - return - } - c = iter.readByte() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } -} - -type stringModeStringDecoder struct { - elemDecoder ValDecoder - cfg *frozenConfig -} - -func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.elemDecoder.Decode(ptr, iter) - str := *((*string)(ptr)) - tempIter := decoder.cfg.BorrowIterator([]byte(str)) - defer decoder.cfg.ReturnIterator(tempIter) - *((*string)(ptr)) = tempIter.ReadString() -} - -type stringModeNumberEncoder struct { - elemEncoder ValEncoder -} - -func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.writeByte('"') - encoder.elemEncoder.Encode(ptr, stream) - stream.writeByte('"') -} - -func (encoder *stringModeNumberEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} - -type stringModeStringEncoder struct { - elemEncoder ValEncoder - cfg *frozenConfig -} - -func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - tempStream := encoder.cfg.BorrowStream(nil) - defer encoder.cfg.ReturnStream(tempStream) - encoder.elemEncoder.Encode(ptr, tempStream) - stream.WriteString(string(tempStream.Buffer())) -} - -func (encoder *stringModeStringEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} - -type marshalerEncoder struct { - templateInterface emptyInterface - checkIsEmpty checkIsEmpty -} - -func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - templateInterface := encoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - marshaler, ok := (*realInterface).(json.Marshaler) - if !ok { - stream.WriteVal(nil) - return - } - - bytes, err := marshaler.MarshalJSON() - if err != nil { - stream.Error = err - } else { - stream.Write(bytes) - } -} -func (encoder *marshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type textMarshalerEncoder struct { - templateInterface emptyInterface - 
checkIsEmpty checkIsEmpty -} - -func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - templateInterface := encoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - marshaler := (*realInterface).(encoding.TextMarshaler) - bytes, err := marshaler.MarshalText() - if err != nil { - stream.Error = err - } else { - stream.WriteString(string(bytes)) - } -} - -func (encoder *textMarshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type unmarshalerDecoder struct { - templateInterface emptyInterface -} - -func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - templateInterface := decoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - unmarshaler := (*realInterface).(json.Unmarshaler) - iter.nextToken() - iter.unreadByte() // skip spaces - bytes := iter.SkipAndReturnBytes() - err := unmarshaler.UnmarshalJSON(bytes) - if err != nil { - iter.ReportError("unmarshalerDecoder", err.Error()) - } -} - -type textUnmarshalerDecoder struct { - templateInterface emptyInterface -} - -func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - templateInterface := decoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - unmarshaler := (*realInterface).(encoding.TextUnmarshaler) - str := iter.ReadString() - err := unmarshaler.UnmarshalText([]byte(str)) - if err != nil { - iter.ReportError("textUnmarshalerDecoder", err.Error()) - } -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_slice.go b/vendor/github.com/json-iterator/go/feature_reflect_slice.go deleted file mode 100644 index 078b3bc07..000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_slice.go +++ /dev/null @@ -1,143 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "unsafe" -) - -func decoderOfSlice(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { - decoder := decoderOfType(cfg, prefix+"[slice]->", typ.Elem()) - return &sliceDecoder{typ, typ.Elem(), decoder} -} - -func encoderOfSlice(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { - encoder := encoderOfType(cfg, prefix+"[slice]->", typ.Elem()) - if typ.Elem().Kind() == reflect.Map { - encoder = &OptionalEncoder{encoder} - } - return &sliceEncoder{typ, typ.Elem(), encoder} -} - -type sliceEncoder struct { - sliceType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder -} - -func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - slice := (*sliceHeader)(ptr) - if slice.Data == nil { - stream.WriteNil() - return - } - if slice.Len == 0 { - stream.WriteEmptyArray() - return - } - stream.WriteArrayStart() - elemPtr := unsafe.Pointer(slice.Data) - encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) - for i := 1; i < slice.Len; i++ { - stream.WriteMore() - elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) - encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) - } -} - -func (encoder *sliceEncoder) EncodeInterface(val interface{}, stream 
*Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - slice := (*sliceHeader)(ptr) - return slice.Len == 0 -} - -type sliceDecoder struct { - sliceType reflect.Type - elemType reflect.Type - elemDecoder ValDecoder -} - -// sliceHeader is a safe version of SliceHeader used within this package. -type sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int -} - -func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) - } -} - -func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - slice := (*sliceHeader)(ptr) - if iter.ReadNil() { - slice.Len = 0 - slice.Cap = 0 - slice.Data = nil - return - } - reuseSlice(slice, decoder.sliceType, 4) - slice.Len = 0 - offset := uintptr(0) - iter.ReadArrayCB(func(iter *Iterator) bool { - growOne(slice, decoder.sliceType, decoder.elemType) - decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(slice.Data)+offset), iter) - offset += decoder.elemType.Size() - return true - }) -} - -// grow grows the slice s so that it can hold extra more values, allocating -// more capacity if needed. It also returns the old and new slice lengths. -func growOne(slice *sliceHeader, sliceType reflect.Type, elementType reflect.Type) { - newLen := slice.Len + 1 - if newLen <= slice.Cap { - slice.Len = newLen - return - } - newCap := slice.Cap - if newCap == 0 { - newCap = 1 - } else { - for newCap < newLen { - if slice.Len < 1024 { - newCap += newCap - } else { - newCap += newCap / 4 - } - } - } - newVal := reflect.MakeSlice(sliceType, newLen, newCap).Interface() - newValPtr := extractInterface(newVal).word - dst := (*sliceHeader)(newValPtr).Data - // copy old array into new array - originalBytesCount := slice.Len * int(elementType.Size()) - srcSliceHeader := (unsafe.Pointer)(&sliceHeader{slice.Data, originalBytesCount, originalBytesCount}) - dstSliceHeader := (unsafe.Pointer)(&sliceHeader{dst, originalBytesCount, originalBytesCount}) - copy(*(*[]byte)(dstSliceHeader), *(*[]byte)(srcSliceHeader)) - slice.Data = dst - slice.Len = newLen - slice.Cap = newCap -} - -func reuseSlice(slice *sliceHeader, sliceType reflect.Type, expectedCap int) { - if expectedCap <= slice.Cap { - return - } - newVal := reflect.MakeSlice(sliceType, 0, expectedCap).Interface() - newValPtr := extractInterface(newVal).word - dst := (*sliceHeader)(newValPtr).Data - slice.Data = dst - slice.Cap = expectedCap -} diff --git a/vendor/github.com/json-iterator/go/feature_iter.go b/vendor/github.com/json-iterator/go/iter.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_iter.go rename to vendor/github.com/json-iterator/go/iter.go diff --git a/vendor/github.com/json-iterator/go/feature_iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_iter_array.go rename to vendor/github.com/json-iterator/go/iter_array.go diff --git a/vendor/github.com/json-iterator/go/feature_iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_iter_float.go rename to vendor/github.com/json-iterator/go/iter_float.go diff --git a/vendor/github.com/json-iterator/go/feature_iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go similarity index 98% rename from 
vendor/github.com/json-iterator/go/feature_iter_int.go rename to vendor/github.com/json-iterator/go/iter_int.go index 4781c6393..214232035 100644 --- a/vendor/github.com/json-iterator/go/feature_iter_int.go +++ b/vendor/github.com/json-iterator/go/iter_int.go @@ -22,11 +22,17 @@ func init() { // ReadUint read uint func (iter *Iterator) ReadUint() uint { + if strconv.IntSize == 32 { + return uint(iter.ReadUint32()) + } return uint(iter.ReadUint64()) } // ReadInt read int func (iter *Iterator) ReadInt() int { + if strconv.IntSize == 32 { + return int(iter.ReadInt32()) + } return int(iter.ReadInt64()) } diff --git a/vendor/github.com/json-iterator/go/feature_iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go similarity index 95% rename from vendor/github.com/json-iterator/go/feature_iter_object.go rename to vendor/github.com/json-iterator/go/iter_object.go index ebd3da895..6e7c370ab 100644 --- a/vendor/github.com/json-iterator/go/feature_iter_object.go +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -60,7 +60,7 @@ func (iter *Iterator) readFieldHash() int64 { if b == '\\' { iter.head = i for _, b := range iter.readStringSlowPath() { - if 'A' <= b && b <= 'Z' { + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { b += 'a' - 'A' } hash ^= int64(b) @@ -82,7 +82,7 @@ func (iter *Iterator) readFieldHash() int64 { } return hash } - if 'A' <= b && b <= 'Z' { + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { b += 'a' - 'A' } hash ^= int64(b) @@ -95,10 +95,14 @@ func (iter *Iterator) readFieldHash() int64 { } } -func calcHash(str string) int64 { +func calcHash(str string, caseSensitive bool) int64 { hash := int64(0x811c9dc5) for _, b := range str { - hash ^= int64(unicode.ToLower(b)) + if caseSensitive { + hash ^= int64(b) + } else { + hash ^= int64(unicode.ToLower(b)) + } hash *= 0x1000193 } return int64(hash) diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_iter_skip.go rename to vendor/github.com/json-iterator/go/iter_skip.go diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go rename to vendor/github.com/json-iterator/go/iter_skip_sloppy.go diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_iter_skip_strict.go rename to vendor/github.com/json-iterator/go/iter_skip_strict.go diff --git a/vendor/github.com/json-iterator/go/feature_iter_string.go b/vendor/github.com/json-iterator/go/iter_str.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_iter_string.go rename to vendor/github.com/json-iterator/go/iter_str.go diff --git a/vendor/github.com/json-iterator/go/feature_pool.go b/vendor/github.com/json-iterator/go/pool.go similarity index 64% rename from vendor/github.com/json-iterator/go/feature_pool.go rename to vendor/github.com/json-iterator/go/pool.go index 52d38e685..e2389b56c 100644 --- a/vendor/github.com/json-iterator/go/feature_pool.go +++ b/vendor/github.com/json-iterator/go/pool.go @@ -17,43 +17,26 @@ type StreamPool interface { } func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { - select { - case stream := <-cfg.streamPool: - 
stream.Reset(writer) - return stream - default: - return NewStream(cfg, writer, 512) - } + stream := cfg.streamPool.Get().(*Stream) + stream.Reset(writer) + return stream } func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.out = nil stream.Error = nil stream.Attachment = nil - select { - case cfg.streamPool <- stream: - return - default: - return - } + cfg.streamPool.Put(stream) } func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { - select { - case iter := <-cfg.iteratorPool: - iter.ResetBytes(data) - return iter - default: - return ParseBytes(cfg, data) - } + iter := cfg.iteratorPool.Get().(*Iterator) + iter.ResetBytes(data) + return iter } func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { iter.Error = nil iter.Attachment = nil - select { - case cfg.iteratorPool <- iter: - return - default: - return - } + cfg.iteratorPool.Put(iter) } diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go new file mode 100644 index 000000000..be7a0e218 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -0,0 +1,330 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
+type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +type ctx struct { + *frozenConfig + prefix string + encoders map[reflect2.Type]ValEncoder + decoders map[reflect2.Type]ValDecoder +} + +func (b *ctx) caseSensitive() bool { + if b.frozenConfig == nil { + // default is case-insensitive + return false + } + return b.frozenConfig.caseSensitive +} + +func (b *ctx) append(prefix string) *ctx { + return &ctx{ + frozenConfig: b.frozenConfig, + prefix: b.prefix + " " + prefix, + encoders: b.encoders, + decoders: b.decoders, + } +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + cacheKey := reflect2.RTypeOf(obj) + decoder := iter.cfg.getDecoderFromCache(cacheKey) + if decoder == nil { + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + iter.ReportError("ReadVal", "can only unmarshal into pointer") + return + } + decoder = iter.cfg.DecoderOf(typ) + } + ptr := reflect2.PtrOf(obj) + if ptr == nil { + iter.ReportError("ReadVal", "can not read into nil pointer") + return + } + decoder.Decode(ptr, iter) +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + cacheKey := reflect2.RTypeOf(val) + encoder := stream.cfg.getEncoderFromCache(cacheKey) + if encoder == nil { + typ := reflect2.TypeOf(val) + encoder = stream.cfg.EncoderOf(typ) + } + encoder.Encode(reflect2.PtrOf(val), stream) +} + +func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { + cacheKey := typ.RType() + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + ptrType := typ.(*reflect2.UnsafePtrType) + decoder = decoderOfType(ctx, ptrType.Elem()) + cfg.addDecoderToCache(cacheKey, decoder) + return decoder +} + +func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfType(ctx, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + for _, extension := range ctx.extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + return decoder +} + +func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoders[typ] + if decoder != nil { + return decoder + } + placeholder := &placeholderDecoder{} + ctx.decoders[typ] = placeholder + decoder = _createDecoderOfType(ctx, typ) + placeholder.decoder = decoder + return decoder +} + +func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := createDecoderOfJsonRawMessage(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfJsonNumber(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfMarshaler(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfAny(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfNative(ctx, typ) + if decoder != nil { + return decoder + } + switch typ.Kind() { + case reflect.Interface: + ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType) + if isIFace { + return &ifaceDecoder{valType: ifaceType} + } + 
return &efaceDecoder{} + case reflect.Struct: + return decoderOfStruct(ctx, typ) + case reflect.Array: + return decoderOfArray(ctx, typ) + case reflect.Slice: + return decoderOfSlice(ctx, typ) + case reflect.Map: + return decoderOfMap(ctx, typ) + case reflect.Ptr: + return decoderOfOptional(ctx, typ) + default: + return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { + cacheKey := typ.RType() + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + encoder = encoderOfType(ctx, typ) + if typ.LikePtr() { + encoder = &onePtrEncoder{encoder} + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder +} + +type onePtrEncoder struct { + encoder ValEncoder +} + +func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfType(ctx, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + for _, extension := range ctx.extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + return encoder +} + +func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoders[typ] + if encoder != nil { + return encoder + } + placeholder := &placeholderEncoder{} + ctx.encoders[typ] = placeholder + encoder = _createEncoderOfType(ctx, typ) + placeholder.encoder = encoder + return encoder +} +func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := createEncoderOfJsonRawMessage(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfJsonNumber(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfMarshaler(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfAny(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return encoderOfStruct(ctx, typ) + case reflect.Array: + return encoderOfArray(ctx, typ) + case reflect.Slice: + return encoderOfSlice(ctx, typ) + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return encoderOfOptional(ctx, typ) + default: + return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +type lazyErrorDecoder struct { + err error +} + +func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() != NilValue { + if iter.Error == nil { + iter.Error = decoder.err + } + } else { + iter.Skip() + } +} + +type lazyErrorEncoder struct { + err error +} + +func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if ptr == nil { + stream.WriteNil() + } else if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type 
placeholderDecoder struct { + decoder ValDecoder +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(ptr, iter) +} + +type placeholderEncoder struct { + encoder ValEncoder +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go new file mode 100644 index 000000000..13a0b7b08 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_array.go @@ -0,0 +1,104 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayDecoder{arrayType, decoder} +} + +func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + if arrayType.Len() == 0 { + return emptyArrayEncoder{} + } + encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayEncoder{arrayType, encoder} +} + +type emptyArrayEncoder struct{} + +func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyArray() +} + +func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return true +} + +type arrayEncoder struct { + arrayType *reflect2.UnsafeArrayType + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType *reflect2.UnsafeArrayType + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + arrayType := decoder.arrayType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return + } + if c != '[' { + iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + return + } + iter.unreadByte() + elemPtr := arrayType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + if length >= arrayType.Len() { + iter.Skip() + continue + } + idx := length + length += 1 + elemPtr = arrayType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go 
b/vendor/github.com/json-iterator/go/reflect_dynamic.go new file mode 100644 index 000000000..8b6bc8b43 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go @@ -0,0 +1,70 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +type dynamicEncoder struct { + valType reflect2.Type +} + +func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + stream.WriteVal(obj) +} + +func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.valType.UnsafeIndirect(ptr) == nil +} + +type efaceDecoder struct { +} + +func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + pObj := (*interface{})(ptr) + obj := *pObj + if obj == nil { + *pObj = iter.Read() + return + } + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + *pObj = iter.Read() + return + } + ptrType := typ.(*reflect2.UnsafePtrType) + ptrElemType := ptrType.Elem() + if iter.WhatIsNext() == NilValue { + if ptrElemType.Kind() != reflect.Ptr { + iter.skipFourBytes('n', 'u', 'l', 'l') + *pObj = nil + return + } + } + if reflect2.IsNil(obj) { + obj := ptrElemType.New() + iter.ReadVal(obj) + *pObj = obj + return + } + iter.ReadVal(obj) +} + +type ifaceDecoder struct { + valType *reflect2.UnsafeIFaceType +} + +func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) + return + } + obj := decoder.valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + iter.ReportError("decode non empty interface", "can not unmarshal into nil") + return + } + iter.ReadVal(obj) +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go similarity index 57% rename from vendor/github.com/json-iterator/go/feature_reflect_extension.go rename to vendor/github.com/json-iterator/go/reflect_extension.go index 66083a521..917bbe84e 100644 --- a/vendor/github.com/json-iterator/go/feature_reflect_extension.go +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -2,6 +2,7 @@ package jsoniter import ( "fmt" + "github.com/modern-go/reflect2" "reflect" "sort" "strings" @@ -17,17 +18,15 @@ var extensions = []Extension{} // StructDescriptor describe how should we encode/decode the struct type StructDescriptor struct { - onePtrEmbedded bool - onePtrOptimization bool - Type reflect.Type - Fields []*Binding + Type reflect2.Type + Fields []*Binding } // GetField get one field from the descriptor by its name. // Can not use map here to keep field orders. func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { for _, binding := range structDescriptor.Fields { - if binding.Field.Name == fieldName { + if binding.Field.Name() == fieldName { return binding } } @@ -37,7 +36,7 @@ func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { // Binding describe how should we encode/decode the struct field type Binding struct { levels []int - Field *reflect.StructField + Field reflect2.StructField FromNames []string ToNames []string Encoder ValEncoder @@ -48,10 +47,12 @@ type Binding struct { // Can also rename fields by UpdateStructDescriptor. 
type Extension interface { UpdateStructDescriptor(structDescriptor *StructDescriptor) - CreateDecoder(typ reflect.Type) ValDecoder - CreateEncoder(typ reflect.Type) ValEncoder - DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder - DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder + CreateMapKeyDecoder(typ reflect2.Type) ValDecoder + CreateMapKeyEncoder(typ reflect2.Type) ValEncoder + CreateDecoder(typ reflect2.Type) ValDecoder + CreateEncoder(typ reflect2.Type) ValEncoder + DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder } // DummyExtension embed this type get dummy implementation for all methods of Extension @@ -62,23 +63,105 @@ type DummyExtension struct { func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { } +// CreateMapKeyDecoder No-op +func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + // CreateDecoder No-op -func (extension *DummyExtension) CreateDecoder(typ reflect.Type) ValDecoder { +func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { return nil } // CreateEncoder No-op -func (extension *DummyExtension) CreateEncoder(typ reflect.Type) ValEncoder { +func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { return nil } // DecorateDecoder No-op -func (extension *DummyExtension) DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder { +func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { return decoder } // DecorateEncoder No-op -func (extension *DummyExtension) DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder { +func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type EncoderExtension map[reflect2.Type]ValEncoder + +// UpdateStructDescriptor No-op +func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder get encoder from map +func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return extension[typ] +} + +// CreateMapKeyDecoder No-op +func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type DecoderExtension map[reflect2.Type]ValDecoder + +// UpdateStructDescriptor No-op +func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder get decoder 
from map +func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return extension[typ] +} + +// CreateEncoder No-op +func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { return encoder } @@ -99,10 +182,6 @@ func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { encoder.fun(ptr, stream) } -func (encoder *funcEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { if encoder.isEmptyFunc == nil { return false @@ -161,26 +240,26 @@ func RegisterExtension(extension Extension) { extensions = append(extensions, extension) } -func getTypeDecoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValDecoder { - decoder := _getTypeDecoderFromExtension(cfg, typ) +func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(ctx, typ) if decoder != nil { for _, extension := range extensions { decoder = extension.DecorateDecoder(typ, decoder) } - for _, extension := range cfg.extensions { + for _, extension := range ctx.extensions { decoder = extension.DecorateDecoder(typ, decoder) } } return decoder } -func _getTypeDecoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValDecoder { +func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { for _, extension := range extensions { decoder := extension.CreateDecoder(typ) if decoder != nil { return decoder } } - for _, extension := range cfg.extensions { + for _, extension := range ctx.extensions { decoder := extension.CreateDecoder(typ) if decoder != nil { return decoder @@ -192,35 +271,36 @@ func _getTypeDecoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValDecode return decoder } if typ.Kind() == reflect.Ptr { - decoder := typeDecoders[typ.Elem().String()] + ptrType := typ.(*reflect2.UnsafePtrType) + decoder := typeDecoders[ptrType.Elem().String()] if decoder != nil { - return &OptionalDecoder{typ.Elem(), decoder} + return &OptionalDecoder{ptrType.Elem(), decoder} } } return nil } -func getTypeEncoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValEncoder { - encoder := _getTypeEncoderFromExtension(cfg, typ) +func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(ctx, typ) if encoder != nil { for _, extension := range extensions { encoder = extension.DecorateEncoder(typ, encoder) } - for _, extension := range cfg.extensions { + for _, extension := range ctx.extensions { encoder = extension.DecorateEncoder(typ, encoder) } } return encoder } -func _getTypeEncoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValEncoder { +func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { for _, extension := range extensions { encoder := extension.CreateEncoder(typ) if encoder != nil { return encoder } } - for _, extension := range cfg.extensions { + for _, extension := range ctx.extensions { encoder := extension.CreateEncoder(typ) if encoder != nil { return encoder @@ -232,7 +312,8 @@ func _getTypeEncoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValEncode return encoder } if typ.Kind() == reflect.Ptr { - encoder := 
typeEncoders[typ.Elem().String()] + typePtr := typ.(*reflect2.UnsafePtrType) + encoder := typeEncoders[typePtr.Elem().String()] if encoder != nil { return &OptionalEncoder{encoder} } @@ -240,61 +321,60 @@ func _getTypeEncoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValEncode return nil } -func describeStruct(cfg *frozenConfig, prefix string, typ reflect.Type) *StructDescriptor { +func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { + structType := typ.(*reflect2.UnsafeStructType) embeddedBindings := []*Binding{} bindings := []*Binding{} - for i := 0; i < typ.NumField(); i++ { - field := typ.Field(i) - tag, hastag := field.Tag.Lookup(cfg.getTagKey()) - if cfg.onlyTaggedField && !hastag { + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + tag, hastag := field.Tag().Lookup(ctx.getTagKey()) + if ctx.onlyTaggedField && !hastag { continue } tagParts := strings.Split(tag, ",") if tag == "-" { continue } - if field.Anonymous && (tag == "" || tagParts[0] == "") { - if field.Type.Kind() == reflect.Struct { - structDescriptor := describeStruct(cfg, prefix, field.Type) + if field.Anonymous() && (tag == "" || tagParts[0] == "") { + if field.Type().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, field.Type()) for _, binding := range structDescriptor.Fields { binding.levels = append([]int{i}, binding.levels...) omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} - binding.Decoder = &structFieldDecoder{&field, binding.Decoder} + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} embeddedBindings = append(embeddedBindings, binding) } continue - } else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { - structDescriptor := describeStruct(cfg, prefix, field.Type.Elem()) - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) - omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &dereferenceEncoder{binding.Encoder} - binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} - binding.Decoder = &dereferenceDecoder{field.Type.Elem(), binding.Decoder} - binding.Decoder = &structFieldDecoder{&field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) + } else if field.Type().Kind() == reflect.Ptr { + ptrType := field.Type().(*reflect2.UnsafePtrType) + if ptrType.Elem().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, ptrType.Elem()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &dereferenceEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue } - continue } } - fieldNames := calcFieldNames(field.Name, tagParts[0], tag) - fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name) + fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) decoder := fieldDecoders[fieldCacheKey] if decoder == nil { - decoder = decoderOfType(cfg, prefix+typ.String()+"."+field.Name+"->", field.Type) + decoder = decoderOfType(ctx.append(field.Name()), field.Type()) } encoder := fieldEncoders[fieldCacheKey] if encoder == nil { - encoder = encoderOfType(cfg, prefix+typ.String()+"."+field.Name+"->", field.Type) - // map is stored as pointer in the struct, - // and treat nil or empty map as empty field - if encoder != nil && field.Type.Kind() == reflect.Map { - encoder = &optionalMapEncoder{encoder} - } + encoder = encoderOfType(ctx.append(field.Name()), field.Type()) } binding := &Binding{ - Field: &field, + Field: field, FromNames: fieldNames, ToNames: fieldNames, Decoder: decoder, @@ -303,38 +383,20 @@ func describeStruct(cfg *frozenConfig, prefix string, typ reflect.Type) *StructD binding.levels = []int{i} bindings = append(bindings, binding) } - return createStructDescriptor(cfg, typ, bindings, embeddedBindings) + return createStructDescriptor(ctx, typ, bindings, embeddedBindings) } -func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { - onePtrEmbedded := false - onePtrOptimization := false - if typ.NumField() == 1 { - firstField := typ.Field(0) - switch firstField.Type.Kind() { - case reflect.Ptr: - if firstField.Anonymous && firstField.Type.Elem().Kind() == reflect.Struct { - onePtrEmbedded = true - } - fallthrough - case reflect.Map: - onePtrOptimization = true - case reflect.Struct: - onePtrOptimization = isStructOnePtr(firstField.Type) - } - } +func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { structDescriptor := &StructDescriptor{ - onePtrEmbedded: onePtrEmbedded, - onePtrOptimization: onePtrOptimization, - Type: typ, - Fields: bindings, + Type: typ, + Fields: bindings, } for _, extension := range extensions { extension.UpdateStructDescriptor(structDescriptor) } - for _, extension := range cfg.extensions { + for _, extension := range ctx.extensions { extension.UpdateStructDescriptor(structDescriptor) } - processTags(structDescriptor, cfg) + processTags(structDescriptor, ctx.frozenConfig) // merge normal & embedded bindings & sort with original order allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) sort.Sort(allBindings) @@ -342,21 +404,6 @@ func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Bin return structDescriptor } -func isStructOnePtr(typ reflect.Type) bool { - if typ.NumField() == 1 { - firstField := typ.Field(0) - switch firstField.Type.Kind() { - case reflect.Ptr: - return true - case reflect.Map: - return true - case reflect.Struct: - return isStructOnePtr(firstField.Type) - } - } - return false -} - type sortableBindings []*Binding func 
(bindings sortableBindings) Len() int { @@ -384,12 +431,12 @@ func (bindings sortableBindings) Swap(i, j int) { func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { for _, binding := range structDescriptor.Fields { shouldOmitEmpty := false - tagParts := strings.Split(binding.Field.Tag.Get(cfg.getTagKey()), ",") + tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") for _, tagPart := range tagParts[1:] { if tagPart == "omitempty" { shouldOmitEmpty = true } else if tagPart == "string" { - if binding.Field.Type.Kind() == reflect.String { + if binding.Field.Type().Kind() == reflect.String { binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} } else { diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go new file mode 100644 index 000000000..98d45c1ec --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_number.go @@ -0,0 +1,112 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "strconv" + "unsafe" +) + +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} + +var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() +var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() + +func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*json.Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsoniterNumberCodec) 
Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go new file mode 100644 index 000000000..f2619936c --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -0,0 +1,60 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() +var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() + +func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go new file mode 100644 index 000000000..8812f0850 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -0,0 +1,318 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "sort" + "unsafe" +) + +func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) + elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) + return &mapDecoder{ + mapType: mapType, + keyType: mapType.Key(), + elemType: mapType.Elem(), + keyDecoder: keyDecoder, + elemDecoder: elemDecoder, + } +} + +func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + if ctx.sortMapKeys { + return &sortKeysMapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } + } + return &mapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } +} + +func decoderOfMapKey(ctx *ctx, typ 
reflect2.Type) ValDecoder { + for _, extension := range ctx.extensions { + decoder := extension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + } + switch typ.Kind() { + case reflect.String: + return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyDecoder{decoderOfType(ctx, typ)} + default: + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(textMarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textMarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { + for _, extension := range ctx.extensions { + encoder := extension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + } + switch typ.Kind() { + case reflect.String: + return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyEncoder{encoderOfType(ctx, typ)} + default: + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Kind() == reflect.Interface { + return &dynamicMapKeyEncoder{ctx, typ} + } + return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +type mapDecoder struct { + mapType *reflect2.UnsafeMapType + keyType reflect2.Type + elemType reflect2.Type + keyDecoder ValDecoder + elemDecoder ValDecoder +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + mapType := decoder.mapType + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + *(*unsafe.Pointer)(ptr) = nil + mapType.UnsafeSet(ptr, mapType.UnsafeNew()) + return + } + if mapType.UnsafeIsNil(ptr) { + mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) + } + if c != '{' { + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return + } + c = iter.nextToken() + if c == '}' { + return + } + if c != '"' { + iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + return + } + iter.unreadByte() + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + 
} + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + } + if c != '}' { + iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c})) + } +} + +type numericMapKeyDecoder struct { + decoder ValDecoder +} + +func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } + decoder.decoder.Decode(ptr, iter) + c = iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } +} + +type numericMapKeyEncoder struct { + encoder ValEncoder +} + +func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.encoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type dynamicMapKeyEncoder struct { + ctx *ctx + valType reflect2.Type +} + +func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) +} + +func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + obj := encoder.valType.UnsafeIndirect(ptr) + return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) +} + +type mapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + iter := encoder.mapType.UnsafeIterate(ptr) + for i := 0; iter.HasNext(); i++ { + if i != 0 { + stream.WriteMore() + } + key, elem := iter.UnsafeNext() + encoder.keyEncoder.Encode(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, stream) + } + stream.WriteObjectEnd() +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type sortKeysMapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + mapIter := encoder.mapType.UnsafeIterate(ptr) + subStream := stream.cfg.BorrowStream(nil) + subIter := stream.cfg.BorrowIterator(nil) + keyValues := encodedKeyValues{} + for mapIter.HasNext() { + subStream.buf = make([]byte, 0, 64) + key, elem := mapIter.UnsafeNext() + encoder.keyEncoder.Encode(key, subStream) + if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { + stream.Error = subStream.Error + } + encodedKey := subStream.Buffer() + subIter.ResetBytes(encodedKey) + decodedKey := subIter.ReadString() + if stream.indention > 0 { + subStream.writeTwoBytes(byte(':'), byte(' ')) + } else { + subStream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, subStream) + keyValues = append(keyValues, encodedKV{ + key: decodedKey, + keyValue: subStream.Buffer(), + }) + } + sort.Sort(keyValues) + for i, keyValue := range keyValues { + if i != 0 { + stream.WriteMore() + } + stream.Write(keyValue.keyValue) + } + stream.WriteObjectEnd() + 
stream.cfg.ReturnStream(subStream) + stream.cfg.ReturnIterator(subIter) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type encodedKeyValues []encodedKV + +type encodedKV struct { + key string + keyValue []byte +} + +func (sv encodedKeyValues) Len() int { return len(sv) } +func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go new file mode 100644 index 000000000..58ac959ad --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -0,0 +1,218 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() +var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() +var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() + +func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ptrType}, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ptrType}, + } + } + return nil +} + +func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == marshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + } + return encoder + } + if typ.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &marshalerEncoder{ + valType: typ, + checkIsEmpty: checkIsEmpty, + } + return encoder + } + ptrType := reflect2.PtrTo(typ) + if ctx.prefix != "" && ptrType.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &marshalerEncoder{ + valType: ptrType, + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + if typ == textMarshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directTextMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + return encoder + } + if typ.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return encoder + } + // if prefix is empty, the type is the root type + if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: ptrType, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + return nil +} + +type marshalerEncoder struct { + checkIsEmpty checkIsEmpty + valType reflect2.Type +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := obj.(json.Marshaler) 
+ bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directMarshalerEncoder struct { + checkIsEmpty checkIsEmpty +} + +func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*json.Marshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + valType reflect2.Type + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := (obj).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directTextMarshalerEncoder struct { + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*encoding.TextMarshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + unmarshaler := obj.(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + ptrType := valType.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elem := elemType.UnsafeNew() + ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) + obj = valType.UnsafeIndirect(ptr) + } + unmarshaler := (obj).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go new file mode 100644 index 000000000..9042eb0cb --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_native.go @@ -0,0 +1,451 @@ +package jsoniter + +import ( + "encoding/base64" + "reflect" + "strconv" + "unsafe" + + "github.com/modern-go/reflect2" +) + +const ptrSize = 32 << 
uintptr(^uintptr(0)>>63) + +func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case 
reflect.Int16: + if typeName != "int16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + 
*((*int64)(ptr)) = iter.ReadInt64() + } +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + return !(*((*bool)(ptr))) +} + +type base64Codec struct { + sliceType *reflect2.UnsafeSliceType + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + codec.sliceType.UnsafeSetNil(ptr) + return + } + switch iter.WhatIsNext() { + case StringValue: + src := iter.ReadString() + dst, err := base64.StdEncoding.DecodeString(src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + 
default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + src := *((*[]byte)(ptr)) + if len(src) == 0 { + stream.WriteNil() + return + } + encoding := base64.StdEncoding + stream.writeByte('"') + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = append(stream.buf, buf...) + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go similarity index 52% rename from vendor/github.com/json-iterator/go/feature_reflect_optional.go rename to vendor/github.com/json-iterator/go/reflect_optional.go index fe7055fcd..43ec71d6d 100644 --- a/vendor/github.com/json-iterator/go/feature_reflect_optional.go +++ b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -1,28 +1,31 @@ package jsoniter import ( + "github.com/modern-go/reflect2" "reflect" "unsafe" ) -func decoderOfOptional(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { - elemType := typ.Elem() - decoder := decoderOfType(cfg, prefix, elemType) +func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + decoder := decoderOfType(ctx, elemType) + if ctx.prefix == "" && elemType.Kind() == reflect.Ptr { + return &dereferenceDecoder{elemType, decoder} + } return &OptionalDecoder{elemType, decoder} } -func encoderOfOptional(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { - elemType := typ.Elem() - elemEncoder := encoderOfType(cfg, prefix, elemType) +func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elemEncoder := encoderOfType(ctx, elemType) encoder := &OptionalEncoder{elemEncoder} - if elemType.Kind() == reflect.Map { - encoder = &OptionalEncoder{encoder} - } return encoder } type OptionalDecoder struct { - ValueType reflect.Type + ValueType reflect2.Type ValueDecoder ValDecoder } @@ -32,10 +35,9 @@ func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { } else { if *((*unsafe.Pointer)(ptr)) == nil { //pointer to null, we have to allocate memory to hold the value - value := reflect.New(decoder.ValueType) - newPtr := extractInterface(value.Interface()).word + newPtr := decoder.ValueType.UnsafeNew() decoder.ValueDecoder.Decode(newPtr, iter) - *((*uintptr)(ptr)) = uintptr(newPtr) + *((*unsafe.Pointer)(ptr)) = newPtr } else { //reuse existing instance decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) @@ -45,17 +47,16 @@ func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { type dereferenceDecoder struct { // only to deference a pointer - valueType reflect.Type + valueType reflect2.Type valueDecoder ValDecoder } func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { if *((*unsafe.Pointer)(ptr)) == nil { //pointer to null, we have to allocate memory to hold the value - value := reflect.New(decoder.valueType) - newPtr := extractInterface(value.Interface()).word + newPtr := decoder.valueType.UnsafeNew() decoder.valueDecoder.Decode(newPtr, iter) - *((*uintptr)(ptr)) = uintptr(newPtr) + *((*unsafe.Pointer)(ptr)) = newPtr } else { //reuse existing instance decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) @@ -74,10 +75,6 @@ func 
(encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { } } -func (encoder *OptionalEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { return *((*unsafe.Pointer)(ptr)) == nil } @@ -94,31 +91,43 @@ func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { } } -func (encoder *dereferenceEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.ValueEncoder.IsEmpty(*((*unsafe.Pointer)(ptr))) -} - -type optionalMapEncoder struct { - valueEncoder ValEncoder -} - -func (encoder *optionalMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *((*unsafe.Pointer)(ptr)) == nil { - stream.WriteNil() - } else { - encoder.valueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + dePtr := *((*unsafe.Pointer)(ptr)) + if dePtr == nil { + return true } + return encoder.ValueEncoder.IsEmpty(dePtr) } -func (encoder *optionalMapEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) +func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + deReferenced := *((*unsafe.Pointer)(ptr)) + if deReferenced == nil { + return true + } + isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := unsafe.Pointer(deReferenced) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) } -func (encoder *optionalMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - p := *((*unsafe.Pointer)(ptr)) - return p == nil || encoder.valueEncoder.IsEmpty(p) +type referenceEncoder struct { + encoder ValEncoder +} + +func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +type referenceDecoder struct { + decoder ValDecoder +} + +func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) } diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go new file mode 100644 index 000000000..9441d79df --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_slice.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceDecoder{sliceType, decoder} +} + +func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceEncoder{sliceType, encoder} +} + +type sliceEncoder struct { + sliceType *reflect2.UnsafeSliceType + elemEncoder ValEncoder +} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if encoder.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + length := encoder.sliceType.UnsafeLengthOf(ptr) + if length == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) + for i := 1; i < 
length; i++ { + stream.WriteMore() + elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.sliceType.UnsafeLengthOf(ptr) == 0 +} + +type sliceDecoder struct { + sliceType *reflect2.UnsafeSliceType + elemDecoder ValDecoder +} + +func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) + } +} + +func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + sliceType := decoder.sliceType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + sliceType.UnsafeSetNil(ptr) + return + } + if c != '[' { + iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) + return + } + iter.unreadByte() + sliceType.UnsafeGrow(ptr, 1) + elemPtr := sliceType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + idx := length + length += 1 + sliceType.UnsafeGrow(ptr, length) + elemPtr = sliceType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go similarity index 88% rename from vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go rename to vendor/github.com/json-iterator/go/reflect_struct_decoder.go index 4d03b35e7..355d2d116 100644 --- a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -3,24 +3,61 @@ package jsoniter import ( "fmt" "io" - "reflect" "strings" "unsafe" + + "github.com/modern-go/reflect2" ) -func createStructDecoder(cfg *frozenConfig, typ reflect.Type, fields map[string]*structFieldDecoder) ValDecoder { - if cfg.disallowUnknownFields { +func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { + bindings := map[string]*Binding{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[k] = binding.Decoder.(*structFieldDecoder) + } + + if !ctx.caseSensitive() { + for k, binding := range bindings { + if _, found := fields[strings.ToLower(k)]; !found { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + } + } + + return createStructDecoder(ctx, typ, fields) +} + +func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { + if ctx.disallowUnknownFields { return 
&generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} } knownHash := map[int64]struct{}{ 0: {}, } + switch len(fields) { case 0: return &skipObjectDecoder{typ} case 1: for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { return &generalStructDecoder{typ, fields, false} @@ -34,7 +71,7 @@ func createStructDecoder(cfg *frozenConfig, typ reflect.Type, fields map[string] var fieldDecoder1 *structFieldDecoder var fieldDecoder2 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { return &generalStructDecoder{typ, fields, false} @@ -57,7 +94,7 @@ func createStructDecoder(cfg *frozenConfig, typ reflect.Type, fields map[string] var fieldDecoder2 *structFieldDecoder var fieldDecoder3 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { return &generalStructDecoder{typ, fields, false} @@ -88,7 +125,7 @@ func createStructDecoder(cfg *frozenConfig, typ reflect.Type, fields map[string] var fieldDecoder3 *structFieldDecoder var fieldDecoder4 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { return &generalStructDecoder{typ, fields, false} @@ -125,7 +162,7 @@ func createStructDecoder(cfg *frozenConfig, typ reflect.Type, fields map[string] var fieldDecoder4 *structFieldDecoder var fieldDecoder5 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { return &generalStructDecoder{typ, fields, false} @@ -168,7 +205,7 @@ func createStructDecoder(cfg *frozenConfig, typ reflect.Type, fields map[string] var fieldDecoder5 *structFieldDecoder var fieldDecoder6 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { return &generalStructDecoder{typ, fields, false} @@ -217,7 +254,7 @@ func createStructDecoder(cfg *frozenConfig, typ reflect.Type, fields map[string] var fieldDecoder6 *structFieldDecoder var fieldDecoder7 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { return &generalStructDecoder{typ, fields, false} @@ -272,7 +309,7 @@ func createStructDecoder(cfg *frozenConfig, typ reflect.Type, fields map[string] var fieldDecoder7 *structFieldDecoder var fieldDecoder8 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { return &generalStructDecoder{typ, fields, false} @@ -333,7 +370,7 @@ func createStructDecoder(cfg *frozenConfig, typ reflect.Type, fields map[string] var fieldDecoder8 *structFieldDecoder var fieldDecoder9 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := 
calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { return &generalStructDecoder{typ, fields, false} @@ -400,7 +437,7 @@ func createStructDecoder(cfg *frozenConfig, typ reflect.Type, fields map[string] var fieldDecoder9 *structFieldDecoder var fieldDecoder10 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { return &generalStructDecoder{typ, fields, false} @@ -454,7 +491,7 @@ func createStructDecoder(cfg *frozenConfig, typ reflect.Type, fields map[string] } type generalStructDecoder struct { - typ reflect.Type + typ reflect2.Type fields map[string]*structFieldDecoder disallowUnknownFields bool } @@ -463,13 +500,16 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if !iter.readObjectStart() { return } - decoder.decodeOneField(ptr, iter) - for iter.nextToken() == ',' { + var c byte + for c = ','; c == ','; c = iter.nextToken() { decoder.decodeOneField(ptr, iter) } if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + if c != '}' { + iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) + } } func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { @@ -479,13 +519,13 @@ func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *It fieldBytes := iter.ReadStringAsSlice() field = *(*string)(unsafe.Pointer(&fieldBytes)) fieldDecoder = decoder.fields[field] - if fieldDecoder == nil { + if fieldDecoder == nil && !iter.cfg.caseSensitive { fieldDecoder = decoder.fields[strings.ToLower(field)] } } else { field = iter.ReadString() fieldDecoder = decoder.fields[field] - if fieldDecoder == nil { + if fieldDecoder == nil && !iter.cfg.caseSensitive { fieldDecoder = decoder.fields[strings.ToLower(field)] } } @@ -509,7 +549,7 @@ func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *It } type skipObjectDecoder struct { - typ reflect.Type + typ reflect2.Type } func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { @@ -522,7 +562,7 @@ func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { } type oneFieldStructDecoder struct { - typ reflect.Type + typ reflect2.Type fieldHash int64 fieldDecoder *structFieldDecoder } @@ -547,7 +587,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) } type twoFieldsStructDecoder struct { - typ reflect.Type + typ reflect2.Type fieldHash1 int64 fieldDecoder1 *structFieldDecoder fieldHash2 int64 @@ -577,7 +617,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator } type threeFieldsStructDecoder struct { - typ reflect.Type + typ reflect2.Type fieldHash1 int64 fieldDecoder1 *structFieldDecoder fieldHash2 int64 @@ -611,7 +651,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat } type fourFieldsStructDecoder struct { - typ reflect.Type + typ reflect2.Type fieldHash1 int64 fieldDecoder1 *structFieldDecoder fieldHash2 int64 @@ -649,7 +689,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato } type fiveFieldsStructDecoder struct { - typ reflect.Type + typ reflect2.Type fieldHash1 int64 fieldDecoder1 *structFieldDecoder fieldHash2 int64 @@ -691,7 +731,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato } type 
sixFieldsStructDecoder struct { - typ reflect.Type + typ reflect2.Type fieldHash1 int64 fieldDecoder1 *structFieldDecoder fieldHash2 int64 @@ -737,7 +777,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator } type sevenFieldsStructDecoder struct { - typ reflect.Type + typ reflect2.Type fieldHash1 int64 fieldDecoder1 *structFieldDecoder fieldHash2 int64 @@ -787,7 +827,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat } type eightFieldsStructDecoder struct { - typ reflect.Type + typ reflect2.Type fieldHash1 int64 fieldDecoder1 *structFieldDecoder fieldHash2 int64 @@ -841,7 +881,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat } type nineFieldsStructDecoder struct { - typ reflect.Type + typ reflect2.Type fieldHash1 int64 fieldDecoder1 *structFieldDecoder fieldHash2 int64 @@ -899,7 +939,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato } type tenFieldsStructDecoder struct { - typ reflect.Type + typ reflect2.Type fieldHash1 int64 fieldDecoder1 *structFieldDecoder fieldHash2 int64 @@ -961,14 +1001,48 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator } type structFieldDecoder struct { - field *reflect.StructField + field reflect2.StructField fieldDecoder ValDecoder } func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - fieldPtr := unsafe.Pointer(uintptr(ptr) + decoder.field.Offset) + fieldPtr := decoder.field.UnsafeGet(ptr) decoder.fieldDecoder.Decode(fieldPtr, iter) if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%s: %s", decoder.field.Name, iter.Error.Error()) + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return } } diff --git a/vendor/github.com/json-iterator/go/feature_reflect_object.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go similarity index 53% rename from vendor/github.com/json-iterator/go/feature_reflect_object.go rename to vendor/github.com/json-iterator/go/reflect_struct_encoder.go index 65c8805f9..d0759cf64 100644 --- a/vendor/github.com/json-iterator/go/feature_reflect_object.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go @@ -2,19 +2,20 @@ package jsoniter import ( "fmt" + "github.com/modern-go/reflect2" "io" "reflect" "unsafe" ) -func encoderOfStruct(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { +func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { type bindingTo struct { binding *Binding toName string ignored bool } orderedBindings := []*bindingTo{} - structDescriptor 
:= describeStruct(cfg, prefix, typ) + structDescriptor := describeStruct(ctx, typ) for _, binding := range structDescriptor.Fields { for _, toName := range binding.ToNames { new := &bindingTo{ @@ -25,7 +26,7 @@ func encoderOfStruct(cfg *frozenConfig, prefix string, typ reflect.Type) ValEnco if old.toName != toName { continue } - old.ignored, new.ignored = resolveConflictBinding(cfg, old.binding, new.binding) + old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) } orderedBindings = append(orderedBindings, new) } @@ -42,13 +43,36 @@ func encoderOfStruct(cfg *frozenConfig, prefix string, typ reflect.Type) ValEnco }) } } - return &structEncoder{typ, structDescriptor.onePtrEmbedded, - structDescriptor.onePtrOptimization, finalOrderedFields} + return &structEncoder{typ, finalOrderedFields} +} + +func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { + encoder := createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return &structEncoder{typ: typ} + case reflect.Array: + return &arrayEncoder{} + case reflect.Slice: + return &sliceEncoder{} + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return &OptionalEncoder{} + default: + return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} + } } func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { - newTagged := new.Field.Tag.Get(cfg.getTagKey()) != "" - oldTagged := old.Field.Tag.Get(cfg.getTagKey()) != "" + newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" if newTagged { if oldTagged { if len(old.levels) > len(new.levels) { @@ -75,60 +99,41 @@ func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ig } } -func decoderOfStruct(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { - bindings := map[string]*Binding{} - structDescriptor := describeStruct(cfg, prefix, typ) - for _, binding := range structDescriptor.Fields { - for _, fromName := range binding.FromNames { - old := bindings[fromName] - if old == nil { - bindings[fromName] = binding - continue - } - ignoreOld, ignoreNew := resolveConflictBinding(cfg, old, binding) - if ignoreOld { - delete(bindings, fromName) - } - if !ignoreNew { - bindings[fromName] = binding - } - } - } - fields := map[string]*structFieldDecoder{} - for k, binding := range bindings { - fields[k] = binding.Decoder.(*structFieldDecoder) - } - return createStructDecoder(cfg, typ, fields) -} - type structFieldEncoder struct { - field *reflect.StructField + field reflect2.StructField fieldEncoder ValEncoder omitempty bool } func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) + fieldPtr := encoder.field.UnsafeGet(ptr) encoder.fieldEncoder.Encode(fieldPtr, stream) if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%s: %s", encoder.field.Name, stream.Error.Error()) + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error()) } } -func (encoder *structFieldEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { - fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) + fieldPtr := 
encoder.field.UnsafeGet(ptr) return encoder.fieldEncoder.IsEmpty(fieldPtr) } +func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := encoder.field.UnsafeGet(ptr) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type IsEmbeddedPtrNil interface { + IsEmbeddedPtrNil(ptr unsafe.Pointer) bool +} + type structEncoder struct { - typ reflect.Type - onePtrEmbedded bool - onePtrOptimization bool - fields []structFieldTo + typ reflect2.Type + fields []structFieldTo } type structFieldTo struct { @@ -143,6 +148,9 @@ func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { continue } + if field.encoder.IsEmbeddedPtrNil(ptr) { + continue + } if isNotFirst { stream.WriteMore() } @@ -156,24 +164,6 @@ func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { } } -func (encoder *structEncoder) EncodeInterface(val interface{}, stream *Stream) { - e := (*emptyInterface)(unsafe.Pointer(&val)) - if encoder.onePtrOptimization { - if e.word == nil && encoder.onePtrEmbedded { - stream.WriteObjectStart() - stream.WriteObjectEnd() - return - } - ptr := uintptr(e.word) - e.word = unsafe.Pointer(&ptr) - } - if reflect.TypeOf(val).Kind() == reflect.Ptr { - encoder.Encode(unsafe.Pointer(&e.word), stream) - } else { - encoder.Encode(e.word, stream) - } -} - func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { return false } @@ -185,10 +175,36 @@ func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { stream.WriteEmptyObject() } -func (encoder *emptyStructEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { return false } + +type stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/feature_stream.go b/vendor/github.com/json-iterator/go/stream.go similarity index 97% rename from vendor/github.com/json-iterator/go/feature_stream.go rename to vendor/github.com/json-iterator/go/stream.go index d6e529808..17662fded 100644 --- a/vendor/github.com/json-iterator/go/feature_stream.go +++ b/vendor/github.com/json-iterator/go/stream.go @@ -55,6 +55,11 @@ func (stream *Stream) Buffer() []byte { return stream.buf } +// SetBuffer allows to append to the internal buffer directly +func (stream *Stream) SetBuffer(buf []byte) { + stream.buf = buf +} + // Write writes the contents of p into the buffer. // It returns the number of bytes written. 
// If nn < len(p), it also returns an error explaining diff --git a/vendor/github.com/json-iterator/go/feature_stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_stream_float.go rename to vendor/github.com/json-iterator/go/stream_float.go diff --git a/vendor/github.com/json-iterator/go/feature_stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go similarity index 95% rename from vendor/github.com/json-iterator/go/feature_stream_int.go rename to vendor/github.com/json-iterator/go/stream_int.go index 0f7424860..d1059ee4c 100644 --- a/vendor/github.com/json-iterator/go/feature_stream_int.go +++ b/vendor/github.com/json-iterator/go/stream_int.go @@ -17,16 +17,16 @@ func init() { func writeFirstBuf(space []byte, v uint32) []byte { start := v >> 24 if start == 0 { - space = append(space, byte(v >> 16), byte(v >> 8)) + space = append(space, byte(v>>16), byte(v>>8)) } else if start == 1 { - space = append(space, byte(v >> 8)) + space = append(space, byte(v>>8)) } space = append(space, byte(v)) return space } func writeBuf(buf []byte, v uint32) []byte { - return append(buf, byte(v >> 16), byte(v >> 8), byte(v)) + return append(buf, byte(v>>16), byte(v>>8), byte(v)) } // WriteUint8 write uint8 to stream @@ -91,7 +91,7 @@ func (stream *Stream) WriteUint32(val uint32) { stream.buf = writeFirstBuf(stream.buf, digits[q2]) } else { r3 := q2 - q3*1000 - stream.buf = append(stream.buf, byte(q3 + '0')) + stream.buf = append(stream.buf, byte(q3+'0')) stream.buf = writeBuf(stream.buf, digits[r3]) } stream.buf = writeBuf(stream.buf, digits[r2]) diff --git a/vendor/github.com/json-iterator/go/feature_stream_string.go b/vendor/github.com/json-iterator/go/stream_str.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_stream_string.go rename to vendor/github.com/json-iterator/go/stream_str.go diff --git a/vendor/github.com/modern-go/concurrent/LICENSE b/vendor/github.com/modern-go/concurrent/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/modern-go/concurrent/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
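The json-iterator hunk earlier in this patch introduces `stringModeNumberEncoder` and `stringModeStringEncoder`, the encoders behind the `,string` struct-tag option: numeric fields are emitted inside quotes, and string fields are re-encoded as a quoted JSON string via a temporary stream. A minimal usage sketch of that behaviour through the public API; the `Account` type and its values are illustrative, and the expected output assumes encoding/json-compatible `,string` semantics:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Account is a hypothetical type; the ",string" options route its fields
// through stringModeNumberEncoder and stringModeStringEncoder respectively.
type Account struct {
	ID   int64  `json:"id,string"`
	Name string `json:"name,string"`
}

func main() {
	out, err := jsoniter.Marshal(Account{ID: 42, Name: "prometheus"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"id":"42","name":"\"prometheus\""}
}
```

The number is simply wrapped in quotes, while the string field is serialized twice, matching the temporary-stream round trip in `stringModeStringEncoder.Encode`.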
diff --git a/vendor/github.com/modern-go/concurrent/README.md b/vendor/github.com/modern-go/concurrent/README.md new file mode 100644 index 000000000..acab3200a --- /dev/null +++ b/vendor/github.com/modern-go/concurrent/README.md @@ -0,0 +1,49 @@ +# concurrent + +[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/concurrent/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/concurrent?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/concurrent) +[![Build Status](https://travis-ci.org/modern-go/concurrent.svg?branch=master)](https://travis-ci.org/modern-go/concurrent) +[![codecov](https://codecov.io/gh/modern-go/concurrent/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/concurrent) +[![rcard](https://goreportcard.com/badge/github.com/modern-go/concurrent)](https://goreportcard.com/report/github.com/modern-go/concurrent) +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/concurrent/master/LICENSE) + +* concurrent.Map: backport sync.Map for go below 1.9 +* concurrent.Executor: goroutine with explicit ownership and cancellable + +# concurrent.Map + +because sync.Map is only available in go 1.9, we can use concurrent.Map to make code portable + +```go +m := concurrent.NewMap() +m.Store("hello", "world") +elem, found := m.Load("hello") +// elem will be "world" +// found will be true +``` + +# concurrent.Executor + +```go +executor := concurrent.NewUnboundedExecutor() +executor.Go(func(ctx context.Context) { + everyMillisecond := time.NewTicker(time.Millisecond) + for { + select { + case <-ctx.Done(): + fmt.Println("goroutine exited") + return + case <-everyMillisecond.C: + // do something + } + } +}) +time.Sleep(time.Second) +executor.StopAndWaitForever() +fmt.Println("executor stopped") +``` + +attach goroutine to executor instance, so that we can + +* cancel it by stop the executor with Stop/StopAndWait/StopAndWaitForever +* handle panic by callback: the default behavior will no longer crash your application \ No newline at end of file diff --git a/vendor/github.com/modern-go/concurrent/executor.go b/vendor/github.com/modern-go/concurrent/executor.go new file mode 100644 index 000000000..623dba1ac --- /dev/null +++ b/vendor/github.com/modern-go/concurrent/executor.go @@ -0,0 +1,14 @@ +package concurrent + +import "context" + +// Executor replace go keyword to start a new goroutine +// the goroutine should cancel itself if the context passed in has been cancelled +// the goroutine started by the executor, is owned by the executor +// we can cancel all executors owned by the executor just by stop the executor itself +// however Executor interface does not Stop method, the one starting and owning executor +// should use the concrete type of executor, instead of this interface. 
+type Executor interface { + // Go starts a new goroutine controlled by the context + Go(handler func(ctx context.Context)) +} diff --git a/vendor/github.com/modern-go/concurrent/go_above_19.go b/vendor/github.com/modern-go/concurrent/go_above_19.go new file mode 100644 index 000000000..aeabf8c4f --- /dev/null +++ b/vendor/github.com/modern-go/concurrent/go_above_19.go @@ -0,0 +1,15 @@ +//+build go1.9 + +package concurrent + +import "sync" + +// Map is a wrapper for sync.Map introduced in go1.9 +type Map struct { + sync.Map +} + +// NewMap creates a thread safe Map +func NewMap() *Map { + return &Map{} +} diff --git a/vendor/github.com/modern-go/concurrent/go_below_19.go b/vendor/github.com/modern-go/concurrent/go_below_19.go new file mode 100644 index 000000000..b9c8df7f4 --- /dev/null +++ b/vendor/github.com/modern-go/concurrent/go_below_19.go @@ -0,0 +1,33 @@ +//+build !go1.9 + +package concurrent + +import "sync" + +// Map implements a thread safe map for go version below 1.9 using mutex +type Map struct { + lock sync.RWMutex + data map[interface{}]interface{} +} + +// NewMap creates a thread safe map +func NewMap() *Map { + return &Map{ + data: make(map[interface{}]interface{}, 32), + } +} + +// Load is same as sync.Map Load +func (m *Map) Load(key interface{}) (elem interface{}, found bool) { + m.lock.RLock() + elem, found = m.data[key] + m.lock.RUnlock() + return +} + +// Load is same as sync.Map Store +func (m *Map) Store(key interface{}, elem interface{}) { + m.lock.Lock() + m.data[key] = elem + m.lock.Unlock() +} diff --git a/vendor/github.com/modern-go/concurrent/log.go b/vendor/github.com/modern-go/concurrent/log.go new file mode 100644 index 000000000..9756fcc75 --- /dev/null +++ b/vendor/github.com/modern-go/concurrent/log.go @@ -0,0 +1,13 @@ +package concurrent + +import ( + "os" + "log" + "io/ioutil" +) + +// ErrorLogger is used to print out error, can be set to writer other than stderr +var ErrorLogger = log.New(os.Stderr, "", 0) + +// InfoLogger is used to print informational message, default to off +var InfoLogger = log.New(ioutil.Discard, "", 0) \ No newline at end of file diff --git a/vendor/github.com/modern-go/concurrent/test.sh b/vendor/github.com/modern-go/concurrent/test.sh new file mode 100755 index 000000000..d1e6b2ec5 --- /dev/null +++ b/vendor/github.com/modern-go/concurrent/test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... 
| grep -v vendor); do + go test -coverprofile=profile.out -coverpkg=github.com/modern-go/concurrent $d + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/modern-go/concurrent/unbounded_executor.go b/vendor/github.com/modern-go/concurrent/unbounded_executor.go new file mode 100644 index 000000000..05a77dceb --- /dev/null +++ b/vendor/github.com/modern-go/concurrent/unbounded_executor.go @@ -0,0 +1,119 @@ +package concurrent + +import ( + "context" + "fmt" + "runtime" + "runtime/debug" + "sync" + "time" + "reflect" +) + +// HandlePanic logs goroutine panic by default +var HandlePanic = func(recovered interface{}, funcName string) { + ErrorLogger.Println(fmt.Sprintf("%s panic: %v", funcName, recovered)) + ErrorLogger.Println(string(debug.Stack())) +} + +// UnboundedExecutor is a executor without limits on counts of alive goroutines +// it tracks the goroutine started by it, and can cancel them when shutdown +type UnboundedExecutor struct { + ctx context.Context + cancel context.CancelFunc + activeGoroutinesMutex *sync.Mutex + activeGoroutines map[string]int + HandlePanic func(recovered interface{}, funcName string) +} + +// GlobalUnboundedExecutor has the life cycle of the program itself +// any goroutine want to be shutdown before main exit can be started from this executor +// GlobalUnboundedExecutor expects the main function to call stop +// it does not magically knows the main function exits +var GlobalUnboundedExecutor = NewUnboundedExecutor() + +// NewUnboundedExecutor creates a new UnboundedExecutor, +// UnboundedExecutor can not be created by &UnboundedExecutor{} +// HandlePanic can be set with a callback to override global HandlePanic +func NewUnboundedExecutor() *UnboundedExecutor { + ctx, cancel := context.WithCancel(context.TODO()) + return &UnboundedExecutor{ + ctx: ctx, + cancel: cancel, + activeGoroutinesMutex: &sync.Mutex{}, + activeGoroutines: map[string]int{}, + } +} + +// Go starts a new goroutine and tracks its lifecycle. +// Panic will be recovered and logged automatically, except for StopSignal +func (executor *UnboundedExecutor) Go(handler func(ctx context.Context)) { + pc := reflect.ValueOf(handler).Pointer() + f := runtime.FuncForPC(pc) + funcName := f.Name() + file, line := f.FileLine(pc) + executor.activeGoroutinesMutex.Lock() + defer executor.activeGoroutinesMutex.Unlock() + startFrom := fmt.Sprintf("%s:%d", file, line) + executor.activeGoroutines[startFrom] += 1 + go func() { + defer func() { + recovered := recover() + // if you want to quit a goroutine without trigger HandlePanic + // use runtime.Goexit() to quit + if recovered != nil { + if executor.HandlePanic == nil { + HandlePanic(recovered, funcName) + } else { + executor.HandlePanic(recovered, funcName) + } + } + executor.activeGoroutinesMutex.Lock() + executor.activeGoroutines[startFrom] -= 1 + executor.activeGoroutinesMutex.Unlock() + }() + handler(executor.ctx) + }() +} + +// Stop cancel all goroutines started by this executor without wait +func (executor *UnboundedExecutor) Stop() { + executor.cancel() +} + +// StopAndWaitForever cancel all goroutines started by this executor and +// wait until all goroutines exited +func (executor *UnboundedExecutor) StopAndWaitForever() { + executor.StopAndWait(context.Background()) +} + +// StopAndWait cancel all goroutines started by this executor and wait. +// Wait can be cancelled by the context passed in. 
+func (executor *UnboundedExecutor) StopAndWait(ctx context.Context) { + executor.cancel() + for { + oneHundredMilliseconds := time.NewTimer(time.Millisecond * 100) + select { + case <-oneHundredMilliseconds.C: + if executor.checkNoActiveGoroutines() { + return + } + case <-ctx.Done(): + return + } + } +} + +func (executor *UnboundedExecutor) checkNoActiveGoroutines() bool { + executor.activeGoroutinesMutex.Lock() + defer executor.activeGoroutinesMutex.Unlock() + for startFrom, count := range executor.activeGoroutines { + if count > 0 { + InfoLogger.Println("UnboundedExecutor is still waiting goroutines to quit", + "startFrom", startFrom, + "count", count) + return false + } + } + return true +} diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.lock b/vendor/github.com/modern-go/reflect2/Gopkg.lock new file mode 100644 index 000000000..2a3a69893 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/Gopkg.lock @@ -0,0 +1,15 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" + version = "1.0.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "daee8a88b3498b61c5640056665b8b9eea062006f5e596bbb6a3ed9119a11ec7" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.toml b/vendor/github.com/modern-go/reflect2/Gopkg.toml new file mode 100644 index 000000000..2f4f4dbdc --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/Gopkg.toml @@ -0,0 +1,35 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + +ignored = [] + +[[constraint]] + name = "github.com/modern-go/concurrent" + version = "1.0.0" + +[prune] + go-tests = true + unused-packages = true diff --git a/vendor/github.com/modern-go/reflect2/LICENSE b/vendor/github.com/modern-go/reflect2/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
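The `unbounded_executor.go` hunk earlier in this patch routes every goroutine started via `Go` through a deferred recover and a shared cancellation context. A small sketch of how that API is used; the worker body, log message, and one-second timeout are illustrative, and only `NewUnboundedExecutor`, the `HandlePanic` field, `Go`, and `StopAndWait` come from the vendored code above:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/modern-go/concurrent"
)

func main() {
	executor := concurrent.NewUnboundedExecutor()
	// Per-executor override; when left nil, the package-level HandlePanic is used.
	executor.HandlePanic = func(recovered interface{}, funcName string) {
		fmt.Println("recovered from", funcName, ":", recovered)
	}
	executor.Go(func(ctx context.Context) {
		<-ctx.Done()  // unblocks once the executor is stopped
		panic("boom") // recovered by the executor instead of crashing the process
	})

	// Cancel all tracked goroutines and wait for them, but give up after a second.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	executor.StopAndWait(ctx)
}
```

`StopAndWait` polls the per-call-site goroutine counters every 100ms, so this returns as soon as the worker's deferred recover has run, or when the caller's context expires.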
diff --git a/vendor/github.com/modern-go/reflect2/README.md b/vendor/github.com/modern-go/reflect2/README.md new file mode 100644 index 000000000..6f968aab9 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/README.md @@ -0,0 +1,71 @@ +# reflect2 + +[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/reflect2/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/reflect2?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/reflect2) +[![Build Status](https://travis-ci.org/modern-go/reflect2.svg?branch=master)](https://travis-ci.org/modern-go/reflect2) +[![codecov](https://codecov.io/gh/modern-go/reflect2/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/reflect2) +[![rcard](https://goreportcard.com/badge/github.com/modern-go/reflect2)](https://goreportcard.com/report/github.com/modern-go/reflect2) +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/reflect2/master/LICENSE) + +reflect api that avoids runtime reflect.Value cost + +* reflect get/set interface{}, with type checking +* reflect get/set unsafe.Pointer, without type checking +* `reflect2.TypeByName` works like `Class.forName` found in java + +[json-iterator](https://github.com/json-iterator/go) use this package to save runtime dispatching cost. +This package is designed for low level libraries to optimize reflection performance. +General application should still use reflect standard library. + +# reflect2.TypeByName + +```go +// given package is github.com/your/awesome-package +type MyStruct struct { + // ... +} + +// will return the type +reflect2.TypeByName("awesome-package.MyStruct") +// however, if the type has not been used +// it will be eliminated by compiler, so we can not get it in runtime +``` + +# reflect2 get/set interface{} + +```go +valType := reflect2.TypeOf(1) +i := 1 +j := 10 +valType.Set(&i, &j) +// i will be 10 +``` + +to get set `type`, always use its pointer `*type` + +# reflect2 get/set unsafe.Pointer + +```go +valType := reflect2.TypeOf(1) +i := 1 +j := 10 +valType.UnsafeSet(unsafe.Pointer(&i), unsafe.Pointer(&j)) +// i will be 10 +``` + +to get set `type`, always use its pointer `*type` + +# benchmark + +Benchmark is not necessary for this package. It does nothing actually. +As it is just a thin wrapper to make go runtime public. +Both `reflect2` and `reflect` call same function +provided by `runtime` package exposed by go language. + +# unsafe safety + +Instead of casting `[]byte` to `sliceHeader` in your application using unsafe. +We can use reflect2 instead. This way, if `sliceHeader` changes in the future, +only reflect2 need to be upgraded. + +reflect2 tries its best to keep the implementation same as reflect (by testing). 
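reflect2's `Config` (declared in `reflect2.go` later in this patch) can be frozen into either the default unsafe implementation or a `reflect.Value`-backed safe one, exposed as `ConfigUnsafe` and `ConfigSafe`. A self-contained variant of the README's get/set example against both frozen configs; only the exported `TypeOf`/`Set` calls are taken from the package, the variable names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/modern-go/reflect2"
)

func main() {
	// Default API: the package-level TypeOf uses ConfigUnsafe under the hood.
	i, j := 1, 10
	reflect2.TypeOf(i).Set(&i, &j)
	fmt.Println(i) // 10

	// Same operation through the safe implementation, which goes through
	// reflect.Value instead of unsafe pointers.
	k, l := 2, 20
	reflect2.ConfigSafe.TypeOf(k).Set(&k, &l)
	fmt.Println(k) // 20
}
```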
\ No newline at end of file diff --git a/vendor/github.com/modern-go/reflect2/go_above_17.go b/vendor/github.com/modern-go/reflect2/go_above_17.go new file mode 100644 index 000000000..5c1cea868 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_above_17.go @@ -0,0 +1,8 @@ +//+build go1.7 + +package reflect2 + +import "unsafe" + +//go:linkname resolveTypeOff reflect.resolveTypeOff +func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer diff --git a/vendor/github.com/modern-go/reflect2/go_above_19.go b/vendor/github.com/modern-go/reflect2/go_above_19.go new file mode 100644 index 000000000..c7e3b7801 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_above_19.go @@ -0,0 +1,14 @@ +//+build go1.9 + +package reflect2 + +import ( + "unsafe" +) + +//go:linkname makemap reflect.makemap +func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer) + +func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer { + return makemap(rtype, cap) +} diff --git a/vendor/github.com/modern-go/reflect2/go_below_17.go b/vendor/github.com/modern-go/reflect2/go_below_17.go new file mode 100644 index 000000000..65a93c889 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_below_17.go @@ -0,0 +1,9 @@ +//+build !go1.7 + +package reflect2 + +import "unsafe" + +func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { + return nil +} diff --git a/vendor/github.com/modern-go/reflect2/go_below_19.go b/vendor/github.com/modern-go/reflect2/go_below_19.go new file mode 100644 index 000000000..b050ef70c --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_below_19.go @@ -0,0 +1,14 @@ +//+build !go1.9 + +package reflect2 + +import ( + "unsafe" +) + +//go:linkname makemap reflect.makemap +func makemap(rtype unsafe.Pointer) (m unsafe.Pointer) + +func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer { + return makemap(rtype) +} diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go new file mode 100644 index 000000000..63b49c799 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/reflect2.go @@ -0,0 +1,298 @@ +package reflect2 + +import ( + "github.com/modern-go/concurrent" + "reflect" + "unsafe" +) + +type Type interface { + Kind() reflect.Kind + // New return pointer to data of this type + New() interface{} + // UnsafeNew return the allocated space pointed by unsafe.Pointer + UnsafeNew() unsafe.Pointer + // PackEFace cast a unsafe pointer to object represented pointer + PackEFace(ptr unsafe.Pointer) interface{} + // Indirect dereference object represented pointer to this type + Indirect(obj interface{}) interface{} + // UnsafeIndirect dereference pointer to this type + UnsafeIndirect(ptr unsafe.Pointer) interface{} + // Type1 returns reflect.Type + Type1() reflect.Type + Implements(thatType Type) bool + String() string + RType() uintptr + // interface{} of this type has pointer like behavior + LikePtr() bool + IsNullable() bool + IsNil(obj interface{}) bool + UnsafeIsNil(ptr unsafe.Pointer) bool + Set(obj interface{}, val interface{}) + UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) + AssignableTo(anotherType Type) bool +} + +type ListType interface { + Type + Elem() Type + SetIndex(obj interface{}, index int, elem interface{}) + UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) + GetIndex(obj interface{}, index int) interface{} + UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer +} + +type ArrayType interface { + ListType + Len() int +} + +type 
SliceType interface { + ListType + MakeSlice(length int, cap int) interface{} + UnsafeMakeSlice(length int, cap int) unsafe.Pointer + Grow(obj interface{}, newLength int) + UnsafeGrow(ptr unsafe.Pointer, newLength int) + Append(obj interface{}, elem interface{}) + UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) + LengthOf(obj interface{}) int + UnsafeLengthOf(ptr unsafe.Pointer) int + SetNil(obj interface{}) + UnsafeSetNil(ptr unsafe.Pointer) + Cap(obj interface{}) int + UnsafeCap(ptr unsafe.Pointer) int +} + +type StructType interface { + Type + NumField() int + Field(i int) StructField + FieldByName(name string) StructField + FieldByIndex(index []int) StructField + FieldByNameFunc(match func(string) bool) StructField +} + +type StructField interface { + Offset() uintptr + Name() string + PkgPath() string + Type() Type + Tag() reflect.StructTag + Index() []int + Anonymous() bool + Set(obj interface{}, value interface{}) + UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) + Get(obj interface{}) interface{} + UnsafeGet(obj unsafe.Pointer) unsafe.Pointer +} + +type MapType interface { + Type + Key() Type + Elem() Type + MakeMap(cap int) interface{} + UnsafeMakeMap(cap int) unsafe.Pointer + SetIndex(obj interface{}, key interface{}, elem interface{}) + UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) + TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) + GetIndex(obj interface{}, key interface{}) interface{} + UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer + Iterate(obj interface{}) MapIterator + UnsafeIterate(obj unsafe.Pointer) MapIterator +} + +type MapIterator interface { + HasNext() bool + Next() (key interface{}, elem interface{}) + UnsafeNext() (key unsafe.Pointer, elem unsafe.Pointer) +} + +type PtrType interface { + Type + Elem() Type +} + +type InterfaceType interface { + NumMethod() int +} + +type Config struct { + UseSafeImplementation bool +} + +type API interface { + TypeOf(obj interface{}) Type + Type2(type1 reflect.Type) Type +} + +var ConfigUnsafe = Config{UseSafeImplementation: false}.Froze() +var ConfigSafe = Config{UseSafeImplementation: true}.Froze() + +type frozenConfig struct { + useSafeImplementation bool + cache *concurrent.Map +} + +func (cfg Config) Froze() *frozenConfig { + return &frozenConfig{ + useSafeImplementation: cfg.UseSafeImplementation, + cache: concurrent.NewMap(), + } +} + +func (cfg *frozenConfig) TypeOf(obj interface{}) Type { + cacheKey := uintptr(unpackEFace(obj).rtype) + typeObj, found := cfg.cache.Load(cacheKey) + if found { + return typeObj.(Type) + } + return cfg.Type2(reflect.TypeOf(obj)) +} + +func (cfg *frozenConfig) Type2(type1 reflect.Type) Type { + if type1 == nil { + return nil + } + cacheKey := uintptr(unpackEFace(type1).data) + typeObj, found := cfg.cache.Load(cacheKey) + if found { + return typeObj.(Type) + } + type2 := cfg.wrapType(type1) + cfg.cache.Store(cacheKey, type2) + return type2 +} + +func (cfg *frozenConfig) wrapType(type1 reflect.Type) Type { + safeType := safeType{Type: type1, cfg: cfg} + switch type1.Kind() { + case reflect.Struct: + if cfg.useSafeImplementation { + return &safeStructType{safeType} + } + return newUnsafeStructType(cfg, type1) + case reflect.Array: + if cfg.useSafeImplementation { + return &safeSliceType{safeType} + } + return newUnsafeArrayType(cfg, type1) + case reflect.Slice: + if cfg.useSafeImplementation { + return &safeSliceType{safeType} + } + return newUnsafeSliceType(cfg, type1) + case reflect.Map: + if 
cfg.useSafeImplementation { + return &safeMapType{safeType} + } + return newUnsafeMapType(cfg, type1) + case reflect.Ptr, reflect.Chan, reflect.Func: + if cfg.useSafeImplementation { + return &safeMapType{safeType} + } + return newUnsafePtrType(cfg, type1) + case reflect.Interface: + if cfg.useSafeImplementation { + return &safeMapType{safeType} + } + if type1.NumMethod() == 0 { + return newUnsafeEFaceType(cfg, type1) + } + return newUnsafeIFaceType(cfg, type1) + default: + if cfg.useSafeImplementation { + return &safeType + } + return newUnsafeType(cfg, type1) + } +} + +func TypeOf(obj interface{}) Type { + return ConfigUnsafe.TypeOf(obj) +} + +func TypeOfPtr(obj interface{}) PtrType { + return TypeOf(obj).(PtrType) +} + +func Type2(type1 reflect.Type) Type { + if type1 == nil { + return nil + } + return ConfigUnsafe.Type2(type1) +} + +func PtrTo(typ Type) Type { + return Type2(reflect.PtrTo(typ.Type1())) +} + +func PtrOf(obj interface{}) unsafe.Pointer { + return unpackEFace(obj).data +} + +func RTypeOf(obj interface{}) uintptr { + return uintptr(unpackEFace(obj).rtype) +} + +func IsNil(obj interface{}) bool { + if obj == nil { + return true + } + return unpackEFace(obj).data == nil +} + +func IsNullable(kind reflect.Kind) bool { + switch kind { + case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func, reflect.Slice, reflect.Interface: + return true + } + return false +} + +func likePtrKind(kind reflect.Kind) bool { + switch kind { + case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func: + return true + } + return false +} + +func likePtrType(typ reflect.Type) bool { + if likePtrKind(typ.Kind()) { + return true + } + if typ.Kind() == reflect.Struct { + if typ.NumField() != 1 { + return false + } + return likePtrType(typ.Field(0).Type) + } + if typ.Kind() == reflect.Array { + if typ.Len() != 1 { + return false + } + return likePtrType(typ.Elem()) + } + return false +} + +// NoEscape hides a pointer from escape analysis. noescape is +// the identity function but escape analysis doesn't think the +// output depends on the input. noescape is inlined and currently +// compiles down to zero instructions. +// USE CAREFULLY! 
+//go:nosplit +func NoEscape(p unsafe.Pointer) unsafe.Pointer { + x := uintptr(p) + return unsafe.Pointer(x ^ 0) +} + +func UnsafeCastString(str string) []byte { + stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str)) + sliceHeader := &reflect.SliceHeader{ + Data: stringHeader.Data, + Cap: stringHeader.Len, + Len: stringHeader.Len, + } + return *(*[]byte)(unsafe.Pointer(sliceHeader)) +} diff --git a/vendor/github.com/modern-go/reflect2/reflect2_amd64.s b/vendor/github.com/modern-go/reflect2/reflect2_amd64.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/reflect2_kind.go b/vendor/github.com/modern-go/reflect2/reflect2_kind.go new file mode 100644 index 000000000..62f299e40 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/reflect2_kind.go @@ -0,0 +1,30 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +// DefaultTypeOfKind return the non aliased default type for the kind +func DefaultTypeOfKind(kind reflect.Kind) Type { + return kindTypes[kind] +} + +var kindTypes = map[reflect.Kind]Type{ + reflect.Bool: TypeOf(true), + reflect.Uint8: TypeOf(uint8(0)), + reflect.Int8: TypeOf(int8(0)), + reflect.Uint16: TypeOf(uint16(0)), + reflect.Int16: TypeOf(int16(0)), + reflect.Uint32: TypeOf(uint32(0)), + reflect.Int32: TypeOf(int32(0)), + reflect.Uint64: TypeOf(uint64(0)), + reflect.Int64: TypeOf(int64(0)), + reflect.Uint: TypeOf(uint(0)), + reflect.Int: TypeOf(int(0)), + reflect.Float32: TypeOf(float32(0)), + reflect.Float64: TypeOf(float64(0)), + reflect.Uintptr: TypeOf(uintptr(0)), + reflect.String: TypeOf(""), + reflect.UnsafePointer: TypeOf(unsafe.Pointer(nil)), +} diff --git a/vendor/github.com/modern-go/reflect2/relfect2_386.s b/vendor/github.com/modern-go/reflect2/relfect2_386.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s b/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/relfect2_arm.s b/vendor/github.com/modern-go/reflect2/relfect2_arm.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/relfect2_arm64.s b/vendor/github.com/modern-go/reflect2/relfect2_arm64.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s b/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s b/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s b/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/relfect2_s390x.s b/vendor/github.com/modern-go/reflect2/relfect2_s390x.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/safe_field.go b/vendor/github.com/modern-go/reflect2/safe_field.go new file mode 100644 index 000000000..d4ba1f4f8 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/safe_field.go @@ -0,0 +1,58 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type safeField struct { + reflect.StructField +} + +func (field *safeField) Offset() uintptr { + return field.StructField.Offset +} + +func (field *safeField) Name() string { + return field.StructField.Name 
+} + +func (field *safeField) PkgPath() string { + return field.StructField.PkgPath +} + +func (field *safeField) Type() Type { + panic("not implemented") +} + +func (field *safeField) Tag() reflect.StructTag { + return field.StructField.Tag +} + +func (field *safeField) Index() []int { + return field.StructField.Index +} + +func (field *safeField) Anonymous() bool { + return field.StructField.Anonymous +} + +func (field *safeField) Set(obj interface{}, value interface{}) { + val := reflect.ValueOf(obj).Elem() + val.FieldByIndex(field.Index()).Set(reflect.ValueOf(value).Elem()) +} + +func (field *safeField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) { + panic("unsafe operation is not supported") +} + +func (field *safeField) Get(obj interface{}) interface{} { + val := reflect.ValueOf(obj).Elem().FieldByIndex(field.Index()) + ptr := reflect.New(val.Type()) + ptr.Elem().Set(val) + return ptr.Interface() +} + +func (field *safeField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer { + panic("does not support unsafe operation") +} diff --git a/vendor/github.com/modern-go/reflect2/safe_map.go b/vendor/github.com/modern-go/reflect2/safe_map.go new file mode 100644 index 000000000..88362205a --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/safe_map.go @@ -0,0 +1,101 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type safeMapType struct { + safeType +} + +func (type2 *safeMapType) Key() Type { + return type2.safeType.cfg.Type2(type2.Type.Key()) +} + +func (type2 *safeMapType) MakeMap(cap int) interface{} { + ptr := reflect.New(type2.Type) + ptr.Elem().Set(reflect.MakeMap(type2.Type)) + return ptr.Interface() +} + +func (type2 *safeMapType) UnsafeMakeMap(cap int) unsafe.Pointer { + panic("does not support unsafe operation") +} + +func (type2 *safeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) { + keyVal := reflect.ValueOf(key) + elemVal := reflect.ValueOf(elem) + val := reflect.ValueOf(obj) + val.Elem().SetMapIndex(keyVal.Elem(), elemVal.Elem()) +} + +func (type2 *safeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) { + panic("does not support unsafe operation") +} + +func (type2 *safeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) { + keyVal := reflect.ValueOf(key) + if key == nil { + keyVal = reflect.New(type2.Type.Key()).Elem() + } + val := reflect.ValueOf(obj).MapIndex(keyVal) + if !val.IsValid() { + return nil, false + } + return val.Interface(), true +} + +func (type2 *safeMapType) GetIndex(obj interface{}, key interface{}) interface{} { + val := reflect.ValueOf(obj).Elem() + keyVal := reflect.ValueOf(key).Elem() + elemVal := val.MapIndex(keyVal) + if !elemVal.IsValid() { + ptr := reflect.New(reflect.PtrTo(val.Type().Elem())) + return ptr.Elem().Interface() + } + ptr := reflect.New(elemVal.Type()) + ptr.Elem().Set(elemVal) + return ptr.Interface() +} + +func (type2 *safeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer { + panic("does not support unsafe operation") +} + +func (type2 *safeMapType) Iterate(obj interface{}) MapIterator { + m := reflect.ValueOf(obj).Elem() + return &safeMapIterator{ + m: m, + keys: m.MapKeys(), + } +} + +func (type2 *safeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { + panic("does not support unsafe operation") +} + +type safeMapIterator struct { + i int + m reflect.Value + keys []reflect.Value +} + +func (iter *safeMapIterator) HasNext() bool { + return iter.i != len(iter.keys) +} + +func (iter 
*safeMapIterator) Next() (interface{}, interface{}) { + key := iter.keys[iter.i] + elem := iter.m.MapIndex(key) + iter.i += 1 + keyPtr := reflect.New(key.Type()) + keyPtr.Elem().Set(key) + elemPtr := reflect.New(elem.Type()) + elemPtr.Elem().Set(elem) + return keyPtr.Interface(), elemPtr.Interface() +} + +func (iter *safeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) { + panic("does not support unsafe operation") +} diff --git a/vendor/github.com/modern-go/reflect2/safe_slice.go b/vendor/github.com/modern-go/reflect2/safe_slice.go new file mode 100644 index 000000000..bcce6fd20 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/safe_slice.go @@ -0,0 +1,92 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type safeSliceType struct { + safeType +} + +func (type2 *safeSliceType) SetIndex(obj interface{}, index int, value interface{}) { + val := reflect.ValueOf(obj).Elem() + elem := reflect.ValueOf(value).Elem() + val.Index(index).Set(elem) +} + +func (type2 *safeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, value unsafe.Pointer) { + panic("does not support unsafe operation") +} + +func (type2 *safeSliceType) GetIndex(obj interface{}, index int) interface{} { + val := reflect.ValueOf(obj).Elem() + elem := val.Index(index) + ptr := reflect.New(elem.Type()) + ptr.Elem().Set(elem) + return ptr.Interface() +} + +func (type2 *safeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer { + panic("does not support unsafe operation") +} + +func (type2 *safeSliceType) MakeSlice(length int, cap int) interface{} { + val := reflect.MakeSlice(type2.Type, length, cap) + ptr := reflect.New(val.Type()) + ptr.Elem().Set(val) + return ptr.Interface() +} + +func (type2 *safeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer { + panic("does not support unsafe operation") +} + +func (type2 *safeSliceType) Grow(obj interface{}, newLength int) { + oldCap := type2.Cap(obj) + oldSlice := reflect.ValueOf(obj).Elem() + delta := newLength - oldCap + deltaVals := make([]reflect.Value, delta) + newSlice := reflect.Append(oldSlice, deltaVals...) 
+ oldSlice.Set(newSlice) +} + +func (type2 *safeSliceType) UnsafeGrow(ptr unsafe.Pointer, newLength int) { + panic("does not support unsafe operation") +} + +func (type2 *safeSliceType) Append(obj interface{}, elem interface{}) { + val := reflect.ValueOf(obj).Elem() + elemVal := reflect.ValueOf(elem).Elem() + newVal := reflect.Append(val, elemVal) + val.Set(newVal) +} + +func (type2 *safeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) { + panic("does not support unsafe operation") +} + +func (type2 *safeSliceType) SetNil(obj interface{}) { + val := reflect.ValueOf(obj).Elem() + val.Set(reflect.Zero(val.Type())) +} + +func (type2 *safeSliceType) UnsafeSetNil(ptr unsafe.Pointer) { + panic("does not support unsafe operation") +} + +func (type2 *safeSliceType) LengthOf(obj interface{}) int { + return reflect.ValueOf(obj).Elem().Len() +} + +func (type2 *safeSliceType) UnsafeLengthOf(ptr unsafe.Pointer) int { + panic("does not support unsafe operation") +} + +func (type2 *safeSliceType) Cap(obj interface{}) int { + return reflect.ValueOf(obj).Elem().Cap() +} + +func (type2 *safeSliceType) UnsafeCap(ptr unsafe.Pointer) int { + panic("does not support unsafe operation") +} diff --git a/vendor/github.com/modern-go/reflect2/safe_struct.go b/vendor/github.com/modern-go/reflect2/safe_struct.go new file mode 100644 index 000000000..e5fb9b313 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/safe_struct.go @@ -0,0 +1,29 @@ +package reflect2 + +type safeStructType struct { + safeType +} + +func (type2 *safeStructType) FieldByName(name string) StructField { + field, found := type2.Type.FieldByName(name) + if !found { + panic("field " + name + " not found") + } + return &safeField{StructField: field} +} + +func (type2 *safeStructType) Field(i int) StructField { + return &safeField{StructField: type2.Type.Field(i)} +} + +func (type2 *safeStructType) FieldByIndex(index []int) StructField { + return &safeField{StructField: type2.Type.FieldByIndex(index)} +} + +func (type2 *safeStructType) FieldByNameFunc(match func(string) bool) StructField { + field, found := type2.Type.FieldByNameFunc(match) + if !found { + panic("field match condition not found in " + type2.Type.String()) + } + return &safeField{StructField: field} +} diff --git a/vendor/github.com/modern-go/reflect2/safe_type.go b/vendor/github.com/modern-go/reflect2/safe_type.go new file mode 100644 index 000000000..ee4e7bb6e --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/safe_type.go @@ -0,0 +1,78 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type safeType struct { + reflect.Type + cfg *frozenConfig +} + +func (type2 *safeType) New() interface{} { + return reflect.New(type2.Type).Interface() +} + +func (type2 *safeType) UnsafeNew() unsafe.Pointer { + panic("does not support unsafe operation") +} + +func (type2 *safeType) Elem() Type { + return type2.cfg.Type2(type2.Type.Elem()) +} + +func (type2 *safeType) Type1() reflect.Type { + return type2.Type +} + +func (type2 *safeType) PackEFace(ptr unsafe.Pointer) interface{} { + panic("does not support unsafe operation") +} + +func (type2 *safeType) Implements(thatType Type) bool { + return type2.Type.Implements(thatType.Type1()) +} + +func (type2 *safeType) RType() uintptr { + panic("does not support unsafe operation") +} + +func (type2 *safeType) Indirect(obj interface{}) interface{} { + return reflect.Indirect(reflect.ValueOf(obj)).Interface() +} + +func (type2 *safeType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { + panic("does not support unsafe 
operation") +} + +func (type2 *safeType) LikePtr() bool { + panic("does not support unsafe operation") +} + +func (type2 *safeType) IsNullable() bool { + return IsNullable(type2.Kind()) +} + +func (type2 *safeType) IsNil(obj interface{}) bool { + if obj == nil { + return true + } + return reflect.ValueOf(obj).Elem().IsNil() +} + +func (type2 *safeType) UnsafeIsNil(ptr unsafe.Pointer) bool { + panic("does not support unsafe operation") +} + +func (type2 *safeType) Set(obj interface{}, val interface{}) { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(val).Elem()) +} + +func (type2 *safeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) { + panic("does not support unsafe operation") +} + +func (type2 *safeType) AssignableTo(anotherType Type) bool { + return type2.Type1().AssignableTo(anotherType.Type1()) +} diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh new file mode 100755 index 000000000..3d2b9768c --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list github.com/modern-go/reflect2-tests/... | grep -v vendor); do + go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go new file mode 100644 index 000000000..6d489112f --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/type_map.go @@ -0,0 +1,103 @@ +package reflect2 + +import ( + "reflect" + "runtime" + "strings" + "unsafe" +) + +// typelinks1 for 1.5 ~ 1.6 +//go:linkname typelinks1 reflect.typelinks +func typelinks1() [][]unsafe.Pointer + +// typelinks2 for 1.7 ~ +//go:linkname typelinks2 reflect.typelinks +func typelinks2() (sections []unsafe.Pointer, offset [][]int32) + +var types = map[string]reflect.Type{} +var packages = map[string]map[string]reflect.Type{} + +func init() { + ver := runtime.Version() + if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") { + loadGo15Types() + } else if ver == "go1.6" || strings.HasPrefix(ver, "go1.6.") { + loadGo15Types() + } else { + loadGo17Types() + } +} + +func loadGo15Types() { + var obj interface{} = reflect.TypeOf(0) + typePtrss := typelinks1() + for _, typePtrs := range typePtrss { + for _, typePtr := range typePtrs { + (*emptyInterface)(unsafe.Pointer(&obj)).word = typePtr + typ := obj.(reflect.Type) + if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct { + loadedType := typ.Elem() + pkgTypes := packages[loadedType.PkgPath()] + if pkgTypes == nil { + pkgTypes = map[string]reflect.Type{} + packages[loadedType.PkgPath()] = pkgTypes + } + types[loadedType.String()] = loadedType + pkgTypes[loadedType.Name()] = loadedType + } + if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Ptr && + typ.Elem().Elem().Kind() == reflect.Struct { + loadedType := typ.Elem().Elem() + pkgTypes := packages[loadedType.PkgPath()] + if pkgTypes == nil { + pkgTypes = map[string]reflect.Type{} + packages[loadedType.PkgPath()] = pkgTypes + } + types[loadedType.String()] = loadedType + pkgTypes[loadedType.Name()] = loadedType + } + } + } +} + +func loadGo17Types() { + var obj interface{} = reflect.TypeOf(0) + sections, offset := typelinks2() + for i, offs := range offset { + rodata := sections[i] + for _, off := range offs { + (*emptyInterface)(unsafe.Pointer(&obj)).word = 
resolveTypeOff(unsafe.Pointer(rodata), off) + typ := obj.(reflect.Type) + if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct { + loadedType := typ.Elem() + pkgTypes := packages[loadedType.PkgPath()] + if pkgTypes == nil { + pkgTypes = map[string]reflect.Type{} + packages[loadedType.PkgPath()] = pkgTypes + } + types[loadedType.String()] = loadedType + pkgTypes[loadedType.Name()] = loadedType + } + } + } +} + +type emptyInterface struct { + typ unsafe.Pointer + word unsafe.Pointer +} + +// TypeByName return the type by its name, just like Class.forName in java +func TypeByName(typeName string) Type { + return Type2(types[typeName]) +} + +// TypeByPackageName return the type by its package and name +func TypeByPackageName(pkgPath string, name string) Type { + pkgTypes := packages[pkgPath] + if pkgTypes == nil { + return nil + } + return Type2(pkgTypes[name]) +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_array.go b/vendor/github.com/modern-go/reflect2/unsafe_array.go new file mode 100644 index 000000000..76cbdba6e --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_array.go @@ -0,0 +1,65 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type UnsafeArrayType struct { + unsafeType + elemRType unsafe.Pointer + pElemRType unsafe.Pointer + elemSize uintptr + likePtr bool +} + +func newUnsafeArrayType(cfg *frozenConfig, type1 reflect.Type) *UnsafeArrayType { + return &UnsafeArrayType{ + unsafeType: *newUnsafeType(cfg, type1), + elemRType: unpackEFace(type1.Elem()).data, + pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data, + elemSize: type1.Elem().Size(), + likePtr: likePtrType(type1), + } +} + +func (type2 *UnsafeArrayType) LikePtr() bool { + return type2.likePtr +} + +func (type2 *UnsafeArrayType) Indirect(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIndirect(objEFace.data) +} + +func (type2 *UnsafeArrayType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { + if type2.likePtr { + return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr)) + } + return packEFace(type2.rtype, ptr) +} + +func (type2 *UnsafeArrayType) SetIndex(obj interface{}, index int, elem interface{}) { + objEFace := unpackEFace(obj) + assertType("ArrayType.SetIndex argument 1", type2.ptrRType, objEFace.rtype) + elemEFace := unpackEFace(elem) + assertType("ArrayType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype) + type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data) +} + +func (type2 *UnsafeArrayType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) { + elemPtr := arrayAt(obj, index, type2.elemSize, "i < s.Len") + typedmemmove(type2.elemRType, elemPtr, elem) +} + +func (type2 *UnsafeArrayType) GetIndex(obj interface{}, index int) interface{} { + objEFace := unpackEFace(obj) + assertType("ArrayType.GetIndex argument 1", type2.ptrRType, objEFace.rtype) + elemPtr := type2.UnsafeGetIndex(objEFace.data, index) + return packEFace(type2.pElemRType, elemPtr) +} + +func (type2 *UnsafeArrayType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer { + return arrayAt(obj, index, type2.elemSize, "i < s.Len") +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_eface.go b/vendor/github.com/modern-go/reflect2/unsafe_eface.go new file mode 100644 index 000000000..805010f3a --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_eface.go @@ -0,0 +1,59 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type eface 
struct { + rtype unsafe.Pointer + data unsafe.Pointer +} + +func unpackEFace(obj interface{}) *eface { + return (*eface)(unsafe.Pointer(&obj)) +} + +func packEFace(rtype unsafe.Pointer, data unsafe.Pointer) interface{} { + var i interface{} + e := (*eface)(unsafe.Pointer(&i)) + e.rtype = rtype + e.data = data + return i +} + +type UnsafeEFaceType struct { + unsafeType +} + +func newUnsafeEFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeEFaceType { + return &UnsafeEFaceType{ + unsafeType: *newUnsafeType(cfg, type1), + } +} + +func (type2 *UnsafeEFaceType) IsNil(obj interface{}) bool { + if obj == nil { + return true + } + objEFace := unpackEFace(obj) + assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIsNil(objEFace.data) +} + +func (type2 *UnsafeEFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool { + if ptr == nil { + return true + } + return unpackEFace(*(*interface{})(ptr)).data == nil +} + +func (type2 *UnsafeEFaceType) Indirect(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIndirect(objEFace.data) +} + +func (type2 *UnsafeEFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { + return *(*interface{})(ptr) +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_field.go b/vendor/github.com/modern-go/reflect2/unsafe_field.go new file mode 100644 index 000000000..5eb53130a --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_field.go @@ -0,0 +1,74 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type UnsafeStructField struct { + reflect.StructField + structType *UnsafeStructType + rtype unsafe.Pointer + ptrRType unsafe.Pointer +} + +func newUnsafeStructField(structType *UnsafeStructType, structField reflect.StructField) *UnsafeStructField { + return &UnsafeStructField{ + StructField: structField, + rtype: unpackEFace(structField.Type).data, + ptrRType: unpackEFace(reflect.PtrTo(structField.Type)).data, + structType: structType, + } +} + +func (field *UnsafeStructField) Offset() uintptr { + return field.StructField.Offset +} + +func (field *UnsafeStructField) Name() string { + return field.StructField.Name +} + +func (field *UnsafeStructField) PkgPath() string { + return field.StructField.PkgPath +} + +func (field *UnsafeStructField) Type() Type { + return field.structType.cfg.Type2(field.StructField.Type) +} + +func (field *UnsafeStructField) Tag() reflect.StructTag { + return field.StructField.Tag +} + +func (field *UnsafeStructField) Index() []int { + return field.StructField.Index +} + +func (field *UnsafeStructField) Anonymous() bool { + return field.StructField.Anonymous +} + +func (field *UnsafeStructField) Set(obj interface{}, value interface{}) { + objEFace := unpackEFace(obj) + assertType("StructField.SetIndex argument 1", field.structType.ptrRType, objEFace.rtype) + valueEFace := unpackEFace(value) + assertType("StructField.SetIndex argument 2", field.ptrRType, valueEFace.rtype) + field.UnsafeSet(objEFace.data, valueEFace.data) +} + +func (field *UnsafeStructField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) { + fieldPtr := add(obj, field.StructField.Offset, "same as non-reflect &v.field") + typedmemmove(field.rtype, fieldPtr, value) +} + +func (field *UnsafeStructField) Get(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("StructField.GetIndex argument 1", field.structType.ptrRType, objEFace.rtype) + value := field.UnsafeGet(objEFace.data) + return 
packEFace(field.ptrRType, value) +} + +func (field *UnsafeStructField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer { + return add(obj, field.StructField.Offset, "same as non-reflect &v.field") +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_iface.go b/vendor/github.com/modern-go/reflect2/unsafe_iface.go new file mode 100644 index 000000000..b60195533 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_iface.go @@ -0,0 +1,64 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type iface struct { + itab *itab + data unsafe.Pointer +} + +type itab struct { + ignore unsafe.Pointer + rtype unsafe.Pointer +} + +func IFaceToEFace(ptr unsafe.Pointer) interface{} { + iface := (*iface)(ptr) + if iface.itab == nil { + return nil + } + return packEFace(iface.itab.rtype, iface.data) +} + +type UnsafeIFaceType struct { + unsafeType +} + +func newUnsafeIFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeIFaceType { + return &UnsafeIFaceType{ + unsafeType: *newUnsafeType(cfg, type1), + } +} + +func (type2 *UnsafeIFaceType) Indirect(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIndirect(objEFace.data) +} + +func (type2 *UnsafeIFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { + return IFaceToEFace(ptr) +} + +func (type2 *UnsafeIFaceType) IsNil(obj interface{}) bool { + if obj == nil { + return true + } + objEFace := unpackEFace(obj) + assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIsNil(objEFace.data) +} + +func (type2 *UnsafeIFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool { + if ptr == nil { + return true + } + iface := (*iface)(ptr) + if iface.itab == nil { + return true + } + return false +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_link.go b/vendor/github.com/modern-go/reflect2/unsafe_link.go new file mode 100644 index 000000000..57229c8db --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_link.go @@ -0,0 +1,70 @@ +package reflect2 + +import "unsafe" + +//go:linkname unsafe_New reflect.unsafe_New +func unsafe_New(rtype unsafe.Pointer) unsafe.Pointer + +//go:linkname typedmemmove reflect.typedmemmove +func typedmemmove(rtype unsafe.Pointer, dst, src unsafe.Pointer) + +//go:linkname unsafe_NewArray reflect.unsafe_NewArray +func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer + +// typedslicecopy copies a slice of elemType values from src to dst, +// returning the number of elements copied. +//go:linkname typedslicecopy reflect.typedslicecopy +//go:noescape +func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int + +//go:linkname mapassign reflect.mapassign +//go:noescape +func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key, val unsafe.Pointer) + +//go:linkname mapaccess reflect.mapaccess +//go:noescape +func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer) + +// m escapes into the return value, but the caller of mapiterinit +// doesn't let the return value escape. +//go:noescape +//go:linkname mapiterinit reflect.mapiterinit +func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) *hiter + +//go:noescape +//go:linkname mapiternext reflect.mapiternext +func mapiternext(it *hiter) + +//go:linkname ifaceE2I reflect.ifaceE2I +func ifaceE2I(rtype unsafe.Pointer, src interface{}, dst unsafe.Pointer) + +// A hash iteration structure. 
+// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate +// the layout of this structure. +type hiter struct { + key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go). + value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go). + // rest fields are ignored +} + +// add returns p+x. +// +// The whySafe string is ignored, so that the function still inlines +// as efficiently as p+x, but all call sites should use the string to +// record why the addition is safe, which is to say why the addition +// does not cause x to advance to the very end of p's allocation +// and therefore point incorrectly at the next block in memory. +func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer { + return unsafe.Pointer(uintptr(p) + x) +} + +// arrayAt returns the i-th element of p, +// an array whose elements are eltSize bytes wide. +// The array pointed at by p must have at least i+1 elements: +// it is invalid (but impossible to check here) to pass i >= len, +// because then the result will point outside the array. +// whySafe must explain why i < len. (Passing "i < len" is fine; +// the benefit is to surface this assumption at the call site.) +func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer { + return add(p, uintptr(i)*eltSize, "i < len") +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_map.go b/vendor/github.com/modern-go/reflect2/unsafe_map.go new file mode 100644 index 000000000..f2e76e6bb --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_map.go @@ -0,0 +1,138 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type UnsafeMapType struct { + unsafeType + pKeyRType unsafe.Pointer + pElemRType unsafe.Pointer +} + +func newUnsafeMapType(cfg *frozenConfig, type1 reflect.Type) MapType { + return &UnsafeMapType{ + unsafeType: *newUnsafeType(cfg, type1), + pKeyRType: unpackEFace(reflect.PtrTo(type1.Key())).data, + pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data, + } +} + +func (type2 *UnsafeMapType) IsNil(obj interface{}) bool { + if obj == nil { + return true + } + objEFace := unpackEFace(obj) + assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIsNil(objEFace.data) +} + +func (type2 *UnsafeMapType) UnsafeIsNil(ptr unsafe.Pointer) bool { + if ptr == nil { + return true + } + return *(*unsafe.Pointer)(ptr) == nil +} + +func (type2 *UnsafeMapType) LikePtr() bool { + return true +} + +func (type2 *UnsafeMapType) Indirect(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("MapType.Indirect argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIndirect(objEFace.data) +} + +func (type2 *UnsafeMapType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { + return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr)) +} + +func (type2 *UnsafeMapType) Key() Type { + return type2.cfg.Type2(type2.Type.Key()) +} + +func (type2 *UnsafeMapType) MakeMap(cap int) interface{} { + return packEFace(type2.ptrRType, type2.UnsafeMakeMap(cap)) +} + +func (type2 *UnsafeMapType) UnsafeMakeMap(cap int) unsafe.Pointer { + m := makeMapWithSize(type2.rtype, cap) + return unsafe.Pointer(&m) +} + +func (type2 *UnsafeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) { + objEFace := unpackEFace(obj) + assertType("MapType.SetIndex argument 1", type2.ptrRType, objEFace.rtype) + keyEFace := unpackEFace(key) + assertType("MapType.SetIndex argument 2", 
type2.pKeyRType, keyEFace.rtype) + elemEFace := unpackEFace(elem) + assertType("MapType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype) + type2.UnsafeSetIndex(objEFace.data, keyEFace.data, elemEFace.data) +} + +func (type2 *UnsafeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) { + mapassign(type2.rtype, *(*unsafe.Pointer)(obj), key, elem) +} + +func (type2 *UnsafeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) { + objEFace := unpackEFace(obj) + assertType("MapType.TryGetIndex argument 1", type2.ptrRType, objEFace.rtype) + keyEFace := unpackEFace(key) + assertType("MapType.TryGetIndex argument 2", type2.pKeyRType, keyEFace.rtype) + elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data) + if elemPtr == nil { + return nil, false + } + return packEFace(type2.pElemRType, elemPtr), true +} + +func (type2 *UnsafeMapType) GetIndex(obj interface{}, key interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("MapType.GetIndex argument 1", type2.ptrRType, objEFace.rtype) + keyEFace := unpackEFace(key) + assertType("MapType.GetIndex argument 2", type2.pKeyRType, keyEFace.rtype) + elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data) + return packEFace(type2.pElemRType, elemPtr) +} + +func (type2 *UnsafeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer { + return mapaccess(type2.rtype, *(*unsafe.Pointer)(obj), key) +} + +func (type2 *UnsafeMapType) Iterate(obj interface{}) MapIterator { + objEFace := unpackEFace(obj) + assertType("MapType.Iterate argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIterate(objEFace.data) +} + +func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { + return &UnsafeMapIterator{ + hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)), + pKeyRType: type2.pKeyRType, + pElemRType: type2.pElemRType, + } +} + +type UnsafeMapIterator struct { + *hiter + pKeyRType unsafe.Pointer + pElemRType unsafe.Pointer +} + +func (iter *UnsafeMapIterator) HasNext() bool { + return iter.key != nil +} + +func (iter *UnsafeMapIterator) Next() (interface{}, interface{}) { + key, elem := iter.UnsafeNext() + return packEFace(iter.pKeyRType, key), packEFace(iter.pElemRType, elem) +} + +func (iter *UnsafeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) { + key := iter.key + elem := iter.value + mapiternext(iter.hiter) + return key, elem +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_ptr.go b/vendor/github.com/modern-go/reflect2/unsafe_ptr.go new file mode 100644 index 000000000..8e5ec9cf4 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_ptr.go @@ -0,0 +1,46 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type UnsafePtrType struct { + unsafeType +} + +func newUnsafePtrType(cfg *frozenConfig, type1 reflect.Type) *UnsafePtrType { + return &UnsafePtrType{ + unsafeType: *newUnsafeType(cfg, type1), + } +} + +func (type2 *UnsafePtrType) IsNil(obj interface{}) bool { + if obj == nil { + return true + } + objEFace := unpackEFace(obj) + assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIsNil(objEFace.data) +} + +func (type2 *UnsafePtrType) UnsafeIsNil(ptr unsafe.Pointer) bool { + if ptr == nil { + return true + } + return *(*unsafe.Pointer)(ptr) == nil +} + +func (type2 *UnsafePtrType) LikePtr() bool { + return true +} + +func (type2 *UnsafePtrType) Indirect(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("Type.Indirect 
argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIndirect(objEFace.data) +} + +func (type2 *UnsafePtrType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { + return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr)) +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_slice.go b/vendor/github.com/modern-go/reflect2/unsafe_slice.go new file mode 100644 index 000000000..1c6d876c7 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_slice.go @@ -0,0 +1,177 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +// sliceHeader is a safe version of SliceHeader used within this package. +type sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int +} + +type UnsafeSliceType struct { + unsafeType + elemRType unsafe.Pointer + pElemRType unsafe.Pointer + elemSize uintptr +} + +func newUnsafeSliceType(cfg *frozenConfig, type1 reflect.Type) SliceType { + elemType := type1.Elem() + return &UnsafeSliceType{ + unsafeType: *newUnsafeType(cfg, type1), + pElemRType: unpackEFace(reflect.PtrTo(elemType)).data, + elemRType: unpackEFace(elemType).data, + elemSize: elemType.Size(), + } +} + +func (type2 *UnsafeSliceType) Set(obj interface{}, val interface{}) { + objEFace := unpackEFace(obj) + assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype) + valEFace := unpackEFace(val) + assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype) + type2.UnsafeSet(objEFace.data, valEFace.data) +} + +func (type2 *UnsafeSliceType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) { + *(*sliceHeader)(ptr) = *(*sliceHeader)(val) +} + +func (type2 *UnsafeSliceType) IsNil(obj interface{}) bool { + if obj == nil { + return true + } + objEFace := unpackEFace(obj) + assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIsNil(objEFace.data) +} + +func (type2 *UnsafeSliceType) UnsafeIsNil(ptr unsafe.Pointer) bool { + if ptr == nil { + return true + } + return (*sliceHeader)(ptr).Data == nil +} + +func (type2 *UnsafeSliceType) SetNil(obj interface{}) { + objEFace := unpackEFace(obj) + assertType("SliceType.SetNil argument 1", type2.ptrRType, objEFace.rtype) + type2.UnsafeSetNil(objEFace.data) +} + +func (type2 *UnsafeSliceType) UnsafeSetNil(ptr unsafe.Pointer) { + header := (*sliceHeader)(ptr) + header.Len = 0 + header.Cap = 0 + header.Data = nil +} + +func (type2 *UnsafeSliceType) MakeSlice(length int, cap int) interface{} { + return packEFace(type2.ptrRType, type2.UnsafeMakeSlice(length, cap)) +} + +func (type2 *UnsafeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer { + header := &sliceHeader{unsafe_NewArray(type2.elemRType, cap), length, cap} + return unsafe.Pointer(header) +} + +func (type2 *UnsafeSliceType) LengthOf(obj interface{}) int { + objEFace := unpackEFace(obj) + assertType("SliceType.Len argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeLengthOf(objEFace.data) +} + +func (type2 *UnsafeSliceType) UnsafeLengthOf(obj unsafe.Pointer) int { + header := (*sliceHeader)(obj) + return header.Len +} + +func (type2 *UnsafeSliceType) SetIndex(obj interface{}, index int, elem interface{}) { + objEFace := unpackEFace(obj) + assertType("SliceType.SetIndex argument 1", type2.ptrRType, objEFace.rtype) + elemEFace := unpackEFace(elem) + assertType("SliceType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype) + type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data) +} + +func (type2 *UnsafeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) { + header := (*sliceHeader)(obj) + 
elemPtr := arrayAt(header.Data, index, type2.elemSize, "i < s.Len") + typedmemmove(type2.elemRType, elemPtr, elem) +} + +func (type2 *UnsafeSliceType) GetIndex(obj interface{}, index int) interface{} { + objEFace := unpackEFace(obj) + assertType("SliceType.GetIndex argument 1", type2.ptrRType, objEFace.rtype) + elemPtr := type2.UnsafeGetIndex(objEFace.data, index) + return packEFace(type2.pElemRType, elemPtr) +} + +func (type2 *UnsafeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer { + header := (*sliceHeader)(obj) + return arrayAt(header.Data, index, type2.elemSize, "i < s.Len") +} + +func (type2 *UnsafeSliceType) Append(obj interface{}, elem interface{}) { + objEFace := unpackEFace(obj) + assertType("SliceType.Append argument 1", type2.ptrRType, objEFace.rtype) + elemEFace := unpackEFace(elem) + assertType("SliceType.Append argument 2", type2.pElemRType, elemEFace.rtype) + type2.UnsafeAppend(objEFace.data, elemEFace.data) +} + +func (type2 *UnsafeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) { + header := (*sliceHeader)(obj) + oldLen := header.Len + type2.UnsafeGrow(obj, oldLen+1) + type2.UnsafeSetIndex(obj, oldLen, elem) +} + +func (type2 *UnsafeSliceType) Cap(obj interface{}) int { + objEFace := unpackEFace(obj) + assertType("SliceType.Cap argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeCap(objEFace.data) +} + +func (type2 *UnsafeSliceType) UnsafeCap(ptr unsafe.Pointer) int { + return (*sliceHeader)(ptr).Cap +} + +func (type2 *UnsafeSliceType) Grow(obj interface{}, newLength int) { + objEFace := unpackEFace(obj) + assertType("SliceType.Grow argument 1", type2.ptrRType, objEFace.rtype) + type2.UnsafeGrow(objEFace.data, newLength) +} + +func (type2 *UnsafeSliceType) UnsafeGrow(obj unsafe.Pointer, newLength int) { + header := (*sliceHeader)(obj) + if newLength <= header.Cap { + header.Len = newLength + return + } + newCap := calcNewCap(header.Cap, newLength) + newHeader := (*sliceHeader)(type2.UnsafeMakeSlice(header.Len, newCap)) + typedslicecopy(type2.elemRType, *newHeader, *header) + header.Data = newHeader.Data + header.Cap = newHeader.Cap + header.Len = newLength +} + +func calcNewCap(cap int, expectedCap int) int { + if cap == 0 { + cap = expectedCap + } else { + for cap < expectedCap { + if cap < 1024 { + cap += cap + } else { + cap += cap / 4 + } + } + } + return cap +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_struct.go b/vendor/github.com/modern-go/reflect2/unsafe_struct.go new file mode 100644 index 000000000..804d91663 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_struct.go @@ -0,0 +1,59 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type UnsafeStructType struct { + unsafeType + likePtr bool +} + +func newUnsafeStructType(cfg *frozenConfig, type1 reflect.Type) *UnsafeStructType { + return &UnsafeStructType{ + unsafeType: *newUnsafeType(cfg, type1), + likePtr: likePtrType(type1), + } +} + +func (type2 *UnsafeStructType) LikePtr() bool { + return type2.likePtr +} + +func (type2 *UnsafeStructType) Indirect(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIndirect(objEFace.data) +} + +func (type2 *UnsafeStructType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { + if type2.likePtr { + return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr)) + } + return packEFace(type2.rtype, ptr) +} + +func (type2 *UnsafeStructType) FieldByName(name string) StructField { + structField, 
found := type2.Type.FieldByName(name) + if !found { + return nil + } + return newUnsafeStructField(type2, structField) +} + +func (type2 *UnsafeStructType) Field(i int) StructField { + return newUnsafeStructField(type2, type2.Type.Field(i)) +} + +func (type2 *UnsafeStructType) FieldByIndex(index []int) StructField { + return newUnsafeStructField(type2, type2.Type.FieldByIndex(index)) +} + +func (type2 *UnsafeStructType) FieldByNameFunc(match func(string) bool) StructField { + structField, found := type2.Type.FieldByNameFunc(match) + if !found { + panic("field match condition not found in " + type2.Type.String()) + } + return newUnsafeStructField(type2, structField) +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_type.go b/vendor/github.com/modern-go/reflect2/unsafe_type.go new file mode 100644 index 000000000..13941716c --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_type.go @@ -0,0 +1,85 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type unsafeType struct { + safeType + rtype unsafe.Pointer + ptrRType unsafe.Pointer +} + +func newUnsafeType(cfg *frozenConfig, type1 reflect.Type) *unsafeType { + return &unsafeType{ + safeType: safeType{ + Type: type1, + cfg: cfg, + }, + rtype: unpackEFace(type1).data, + ptrRType: unpackEFace(reflect.PtrTo(type1)).data, + } +} + +func (type2 *unsafeType) Set(obj interface{}, val interface{}) { + objEFace := unpackEFace(obj) + assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype) + valEFace := unpackEFace(val) + assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype) + type2.UnsafeSet(objEFace.data, valEFace.data) +} + +func (type2 *unsafeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) { + typedmemmove(type2.rtype, ptr, val) +} + +func (type2 *unsafeType) IsNil(obj interface{}) bool { + objEFace := unpackEFace(obj) + assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIsNil(objEFace.data) +} + +func (type2 *unsafeType) UnsafeIsNil(ptr unsafe.Pointer) bool { + return ptr == nil +} + +func (type2 *unsafeType) UnsafeNew() unsafe.Pointer { + return unsafe_New(type2.rtype) +} + +func (type2 *unsafeType) New() interface{} { + return packEFace(type2.ptrRType, type2.UnsafeNew()) +} + +func (type2 *unsafeType) PackEFace(ptr unsafe.Pointer) interface{} { + return packEFace(type2.ptrRType, ptr) +} + +func (type2 *unsafeType) RType() uintptr { + return uintptr(type2.rtype) +} + +func (type2 *unsafeType) Indirect(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIndirect(objEFace.data) +} + +func (type2 *unsafeType) UnsafeIndirect(obj unsafe.Pointer) interface{} { + return packEFace(type2.rtype, obj) +} + +func (type2 *unsafeType) LikePtr() bool { + return false +} + +func assertType(where string, expectRType unsafe.Pointer, actualRType unsafe.Pointer) { + if expectRType != actualRType { + expectType := reflect.TypeOf(0) + (*iface)(unsafe.Pointer(&expectType)).data = expectRType + actualType := reflect.TypeOf(0) + (*iface)(unsafe.Pointer(&actualType)).data = actualRType + panic(where + ": expect " + expectType.String() + ", actual " + actualType.String()) + } +} diff --git a/vendor/github.com/peterbourgon/diskv/LICENSE b/vendor/github.com/peterbourgon/diskv/LICENSE new file mode 100644 index 000000000..41ce7f16e --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2011-2012 Peter Bourgon + +Permission is hereby 
granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/peterbourgon/diskv/README.md b/vendor/github.com/peterbourgon/diskv/README.md new file mode 100644 index 000000000..c6581e096 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/README.md @@ -0,0 +1,191 @@ +# What is diskv? + +Diskv (disk-vee) is a simple, persistent key-value store written in the Go +language. It starts with an incredibly simple API for storing arbitrary data on +a filesystem by key, and builds several layers of performance-enhancing +abstraction on top. The end result is a conceptually simple, but highly +performant, disk-backed storage system. + +[![Build Status][1]][2] + +[1]: https://drone.io/github.com/peterbourgon/diskv/status.png +[2]: https://drone.io/github.com/peterbourgon/diskv/latest + + +# Installing + +Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5]. +Then, + +```bash +$ go get github.com/peterbourgon/diskv +``` + +[3]: http://golang.org +[4]: http://golang.org/doc/install/source +[5]: http://golang.org/doc/install + + +# Usage + +```go +package main + +import ( + "fmt" + "github.com/peterbourgon/diskv" +) + +func main() { + // Simplest transform function: put all the data files into the base dir. + flatTransform := func(s string) []string { return []string{} } + + // Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache. + d := diskv.New(diskv.Options{ + BasePath: "my-data-dir", + Transform: flatTransform, + CacheSizeMax: 1024 * 1024, + }) + + // Write three bytes to the key "alpha". + key := "alpha" + d.Write(key, []byte{'1', '2', '3'}) + + // Read the value back out of the store. + value, _ := d.Read(key) + fmt.Printf("%v\n", value) + + // Erase the key+value from the store (and the disk). + d.Erase(key) +} +``` + +More complex examples can be found in the "examples" subdirectory. + + +# Theory + +## Basic idea + +At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`). +The data is written to a single file on disk, with the same name as the key. +The key determines where that file will be stored, via a user-provided +`TransformFunc`, which takes a key and returns a slice (`[]string`) +corresponding to a path list where the key file will be stored. The simplest +TransformFunc, + +```go +func SimpleTransform (key string) []string { + return []string{} +} +``` + +will place all keys in the same, base directory. 
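For contrast, here is a hedged sketch (not taken from the diskv README; `blockTransform`, the key, and the directory names are illustrative) of a sharding transform that spreads keys across two levels of subdirectories instead of one flat base directory:

```go
package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	// Shard "abcdef" into my-data-dir/ab/cd/abcdef. Keys shorter than four
	// characters fall back to the base directory; as the note below explains,
	// such short keys could then collide with shard directories, so this
	// sketch assumes keys of at least four characters.
	blockTransform := func(key string) []string {
		if len(key) < 4 {
			return []string{}
		}
		return []string{key[0:2], key[2:4]}
	}

	d := diskv.New(diskv.Options{
		BasePath:     "my-data-dir",
		Transform:    blockTransform,
		CacheSizeMax: 1024 * 1024,
	})

	d.Write("abcdef", []byte("value")) // stored at my-data-dir/ab/cd/abcdef
	value, _ := d.Read("abcdef")
	fmt.Println(string(value))
}
```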
The design is inspired by +[Redis diskstore][6]; a TransformFunc which emulates the default diskstore +behavior is available in the content-addressable-storage example. + +[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1 + +**Note** that your TransformFunc should ensure that one valid key doesn't +transform to a subset of another valid key. That is, it shouldn't be possible +to construct valid keys that resolve to directory names. As a concrete example, +if your TransformFunc splits on every 3 characters, then + +```go +d.Write("abcabc", val) // OK: written to /abc/abc/abcabc +d.Write("abc", val) // Error: attempted write to /abc/abc, but it's a directory +``` + +This will be addressed in an upcoming version of diskv. + +Probably the most important design principle behind diskv is that your data is +always flatly available on the disk. diskv will never do anything that would +prevent you from accessing, copying, backing up, or otherwise interacting with +your data via common UNIX commandline tools. + +## Advanced path transformation + +If you need more control over the file name written to disk or if you want to support +slashes in your key name or special characters in the keys, you can use the +AdvancedTransform property. You must supply a function that returns +a special PathKey structure, which is a breakdown of a path and a file name. Strings +returned must be clean of any slashes or special characters: + +```go +func AdvancedTransformExample(key string) *diskv.PathKey { + path := strings.Split(key, "/") + last := len(path) - 1 + return &diskv.PathKey{ + Path: path[:last], + FileName: path[last] + ".txt", + } +} + +// If you provide an AdvancedTransform, you must also provide its +// inverse: + +func InverseTransformExample(pathKey *diskv.PathKey) (key string) { + txt := pathKey.FileName[len(pathKey.FileName)-4:] + if txt != ".txt" { + panic("Invalid file found in storage folder!") + } + return strings.Join(pathKey.Path, "/") + pathKey.FileName[:len(pathKey.FileName)-4] +} + +func main() { + d := diskv.New(diskv.Options{ + BasePath: "my-data-dir", + AdvancedTransform: AdvancedTransformExample, + InverseTransform: InverseTransformExample, + CacheSizeMax: 1024 * 1024, + }) + // Write some text to the key "alpha/beta/gamma". + key := "alpha/beta/gamma" + d.WriteString(key, "¡Hola!") // will be stored in "/alpha/beta/gamma.txt" + fmt.Println(d.ReadString("alpha/beta/gamma")) +} +``` + + +## Adding a cache + +An in-memory caching layer is provided by combining the BasicStore +functionality with a simple map structure, and keeping it up-to-date as +appropriate. Since the map structure in Go is not threadsafe, it's combined +with a RWMutex to provide safe concurrent access. + +## Adding order + +diskv is a key-value store and therefore inherently unordered. An ordering +system can be injected into the store by passing something which satisfies the +diskv.Index interface. (A default implementation, using Google's +[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a +user-provided Less function) index of the keys, which can be queried. + +[7]: https://github.com/google/btree + +## Adding compression + +Something which implements the diskv.Compression interface may be passed +during store creation, so that all Writes and Reads are filtered through +a compression/decompression pipeline. Several default implementations, +using stdlib compression algorithms, are provided. 
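As a concrete illustration, here is a minimal sketch (the directory and key names are made up) that relies only on the `Options.Compression` field and the `NewGzipCompression` helper present in the vendored diskv sources in this patch:

```go
package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	d := diskv.New(diskv.Options{
		BasePath:     "compressed-data-dir",
		Transform:    func(s string) []string { return []string{} },
		CacheSizeMax: 1024 * 1024,
		Compression:  diskv.NewGzipCompression(), // every Write/Read is filtered through gzip
	})

	d.Write("greeting", []byte("hello, compressed world"))
	value, _ := d.Read("greeting") // transparently decompressed on read
	fmt.Println(string(value))
}
```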
Note that data is cached +compressed; the cost of decompression is borne with each Read. + +## Streaming + +diskv also now provides ReadStream and WriteStream methods, to allow very large +data to be handled efficiently. + + +# Future plans + + * Needs plenty of robust testing: huge datasets, etc... + * More thorough benchmarking + * Your suggestions for use-cases I haven't thought of + + +# Credits and contributions + +Original idea, design and implementation: [Peter Bourgon](https://github.com/peterbourgon) +Other collaborations: [Javier Peletier](https://github.com/jpeletier) ([Epic Labs](https://www.epiclabs.io)) diff --git a/vendor/github.com/peterbourgon/diskv/compression.go b/vendor/github.com/peterbourgon/diskv/compression.go new file mode 100644 index 000000000..5192b0273 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/compression.go @@ -0,0 +1,64 @@ +package diskv + +import ( + "compress/flate" + "compress/gzip" + "compress/zlib" + "io" +) + +// Compression is an interface that Diskv uses to implement compression of +// data. Writer takes a destination io.Writer and returns a WriteCloser that +// compresses all data written through it. Reader takes a source io.Reader and +// returns a ReadCloser that decompresses all data read through it. You may +// define these methods on your own type, or use one of the NewCompression +// helpers. +type Compression interface { + Writer(dst io.Writer) (io.WriteCloser, error) + Reader(src io.Reader) (io.ReadCloser, error) +} + +// NewGzipCompression returns a Gzip-based Compression. +func NewGzipCompression() Compression { + return NewGzipCompressionLevel(flate.DefaultCompression) +} + +// NewGzipCompressionLevel returns a Gzip-based Compression with the given level. +func NewGzipCompressionLevel(level int) Compression { + return &genericCompression{ + wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) }, + rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) }, + } +} + +// NewZlibCompression returns a Zlib-based Compression. +func NewZlibCompression() Compression { + return NewZlibCompressionLevel(flate.DefaultCompression) +} + +// NewZlibCompressionLevel returns a Zlib-based Compression with the given level. +func NewZlibCompressionLevel(level int) Compression { + return NewZlibCompressionLevelDict(level, nil) +} + +// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given +// level, based on the given dictionary. +func NewZlibCompressionLevelDict(level int, dict []byte) Compression { + return &genericCompression{ + func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) }, + func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) }, + } +} + +type genericCompression struct { + wf func(w io.Writer) (io.WriteCloser, error) + rf func(r io.Reader) (io.ReadCloser, error) +} + +func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) { + return g.wf(dst) +} + +func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) { + return g.rf(src) +} diff --git a/vendor/github.com/peterbourgon/diskv/diskv.go b/vendor/github.com/peterbourgon/diskv/diskv.go new file mode 100644 index 000000000..0716da351 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/diskv.go @@ -0,0 +1,726 @@ +// Diskv (disk-vee) is a simple, persistent, key-value store. +// It stores all data flatly on the filesystem. 
+ +package diskv + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + "syscall" +) + +const ( + defaultBasePath = "diskv" + defaultFilePerm os.FileMode = 0666 + defaultPathPerm os.FileMode = 0777 +) + +// PathKey represents a string key that has been transformed to +// a directory and file name where the content will eventually +// be stored +type PathKey struct { + Path []string + FileName string + originalKey string +} + +var ( + defaultAdvancedTransform = func(s string) *PathKey { return &PathKey{Path: []string{}, FileName: s} } + defaultInverseTransform = func(pathKey *PathKey) string { return pathKey.FileName } + errCanceled = errors.New("canceled") + errEmptyKey = errors.New("empty key") + errBadKey = errors.New("bad key") + errImportDirectory = errors.New("can't import a directory") +) + +// TransformFunction transforms a key into a slice of strings, with each +// element in the slice representing a directory in the file path where the +// key's entry will eventually be stored. +// +// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"], +// the final location of the data file will be /ab/cde/f/abcdef +type TransformFunction func(s string) []string + +// AdvancedTransformFunction transforms a key into a PathKey. +// +// A PathKey contains a slice of strings, where each element in the slice +// represents a directory in the file path where the key's entry will eventually +// be stored, as well as the filename. +// +// For example, if AdvancedTransformFunc transforms "abcdef/file.txt" to the +// PathKey {Path: ["ab", "cde", "f"], FileName: "file.txt"}, the final location +// of the data file will be /ab/cde/f/file.txt. +// +// You must provide an InverseTransformFunction if you use an +// AdvancedTransformFunction. +type AdvancedTransformFunction func(s string) *PathKey + +// InverseTransformFunction takes a PathKey and converts it back to a Diskv key. +// In effect, it's the opposite of an AdvancedTransformFunction. +type InverseTransformFunction func(pathKey *PathKey) string + +// Options define a set of properties that dictate Diskv behavior. +// All values are optional. +type Options struct { + BasePath string + Transform TransformFunction + AdvancedTransform AdvancedTransformFunction + InverseTransform InverseTransformFunction + CacheSizeMax uint64 // bytes + PathPerm os.FileMode + FilePerm os.FileMode + // If TempDir is set, it will enable filesystem atomic writes by + // writing temporary files to that location before being moved + // to BasePath. + // Note that TempDir MUST be on the same device/partition as + // BasePath. + TempDir string + + Index Index + IndexLess LessFunction + + Compression Compression +} + +// Diskv implements the Diskv interface. You shouldn't construct Diskv +// structures directly; instead, use the New constructor. +type Diskv struct { + Options + mu sync.RWMutex + cache map[string][]byte + cacheSize uint64 +} + +// New returns an initialized Diskv structure, ready to use. +// If the path identified by baseDir already contains data, +// it will be accessible, but not yet cached. 
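// Illustrative usage (editorial sketch, not part of upstream diskv): enabling
// the atomic-write behavior described above by pointing TempDir at a scratch
// directory on the same partition as BasePath. Both paths below are made up.
//
//	d := diskv.New(diskv.Options{
//		BasePath: "/var/lib/myapp/data",
//		TempDir:  "/var/lib/myapp/tmp", // written here first, then renamed into BasePath
//	})
//	d.Write("config", []byte("v1")) // appears under BasePath only once fully written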
+func New(o Options) *Diskv { + if o.BasePath == "" { + o.BasePath = defaultBasePath + } + + if o.AdvancedTransform == nil { + if o.Transform == nil { + o.AdvancedTransform = defaultAdvancedTransform + } else { + o.AdvancedTransform = convertToAdvancedTransform(o.Transform) + } + if o.InverseTransform == nil { + o.InverseTransform = defaultInverseTransform + } + } else { + if o.InverseTransform == nil { + panic("You must provide an InverseTransform function in advanced mode") + } + } + + if o.PathPerm == 0 { + o.PathPerm = defaultPathPerm + } + if o.FilePerm == 0 { + o.FilePerm = defaultFilePerm + } + + d := &Diskv{ + Options: o, + cache: map[string][]byte{}, + cacheSize: 0, + } + + if d.Index != nil && d.IndexLess != nil { + d.Index.Initialize(d.IndexLess, d.Keys(nil)) + } + + return d +} + +// convertToAdvancedTransform takes a classic Transform function and +// converts it to the new AdvancedTransform +func convertToAdvancedTransform(oldFunc func(s string) []string) AdvancedTransformFunction { + return func(s string) *PathKey { + return &PathKey{Path: oldFunc(s), FileName: s} + } +} + +// Write synchronously writes the key-value pair to disk, making it immediately +// available for reads. Write relies on the filesystem to perform an eventual +// sync to physical media. If you need stronger guarantees, see WriteStream. +func (d *Diskv) Write(key string, val []byte) error { + return d.WriteStream(key, bytes.NewReader(val), false) +} + +// WriteString writes a string key-value pair to disk +func (d *Diskv) WriteString(key string, val string) error { + return d.Write(key, []byte(val)) +} + +func (d *Diskv) transform(key string) (pathKey *PathKey) { + pathKey = d.AdvancedTransform(key) + pathKey.originalKey = key + return pathKey +} + +// WriteStream writes the data represented by the io.Reader to the disk, under +// the provided key. If sync is true, WriteStream performs an explicit sync on +// the file as soon as it's written. +// +// bytes.Buffer provides io.Reader semantics for basic data types. +func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error { + if len(key) <= 0 { + return errEmptyKey + } + + pathKey := d.transform(key) + + // Ensure keys cannot evaluate to paths that would not exist + for _, pathPart := range pathKey.Path { + if strings.ContainsRune(pathPart, os.PathSeparator) { + return errBadKey + } + } + + if strings.ContainsRune(pathKey.FileName, os.PathSeparator) { + return errBadKey + } + + d.mu.Lock() + defer d.mu.Unlock() + + return d.writeStreamWithLock(pathKey, r, sync) +} + +// createKeyFileWithLock either creates the key file directly, or +// creates a temporary file in TempDir if it is set. +func (d *Diskv) createKeyFileWithLock(pathKey *PathKey) (*os.File, error) { + if d.TempDir != "" { + if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil { + return nil, fmt.Errorf("temp mkdir: %s", err) + } + f, err := ioutil.TempFile(d.TempDir, "") + if err != nil { + return nil, fmt.Errorf("temp file: %s", err) + } + + if err := f.Chmod(d.FilePerm); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return nil, fmt.Errorf("chmod: %s", err) + } + return f, nil + } + + mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists + f, err := os.OpenFile(d.completeFilename(pathKey), mode, d.FilePerm) + if err != nil { + return nil, fmt.Errorf("open file: %s", err) + } + return f, nil +} + +// writeStream does no input validation checking. 
+func (d *Diskv) writeStreamWithLock(pathKey *PathKey, r io.Reader, sync bool) error { + if err := d.ensurePathWithLock(pathKey); err != nil { + return fmt.Errorf("ensure path: %s", err) + } + + f, err := d.createKeyFileWithLock(pathKey) + if err != nil { + return fmt.Errorf("create key file: %s", err) + } + + wc := io.WriteCloser(&nopWriteCloser{f}) + if d.Compression != nil { + wc, err = d.Compression.Writer(f) + if err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("compression writer: %s", err) + } + } + + if _, err := io.Copy(wc, r); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("i/o copy: %s", err) + } + + if err := wc.Close(); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("compression close: %s", err) + } + + if sync { + if err := f.Sync(); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("file sync: %s", err) + } + } + + if err := f.Close(); err != nil { + return fmt.Errorf("file close: %s", err) + } + + fullPath := d.completeFilename(pathKey) + if f.Name() != fullPath { + if err := os.Rename(f.Name(), fullPath); err != nil { + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("rename: %s", err) + } + } + + if d.Index != nil { + d.Index.Insert(pathKey.originalKey) + } + + d.bustCacheWithLock(pathKey.originalKey) // cache only on read + + return nil +} + +// Import imports the source file into diskv under the destination key. If the +// destination key already exists, it's overwritten. If move is true, the +// source file is removed after a successful import. +func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) { + if dstKey == "" { + return errEmptyKey + } + + if fi, err := os.Stat(srcFilename); err != nil { + return err + } else if fi.IsDir() { + return errImportDirectory + } + + dstPathKey := d.transform(dstKey) + + d.mu.Lock() + defer d.mu.Unlock() + + if err := d.ensurePathWithLock(dstPathKey); err != nil { + return fmt.Errorf("ensure path: %s", err) + } + + if move { + if err := syscall.Rename(srcFilename, d.completeFilename(dstPathKey)); err == nil { + d.bustCacheWithLock(dstPathKey.originalKey) + return nil + } else if err != syscall.EXDEV { + // If it failed due to being on a different device, fall back to copying + return err + } + } + + f, err := os.Open(srcFilename) + if err != nil { + return err + } + defer f.Close() + err = d.writeStreamWithLock(dstPathKey, f, false) + if err == nil && move { + err = os.Remove(srcFilename) + } + return err +} + +// Read reads the key and returns the value. +// If the key is available in the cache, Read won't touch the disk. +// If the key is not in the cache, Read will have the side-effect of +// lazily caching the value. +func (d *Diskv) Read(key string) ([]byte, error) { + rc, err := d.ReadStream(key, false) + if err != nil { + return []byte{}, err + } + defer rc.Close() + return ioutil.ReadAll(rc) +} + +// ReadString reads the key and returns a string value +// In case of error, an empty string is returned +func (d *Diskv) ReadString(key string) string { + value, _ := d.Read(key) + return string(value) +} + +// ReadStream reads the key and returns the value (data) as an io.ReadCloser. 
+// If the value is cached from a previous read, and direct is false, +// ReadStream will use the cached value. Otherwise, it will return a handle to +// the file on disk, and cache the data on read. +// +// If direct is true, ReadStream will lazily delete any cached value for the +// key, and return a direct handle to the file on disk. +// +// If compression is enabled, ReadStream taps into the io.Reader stream prior +// to decompression, and caches the compressed data. +func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) { + + pathKey := d.transform(key) + d.mu.RLock() + defer d.mu.RUnlock() + + if val, ok := d.cache[key]; ok { + if !direct { + buf := bytes.NewReader(val) + if d.Compression != nil { + return d.Compression.Reader(buf) + } + return ioutil.NopCloser(buf), nil + } + + go func() { + d.mu.Lock() + defer d.mu.Unlock() + d.uncacheWithLock(key, uint64(len(val))) + }() + } + + return d.readWithRLock(pathKey) +} + +// read ignores the cache, and returns an io.ReadCloser representing the +// decompressed data for the given key, streamed from the disk. Clients should +// acquire a read lock on the Diskv and check the cache themselves before +// calling read. +func (d *Diskv) readWithRLock(pathKey *PathKey) (io.ReadCloser, error) { + filename := d.completeFilename(pathKey) + + fi, err := os.Stat(filename) + if err != nil { + return nil, err + } + if fi.IsDir() { + return nil, os.ErrNotExist + } + + f, err := os.Open(filename) + if err != nil { + return nil, err + } + + var r io.Reader + if d.CacheSizeMax > 0 { + r = newSiphon(f, d, pathKey.originalKey) + } else { + r = &closingReader{f} + } + + var rc = io.ReadCloser(ioutil.NopCloser(r)) + if d.Compression != nil { + rc, err = d.Compression.Reader(r) + if err != nil { + return nil, err + } + } + + return rc, nil +} + +// closingReader provides a Reader that automatically closes the +// embedded ReadCloser when it reaches EOF +type closingReader struct { + rc io.ReadCloser +} + +func (cr closingReader) Read(p []byte) (int, error) { + n, err := cr.rc.Read(p) + if err == io.EOF { + if closeErr := cr.rc.Close(); closeErr != nil { + return n, closeErr // close must succeed for Read to succeed + } + } + return n, err +} + +// siphon is like a TeeReader: it copies all data read through it to an +// internal buffer, and moves that buffer to the cache at EOF. +type siphon struct { + f *os.File + d *Diskv + key string + buf *bytes.Buffer +} + +// newSiphon constructs a siphoning reader that represents the passed file. +// When a successful series of reads ends in an EOF, the siphon will write +// the buffered data to Diskv's cache under the given key. +func newSiphon(f *os.File, d *Diskv, key string) io.Reader { + return &siphon{ + f: f, + d: d, + key: key, + buf: &bytes.Buffer{}, + } +} + +// Read implements the io.Reader interface for siphon. +func (s *siphon) Read(p []byte) (int, error) { + n, err := s.f.Read(p) + + if err == nil { + return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed + } + + if err == io.EOF { + s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail + if closeErr := s.f.Close(); closeErr != nil { + return n, closeErr // close must succeed for Read to succeed + } + return n, err + } + + return n, err +} + +// Erase synchronously erases the given key from the disk and the cache. 
+func (d *Diskv) Erase(key string) error { + pathKey := d.transform(key) + d.mu.Lock() + defer d.mu.Unlock() + + d.bustCacheWithLock(key) + + // erase from index + if d.Index != nil { + d.Index.Delete(key) + } + + // erase from disk + filename := d.completeFilename(pathKey) + if s, err := os.Stat(filename); err == nil { + if s.IsDir() { + return errBadKey + } + if err = os.Remove(filename); err != nil { + return err + } + } else { + // Return err as-is so caller can do os.IsNotExist(err). + return err + } + + // clean up and return + d.pruneDirsWithLock(key) + return nil +} + +// EraseAll will delete all of the data from the store, both in the cache and on +// the disk. Note that EraseAll doesn't distinguish diskv-related data from non- +// diskv-related data. Care should be taken to always specify a diskv base +// directory that is exclusively for diskv data. +func (d *Diskv) EraseAll() error { + d.mu.Lock() + defer d.mu.Unlock() + d.cache = make(map[string][]byte) + d.cacheSize = 0 + if d.TempDir != "" { + os.RemoveAll(d.TempDir) // errors ignored + } + return os.RemoveAll(d.BasePath) +} + +// Has returns true if the given key exists. +func (d *Diskv) Has(key string) bool { + pathKey := d.transform(key) + d.mu.Lock() + defer d.mu.Unlock() + + if _, ok := d.cache[key]; ok { + return true + } + + filename := d.completeFilename(pathKey) + s, err := os.Stat(filename) + if err != nil { + return false + } + if s.IsDir() { + return false + } + + return true +} + +// Keys returns a channel that will yield every key accessible by the store, +// in undefined order. If a cancel channel is provided, closing it will +// terminate and close the keys channel. +func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string { + return d.KeysPrefix("", cancel) +} + +// KeysPrefix returns a channel that will yield every key accessible by the +// store with the given prefix, in undefined order. If a cancel channel is +// provided, closing it will terminate and close the keys channel. If the +// provided prefix is the empty string, all keys will be yielded. +func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string { + var prepath string + if prefix == "" { + prepath = d.BasePath + } else { + prefixKey := d.transform(prefix) + prepath = d.pathFor(prefixKey) + } + c := make(chan string) + go func() { + filepath.Walk(prepath, d.walker(c, prefix, cancel)) + close(c) + }() + return c +} + +// walker returns a function which satisfies the filepath.WalkFunc interface. +// It sends every non-directory file entry down the channel c. +func (d *Diskv) walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc { + return func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + relPath, _ := filepath.Rel(d.BasePath, path) + dir, file := filepath.Split(relPath) + pathSplit := strings.Split(dir, string(filepath.Separator)) + pathSplit = pathSplit[:len(pathSplit)-1] + + pathKey := &PathKey{ + Path: pathSplit, + FileName: file, + } + + key := d.InverseTransform(pathKey) + + if info.IsDir() || !strings.HasPrefix(key, prefix) { + return nil // "pass" + } + + select { + case c <- key: + case <-cancel: + return errCanceled + } + + return nil + } +} + +// pathFor returns the absolute path for location on the filesystem where the +// data for the given key will be stored. 
+func (d *Diskv) pathFor(pathKey *PathKey) string { + return filepath.Join(d.BasePath, filepath.Join(pathKey.Path...)) +} + +// ensurePathWithLock is a helper function that generates all necessary +// directories on the filesystem for the given key. +func (d *Diskv) ensurePathWithLock(pathKey *PathKey) error { + return os.MkdirAll(d.pathFor(pathKey), d.PathPerm) +} + +// completeFilename returns the absolute path to the file for the given key. +func (d *Diskv) completeFilename(pathKey *PathKey) string { + return filepath.Join(d.pathFor(pathKey), pathKey.FileName) +} + +// cacheWithLock attempts to cache the given key-value pair in the store's +// cache. It can fail if the value is larger than the cache's maximum size. +func (d *Diskv) cacheWithLock(key string, val []byte) error { + valueSize := uint64(len(val)) + if err := d.ensureCacheSpaceWithLock(valueSize); err != nil { + return fmt.Errorf("%s; not caching", err) + } + + // be very strict about memory guarantees + if (d.cacheSize + valueSize) > d.CacheSizeMax { + panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax)) + } + + d.cache[key] = val + d.cacheSize += valueSize + return nil +} + +// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock. +func (d *Diskv) cacheWithoutLock(key string, val []byte) error { + d.mu.Lock() + defer d.mu.Unlock() + return d.cacheWithLock(key, val) +} + +func (d *Diskv) bustCacheWithLock(key string) { + if val, ok := d.cache[key]; ok { + d.uncacheWithLock(key, uint64(len(val))) + } +} + +func (d *Diskv) uncacheWithLock(key string, sz uint64) { + d.cacheSize -= sz + delete(d.cache, key) +} + +// pruneDirsWithLock deletes empty directories in the path walk leading to the +// key k. Typically this function is called after an Erase is made. +func (d *Diskv) pruneDirsWithLock(key string) error { + pathlist := d.transform(key).Path + for i := range pathlist { + dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...)) + + // thanks to Steven Blenkinsop for this snippet + switch fi, err := os.Stat(dir); true { + case err != nil: + return err + case !fi.IsDir(): + panic(fmt.Sprintf("corrupt dirstate at %s", dir)) + } + + nlinks, err := filepath.Glob(filepath.Join(dir, "*")) + if err != nil { + return err + } else if len(nlinks) > 0 { + return nil // has subdirs -- do not prune + } + if err = os.Remove(dir); err != nil { + return err + } + } + + return nil +} + +// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order +// until the cache has at least valueSize bytes available. +func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error { + if valueSize > d.CacheSizeMax { + return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax) + } + + safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax } + + for key, val := range d.cache { + if safe() { + break + } + + d.uncacheWithLock(key, uint64(len(val))) + } + + if !safe() { + panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax)) + } + + return nil +} + +// nopWriteCloser wraps an io.Writer and provides a no-op Close method to +// satisfy the io.WriteCloser interface. 
+type nopWriteCloser struct { + io.Writer +} + +func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) } +func (wc *nopWriteCloser) Close() error { return nil } diff --git a/vendor/github.com/peterbourgon/diskv/index.go b/vendor/github.com/peterbourgon/diskv/index.go new file mode 100644 index 000000000..96fee5152 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/index.go @@ -0,0 +1,115 @@ +package diskv + +import ( + "sync" + + "github.com/google/btree" +) + +// Index is a generic interface for things that can +// provide an ordered list of keys. +type Index interface { + Initialize(less LessFunction, keys <-chan string) + Insert(key string) + Delete(key string) + Keys(from string, n int) []string +} + +// LessFunction is used to initialize an Index of keys in a specific order. +type LessFunction func(string, string) bool + +// btreeString is a custom data type that satisfies the BTree Less interface, +// making the strings it wraps sortable by the BTree package. +type btreeString struct { + s string + l LessFunction +} + +// Less satisfies the BTree.Less interface using the btreeString's LessFunction. +func (s btreeString) Less(i btree.Item) bool { + return s.l(s.s, i.(btreeString).s) +} + +// BTreeIndex is an implementation of the Index interface using google/btree. +type BTreeIndex struct { + sync.RWMutex + LessFunction + *btree.BTree +} + +// Initialize populates the BTree tree with data from the keys channel, +// according to the passed less function. It's destructive to the BTreeIndex. +func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) { + i.Lock() + defer i.Unlock() + i.LessFunction = less + i.BTree = rebuild(less, keys) +} + +// Insert inserts the given key (only) into the BTree tree. +func (i *BTreeIndex) Insert(key string) { + i.Lock() + defer i.Unlock() + if i.BTree == nil || i.LessFunction == nil { + panic("uninitialized index") + } + i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction}) +} + +// Delete removes the given key (only) from the BTree tree. +func (i *BTreeIndex) Delete(key string) { + i.Lock() + defer i.Unlock() + if i.BTree == nil || i.LessFunction == nil { + panic("uninitialized index") + } + i.BTree.Delete(btreeString{s: key, l: i.LessFunction}) +} + +// Keys yields a maximum of n keys in order. If the passed 'from' key is empty, +// Keys will return the first n keys. If the passed 'from' key is non-empty, the +// first key in the returned slice will be the key that immediately follows the +// passed key, in key order. +func (i *BTreeIndex) Keys(from string, n int) []string { + i.RLock() + defer i.RUnlock() + + if i.BTree == nil || i.LessFunction == nil { + panic("uninitialized index") + } + + if i.BTree.Len() <= 0 { + return []string{} + } + + btreeFrom := btreeString{s: from, l: i.LessFunction} + skipFirst := true + if len(from) <= 0 || !i.BTree.Has(btreeFrom) { + // no such key, so fabricate an always-smallest item + btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }} + skipFirst = false + } + + keys := []string{} + iterator := func(i btree.Item) bool { + keys = append(keys, i.(btreeString).s) + return len(keys) < n + } + i.BTree.AscendGreaterOrEqual(btreeFrom, iterator) + + if skipFirst && len(keys) > 0 { + keys = keys[1:] + } + + return keys +} + +// rebuildIndex does the work of regenerating the index +// with the given keys. 
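A small usage sketch of the BTreeIndex defined above, driving Initialize with an ad-hoc key channel; the ordering function and sample keys are illustrative only.

package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	// Seed the index with an ordering function and an initial key stream.
	keys := make(chan string)
	go func() {
		for _, k := range []string{"b", "d", "a", "c"} {
			keys <- k
		}
		close(keys)
	}()

	idx := &diskv.BTreeIndex{}
	idx.Initialize(func(a, b string) bool { return a < b }, keys)
	idx.Insert("e")
	idx.Delete("d")

	fmt.Println(idx.Keys("", 2))  // [a b]
	fmt.Println(idx.Keys("b", 3)) // [c e]: the "from" key itself is skipped
}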
+func rebuild(less LessFunction, keys <-chan string) *btree.BTree { + tree := btree.New(2) + for key := range keys { + tree.ReplaceOrInsert(btreeString{s: key, l: less}) + } + return tree +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go new file mode 100644 index 000000000..9a887598f --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -0,0 +1,951 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import ( + "bytes" + "io" + "sync" + "unicode/utf8" +) + +// EscapeCodes contains escape sequences that can be written to the terminal in +// order to achieve different styles of text. +type EscapeCodes struct { + // Foreground colors + Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte + + // Reset all attributes + Reset []byte +} + +var vt100EscapeCodes = EscapeCodes{ + Black: []byte{keyEscape, '[', '3', '0', 'm'}, + Red: []byte{keyEscape, '[', '3', '1', 'm'}, + Green: []byte{keyEscape, '[', '3', '2', 'm'}, + Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, + Blue: []byte{keyEscape, '[', '3', '4', 'm'}, + Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, + Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, + White: []byte{keyEscape, '[', '3', '7', 'm'}, + + Reset: []byte{keyEscape, '[', '0', 'm'}, +} + +// Terminal contains the state for running a VT100 terminal that is capable of +// reading lines of input. +type Terminal struct { + // AutoCompleteCallback, if non-null, is called for each keypress with + // the full input line and the current position of the cursor (in + // bytes, as an index into |line|). If it returns ok=false, the key + // press is processed normally. Otherwise it returns a replacement line + // and the new cursor position. + AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) + + // Escape contains a pointer to the escape codes for this terminal. + // It's always a valid pointer, although the escape codes themselves + // may be empty if the terminal doesn't support them. + Escape *EscapeCodes + + // lock protects the terminal and the state in this object from + // concurrent processing of a key press and a Write() call. + lock sync.Mutex + + c io.ReadWriter + prompt []rune + + // line is the current line being entered. + line []rune + // pos is the logical position of the cursor in line + pos int + // echo is true if local echo is enabled + echo bool + // pasteActive is true iff there is a bracketed paste operation in + // progress. + pasteActive bool + + // cursorX contains the current X value of the cursor where the left + // edge is 0. cursorY contains the row number where the first row of + // the current line is 0. + cursorX, cursorY int + // maxLine is the greatest value of cursorY so far. + maxLine int + + termWidth, termHeight int + + // outBuf contains the terminal data to be sent. + outBuf []byte + // remainder contains the remainder of any partial key sequences after + // a read. It aliases into inBuf. + remainder []byte + inBuf [256]byte + + // history contains previously entered commands so that they can be + // accessed with the up and down keys. + history stRingBuffer + // historyIndex stores the currently accessed history entry, where zero + // means the immediately previous entry. 
+ historyIndex int + // When navigating up and down the history it's possible to return to + // the incomplete, initial line. That value is stored in + // historyPending. + historyPending string +} + +// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is +// a local terminal, that terminal must first have been put into raw mode. +// prompt is a string that is written at the start of each input line (i.e. +// "> "). +func NewTerminal(c io.ReadWriter, prompt string) *Terminal { + return &Terminal{ + Escape: &vt100EscapeCodes, + c: c, + prompt: []rune(prompt), + termWidth: 80, + termHeight: 24, + echo: true, + historyIndex: -1, + } +} + +const ( + keyCtrlD = 4 + keyCtrlU = 21 + keyEnter = '\r' + keyEscape = 27 + keyBackspace = 127 + keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota + keyUp + keyDown + keyLeft + keyRight + keyAltLeft + keyAltRight + keyHome + keyEnd + keyDeleteWord + keyDeleteLine + keyClearScreen + keyPasteStart + keyPasteEnd +) + +var ( + crlf = []byte{'\r', '\n'} + pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'} + pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'} +) + +// bytesToKey tries to parse a key sequence from b. If successful, it returns +// the key and the remainder of the input. Otherwise it returns utf8.RuneError. +func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { + if len(b) == 0 { + return utf8.RuneError, nil + } + + if !pasteActive { + switch b[0] { + case 1: // ^A + return keyHome, b[1:] + case 5: // ^E + return keyEnd, b[1:] + case 8: // ^H + return keyBackspace, b[1:] + case 11: // ^K + return keyDeleteLine, b[1:] + case 12: // ^L + return keyClearScreen, b[1:] + case 23: // ^W + return keyDeleteWord, b[1:] + } + } + + if b[0] != keyEscape { + if !utf8.FullRune(b) { + return utf8.RuneError, b + } + r, l := utf8.DecodeRune(b) + return r, b[l:] + } + + if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' { + switch b[2] { + case 'A': + return keyUp, b[3:] + case 'B': + return keyDown, b[3:] + case 'C': + return keyRight, b[3:] + case 'D': + return keyLeft, b[3:] + case 'H': + return keyHome, b[3:] + case 'F': + return keyEnd, b[3:] + } + } + + if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { + switch b[5] { + case 'C': + return keyAltRight, b[6:] + case 'D': + return keyAltLeft, b[6:] + } + } + + if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) { + return keyPasteStart, b[6:] + } + + if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) { + return keyPasteEnd, b[6:] + } + + // If we get here then we have a key that we don't recognise, or a + // partial sequence. It's not clear how one should find the end of a + // sequence without knowing them all, but it seems that [a-zA-Z~] only + // appears at the end of a sequence. + for i, c := range b[0:] { + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' { + return keyUnknown, b[i+1:] + } + } + + return utf8.RuneError, b +} + +// queue appends data to the end of t.outBuf +func (t *Terminal) queue(data []rune) { + t.outBuf = append(t.outBuf, []byte(string(data))...) +} + +var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'} +var space = []rune{' '} + +func isPrintable(key rune) bool { + isInSurrogateArea := key >= 0xd800 && key <= 0xdbff + return key >= 32 && !isInSurrogateArea +} + +// moveCursorToPos appends data to t.outBuf which will move the cursor to the +// given, logical position in the text. 
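bytesToKey above is unexported, so here is a trimmed-down, standalone decoder for the same byte-stream shape: plain UTF-8 runes pass through, and a three-byte CSI sequence (ESC '[' final byte) maps to a named key. The key names are made up for the demo.

package main

import (
	"fmt"
	"unicode/utf8"
)

// decodeKey sketches the bytesToKey logic: plain runes are decoded with
// unicode/utf8, while ESC '[' <letter> is mapped to an arrow key.
func decodeKey(b []byte) (key string, rest []byte) {
	const esc = 27
	if len(b) == 0 {
		return "", nil
	}
	if b[0] != esc {
		r, size := utf8.DecodeRune(b)
		return string(r), b[size:]
	}
	if len(b) >= 3 && b[1] == '[' {
		switch b[2] {
		case 'A':
			return "<up>", b[3:]
		case 'B':
			return "<down>", b[3:]
		case 'C':
			return "<right>", b[3:]
		case 'D':
			return "<left>", b[3:]
		}
	}
	return "<unknown>", nil
}

func main() {
	input := []byte("hi\x1b[Cx") // 'h', 'i', right-arrow, 'x'
	for len(input) > 0 {
		var key string
		key, input = decodeKey(input)
		fmt.Printf("%q ", key)
	}
	fmt.Println()
}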
+func (t *Terminal) moveCursorToPos(pos int) { + if !t.echo { + return + } + + x := visualLength(t.prompt) + pos + y := x / t.termWidth + x = x % t.termWidth + + up := 0 + if y < t.cursorY { + up = t.cursorY - y + } + + down := 0 + if y > t.cursorY { + down = y - t.cursorY + } + + left := 0 + if x < t.cursorX { + left = t.cursorX - x + } + + right := 0 + if x > t.cursorX { + right = x - t.cursorX + } + + t.cursorX = x + t.cursorY = y + t.move(up, down, left, right) +} + +func (t *Terminal) move(up, down, left, right int) { + movement := make([]rune, 3*(up+down+left+right)) + m := movement + for i := 0; i < up; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'A' + m = m[3:] + } + for i := 0; i < down; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'B' + m = m[3:] + } + for i := 0; i < left; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'D' + m = m[3:] + } + for i := 0; i < right; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'C' + m = m[3:] + } + + t.queue(movement) +} + +func (t *Terminal) clearLineToRight() { + op := []rune{keyEscape, '[', 'K'} + t.queue(op) +} + +const maxLineLength = 4096 + +func (t *Terminal) setLine(newLine []rune, newPos int) { + if t.echo { + t.moveCursorToPos(0) + t.writeLine(newLine) + for i := len(newLine); i < len(t.line); i++ { + t.writeLine(space) + } + t.moveCursorToPos(newPos) + } + t.line = newLine + t.pos = newPos +} + +func (t *Terminal) advanceCursor(places int) { + t.cursorX += places + t.cursorY += t.cursorX / t.termWidth + if t.cursorY > t.maxLine { + t.maxLine = t.cursorY + } + t.cursorX = t.cursorX % t.termWidth + + if places > 0 && t.cursorX == 0 { + // Normally terminals will advance the current position + // when writing a character. But that doesn't happen + // for the last character in a line. However, when + // writing a character (except a new line) that causes + // a line wrap, the position will be advanced two + // places. + // + // So, if we are stopping at the end of a line, we + // need to write a newline so that our cursor can be + // advanced to the next line. + t.outBuf = append(t.outBuf, '\r', '\n') + } +} + +func (t *Terminal) eraseNPreviousChars(n int) { + if n == 0 { + return + } + + if t.pos < n { + n = t.pos + } + t.pos -= n + t.moveCursorToPos(t.pos) + + copy(t.line[t.pos:], t.line[n+t.pos:]) + t.line = t.line[:len(t.line)-n] + if t.echo { + t.writeLine(t.line[t.pos:]) + for i := 0; i < n; i++ { + t.queue(space) + } + t.advanceCursor(n) + t.moveCursorToPos(t.pos) + } +} + +// countToLeftWord returns then number of characters from the cursor to the +// start of the previous word. +func (t *Terminal) countToLeftWord() int { + if t.pos == 0 { + return 0 + } + + pos := t.pos - 1 + for pos > 0 { + if t.line[pos] != ' ' { + break + } + pos-- + } + for pos > 0 { + if t.line[pos] == ' ' { + pos++ + break + } + pos-- + } + + return t.pos - pos +} + +// countToRightWord returns then number of characters from the cursor to the +// start of the next word. +func (t *Terminal) countToRightWord() int { + pos := t.pos + for pos < len(t.line) { + if t.line[pos] == ' ' { + break + } + pos++ + } + for pos < len(t.line) { + if t.line[pos] != ' ' { + break + } + pos++ + } + return pos - t.pos +} + +// visualLength returns the number of visible glyphs in s. 
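The cursor bookkeeping in moveCursorToPos and advanceCursor reduces to one piece of arithmetic: the on-screen offset is the prompt's visual width plus the logical position, wrapped at the terminal width. A tiny sketch of just that calculation:

package main

import "fmt"

// cursorRowCol mirrors the arithmetic in moveCursorToPos: the screen position
// of logical offset pos is the prompt width plus pos, wrapped at termWidth.
func cursorRowCol(promptWidth, pos, termWidth int) (row, col int) {
	x := promptWidth + pos
	return x / termWidth, x % termWidth
}

func main() {
	const promptWidth, termWidth = 2, 80 // e.g. "> " on an 80-column terminal
	for _, pos := range []int{0, 77, 78, 150} {
		row, col := cursorRowCol(promptWidth, pos, termWidth)
		fmt.Printf("pos %3d -> row %d, col %2d\n", pos, row, col)
	}
}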
+func visualLength(runes []rune) int { + inEscapeSeq := false + length := 0 + + for _, r := range runes { + switch { + case inEscapeSeq: + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { + inEscapeSeq = false + } + case r == '\x1b': + inEscapeSeq = true + default: + length++ + } + } + + return length +} + +// handleKey processes the given key and, optionally, returns a line of text +// that the user has entered. +func (t *Terminal) handleKey(key rune) (line string, ok bool) { + if t.pasteActive && key != keyEnter { + t.addKeyToLine(key) + return + } + + switch key { + case keyBackspace: + if t.pos == 0 { + return + } + t.eraseNPreviousChars(1) + case keyAltLeft: + // move left by a word. + t.pos -= t.countToLeftWord() + t.moveCursorToPos(t.pos) + case keyAltRight: + // move right by a word. + t.pos += t.countToRightWord() + t.moveCursorToPos(t.pos) + case keyLeft: + if t.pos == 0 { + return + } + t.pos-- + t.moveCursorToPos(t.pos) + case keyRight: + if t.pos == len(t.line) { + return + } + t.pos++ + t.moveCursorToPos(t.pos) + case keyHome: + if t.pos == 0 { + return + } + t.pos = 0 + t.moveCursorToPos(t.pos) + case keyEnd: + if t.pos == len(t.line) { + return + } + t.pos = len(t.line) + t.moveCursorToPos(t.pos) + case keyUp: + entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) + if !ok { + return "", false + } + if t.historyIndex == -1 { + t.historyPending = string(t.line) + } + t.historyIndex++ + runes := []rune(entry) + t.setLine(runes, len(runes)) + case keyDown: + switch t.historyIndex { + case -1: + return + case 0: + runes := []rune(t.historyPending) + t.setLine(runes, len(runes)) + t.historyIndex-- + default: + entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) + if ok { + t.historyIndex-- + runes := []rune(entry) + t.setLine(runes, len(runes)) + } + } + case keyEnter: + t.moveCursorToPos(len(t.line)) + t.queue([]rune("\r\n")) + line = string(t.line) + ok = true + t.line = t.line[:0] + t.pos = 0 + t.cursorX = 0 + t.cursorY = 0 + t.maxLine = 0 + case keyDeleteWord: + // Delete zero or more spaces and then one or more characters. + t.eraseNPreviousChars(t.countToLeftWord()) + case keyDeleteLine: + // Delete everything from the current cursor position to the + // end of line. + for i := t.pos; i < len(t.line); i++ { + t.queue(space) + t.advanceCursor(1) + } + t.line = t.line[:t.pos] + t.moveCursorToPos(t.pos) + case keyCtrlD: + // Erase the character under the current position. + // The EOF case when the line is empty is handled in + // readLine(). + if t.pos < len(t.line) { + t.pos++ + t.eraseNPreviousChars(1) + } + case keyCtrlU: + t.eraseNPreviousChars(t.pos) + case keyClearScreen: + // Erases the screen and moves the cursor to the home position. + t.queue([]rune("\x1b[2J\x1b[H")) + t.queue(t.prompt) + t.cursorX, t.cursorY = 0, 0 + t.advanceCursor(visualLength(t.prompt)) + t.setLine(t.line, t.pos) + default: + if t.AutoCompleteCallback != nil { + prefix := string(t.line[:t.pos]) + suffix := string(t.line[t.pos:]) + + t.lock.Unlock() + newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key) + t.lock.Lock() + + if completeOk { + t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos])) + return + } + } + if !isPrintable(key) { + return + } + if len(t.line) == maxLineLength { + return + } + t.addKeyToLine(key) + } + return +} + +// addKeyToLine inserts the given key at the current position in the current +// line. 
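visualLength (above) is what keeps the cursor math honest when the prompt or line contains ANSI escape sequences. A standalone sketch of the same counting rule, applied to a colour-coded string:

package main

import "fmt"

// visibleGlyphs mirrors visualLength above: characters inside an ANSI escape
// sequence (ESC ... letter) do not count toward the on-screen width.
func visibleGlyphs(s string) int {
	inEscape, n := false, 0
	for _, r := range s {
		switch {
		case inEscape:
			if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') {
				inEscape = false
			}
		case r == '\x1b':
			inEscape = true
		default:
			n++
		}
	}
	return n
}

func main() {
	colored := "\x1b[31mred\x1b[0m text" // "red text" wrapped in colour codes
	fmt.Println(len(colored), "bytes,", visibleGlyphs(colored), "visible glyphs")
}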
+func (t *Terminal) addKeyToLine(key rune) { + if len(t.line) == cap(t.line) { + newLine := make([]rune, len(t.line), 2*(1+len(t.line))) + copy(newLine, t.line) + t.line = newLine + } + t.line = t.line[:len(t.line)+1] + copy(t.line[t.pos+1:], t.line[t.pos:]) + t.line[t.pos] = key + if t.echo { + t.writeLine(t.line[t.pos:]) + } + t.pos++ + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) writeLine(line []rune) { + for len(line) != 0 { + remainingOnLine := t.termWidth - t.cursorX + todo := len(line) + if todo > remainingOnLine { + todo = remainingOnLine + } + t.queue(line[:todo]) + t.advanceCursor(visualLength(line[:todo])) + line = line[todo:] + } +} + +// writeWithCRLF writes buf to w but replaces all occurrences of \n with \r\n. +func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) { + for len(buf) > 0 { + i := bytes.IndexByte(buf, '\n') + todo := len(buf) + if i >= 0 { + todo = i + } + + var nn int + nn, err = w.Write(buf[:todo]) + n += nn + if err != nil { + return n, err + } + buf = buf[todo:] + + if i >= 0 { + if _, err = w.Write(crlf); err != nil { + return n, err + } + n++ + buf = buf[1:] + } + } + + return n, nil +} + +func (t *Terminal) Write(buf []byte) (n int, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + if t.cursorX == 0 && t.cursorY == 0 { + // This is the easy case: there's nothing on the screen that we + // have to move out of the way. + return writeWithCRLF(t.c, buf) + } + + // We have a prompt and possibly user input on the screen. We + // have to clear it first. + t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) + t.cursorX = 0 + t.clearLineToRight() + + for t.cursorY > 0 { + t.move(1 /* up */, 0, 0, 0) + t.cursorY-- + t.clearLineToRight() + } + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + + if n, err = writeWithCRLF(t.c, buf); err != nil { + return + } + + t.writeLine(t.prompt) + if t.echo { + t.writeLine(t.line) + } + + t.moveCursorToPos(t.pos) + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + return +} + +// ReadPassword temporarily changes the prompt and reads a password, without +// echo, from the terminal. +func (t *Terminal) ReadPassword(prompt string) (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + oldPrompt := t.prompt + t.prompt = []rune(prompt) + t.echo = false + + line, err = t.readLine() + + t.prompt = oldPrompt + t.echo = true + + return +} + +// ReadLine returns a line of input from the terminal. 
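writeWithCRLF is unexported, but the LF-to-CRLF translation it performs is easy to reproduce as a generic io.Writer wrapper; a sketch, with a hypothetical crlfWriter type:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// crlfWriter reproduces the writeWithCRLF behaviour as an io.Writer wrapper:
// every '\n' in the stream is emitted as "\r\n".
type crlfWriter struct{ w io.Writer }

func (c crlfWriter) Write(p []byte) (int, error) {
	n := 0
	for len(p) > 0 {
		i := bytes.IndexByte(p, '\n')
		chunk := p
		if i >= 0 {
			chunk = p[:i]
		}
		if _, err := c.w.Write(chunk); err != nil {
			return n, err
		}
		n += len(chunk)
		if i < 0 {
			break
		}
		if _, err := c.w.Write([]byte("\r\n")); err != nil {
			return n, err
		}
		n++ // count the original '\n'
		p = p[i+1:]
	}
	return n, nil
}

func main() {
	var out bytes.Buffer
	fmt.Fprint(crlfWriter{&out}, "line one\nline two\n")
	fmt.Printf("%q\n", out.String()) // "line one\r\nline two\r\n"
}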
+func (t *Terminal) ReadLine() (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + return t.readLine() +} + +func (t *Terminal) readLine() (line string, err error) { + // t.lock must be held at this point + + if t.cursorX == 0 && t.cursorY == 0 { + t.writeLine(t.prompt) + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + } + + lineIsPasted := t.pasteActive + + for { + rest := t.remainder + lineOk := false + for !lineOk { + var key rune + key, rest = bytesToKey(rest, t.pasteActive) + if key == utf8.RuneError { + break + } + if !t.pasteActive { + if key == keyCtrlD { + if len(t.line) == 0 { + return "", io.EOF + } + } + if key == keyPasteStart { + t.pasteActive = true + if len(t.line) == 0 { + lineIsPasted = true + } + continue + } + } else if key == keyPasteEnd { + t.pasteActive = false + continue + } + if !t.pasteActive { + lineIsPasted = false + } + line, lineOk = t.handleKey(key) + } + if len(rest) > 0 { + n := copy(t.inBuf[:], rest) + t.remainder = t.inBuf[:n] + } else { + t.remainder = nil + } + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + if lineOk { + if t.echo { + t.historyIndex = -1 + t.history.Add(line) + } + if lineIsPasted { + err = ErrPasteIndicator + } + return + } + + // t.remainder is a slice at the beginning of t.inBuf + // containing a partial key sequence + readBuf := t.inBuf[len(t.remainder):] + var n int + + t.lock.Unlock() + n, err = t.c.Read(readBuf) + t.lock.Lock() + + if err != nil { + return + } + + t.remainder = t.inBuf[:n+len(t.remainder)] + } +} + +// SetPrompt sets the prompt to be used when reading subsequent lines. +func (t *Terminal) SetPrompt(prompt string) { + t.lock.Lock() + defer t.lock.Unlock() + + t.prompt = []rune(prompt) +} + +func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) { + // Move cursor to column zero at the start of the line. + t.move(t.cursorY, 0, t.cursorX, 0) + t.cursorX, t.cursorY = 0, 0 + t.clearLineToRight() + for t.cursorY < numPrevLines { + // Move down a line + t.move(0, 1, 0, 0) + t.cursorY++ + t.clearLineToRight() + } + // Move back to beginning. + t.move(t.cursorY, 0, 0, 0) + t.cursorX, t.cursorY = 0, 0 + + t.queue(t.prompt) + t.advanceCursor(visualLength(t.prompt)) + t.writeLine(t.line) + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) SetSize(width, height int) error { + t.lock.Lock() + defer t.lock.Unlock() + + if width == 0 { + width = 1 + } + + oldWidth := t.termWidth + t.termWidth, t.termHeight = width, height + + switch { + case width == oldWidth: + // If the width didn't change then nothing else needs to be + // done. + return nil + case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0: + // If there is nothing on current line and no prompt printed, + // just do nothing + return nil + case width < oldWidth: + // Some terminals (e.g. xterm) will truncate lines that were + // too long when shinking. Others, (e.g. gnome-terminal) will + // attempt to wrap them. For the former, repainting t.maxLine + // works great, but that behaviour goes badly wrong in the case + // of the latter because they have doubled every full line. + + // We assume that we are working on a terminal that wraps lines + // and adjust the cursor position based on every previous line + // wrapping and turning into two. This causes the prompt on + // xterms to move upwards, which isn't great, but it avoids a + // huge mess with gnome-terminal. 
+ if t.cursorX >= t.termWidth { + t.cursorX = t.termWidth - 1 + } + t.cursorY *= 2 + t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2) + case width > oldWidth: + // If the terminal expands then our position calculations will + // be wrong in the future because we think the cursor is + // |t.pos| chars into the string, but there will be a gap at + // the end of any wrapped line. + // + // But the position will actually be correct until we move, so + // we can move back to the beginning and repaint everything. + t.clearAndRepaintLinePlusNPrevious(t.maxLine) + } + + _, err := t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + return err +} + +type pasteIndicatorError struct{} + +func (pasteIndicatorError) Error() string { + return "terminal: ErrPasteIndicator not correctly handled" +} + +// ErrPasteIndicator may be returned from ReadLine as the error, in addition +// to valid line data. It indicates that bracketed paste mode is enabled and +// that the returned line consists only of pasted data. Programs may wish to +// interpret pasted data more literally than typed data. +var ErrPasteIndicator = pasteIndicatorError{} + +// SetBracketedPasteMode requests that the terminal bracket paste operations +// with markers. Not all terminals support this but, if it is supported, then +// enabling this mode will stop any autocomplete callback from running due to +// pastes. Additionally, any lines that are completely pasted will be returned +// from ReadLine with the error set to ErrPasteIndicator. +func (t *Terminal) SetBracketedPasteMode(on bool) { + if on { + io.WriteString(t.c, "\x1b[?2004h") + } else { + io.WriteString(t.c, "\x1b[?2004l") + } +} + +// stRingBuffer is a ring buffer of strings. +type stRingBuffer struct { + // entries contains max elements. + entries []string + max int + // head contains the index of the element most recently added to the ring. + head int + // size contains the number of elements in the ring. + size int +} + +func (s *stRingBuffer) Add(a string) { + if s.entries == nil { + const defaultNumEntries = 100 + s.entries = make([]string, defaultNumEntries) + s.max = defaultNumEntries + } + + s.head = (s.head + 1) % s.max + s.entries[s.head] = a + if s.size < s.max { + s.size++ + } +} + +// NthPreviousEntry returns the value passed to the nth previous call to Add. +// If n is zero then the immediately prior value is returned, if one, then the +// next most recent, and so on. If such an element doesn't exist then ok is +// false. +func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { + if n >= s.size { + return "", false + } + index := s.head - n + if index < 0 { + index += s.max + } + return s.entries[index], true +} + +// readPasswordLine reads from reader until it finds \n or io.EOF. +// The slice returned does not include the \n. +// readPasswordLine also ignores any \r it finds. 
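stRingBuffer is the terminal's command history: a fixed-capacity ring where Add overwrites the oldest entry and NthPreviousEntry(0) is the most recent line. A stripped-down sketch of the same indexing, with hypothetical names:

package main

import "fmt"

// ring sketches stRingBuffer: a fixed-size history where add overwrites the
// oldest entry and nth(0) is the most recently added one.
type ring struct {
	entries    []string
	head, size int
}

func newRing(max int) *ring { return &ring{entries: make([]string, max)} }

func (r *ring) add(s string) {
	r.head = (r.head + 1) % len(r.entries)
	r.entries[r.head] = s
	if r.size < len(r.entries) {
		r.size++
	}
}

func (r *ring) nth(n int) (string, bool) {
	if n >= r.size {
		return "", false
	}
	i := r.head - n
	if i < 0 {
		i += len(r.entries)
	}
	return r.entries[i], true
}

func main() {
	h := newRing(3)
	for _, cmd := range []string{"ls", "cd /tmp", "make", "go test"} {
		h.add(cmd) // "ls" falls out once the fourth command arrives
	}
	for n := 0; ; n++ {
		entry, ok := h.nth(n)
		if !ok {
			break
		}
		fmt.Printf("history[-%d] = %q\n", n+1, entry)
	}
}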
+func readPasswordLine(reader io.Reader) ([]byte, error) { + var buf [1]byte + var ret []byte + + for { + n, err := reader.Read(buf[:]) + if n > 0 { + switch buf[0] { + case '\n': + return ret, nil + case '\r': + // remove \r from passwords on Windows + default: + ret = append(ret, buf[0]) + } + continue + } + if err != nil { + if err == io.EOF && len(ret) > 0 { + return ret, nil + } + return ret, err + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go new file mode 100644 index 000000000..731c89a28 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -0,0 +1,114 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "golang.org/x/sys/unix" +) + +// State contains the state of a terminal. +type State struct { + termios unix.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + oldState := State{termios: *termios} + + // This attempts to replicate the behaviour documented for cfmakeraw in + // the termios(3) manpage. + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, termios); err != nil { + return nil, err + } + + return &oldState, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + return &State{termios: *termios}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return unix.IoctlSetTermios(fd, ioctlWriteTermios, &state.termios) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return -1, -1, err + } + return int(ws.Col), int(ws.Row), nil +} + +// passwordReader is an io.Reader that reads from a specific file descriptor. 
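Putting the pieces together: the exported Unix helpers above (IsTerminal, MakeRaw, Restore) are what callers combine with NewTerminal and ReadLine. A minimal interactive loop, assuming a Unix build and that stdin/stdout are the controlling terminal:

package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

// readWriter joins stdin and stdout into the io.ReadWriter NewTerminal expects.
type readWriter struct {
	io.Reader
	io.Writer
}

func main() {
	fd := int(os.Stdin.Fd())
	if !terminal.IsTerminal(fd) {
		fmt.Fprintln(os.Stderr, "stdin is not a terminal")
		return
	}

	// The terminal must be in raw mode before NewTerminal can see key presses.
	oldState, err := terminal.MakeRaw(fd)
	if err != nil {
		panic(err)
	}
	defer terminal.Restore(fd, oldState)

	t := terminal.NewTerminal(readWriter{os.Stdin, os.Stdout}, "> ")
	for {
		line, err := t.ReadLine()
		if err != nil { // io.EOF on Ctrl-D at an empty line
			return
		}
		fmt.Fprintf(t, "you typed: %q\n", line)
	}
}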
+type passwordReader int + +func (r passwordReader) Read(buf []byte) (int, error) { + return unix.Read(int(r), buf) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + newState := *termios + newState.Lflag &^= unix.ECHO + newState.Lflag |= unix.ICANON | unix.ISIG + newState.Iflag |= unix.ICRNL + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newState); err != nil { + return nil, err + } + + defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios) + + return readPasswordLine(passwordReader(fd)) +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go new file mode 100644 index 000000000..cb23a5904 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package terminal + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA +const ioctlWriteTermios = unix.TIOCSETA diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go new file mode 100644 index 000000000..5fadfe8a1 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go @@ -0,0 +1,10 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS +const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go new file mode 100644 index 000000000..799f049f0 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go @@ -0,0 +1,58 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "fmt" + "runtime" +) + +type State struct{} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + return false +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. 
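ReadPassword is the package-level convenience for prompting without echo; a minimal sketch, on any platform with a real implementation:

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	fmt.Print("Password: ")
	pw, err := terminal.ReadPassword(int(os.Stdin.Fd()))
	fmt.Println() // echo was off, so print the newline ourselves
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(pw))
}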
+func GetState(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go new file mode 100644 index 000000000..9e41b9f43 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go @@ -0,0 +1,124 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "golang.org/x/sys/unix" + "io" + "syscall" +) + +// State contains the state of a terminal. +type State struct { + termios unix.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + // see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c + val, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + oldState := *val + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState) + if err != nil { + return nil, err + } + + defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState) + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+// see http://cr.illumos.org/~webrev/andy_js/1060/ +func MakeRaw(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + + oldState := State{termios: *termios} + + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil { + return nil, err + } + + return &oldState, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, oldState *State) error { + return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + + return &State{termios: *termios}, nil +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return 0, 0, err + } + return int(ws.Col), int(ws.Row), nil +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go new file mode 100644 index 000000000..8618955df --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -0,0 +1,103 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "os" + + "golang.org/x/sys/windows" +) + +type State struct { + mode uint32 +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var st uint32 + err := windows.GetConsoleMode(windows.Handle(fd), &st) + return err == nil +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var st uint32 + if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { + return nil, err + } + raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { + return nil, err + } + return &State{st}, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. 
+func GetState(fd int) (*State, error) { + var st uint32 + if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { + return nil, err + } + return &State{st}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return windows.SetConsoleMode(windows.Handle(fd), state.mode) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var info windows.ConsoleScreenBufferInfo + if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { + return 0, 0, err + } + return int(info.Size.X), int(info.Size.Y), nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + var st uint32 + if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { + return nil, err + } + old := st + + st &^= (windows.ENABLE_ECHO_INPUT) + st |= (windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil { + return nil, err + } + + defer windows.SetConsoleMode(windows.Handle(fd), old) + + var h windows.Handle + p, _ := windows.GetCurrentProcess() + if err := windows.DuplicateHandle(p, windows.Handle(fd), p, &h, 0, false, windows.DUPLICATE_SAME_ACCESS); err != nil { + return nil, err + } + + f := os.NewFile(uintptr(h), "stdin") + defer f.Close() + return readPasswordLine(f) +} diff --git a/vendor/golang.org/x/sys/windows/asm_windows_386.s b/vendor/golang.org/x/sys/windows/asm_windows_386.s new file mode 100644 index 000000000..21d994d31 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/asm_windows_386.s @@ -0,0 +1,13 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +// System calls for 386, Windows are implemented in runtime/syscall_windows.goc +// + +TEXT ·getprocaddress(SB), 7, $0-16 + JMP syscall·getprocaddress(SB) + +TEXT ·loadlibrary(SB), 7, $0-12 + JMP syscall·loadlibrary(SB) diff --git a/vendor/golang.org/x/sys/windows/asm_windows_amd64.s b/vendor/golang.org/x/sys/windows/asm_windows_amd64.s new file mode 100644 index 000000000..5bfdf7974 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/asm_windows_amd64.s @@ -0,0 +1,13 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +// System calls for amd64, Windows are implemented in runtime/syscall_windows.goc +// + +TEXT ·getprocaddress(SB), 7, $0-32 + JMP syscall·getprocaddress(SB) + +TEXT ·loadlibrary(SB), 7, $0-24 + JMP syscall·loadlibrary(SB) diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go new file mode 100644 index 000000000..e92c05b21 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -0,0 +1,378 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "sync" + "sync/atomic" + "syscall" + "unsafe" +) + +// DLLError describes reasons for DLL load failures. 
+type DLLError struct { + Err error + ObjName string + Msg string +} + +func (e *DLLError) Error() string { return e.Msg } + +// Implemented in runtime/syscall_windows.goc; we provide jumps to them in our assembly file. +func loadlibrary(filename *uint16) (handle uintptr, err syscall.Errno) +func getprocaddress(handle uintptr, procname *uint8) (proc uintptr, err syscall.Errno) + +// A DLL implements access to a single DLL. +type DLL struct { + Name string + Handle Handle +} + +// LoadDLL loads DLL file into memory. +// +// Warning: using LoadDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use LazyDLL +// with System set to true, or use LoadLibraryEx directly. +func LoadDLL(name string) (dll *DLL, err error) { + namep, err := UTF16PtrFromString(name) + if err != nil { + return nil, err + } + h, e := loadlibrary(namep) + if e != 0 { + return nil, &DLLError{ + Err: e, + ObjName: name, + Msg: "Failed to load " + name + ": " + e.Error(), + } + } + d := &DLL{ + Name: name, + Handle: Handle(h), + } + return d, nil +} + +// MustLoadDLL is like LoadDLL but panics if load operation failes. +func MustLoadDLL(name string) *DLL { + d, e := LoadDLL(name) + if e != nil { + panic(e) + } + return d +} + +// FindProc searches DLL d for procedure named name and returns *Proc +// if found. It returns an error if search fails. +func (d *DLL) FindProc(name string) (proc *Proc, err error) { + namep, err := BytePtrFromString(name) + if err != nil { + return nil, err + } + a, e := getprocaddress(uintptr(d.Handle), namep) + if e != 0 { + return nil, &DLLError{ + Err: e, + ObjName: name, + Msg: "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(), + } + } + p := &Proc{ + Dll: d, + Name: name, + addr: a, + } + return p, nil +} + +// MustFindProc is like FindProc but panics if search fails. +func (d *DLL) MustFindProc(name string) *Proc { + p, e := d.FindProc(name) + if e != nil { + panic(e) + } + return p +} + +// Release unloads DLL d from memory. +func (d *DLL) Release() (err error) { + return FreeLibrary(d.Handle) +} + +// A Proc implements access to a procedure inside a DLL. +type Proc struct { + Dll *DLL + Name string + addr uintptr +} + +// Addr returns the address of the procedure represented by p. +// The return value can be passed to Syscall to run the procedure. +func (p *Proc) Addr() uintptr { + return p.addr +} + +//go:uintptrescapes + +// Call executes procedure p with arguments a. It will panic, if more than 15 arguments +// are supplied. +// +// The returned error is always non-nil, constructed from the result of GetLastError. +// Callers must inspect the primary return value to decide whether an error occurred +// (according to the semantics of the specific function being called) before consulting +// the error. The error will be guaranteed to contain windows.Errno. 
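The Proc.Call convention documented above (the error is always non-nil and only meaningful when the primary return value signals failure) is easiest to see with a trivial procedure. A sketch that resolves kernel32!GetTickCount64 by hand; it assumes a 64-bit Windows build so the full return value fits in r1, and note that for system DLLs the package itself recommends the lazy, System-aware loaders defined further down this file:

// +build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	dll, err := windows.LoadDLL("kernel32.dll")
	if err != nil {
		panic(err)
	}
	defer dll.Release()

	proc, err := dll.FindProc("GetTickCount64")
	if err != nil {
		panic(err)
	}

	// Call's error is always non-nil; it only matters when the primary
	// return value says the call failed, which GetTickCount64 never does.
	ticks, _, _ := proc.Call()
	fmt.Printf("system uptime: %d ms\n", ticks) // assumes a 64-bit build
}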
+func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { + switch len(a) { + case 0: + return syscall.Syscall(p.Addr(), uintptr(len(a)), 0, 0, 0) + case 1: + return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], 0, 0) + case 2: + return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], 0) + case 3: + return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], a[2]) + case 4: + return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], 0, 0) + case 5: + return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], 0) + case 6: + return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5]) + case 7: + return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], 0, 0) + case 8: + return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], 0) + case 9: + return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]) + case 10: + return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], 0, 0) + case 11: + return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], 0) + case 12: + return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11]) + case 13: + return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], 0, 0) + case 14: + return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], 0) + case 15: + return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14]) + default: + panic("Call " + p.Name + " with too many arguments " + itoa(len(a)) + ".") + } +} + +// A LazyDLL implements access to a single DLL. +// It will delay the load of the DLL until the first +// call to its Handle method or to one of its +// LazyProc's Addr method. +type LazyDLL struct { + Name string + + // System determines whether the DLL must be loaded from the + // Windows System directory, bypassing the normal DLL search + // path. + System bool + + mu sync.Mutex + dll *DLL // non nil once DLL is loaded +} + +// Load loads DLL file d.Name into memory. It returns an error if fails. +// Load will not try to load DLL, if it is already loaded into memory. +func (d *LazyDLL) Load() error { + // Non-racy version of: + // if d.dll != nil { + if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) != nil { + return nil + } + d.mu.Lock() + defer d.mu.Unlock() + if d.dll != nil { + return nil + } + + // kernel32.dll is special, since it's where LoadLibraryEx comes from. + // The kernel already special-cases its name, so it's always + // loaded from system32. + var dll *DLL + var err error + if d.Name == "kernel32.dll" { + dll, err = LoadDLL(d.Name) + } else { + dll, err = loadLibraryEx(d.Name, d.System) + } + if err != nil { + return err + } + + // Non-racy version of: + // d.dll = dll + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll)) + return nil +} + +// mustLoad is like Load but panics if search fails. +func (d *LazyDLL) mustLoad() { + e := d.Load() + if e != nil { + panic(e) + } +} + +// Handle returns d's module handle. 
+func (d *LazyDLL) Handle() uintptr { + d.mustLoad() + return uintptr(d.dll.Handle) +} + +// NewProc returns a LazyProc for accessing the named procedure in the DLL d. +func (d *LazyDLL) NewProc(name string) *LazyProc { + return &LazyProc{l: d, Name: name} +} + +// NewLazyDLL creates new LazyDLL associated with DLL file. +func NewLazyDLL(name string) *LazyDLL { + return &LazyDLL{Name: name} +} + +// NewLazySystemDLL is like NewLazyDLL, but will only +// search Windows System directory for the DLL if name is +// a base name (like "advapi32.dll"). +func NewLazySystemDLL(name string) *LazyDLL { + return &LazyDLL{Name: name, System: true} +} + +// A LazyProc implements access to a procedure inside a LazyDLL. +// It delays the lookup until the Addr method is called. +type LazyProc struct { + Name string + + mu sync.Mutex + l *LazyDLL + proc *Proc +} + +// Find searches DLL for procedure named p.Name. It returns +// an error if search fails. Find will not search procedure, +// if it is already found and loaded into memory. +func (p *LazyProc) Find() error { + // Non-racy version of: + // if p.proc == nil { + if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc))) == nil { + p.mu.Lock() + defer p.mu.Unlock() + if p.proc == nil { + e := p.l.Load() + if e != nil { + return e + } + proc, e := p.l.dll.FindProc(p.Name) + if e != nil { + return e + } + // Non-racy version of: + // p.proc = proc + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc)), unsafe.Pointer(proc)) + } + } + return nil +} + +// mustFind is like Find but panics if search fails. +func (p *LazyProc) mustFind() { + e := p.Find() + if e != nil { + panic(e) + } +} + +// Addr returns the address of the procedure represented by p. +// The return value can be passed to Syscall to run the procedure. +// It will panic if the procedure cannot be found. +func (p *LazyProc) Addr() uintptr { + p.mustFind() + return p.proc.Addr() +} + +//go:uintptrescapes + +// Call executes procedure p with arguments a. It will panic, if more than 15 arguments +// are supplied. It will also panic if the procedure cannot be found. +// +// The returned error is always non-nil, constructed from the result of GetLastError. +// Callers must inspect the primary return value to decide whether an error occurred +// (according to the semantics of the specific function being called) before consulting +// the error. The error will be guaranteed to contain windows.Errno. +func (p *LazyProc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { + p.mustFind() + return p.proc.Call(a...) +} + +var canDoSearchSystem32Once struct { + sync.Once + v bool +} + +func initCanDoSearchSystem32() { + // https://msdn.microsoft.com/en-us/library/ms684179(v=vs.85).aspx says: + // "Windows 7, Windows Server 2008 R2, Windows Vista, and Windows + // Server 2008: The LOAD_LIBRARY_SEARCH_* flags are available on + // systems that have KB2533623 installed. To determine whether the + // flags are available, use GetProcAddress to get the address of the + // AddDllDirectory, RemoveDllDirectory, or SetDefaultDllDirectories + // function. If GetProcAddress succeeds, the LOAD_LIBRARY_SEARCH_* + // flags can be used with LoadLibraryEx." 
+ canDoSearchSystem32Once.v = (modkernel32.NewProc("AddDllDirectory").Find() == nil) +} + +func canDoSearchSystem32() bool { + canDoSearchSystem32Once.Do(initCanDoSearchSystem32) + return canDoSearchSystem32Once.v +} + +func isBaseName(name string) bool { + for _, c := range name { + if c == ':' || c == '/' || c == '\\' { + return false + } + } + return true +} + +// loadLibraryEx wraps the Windows LoadLibraryEx function. +// +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684179(v=vs.85).aspx +// +// If name is not an absolute path, LoadLibraryEx searches for the DLL +// in a variety of automatic locations unless constrained by flags. +// See: https://msdn.microsoft.com/en-us/library/ff919712%28VS.85%29.aspx +func loadLibraryEx(name string, system bool) (*DLL, error) { + loadDLL := name + var flags uintptr + if system { + if canDoSearchSystem32() { + const LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800 + flags = LOAD_LIBRARY_SEARCH_SYSTEM32 + } else if isBaseName(name) { + // WindowsXP or unpatched Windows machine + // trying to load "foo.dll" out of the system + // folder, but LoadLibraryEx doesn't support + // that yet on their system, so emulate it. + windir, _ := Getenv("WINDIR") // old var; apparently works on XP + if windir == "" { + return nil, errString("%WINDIR% not defined") + } + loadDLL = windir + "\\System32\\" + name + } + } + h, err := LoadLibraryEx(loadDLL, 0, flags) + if err != nil { + return nil, err + } + return &DLL{Name: name, Handle: h}, nil +} + +type errString string + +func (s errString) Error() string { return string(s) } diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go new file mode 100644 index 000000000..bdc71e241 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -0,0 +1,29 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Windows environment variables. + +package windows + +import "syscall" + +func Getenv(key string) (value string, found bool) { + return syscall.Getenv(key) +} + +func Setenv(key, value string) error { + return syscall.Setenv(key, value) +} + +func Clearenv() { + syscall.Clearenv() +} + +func Environ() []string { + return syscall.Environ() +} + +func Unsetenv(key string) error { + return syscall.Unsetenv(key) +} diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go new file mode 100644 index 000000000..40af946e1 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/eventlog.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build windows + +package windows + +const ( + EVENTLOG_SUCCESS = 0 + EVENTLOG_ERROR_TYPE = 1 + EVENTLOG_WARNING_TYPE = 2 + EVENTLOG_INFORMATION_TYPE = 4 + EVENTLOG_AUDIT_SUCCESS = 8 + EVENTLOG_AUDIT_FAILURE = 16 +) + +//sys RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) [failretval==0] = advapi32.RegisterEventSourceW +//sys DeregisterEventSource(handle Handle) (err error) = advapi32.DeregisterEventSource +//sys ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) = advapi32.ReportEventW diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go new file mode 100644 index 000000000..3606c3a8b --- /dev/null +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -0,0 +1,97 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Fork, exec, wait, etc. + +package windows + +// EscapeArg rewrites command line argument s as prescribed +// in http://msdn.microsoft.com/en-us/library/ms880421. +// This function returns "" (2 double quotes) if s is empty. +// Alternatively, these transformations are done: +// - every back slash (\) is doubled, but only if immediately +// followed by double quote ("); +// - every double quote (") is escaped by back slash (\); +// - finally, s is wrapped with double quotes (arg -> "arg"), +// but only if there is space or tab inside s. +func EscapeArg(s string) string { + if len(s) == 0 { + return "\"\"" + } + n := len(s) + hasSpace := false + for i := 0; i < len(s); i++ { + switch s[i] { + case '"', '\\': + n++ + case ' ', '\t': + hasSpace = true + } + } + if hasSpace { + n += 2 + } + if n == len(s) { + return s + } + + qs := make([]byte, n) + j := 0 + if hasSpace { + qs[j] = '"' + j++ + } + slashes := 0 + for i := 0; i < len(s); i++ { + switch s[i] { + default: + slashes = 0 + qs[j] = s[i] + case '\\': + slashes++ + qs[j] = s[i] + case '"': + for ; slashes > 0; slashes-- { + qs[j] = '\\' + j++ + } + qs[j] = '\\' + j++ + qs[j] = s[i] + } + j++ + } + if hasSpace { + for ; slashes > 0; slashes-- { + qs[j] = '\\' + j++ + } + qs[j] = '"' + j++ + } + return string(qs[:j]) +} + +func CloseOnExec(fd Handle) { + SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) +} + +// FullPath retrieves the full path of the specified file. +func FullPath(name string) (path string, err error) { + p, err := UTF16PtrFromString(name) + if err != nil { + return "", err + } + n := uint32(100) + for { + buf := make([]uint16, n) + n, err = GetFullPathName(p, uint32(len(buf)), &buf[0], nil) + if err != nil { + return "", err + } + if n <= uint32(len(buf)) { + return UTF16ToString(buf[:n]), nil + } + } +} diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go new file mode 100644 index 000000000..f80a4204f --- /dev/null +++ b/vendor/golang.org/x/sys/windows/memory_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
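EscapeArg, added above, implements the Windows command-line quoting rules (double backslashes before quotes, escape quotes, wrap arguments containing spaces or tabs). A small demonstration; the sample arguments are arbitrary:

// +build windows

package main

import (
	"fmt"
	"strings"

	"golang.org/x/sys/windows"
)

func main() {
	args := []string{`C:\Program Files\tool.exe`, `--name=say "hi"`, `plain`}
	quoted := make([]string, len(args))
	for i, a := range args {
		quoted[i] = windows.EscapeArg(a)
	}
	// Arguments with spaces or tabs are wrapped in quotes and embedded
	// quotes are backslash-escaped; "plain" passes through untouched.
	fmt.Println(strings.Join(quoted, " "))
}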
+ +package windows + +const ( + MEM_COMMIT = 0x00001000 + MEM_RESERVE = 0x00002000 + MEM_DECOMMIT = 0x00004000 + MEM_RELEASE = 0x00008000 + MEM_RESET = 0x00080000 + MEM_TOP_DOWN = 0x00100000 + MEM_WRITE_WATCH = 0x00200000 + MEM_PHYSICAL = 0x00400000 + MEM_RESET_UNDO = 0x01000000 + MEM_LARGE_PAGES = 0x20000000 + + PAGE_NOACCESS = 0x01 + PAGE_READONLY = 0x02 + PAGE_READWRITE = 0x04 + PAGE_WRITECOPY = 0x08 + PAGE_EXECUTE_READ = 0x20 + PAGE_EXECUTE_READWRITE = 0x40 + PAGE_EXECUTE_WRITECOPY = 0x80 +) diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go new file mode 100644 index 000000000..fb7db0ef8 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -0,0 +1,7 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go new file mode 100644 index 000000000..a74e3e24b --- /dev/null +++ b/vendor/golang.org/x/sys/windows/race.go @@ -0,0 +1,30 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,race + +package windows + +import ( + "runtime" + "unsafe" +) + +const raceenabled = true + +func raceAcquire(addr unsafe.Pointer) { + runtime.RaceAcquire(addr) +} + +func raceReleaseMerge(addr unsafe.Pointer) { + runtime.RaceReleaseMerge(addr) +} + +func raceReadRange(addr unsafe.Pointer, len int) { + runtime.RaceReadRange(addr, len) +} + +func raceWriteRange(addr unsafe.Pointer, len int) { + runtime.RaceWriteRange(addr, len) +} diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go new file mode 100644 index 000000000..e44a3cbf6 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -0,0 +1,25 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,!race + +package windows + +import ( + "unsafe" +) + +const raceenabled = false + +func raceAcquire(addr unsafe.Pointer) { +} + +func raceReleaseMerge(addr unsafe.Pointer) { +} + +func raceReadRange(addr unsafe.Pointer, len int) { +} + +func raceWriteRange(addr unsafe.Pointer, len int) { +} diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go new file mode 100644 index 000000000..4f17a3331 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -0,0 +1,478 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package windows + +import ( + "syscall" + "unsafe" +) + +const ( + STANDARD_RIGHTS_REQUIRED = 0xf0000 + STANDARD_RIGHTS_READ = 0x20000 + STANDARD_RIGHTS_WRITE = 0x20000 + STANDARD_RIGHTS_EXECUTE = 0x20000 + STANDARD_RIGHTS_ALL = 0x1F0000 +) + +const ( + NameUnknown = 0 + NameFullyQualifiedDN = 1 + NameSamCompatible = 2 + NameDisplay = 3 + NameUniqueId = 6 + NameCanonical = 7 + NameUserPrincipal = 8 + NameCanonicalEx = 9 + NameServicePrincipal = 10 + NameDnsDomain = 12 +) + +// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. +// http://blogs.msdn.com/b/drnick/archive/2007/12/19/windows-and-upn-format-credentials.aspx +//sys TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.TranslateNameW +//sys GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.GetUserNameExW + +// TranslateAccountName converts a directory service +// object name from one format to another. +func TranslateAccountName(username string, from, to uint32, initSize int) (string, error) { + u, e := UTF16PtrFromString(username) + if e != nil { + return "", e + } + n := uint32(50) + for { + b := make([]uint16, n) + e = TranslateName(u, from, to, &b[0], &n) + if e == nil { + return UTF16ToString(b[:n]), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", e + } + if n <= uint32(len(b)) { + return "", e + } + } +} + +const ( + // do not reorder + NetSetupUnknownStatus = iota + NetSetupUnjoined + NetSetupWorkgroupName + NetSetupDomainName +) + +type UserInfo10 struct { + Name *uint16 + Comment *uint16 + UsrComment *uint16 + FullName *uint16 +} + +//sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo +//sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation +//sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree + +const ( + // do not reorder + SidTypeUser = 1 + iota + SidTypeGroup + SidTypeDomain + SidTypeAlias + SidTypeWellKnownGroup + SidTypeDeletedAccount + SidTypeInvalid + SidTypeUnknown + SidTypeComputer + SidTypeLabel +) + +type SidIdentifierAuthority struct { + Value [6]byte +} + +var ( + SECURITY_NULL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 0}} + SECURITY_WORLD_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 1}} + SECURITY_LOCAL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 2}} + SECURITY_CREATOR_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 3}} + SECURITY_NON_UNIQUE_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 4}} + SECURITY_NT_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 5}} + SECURITY_MANDATORY_LABEL_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 16}} +) + +const ( + SECURITY_NULL_RID = 0 + SECURITY_WORLD_RID = 0 + SECURITY_LOCAL_RID = 0 + SECURITY_CREATOR_OWNER_RID = 0 + SECURITY_CREATOR_GROUP_RID = 1 + SECURITY_DIALUP_RID = 1 + SECURITY_NETWORK_RID = 2 + SECURITY_BATCH_RID = 3 + SECURITY_INTERACTIVE_RID = 4 + SECURITY_LOGON_IDS_RID = 5 + SECURITY_SERVICE_RID = 6 + SECURITY_LOCAL_SYSTEM_RID = 18 + SECURITY_BUILTIN_DOMAIN_RID = 32 + SECURITY_PRINCIPAL_SELF_RID = 10 + SECURITY_CREATOR_OWNER_SERVER_RID = 0x2 + SECURITY_CREATOR_GROUP_SERVER_RID = 0x3 + SECURITY_LOGON_IDS_RID_COUNT = 0x3 + SECURITY_ANONYMOUS_LOGON_RID = 0x7 + SECURITY_PROXY_RID = 0x8 + 
SECURITY_ENTERPRISE_CONTROLLERS_RID = 0x9 + SECURITY_SERVER_LOGON_RID = SECURITY_ENTERPRISE_CONTROLLERS_RID + SECURITY_AUTHENTICATED_USER_RID = 0xb + SECURITY_RESTRICTED_CODE_RID = 0xc + SECURITY_NT_NON_UNIQUE_RID = 0x15 +) + +// Predefined domain-relative RIDs for local groups. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa379649(v=vs.85).aspx +const ( + DOMAIN_ALIAS_RID_ADMINS = 0x220 + DOMAIN_ALIAS_RID_USERS = 0x221 + DOMAIN_ALIAS_RID_GUESTS = 0x222 + DOMAIN_ALIAS_RID_POWER_USERS = 0x223 + DOMAIN_ALIAS_RID_ACCOUNT_OPS = 0x224 + DOMAIN_ALIAS_RID_SYSTEM_OPS = 0x225 + DOMAIN_ALIAS_RID_PRINT_OPS = 0x226 + DOMAIN_ALIAS_RID_BACKUP_OPS = 0x227 + DOMAIN_ALIAS_RID_REPLICATOR = 0x228 + DOMAIN_ALIAS_RID_RAS_SERVERS = 0x229 + DOMAIN_ALIAS_RID_PREW2KCOMPACCESS = 0x22a + DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS = 0x22b + DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS = 0x22c + DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 0x22d + DOMAIN_ALIAS_RID_MONITORING_USERS = 0X22e + DOMAIN_ALIAS_RID_LOGGING_USERS = 0x22f + DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS = 0x230 + DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS = 0x231 + DOMAIN_ALIAS_RID_DCOM_USERS = 0x232 + DOMAIN_ALIAS_RID_IUSERS = 0x238 + DOMAIN_ALIAS_RID_CRYPTO_OPERATORS = 0x239 + DOMAIN_ALIAS_RID_CACHEABLE_PRINCIPALS_GROUP = 0x23b + DOMAIN_ALIAS_RID_NON_CACHEABLE_PRINCIPALS_GROUP = 0x23c + DOMAIN_ALIAS_RID_EVENT_LOG_READERS_GROUP = 0x23d + DOMAIN_ALIAS_RID_CERTSVC_DCOM_ACCESS_GROUP = 0x23e +) + +//sys LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountSidW +//sys LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountNameW +//sys ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) = advapi32.ConvertStringSidToSidW +//sys GetLengthSid(sid *SID) (len uint32) = advapi32.GetLengthSid +//sys CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid +//sys AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) = advapi32.AllocateAndInitializeSid +//sys FreeSid(sid *SID) (err error) [failretval!=0] = advapi32.FreeSid +//sys EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) = advapi32.EqualSid + +// The security identifier (SID) structure is a variable-length +// structure used to uniquely identify users or groups. +type SID struct{} + +// StringToSid converts a string-format security identifier +// sid into a valid, functional sid. +func StringToSid(s string) (*SID, error) { + var sid *SID + p, e := UTF16PtrFromString(s) + if e != nil { + return nil, e + } + e = ConvertStringSidToSid(p, &sid) + if e != nil { + return nil, e + } + defer LocalFree((Handle)(unsafe.Pointer(sid))) + return sid.Copy() +} + +// LookupSID retrieves a security identifier sid for the account +// and the name of the domain on which the account was found. +// System specify target computer to search. 
+func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) { + if len(account) == 0 { + return nil, "", 0, syscall.EINVAL + } + acc, e := UTF16PtrFromString(account) + if e != nil { + return nil, "", 0, e + } + var sys *uint16 + if len(system) > 0 { + sys, e = UTF16PtrFromString(system) + if e != nil { + return nil, "", 0, e + } + } + n := uint32(50) + dn := uint32(50) + for { + b := make([]byte, n) + db := make([]uint16, dn) + sid = (*SID)(unsafe.Pointer(&b[0])) + e = LookupAccountName(sys, acc, sid, &n, &db[0], &dn, &accType) + if e == nil { + return sid, UTF16ToString(db), accType, nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return nil, "", 0, e + } + if n <= uint32(len(b)) { + return nil, "", 0, e + } + } +} + +// String converts sid to a string format +// suitable for display, storage, or transmission. +func (sid *SID) String() (string, error) { + var s *uint16 + e := ConvertSidToStringSid(sid, &s) + if e != nil { + return "", e + } + defer LocalFree((Handle)(unsafe.Pointer(s))) + return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]), nil +} + +// Len returns the length, in bytes, of a valid security identifier sid. +func (sid *SID) Len() int { + return int(GetLengthSid(sid)) +} + +// Copy creates a duplicate of security identifier sid. +func (sid *SID) Copy() (*SID, error) { + b := make([]byte, sid.Len()) + sid2 := (*SID)(unsafe.Pointer(&b[0])) + e := CopySid(uint32(len(b)), sid2, sid) + if e != nil { + return nil, e + } + return sid2, nil +} + +// LookupAccount retrieves the name of the account for this sid +// and the name of the first domain on which this sid is found. +// System specify target computer to search for. +func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) { + var sys *uint16 + if len(system) > 0 { + sys, err = UTF16PtrFromString(system) + if err != nil { + return "", "", 0, err + } + } + n := uint32(50) + dn := uint32(50) + for { + b := make([]uint16, n) + db := make([]uint16, dn) + e := LookupAccountSid(sys, sid, &b[0], &n, &db[0], &dn, &accType) + if e == nil { + return UTF16ToString(b), UTF16ToString(db), accType, nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", "", 0, e + } + if n <= uint32(len(b)) { + return "", "", 0, e + } + } +} + +const ( + // do not reorder + TOKEN_ASSIGN_PRIMARY = 1 << iota + TOKEN_DUPLICATE + TOKEN_IMPERSONATE + TOKEN_QUERY + TOKEN_QUERY_SOURCE + TOKEN_ADJUST_PRIVILEGES + TOKEN_ADJUST_GROUPS + TOKEN_ADJUST_DEFAULT + TOKEN_ADJUST_SESSIONID + + TOKEN_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | + TOKEN_ASSIGN_PRIMARY | + TOKEN_DUPLICATE | + TOKEN_IMPERSONATE | + TOKEN_QUERY | + TOKEN_QUERY_SOURCE | + TOKEN_ADJUST_PRIVILEGES | + TOKEN_ADJUST_GROUPS | + TOKEN_ADJUST_DEFAULT | + TOKEN_ADJUST_SESSIONID + TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY + TOKEN_WRITE = STANDARD_RIGHTS_WRITE | + TOKEN_ADJUST_PRIVILEGES | + TOKEN_ADJUST_GROUPS | + TOKEN_ADJUST_DEFAULT + TOKEN_EXECUTE = STANDARD_RIGHTS_EXECUTE +) + +const ( + // do not reorder + TokenUser = 1 + iota + TokenGroups + TokenPrivileges + TokenOwner + TokenPrimaryGroup + TokenDefaultDacl + TokenSource + TokenType + TokenImpersonationLevel + TokenStatistics + TokenRestrictedSids + TokenSessionId + TokenGroupsAndPrivileges + TokenSessionReference + TokenSandBoxInert + TokenAuditPolicy + TokenOrigin + TokenElevationType + TokenLinkedToken + TokenElevation + TokenHasRestrictions + TokenAccessInformation + TokenVirtualizationAllowed + TokenVirtualizationEnabled + TokenIntegrityLevel + TokenUIAccess + 
TokenMandatoryPolicy + TokenLogonSid + MaxTokenInfoClass +) + +type SIDAndAttributes struct { + Sid *SID + Attributes uint32 +} + +type Tokenuser struct { + User SIDAndAttributes +} + +type Tokenprimarygroup struct { + PrimaryGroup *SID +} + +type Tokengroups struct { + GroupCount uint32 + Groups [1]SIDAndAttributes +} + +// Authorization Functions +//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership +//sys OpenProcessToken(h Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken +//sys GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation +//sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW + +// An access token contains the security information for a logon session. +// The system creates an access token when a user logs on, and every +// process executed on behalf of the user has a copy of the token. +// The token identifies the user, the user's groups, and the user's +// privileges. The system uses the token to control access to securable +// objects and to control the ability of the user to perform various +// system-related operations on the local computer. +type Token Handle + +// OpenCurrentProcessToken opens the access token +// associated with current process. +func OpenCurrentProcessToken() (Token, error) { + p, e := GetCurrentProcess() + if e != nil { + return 0, e + } + var t Token + e = OpenProcessToken(p, TOKEN_QUERY, &t) + if e != nil { + return 0, e + } + return t, nil +} + +// Close releases access to access token. +func (t Token) Close() error { + return CloseHandle(Handle(t)) +} + +// getInfo retrieves a specified type of information about an access token. +func (t Token) getInfo(class uint32, initSize int) (unsafe.Pointer, error) { + n := uint32(initSize) + for { + b := make([]byte, n) + e := GetTokenInformation(t, class, &b[0], uint32(len(b)), &n) + if e == nil { + return unsafe.Pointer(&b[0]), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return nil, e + } + if n <= uint32(len(b)) { + return nil, e + } + } +} + +// GetTokenUser retrieves access token t user account information. +func (t Token) GetTokenUser() (*Tokenuser, error) { + i, e := t.getInfo(TokenUser, 50) + if e != nil { + return nil, e + } + return (*Tokenuser)(i), nil +} + +// GetTokenGroups retrieves group accounts associated with access token t. +func (t Token) GetTokenGroups() (*Tokengroups, error) { + i, e := t.getInfo(TokenGroups, 50) + if e != nil { + return nil, e + } + return (*Tokengroups)(i), nil +} + +// GetTokenPrimaryGroup retrieves access token t primary group information. +// A pointer to a SID structure representing a group that will become +// the primary group of any objects created by a process using this access token. +func (t Token) GetTokenPrimaryGroup() (*Tokenprimarygroup, error) { + i, e := t.getInfo(TokenPrimaryGroup, 50) + if e != nil { + return nil, e + } + return (*Tokenprimarygroup)(i), nil +} + +// GetUserProfileDirectory retrieves path to the +// root directory of the access token t user's profile. 
+func (t Token) GetUserProfileDirectory() (string, error) { + n := uint32(100) + for { + b := make([]uint16, n) + e := GetUserProfileDirectory(t, &b[0], &n) + if e == nil { + return UTF16ToString(b), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", e + } + if n <= uint32(len(b)) { + return "", e + } + } +} + +// IsMember reports whether the access token t is a member of the provided SID. +func (t Token) IsMember(sid *SID) (bool, error) { + var b int32 + if e := checkTokenMembership(t, sid, &b); e != nil { + return false, e + } + return b != 0, nil +} diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go new file mode 100644 index 000000000..24aa90bbb --- /dev/null +++ b/vendor/golang.org/x/sys/windows/service.go @@ -0,0 +1,165 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package windows + +const ( + SC_MANAGER_CONNECT = 1 + SC_MANAGER_CREATE_SERVICE = 2 + SC_MANAGER_ENUMERATE_SERVICE = 4 + SC_MANAGER_LOCK = 8 + SC_MANAGER_QUERY_LOCK_STATUS = 16 + SC_MANAGER_MODIFY_BOOT_CONFIG = 32 + SC_MANAGER_ALL_ACCESS = 0xf003f +) + +//sys OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenSCManagerW + +const ( + SERVICE_KERNEL_DRIVER = 1 + SERVICE_FILE_SYSTEM_DRIVER = 2 + SERVICE_ADAPTER = 4 + SERVICE_RECOGNIZER_DRIVER = 8 + SERVICE_WIN32_OWN_PROCESS = 16 + SERVICE_WIN32_SHARE_PROCESS = 32 + SERVICE_WIN32 = SERVICE_WIN32_OWN_PROCESS | SERVICE_WIN32_SHARE_PROCESS + SERVICE_INTERACTIVE_PROCESS = 256 + SERVICE_DRIVER = SERVICE_KERNEL_DRIVER | SERVICE_FILE_SYSTEM_DRIVER | SERVICE_RECOGNIZER_DRIVER + SERVICE_TYPE_ALL = SERVICE_WIN32 | SERVICE_ADAPTER | SERVICE_DRIVER | SERVICE_INTERACTIVE_PROCESS + + SERVICE_BOOT_START = 0 + SERVICE_SYSTEM_START = 1 + SERVICE_AUTO_START = 2 + SERVICE_DEMAND_START = 3 + SERVICE_DISABLED = 4 + + SERVICE_ERROR_IGNORE = 0 + SERVICE_ERROR_NORMAL = 1 + SERVICE_ERROR_SEVERE = 2 + SERVICE_ERROR_CRITICAL = 3 + + SC_STATUS_PROCESS_INFO = 0 + + SERVICE_STOPPED = 1 + SERVICE_START_PENDING = 2 + SERVICE_STOP_PENDING = 3 + SERVICE_RUNNING = 4 + SERVICE_CONTINUE_PENDING = 5 + SERVICE_PAUSE_PENDING = 6 + SERVICE_PAUSED = 7 + SERVICE_NO_CHANGE = 0xffffffff + + SERVICE_ACCEPT_STOP = 1 + SERVICE_ACCEPT_PAUSE_CONTINUE = 2 + SERVICE_ACCEPT_SHUTDOWN = 4 + SERVICE_ACCEPT_PARAMCHANGE = 8 + SERVICE_ACCEPT_NETBINDCHANGE = 16 + SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 32 + SERVICE_ACCEPT_POWEREVENT = 64 + SERVICE_ACCEPT_SESSIONCHANGE = 128 + + SERVICE_CONTROL_STOP = 1 + SERVICE_CONTROL_PAUSE = 2 + SERVICE_CONTROL_CONTINUE = 3 + SERVICE_CONTROL_INTERROGATE = 4 + SERVICE_CONTROL_SHUTDOWN = 5 + SERVICE_CONTROL_PARAMCHANGE = 6 + SERVICE_CONTROL_NETBINDADD = 7 + SERVICE_CONTROL_NETBINDREMOVE = 8 + SERVICE_CONTROL_NETBINDENABLE = 9 + SERVICE_CONTROL_NETBINDDISABLE = 10 + SERVICE_CONTROL_DEVICEEVENT = 11 + SERVICE_CONTROL_HARDWAREPROFILECHANGE = 12 + SERVICE_CONTROL_POWEREVENT = 13 + SERVICE_CONTROL_SESSIONCHANGE = 14 + + SERVICE_ACTIVE = 1 + SERVICE_INACTIVE = 2 + SERVICE_STATE_ALL = 3 + + SERVICE_QUERY_CONFIG = 1 + SERVICE_CHANGE_CONFIG = 2 + SERVICE_QUERY_STATUS = 4 + SERVICE_ENUMERATE_DEPENDENTS = 8 + SERVICE_START = 16 + SERVICE_STOP = 32 + SERVICE_PAUSE_CONTINUE = 64 + SERVICE_INTERROGATE = 128 + SERVICE_USER_DEFINED_CONTROL = 256 + SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | 
SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL + SERVICE_RUNS_IN_SYSTEM_PROCESS = 1 + SERVICE_CONFIG_DESCRIPTION = 1 + SERVICE_CONFIG_FAILURE_ACTIONS = 2 + + NO_ERROR = 0 + + SC_ENUM_PROCESS_INFO = 0 +) + +type SERVICE_STATUS struct { + ServiceType uint32 + CurrentState uint32 + ControlsAccepted uint32 + Win32ExitCode uint32 + ServiceSpecificExitCode uint32 + CheckPoint uint32 + WaitHint uint32 +} + +type SERVICE_TABLE_ENTRY struct { + ServiceName *uint16 + ServiceProc uintptr +} + +type QUERY_SERVICE_CONFIG struct { + ServiceType uint32 + StartType uint32 + ErrorControl uint32 + BinaryPathName *uint16 + LoadOrderGroup *uint16 + TagId uint32 + Dependencies *uint16 + ServiceStartName *uint16 + DisplayName *uint16 +} + +type SERVICE_DESCRIPTION struct { + Description *uint16 +} + +type SERVICE_STATUS_PROCESS struct { + ServiceType uint32 + CurrentState uint32 + ControlsAccepted uint32 + Win32ExitCode uint32 + ServiceSpecificExitCode uint32 + CheckPoint uint32 + WaitHint uint32 + ProcessId uint32 + ServiceFlags uint32 +} + +type ENUM_SERVICE_STATUS_PROCESS struct { + ServiceName *uint16 + DisplayName *uint16 + ServiceStatusProcess SERVICE_STATUS_PROCESS +} + +//sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle +//sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW +//sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW +//sys DeleteService(service Handle) (err error) = advapi32.DeleteService +//sys StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW +//sys QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus +//sys ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService +//sys StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW +//sys SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus +//sys ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) = advapi32.ChangeServiceConfigW +//sys QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfigW +//sys ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) = advapi32.ChangeServiceConfig2W +//sys QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W +//sys EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW +//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize 
uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go new file mode 100644 index 000000000..917cc2aae --- /dev/null +++ b/vendor/golang.org/x/sys/windows/str.go @@ -0,0 +1,22 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package windows + +func itoa(val int) string { // do it here rather than with fmt to avoid dependency + if val < 0 { + return "-" + itoa(-val) + } + var buf [32]byte // big enough for int64 + i := len(buf) - 1 + for val >= 10 { + buf[i] = byte(val%10 + '0') + i-- + val /= 10 + } + buf[i] = byte(val + '0') + return string(buf[i:]) +} diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go new file mode 100644 index 000000000..af828a91b --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -0,0 +1,74 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package windows contains an interface to the low-level operating system +// primitives. OS details vary depending on the underlying system, and +// by default, godoc will display the OS-specific documentation for the current +// system. If you want godoc to display syscall documentation for another +// system, set $GOOS and $GOARCH to the desired system. For example, if +// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS +// to freebsd and $GOARCH to arm. +// +// The primary use of this package is inside other packages that provide a more +// portable interface to the system, such as "os", "time" and "net". Use +// those packages rather than this one if you can. +// +// For details of the functions and data types in this package consult +// the manuals for the appropriate operating system. +// +// These calls return err == nil to indicate success; otherwise +// err represents an operating system error describing the failure and +// holds a value of type syscall.Errno. +package windows // import "golang.org/x/sys/windows" + +import ( + "syscall" +) + +// ByteSliceFromString returns a NUL-terminated slice of bytes +// containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func ByteSliceFromString(s string) ([]byte, error) { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return nil, syscall.EINVAL + } + } + a := make([]byte, len(s)+1) + copy(a, s) + return a, nil +} + +// BytePtrFromString returns a pointer to a NUL-terminated array of +// bytes containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func BytePtrFromString(s string) (*byte, error) { + a, err := ByteSliceFromString(s) + if err != nil { + return nil, err + } + return &a[0], nil +} + +// Single-word zero for use when we need a valid pointer to 0 bytes. +// See mksyscall.pl. 
+var _zero uintptr + +func (ts *Timespec) Unix() (sec int64, nsec int64) { + return int64(ts.Sec), int64(ts.Nsec) +} + +func (tv *Timeval) Unix() (sec int64, nsec int64) { + return int64(tv.Sec), int64(tv.Usec) * 1000 +} + +func (ts *Timespec) Nano() int64 { + return int64(ts.Sec)*1e9 + int64(ts.Nsec) +} + +func (tv *Timeval) Nano() int64 { + return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 +} diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go new file mode 100644 index 000000000..1e9f4bb4a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -0,0 +1,1153 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Windows system calls. + +package windows + +import ( + errorspkg "errors" + "sync" + "syscall" + "unicode/utf16" + "unsafe" +) + +type Handle uintptr + +const ( + InvalidHandle = ^Handle(0) + + // Flags for DefineDosDevice. + DDD_EXACT_MATCH_ON_REMOVE = 0x00000004 + DDD_NO_BROADCAST_SYSTEM = 0x00000008 + DDD_RAW_TARGET_PATH = 0x00000001 + DDD_REMOVE_DEFINITION = 0x00000002 + + // Return values for GetDriveType. + DRIVE_UNKNOWN = 0 + DRIVE_NO_ROOT_DIR = 1 + DRIVE_REMOVABLE = 2 + DRIVE_FIXED = 3 + DRIVE_REMOTE = 4 + DRIVE_CDROM = 5 + DRIVE_RAMDISK = 6 + + // File system flags from GetVolumeInformation and GetVolumeInformationByHandle. + FILE_CASE_SENSITIVE_SEARCH = 0x00000001 + FILE_CASE_PRESERVED_NAMES = 0x00000002 + FILE_FILE_COMPRESSION = 0x00000010 + FILE_DAX_VOLUME = 0x20000000 + FILE_NAMED_STREAMS = 0x00040000 + FILE_PERSISTENT_ACLS = 0x00000008 + FILE_READ_ONLY_VOLUME = 0x00080000 + FILE_SEQUENTIAL_WRITE_ONCE = 0x00100000 + FILE_SUPPORTS_ENCRYPTION = 0x00020000 + FILE_SUPPORTS_EXTENDED_ATTRIBUTES = 0x00800000 + FILE_SUPPORTS_HARD_LINKS = 0x00400000 + FILE_SUPPORTS_OBJECT_IDS = 0x00010000 + FILE_SUPPORTS_OPEN_BY_FILE_ID = 0x01000000 + FILE_SUPPORTS_REPARSE_POINTS = 0x00000080 + FILE_SUPPORTS_SPARSE_FILES = 0x00000040 + FILE_SUPPORTS_TRANSACTIONS = 0x00200000 + FILE_SUPPORTS_USN_JOURNAL = 0x02000000 + FILE_UNICODE_ON_DISK = 0x00000004 + FILE_VOLUME_IS_COMPRESSED = 0x00008000 + FILE_VOLUME_QUOTAS = 0x00000020 +) + +// StringToUTF16 is deprecated. Use UTF16FromString instead. +// If s contains a NUL byte this function panics instead of +// returning an error. +func StringToUTF16(s string) []uint16 { + a, err := UTF16FromString(s) + if err != nil { + panic("windows: string with NUL passed to StringToUTF16") + } + return a +} + +// UTF16FromString returns the UTF-16 encoding of the UTF-8 string +// s, with a terminating NUL added. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func UTF16FromString(s string) ([]uint16, error) { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return nil, syscall.EINVAL + } + } + return utf16.Encode([]rune(s + "\x00")), nil +} + +// UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s, +// with a terminating NUL removed. +func UTF16ToString(s []uint16) string { + for i, v := range s { + if v == 0 { + s = s[0:i] + break + } + } + return string(utf16.Decode(s)) +} + +// StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead. +// If s contains a NUL byte this function panics instead of +// returning an error. +func StringToUTF16Ptr(s string) *uint16 { return &StringToUTF16(s)[0] } + +// UTF16PtrFromString returns pointer to the UTF-16 encoding of +// the UTF-8 string s, with a terminating NUL added. 
If s +// contains a NUL byte at any location, it returns (nil, syscall.EINVAL). +func UTF16PtrFromString(s string) (*uint16, error) { + a, err := UTF16FromString(s) + if err != nil { + return nil, err + } + return &a[0], nil +} + +func Getpagesize() int { return 4096 } + +// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. +// This is useful when interoperating with Windows code requiring callbacks. +func NewCallback(fn interface{}) uintptr { + return syscall.NewCallback(fn) +} + +// NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention. +// This is useful when interoperating with Windows code requiring callbacks. +func NewCallbackCDecl(fn interface{}) uintptr { + return syscall.NewCallbackCDecl(fn) +} + +// windows api calls + +//sys GetLastError() (lasterr error) +//sys LoadLibrary(libname string) (handle Handle, err error) = LoadLibraryW +//sys LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) = LoadLibraryExW +//sys FreeLibrary(handle Handle) (err error) +//sys GetProcAddress(module Handle, procname string) (proc uintptr, err error) +//sys GetVersion() (ver uint32, err error) +//sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW +//sys ExitProcess(exitcode uint32) +//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW +//sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) +//sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) +//sys SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) [failretval==0xffffffff] +//sys CloseHandle(handle Handle) (err error) +//sys GetStdHandle(stdhandle uint32) (handle Handle, err error) [failretval==InvalidHandle] +//sys SetStdHandle(stdhandle uint32, handle Handle) (err error) +//sys findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstFileW +//sys findNextFile1(handle Handle, data *win32finddata1) (err error) = FindNextFileW +//sys FindClose(handle Handle) (err error) +//sys GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) +//sys GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) = GetCurrentDirectoryW +//sys SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW +//sys CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW +//sys RemoveDirectory(path *uint16) (err error) = RemoveDirectoryW +//sys DeleteFile(path *uint16) (err error) = DeleteFileW +//sys MoveFile(from *uint16, to *uint16) (err error) = MoveFileW +//sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW +//sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW +//sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW +//sys SetEndOfFile(handle Handle) (err error) +//sys GetSystemTimeAsFileTime(time *Filetime) +//sys GetSystemTimePreciseAsFileTime(time *Filetime) +//sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] +//sys CreateIoCompletionPort(filehandle Handle, cphandle Handle, 
key uint32, threadcnt uint32) (handle Handle, err error) +//sys GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) +//sys PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) +//sys CancelIo(s Handle) (err error) +//sys CancelIoEx(s Handle, o *Overlapped) (err error) +//sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW +//sys OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error) +//sys TerminateProcess(handle Handle, exitcode uint32) (err error) +//sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) +//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW +//sys GetCurrentProcess() (pseudoHandle Handle, err error) +//sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) +//sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) +//sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] +//sys GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPathW +//sys CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) +//sys GetFileType(filehandle Handle) (n uint32, err error) +//sys CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) = advapi32.CryptAcquireContextW +//sys CryptReleaseContext(provhandle Handle, flags uint32) (err error) = advapi32.CryptReleaseContext +//sys CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) = advapi32.CryptGenRandom +//sys GetEnvironmentStrings() (envs *uint16, err error) [failretval==nil] = kernel32.GetEnvironmentStringsW +//sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW +//sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW +//sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW +//sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) +//sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW +//sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW +//sys GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW +//sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW +//sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW +//sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] +//sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) +//sys FlushFileBuffers(handle Handle) (err error) +//sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW +//sys 
GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW +//sys GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW +//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) = kernel32.CreateFileMappingW +//sys MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) +//sys UnmapViewOfFile(addr uintptr) (err error) +//sys FlushViewOfFile(addr uintptr, length uintptr) (err error) +//sys VirtualLock(addr uintptr, length uintptr) (err error) +//sys VirtualUnlock(addr uintptr, length uintptr) (err error) +//sys VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) = kernel32.VirtualAlloc +//sys VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) = kernel32.VirtualFree +//sys VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) = kernel32.VirtualProtect +//sys TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile +//sys ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW +//sys CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) = crypt32.CertOpenSystemStoreW +//sys CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) [failretval==InvalidHandle] = crypt32.CertOpenStore +//sys CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore +//sys CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore +//sys CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore +//sys CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain +//sys CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain +//sys CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) [failretval==nil] = crypt32.CertCreateCertificateContext +//sys CertFreeCertificateContext(ctx *CertContext) (err error) = crypt32.CertFreeCertificateContext +//sys CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy +//sys RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) = advapi32.RegOpenKeyExW +//sys RegCloseKey(key Handle) (regerrno error) = advapi32.RegCloseKey +//sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen 
*uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW +//sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW +//sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW +//sys getCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId +//sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode +//sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode +//sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo +//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW +//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot +//sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW +//sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW +//sys DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) +// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. +//sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW +//sys CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) [failretval&0xff==0] = CreateHardLinkW +//sys GetCurrentThreadId() (id uint32) +//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) = kernel32.CreateEventW +//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateEventExW +//sys OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenEventW +//sys SetEvent(event Handle) (err error) = kernel32.SetEvent +//sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent +//sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent + +// Volume Management Functions +//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW +//sys DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) = DeleteVolumeMountPointW +//sys FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeW +//sys FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeMountPointW +//sys FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) = FindNextVolumeW +//sys FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) = FindNextVolumeMountPointW +//sys FindVolumeClose(findVolume Handle) (err error) +//sys 
FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) +//sys GetDriveType(rootPathName *uint16) (driveType uint32) = GetDriveTypeW +//sys GetLogicalDrives() (drivesBitMask uint32, err error) [failretval==0] +//sys GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) [failretval==0] = GetLogicalDriveStringsW +//sys GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationW +//sys GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationByHandleW +//sys GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) = GetVolumeNameForVolumeMountPointW +//sys GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) = GetVolumePathNameW +//sys GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) = GetVolumePathNamesForVolumeNameW +//sys QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) [failretval==0] = QueryDosDeviceW +//sys SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW +//sys SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW + +// syscall interface implementation for other packages + +// GetProcAddressByOrdinal retrieves the address of the exported +// function from module by ordinal. 
+func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0) + proc = uintptr(r0) + if proc == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Exit(code int) { ExitProcess(uint32(code)) } + +func makeInheritSa() *SecurityAttributes { + var sa SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func Open(path string, mode int, perm uint32) (fd Handle, err error) { + if len(path) == 0 { + return InvalidHandle, ERROR_FILE_NOT_FOUND + } + pathp, err := UTF16PtrFromString(path) + if err != nil { + return InvalidHandle, err + } + var access uint32 + switch mode & (O_RDONLY | O_WRONLY | O_RDWR) { + case O_RDONLY: + access = GENERIC_READ + case O_WRONLY: + access = GENERIC_WRITE + case O_RDWR: + access = GENERIC_READ | GENERIC_WRITE + } + if mode&O_CREAT != 0 { + access |= GENERIC_WRITE + } + if mode&O_APPEND != 0 { + access &^= GENERIC_WRITE + access |= FILE_APPEND_DATA + } + sharemode := uint32(FILE_SHARE_READ | FILE_SHARE_WRITE) + var sa *SecurityAttributes + if mode&O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(O_CREAT|O_EXCL) == (O_CREAT | O_EXCL): + createmode = CREATE_NEW + case mode&(O_CREAT|O_TRUNC) == (O_CREAT | O_TRUNC): + createmode = CREATE_ALWAYS + case mode&O_CREAT == O_CREAT: + createmode = OPEN_ALWAYS + case mode&O_TRUNC == O_TRUNC: + createmode = TRUNCATE_EXISTING + default: + createmode = OPEN_EXISTING + } + h, e := CreateFile(pathp, access, sharemode, sa, createmode, FILE_ATTRIBUTE_NORMAL, 0) + return h, e +} + +func Read(fd Handle, p []byte) (n int, err error) { + var done uint32 + e := ReadFile(fd, p, &done, nil) + if e != nil { + if e == ERROR_BROKEN_PIPE { + // NOTE(brainman): work around ERROR_BROKEN_PIPE is returned on reading EOF from stdin + return 0, nil + } + return 0, e + } + if raceenabled { + if done > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), int(done)) + } + raceAcquire(unsafe.Pointer(&ioSync)) + } + return int(done), nil +} + +func Write(fd Handle, p []byte) (n int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + var done uint32 + e := WriteFile(fd, p, &done, nil) + if e != nil { + return 0, e + } + if raceenabled && done > 0 { + raceReadRange(unsafe.Pointer(&p[0]), int(done)) + } + return int(done), nil +} + +var ioSync int64 + +func Seek(fd Handle, offset int64, whence int) (newoffset int64, err error) { + var w uint32 + switch whence { + case 0: + w = FILE_BEGIN + case 1: + w = FILE_CURRENT + case 2: + w = FILE_END + } + hi := int32(offset >> 32) + lo := int32(offset) + // use GetFileType to check pipe, pipe can't do seek + ft, _ := GetFileType(fd) + if ft == FILE_TYPE_PIPE { + return 0, syscall.EPIPE + } + rlo, e := SetFilePointer(fd, lo, &hi, w) + if e != nil { + return 0, e + } + return int64(hi)<<32 + int64(rlo), nil +} + +func Close(fd Handle) (err error) { + return CloseHandle(fd) +} + +var ( + Stdin = getStdHandle(STD_INPUT_HANDLE) + Stdout = getStdHandle(STD_OUTPUT_HANDLE) + Stderr = getStdHandle(STD_ERROR_HANDLE) +) + +func getStdHandle(stdhandle uint32) (fd Handle) { + r, _ := GetStdHandle(stdhandle) + CloseOnExec(r) + return r +} + +const ImplementsGetwd = true + +func Getwd() (wd string, err error) { + b := make([]uint16, 300) + n, e := GetCurrentDirectory(uint32(len(b)), &b[0]) + if e != nil { + return "", e + } + return 
string(utf16.Decode(b[0:n])), nil +} + +func Chdir(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return SetCurrentDirectory(pathp) +} + +func Mkdir(path string, mode uint32) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return CreateDirectory(pathp, nil) +} + +func Rmdir(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return RemoveDirectory(pathp) +} + +func Unlink(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return DeleteFile(pathp) +} + +func Rename(oldpath, newpath string) (err error) { + from, err := UTF16PtrFromString(oldpath) + if err != nil { + return err + } + to, err := UTF16PtrFromString(newpath) + if err != nil { + return err + } + return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING) +} + +func ComputerName() (name string, err error) { + var n uint32 = MAX_COMPUTERNAME_LENGTH + 1 + b := make([]uint16, n) + e := GetComputerName(&b[0], &n) + if e != nil { + return "", e + } + return string(utf16.Decode(b[0:n])), nil +} + +func Ftruncate(fd Handle, length int64) (err error) { + curoffset, e := Seek(fd, 0, 1) + if e != nil { + return e + } + defer Seek(fd, curoffset, 0) + _, e = Seek(fd, length, 0) + if e != nil { + return e + } + e = SetEndOfFile(fd) + if e != nil { + return e + } + return nil +} + +func Gettimeofday(tv *Timeval) (err error) { + var ft Filetime + GetSystemTimeAsFileTime(&ft) + *tv = NsecToTimeval(ft.Nanoseconds()) + return nil +} + +func Pipe(p []Handle) (err error) { + if len(p) != 2 { + return syscall.EINVAL + } + var r, w Handle + e := CreatePipe(&r, &w, makeInheritSa(), 0) + if e != nil { + return e + } + p[0] = r + p[1] = w + return nil +} + +func Utimes(path string, tv []Timeval) (err error) { + if len(tv) != 2 { + return syscall.EINVAL + } + pathp, e := UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := CreateFile(pathp, + FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer Close(h) + a := NsecToFiletime(tv[0].Nanoseconds()) + w := NsecToFiletime(tv[1].Nanoseconds()) + return SetFileTime(h, nil, &a, &w) +} + +func UtimesNano(path string, ts []Timespec) (err error) { + if len(ts) != 2 { + return syscall.EINVAL + } + pathp, e := UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := CreateFile(pathp, + FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer Close(h) + a := NsecToFiletime(TimespecToNsec(ts[0])) + w := NsecToFiletime(TimespecToNsec(ts[1])) + return SetFileTime(h, nil, &a, &w) +} + +func Fsync(fd Handle) (err error) { + return FlushFileBuffers(fd) +} + +func Chmod(path string, mode uint32) (err error) { + if mode == 0 { + return syscall.EINVAL + } + p, e := UTF16PtrFromString(path) + if e != nil { + return e + } + attrs, e := GetFileAttributes(p) + if e != nil { + return e + } + if mode&S_IWRITE != 0 { + attrs &^= FILE_ATTRIBUTE_READONLY + } else { + attrs |= FILE_ATTRIBUTE_READONLY + } + return SetFileAttributes(p, attrs) +} + +func LoadGetSystemTimePreciseAsFileTime() error { + return procGetSystemTimePreciseAsFileTime.Find() +} + +func LoadCancelIoEx() error { + return procCancelIoEx.Find() +} + +func LoadSetFileCompletionNotificationModes() error { + return procSetFileCompletionNotificationModes.Find() +} + +// net api calls + +const 
socket_error = uintptr(^uint32(0)) + +//sys WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup +//sys WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup +//sys WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl +//sys socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket +//sys Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) [failretval==socket_error] = ws2_32.setsockopt +//sys Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockopt +//sys bind(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.bind +//sys connect(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.connect +//sys getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockname +//sys getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getpeername +//sys listen(s Handle, backlog int32) (err error) [failretval==socket_error] = ws2_32.listen +//sys shutdown(s Handle, how int32) (err error) [failretval==socket_error] = ws2_32.shutdown +//sys Closesocket(s Handle) (err error) [failretval==socket_error] = ws2_32.closesocket +//sys AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) = mswsock.AcceptEx +//sys GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) = mswsock.GetAcceptExSockaddrs +//sys WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecv +//sys WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASend +//sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom +//sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo +//sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname +//sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname +//sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs +//sys GetProtoByName(name string) (p *Protoent, err error) [failretval==nil] = ws2_32.getprotobyname +//sys DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) = dnsapi.DnsQuery_W +//sys DnsRecordListFree(rl *DNSRecord, freetype uint32) = dnsapi.DnsRecordListFree +//sys DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) = dnsapi.DnsNameCompare_W +//sys GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) = 
ws2_32.GetAddrInfoW +//sys FreeAddrInfoW(addrinfo *AddrinfoW) = ws2_32.FreeAddrInfoW +//sys GetIfEntry(pIfRow *MibIfRow) (errcode error) = iphlpapi.GetIfEntry +//sys GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo +//sys SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes +//sys WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW +//sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses +//sys GetACP() (acp uint32) = kernel32.GetACP +//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar + +// For testing: clients can set this flag to force +// creation of IPv6 sockets to return EAFNOSUPPORT. +var SocketDisableIPv6 bool + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type Sockaddr interface { + sockaddr() (ptr unsafe.Pointer, len int32, err error) // lowercase; only we can define Sockaddrs +} + +type SockaddrInet4 struct { + Port int + Addr [4]byte + raw RawSockaddrInet4 +} + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, int32, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, syscall.EINVAL + } + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + +type SockaddrInet6 struct { + Port int + ZoneId uint32 + Addr [16]byte + raw RawSockaddrInet6 +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, syscall.EINVAL + } + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + +type SockaddrUnix struct { + Name string +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { + // TODO(brainman): implement SockaddrUnix.sockaddr() + return nil, 0, syscall.EWINDOWS +} + +func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { + switch rsa.Addr.Family { + case AF_UNIX: + return nil, syscall.EWINDOWS + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + } + return nil, 
syscall.EAFNOSUPPORT +} + +func Socket(domain, typ, proto int) (fd Handle, err error) { + if domain == AF_INET6 && SocketDisableIPv6 { + return InvalidHandle, syscall.EAFNOSUPPORT + } + return socket(int32(domain), int32(typ), int32(proto)) +} + +func SetsockoptInt(fd Handle, level, opt int, value int) (err error) { + v := int32(value) + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), int32(unsafe.Sizeof(v))) +} + +func Bind(fd Handle, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return bind(fd, ptr, n) +} + +func Connect(fd Handle, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connect(fd, ptr, n) +} + +func Getsockname(fd Handle) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + l := int32(unsafe.Sizeof(rsa)) + if err = getsockname(fd, &rsa, &l); err != nil { + return + } + return rsa.Sockaddr() +} + +func Getpeername(fd Handle) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + l := int32(unsafe.Sizeof(rsa)) + if err = getpeername(fd, &rsa, &l); err != nil { + return + } + return rsa.Sockaddr() +} + +func Listen(s Handle, n int) (err error) { + return listen(s, int32(n)) +} + +func Shutdown(fd Handle, how int) (err error) { + return shutdown(fd, int32(how)) +} + +func WSASendto(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to Sockaddr, overlapped *Overlapped, croutine *byte) (err error) { + rsa, l, err := to.sockaddr() + if err != nil { + return err + } + return WSASendTo(s, bufs, bufcnt, sent, flags, (*RawSockaddrAny)(unsafe.Pointer(rsa)), l, overlapped, croutine) +} + +func LoadGetAddrInfo() error { + return procGetAddrInfoW.Find() +} + +var connectExFunc struct { + once sync.Once + addr uintptr + err error +} + +func LoadConnectEx() error { + connectExFunc.once.Do(func() { + var s Handle + s, connectExFunc.err = Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) + if connectExFunc.err != nil { + return + } + defer CloseHandle(s) + var n uint32 + connectExFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_CONNECTEX)), + uint32(unsafe.Sizeof(WSAID_CONNECTEX)), + (*byte)(unsafe.Pointer(&connectExFunc.addr)), + uint32(unsafe.Sizeof(connectExFunc.addr)), + &n, nil, 0) + }) + return connectExFunc.err +} + +func connectEx(s Handle, name unsafe.Pointer, namelen int32, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(connectExFunc.addr, 7, uintptr(s), uintptr(name), uintptr(namelen), uintptr(unsafe.Pointer(sendBuf)), uintptr(sendDataLen), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ConnectEx(fd Handle, sa Sockaddr, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) error { + err := LoadConnectEx() + if err != nil { + return errorspkg.New("failed to find ConnectEx: " + err.Error()) + } + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) +} + +var sendRecvMsgFunc struct { + once sync.Once + sendAddr uintptr + recvAddr uintptr + err error +} + +func loadWSASendRecvMsg() error { + sendRecvMsgFunc.once.Do(func() { + var s Handle + s, sendRecvMsgFunc.err = Socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP) + if sendRecvMsgFunc.err != nil { + return + } + defer CloseHandle(s) + var n 
uint32 + sendRecvMsgFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSARECVMSG)), + uint32(unsafe.Sizeof(WSAID_WSARECVMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.recvAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.recvAddr)), + &n, nil, 0) + if sendRecvMsgFunc.err != nil { + return + } + sendRecvMsgFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSASENDMSG)), + uint32(unsafe.Sizeof(WSAID_WSASENDMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.sendAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.sendAddr)), + &n, nil, 0) + }) + return sendRecvMsgFunc.err +} + +func WSASendMsg(fd Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlapped *Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return err +} + +func WSARecvMsg(fd Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return err +} + +// Invented structures to support what package os expects. +type Rusage struct { + CreationTime Filetime + ExitTime Filetime + KernelTime Filetime + UserTime Filetime +} + +type WaitStatus struct { + ExitCode uint32 +} + +func (w WaitStatus) Exited() bool { return true } + +func (w WaitStatus) ExitStatus() int { return int(w.ExitCode) } + +func (w WaitStatus) Signal() Signal { return -1 } + +func (w WaitStatus) CoreDump() bool { return false } + +func (w WaitStatus) Stopped() bool { return false } + +func (w WaitStatus) Continued() bool { return false } + +func (w WaitStatus) StopSignal() Signal { return -1 } + +func (w WaitStatus) Signaled() bool { return false } + +func (w WaitStatus) TrapCause() int { return -1 } + +// Timespec is an invented structure on Windows, but here for +// consistency with the corresponding package for other operating systems. +type Timespec struct { + Sec int64 + Nsec int64 +} + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + return +} + +// TODO(brainman): fix all needed for net + +func Accept(fd Handle) (nfd Handle, sa Sockaddr, err error) { return 0, nil, syscall.EWINDOWS } +func Recvfrom(fd Handle, p []byte, flags int) (n int, from Sockaddr, err error) { + return 0, nil, syscall.EWINDOWS +} +func Sendto(fd Handle, p []byte, flags int, to Sockaddr) (err error) { return syscall.EWINDOWS } +func SetsockoptTimeval(fd Handle, level, opt int, tv *Timeval) (err error) { return syscall.EWINDOWS } + +// The Linger struct is wrong but we only noticed after Go 1. +// sysLinger is the real system call structure. + +// BUG(brainman): The definition of Linger is not appropriate for direct use +// with Setsockopt and Getsockopt. 
+// Use SetsockoptLinger instead. + +type Linger struct { + Onoff int32 + Linger int32 +} + +type sysLinger struct { + Onoff uint16 + Linger uint16 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +func GetsockoptInt(fd Handle, level, opt int) (int, error) { return -1, syscall.EWINDOWS } + +func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { + sys := sysLinger{Onoff: uint16(l.Onoff), Linger: uint16(l.Linger)} + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&sys)), int32(unsafe.Sizeof(sys))) +} + +func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) +} +func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) +} +func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { + return syscall.EWINDOWS +} + +func Getpid() (pid int) { return int(getCurrentProcessId()) } + +func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { + // NOTE(rsc): The Win32finddata struct is wrong for the system call: + // the two paths are each one uint16 short. Use the correct struct, + // a win32finddata1, and then copy the results out. + // There is no loss of expressivity here, because the final + // uint16, if it is used, is supposed to be a NUL, and Go doesn't need that. + // For Go 1.1, we might avoid the allocation of win32finddata1 here + // by adding a final Bug [2]uint16 field to the struct and then + // adjusting the fields in the result directly. 
+ var data1 win32finddata1 + handle, err = findFirstFile1(name, &data1) + if err == nil { + copyFindData(data, &data1) + } + return +} + +func FindNextFile(handle Handle, data *Win32finddata) (err error) { + var data1 win32finddata1 + err = findNextFile1(handle, &data1) + if err == nil { + copyFindData(data, &data1) + } + return +} + +func getProcessEntry(pid int) (*ProcessEntry32, error) { + snapshot, err := CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err + } + defer CloseHandle(snapshot) + var procEntry ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = Process32First(snapshot, &procEntry); err != nil { + return nil, err + } + for { + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil + } + err = Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err + } + } +} + +func Getppid() (ppid int) { + pe, err := getProcessEntry(Getpid()) + if err != nil { + return -1 + } + return int(pe.ParentProcessID) +} + +// TODO(brainman): fix all needed for os +func Fchdir(fd Handle) (err error) { return syscall.EWINDOWS } +func Link(oldpath, newpath string) (err error) { return syscall.EWINDOWS } +func Symlink(path, link string) (err error) { return syscall.EWINDOWS } + +func Fchmod(fd Handle, mode uint32) (err error) { return syscall.EWINDOWS } +func Chown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } +func Lchown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } +func Fchown(fd Handle, uid int, gid int) (err error) { return syscall.EWINDOWS } + +func Getuid() (uid int) { return -1 } +func Geteuid() (euid int) { return -1 } +func Getgid() (gid int) { return -1 } +func Getegid() (egid int) { return -1 } +func Getgroups() (gids []int, err error) { return nil, syscall.EWINDOWS } + +type Signal int + +func (s Signal) Signal() {} + +func (s Signal) String() string { + if 0 <= s && int(s) < len(signals) { + str := signals[s] + if str != "" { + return str + } + } + return "signal " + itoa(int(s)) +} + +func LoadCreateSymbolicLink() error { + return procCreateSymbolicLinkW.Find() +} + +// Readlink returns the destination of the named symbolic link. 
+func Readlink(path string, buf []byte) (n int, err error) { + fd, err := CreateFile(StringToUTF16Ptr(path), GENERIC_READ, 0, nil, OPEN_EXISTING, + FILE_FLAG_OPEN_REPARSE_POINT|FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + return -1, err + } + defer CloseHandle(fd) + + rdbbuf := make([]byte, MAXIMUM_REPARSE_DATA_BUFFER_SIZE) + var bytesReturned uint32 + err = DeviceIoControl(fd, FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil) + if err != nil { + return -1, err + } + + rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0])) + var s string + switch rdb.ReparseTag { + case IO_REPARSE_TAG_SYMLINK: + data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) + case IO_REPARSE_TAG_MOUNT_POINT: + data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) + default: + // the path is not a symlink or junction but another type of reparse + // point + return -1, syscall.ENOENT + } + n = copy(buf, []byte(s)) + + return n, nil +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go new file mode 100644 index 000000000..b4e424788 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -0,0 +1,1353 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import "syscall" + +const ( + // Windows errors. + ERROR_FILE_NOT_FOUND syscall.Errno = 2 + ERROR_PATH_NOT_FOUND syscall.Errno = 3 + ERROR_ACCESS_DENIED syscall.Errno = 5 + ERROR_NO_MORE_FILES syscall.Errno = 18 + ERROR_HANDLE_EOF syscall.Errno = 38 + ERROR_NETNAME_DELETED syscall.Errno = 64 + ERROR_FILE_EXISTS syscall.Errno = 80 + ERROR_BROKEN_PIPE syscall.Errno = 109 + ERROR_BUFFER_OVERFLOW syscall.Errno = 111 + ERROR_INSUFFICIENT_BUFFER syscall.Errno = 122 + ERROR_MOD_NOT_FOUND syscall.Errno = 126 + ERROR_PROC_NOT_FOUND syscall.Errno = 127 + ERROR_ALREADY_EXISTS syscall.Errno = 183 + ERROR_ENVVAR_NOT_FOUND syscall.Errno = 203 + ERROR_MORE_DATA syscall.Errno = 234 + ERROR_OPERATION_ABORTED syscall.Errno = 995 + ERROR_IO_PENDING syscall.Errno = 997 + ERROR_SERVICE_SPECIFIC_ERROR syscall.Errno = 1066 + ERROR_NOT_FOUND syscall.Errno = 1168 + ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314 + WSAEACCES syscall.Errno = 10013 + WSAEMSGSIZE syscall.Errno = 10040 + WSAECONNRESET syscall.Errno = 10054 +) + +const ( + // Invented values to support what package os expects. 
+ O_RDONLY = 0x00000 + O_WRONLY = 0x00001 + O_RDWR = 0x00002 + O_CREAT = 0x00040 + O_EXCL = 0x00080 + O_NOCTTY = 0x00100 + O_TRUNC = 0x00200 + O_NONBLOCK = 0x00800 + O_APPEND = 0x00400 + O_SYNC = 0x01000 + O_ASYNC = 0x02000 + O_CLOEXEC = 0x80000 +) + +const ( + // More invented values for signals + SIGHUP = Signal(0x1) + SIGINT = Signal(0x2) + SIGQUIT = Signal(0x3) + SIGILL = Signal(0x4) + SIGTRAP = Signal(0x5) + SIGABRT = Signal(0x6) + SIGBUS = Signal(0x7) + SIGFPE = Signal(0x8) + SIGKILL = Signal(0x9) + SIGSEGV = Signal(0xb) + SIGPIPE = Signal(0xd) + SIGALRM = Signal(0xe) + SIGTERM = Signal(0xf) +) + +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", +} + +const ( + GENERIC_READ = 0x80000000 + GENERIC_WRITE = 0x40000000 + GENERIC_EXECUTE = 0x20000000 + GENERIC_ALL = 0x10000000 + + FILE_LIST_DIRECTORY = 0x00000001 + FILE_APPEND_DATA = 0x00000004 + FILE_WRITE_ATTRIBUTES = 0x00000100 + + FILE_SHARE_READ = 0x00000001 + FILE_SHARE_WRITE = 0x00000002 + FILE_SHARE_DELETE = 0x00000004 + FILE_ATTRIBUTE_READONLY = 0x00000001 + FILE_ATTRIBUTE_HIDDEN = 0x00000002 + FILE_ATTRIBUTE_SYSTEM = 0x00000004 + FILE_ATTRIBUTE_DIRECTORY = 0x00000010 + FILE_ATTRIBUTE_ARCHIVE = 0x00000020 + FILE_ATTRIBUTE_NORMAL = 0x00000080 + FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 + + INVALID_FILE_ATTRIBUTES = 0xffffffff + + CREATE_NEW = 1 + CREATE_ALWAYS = 2 + OPEN_EXISTING = 3 + OPEN_ALWAYS = 4 + TRUNCATE_EXISTING = 5 + + FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000 + FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 + FILE_FLAG_OVERLAPPED = 0x40000000 + + HANDLE_FLAG_INHERIT = 0x00000001 + STARTF_USESTDHANDLES = 0x00000100 + STARTF_USESHOWWINDOW = 0x00000001 + DUPLICATE_CLOSE_SOURCE = 0x00000001 + DUPLICATE_SAME_ACCESS = 0x00000002 + + STD_INPUT_HANDLE = -10 & (1<<32 - 1) + STD_OUTPUT_HANDLE = -11 & (1<<32 - 1) + STD_ERROR_HANDLE = -12 & (1<<32 - 1) + + FILE_BEGIN = 0 + FILE_CURRENT = 1 + FILE_END = 2 + + LANG_ENGLISH = 0x09 + SUBLANG_ENGLISH_US = 0x01 + + FORMAT_MESSAGE_ALLOCATE_BUFFER = 256 + FORMAT_MESSAGE_IGNORE_INSERTS = 512 + FORMAT_MESSAGE_FROM_STRING = 1024 + FORMAT_MESSAGE_FROM_HMODULE = 2048 + FORMAT_MESSAGE_FROM_SYSTEM = 4096 + FORMAT_MESSAGE_ARGUMENT_ARRAY = 8192 + FORMAT_MESSAGE_MAX_WIDTH_MASK = 255 + + MAX_PATH = 260 + MAX_LONG_PATH = 32768 + + MAX_COMPUTERNAME_LENGTH = 15 + + TIME_ZONE_ID_UNKNOWN = 0 + TIME_ZONE_ID_STANDARD = 1 + + TIME_ZONE_ID_DAYLIGHT = 2 + IGNORE = 0 + INFINITE = 0xffffffff + + WAIT_TIMEOUT = 258 + WAIT_ABANDONED = 0x00000080 + WAIT_OBJECT_0 = 0x00000000 + WAIT_FAILED = 0xFFFFFFFF + + PROCESS_TERMINATE = 1 + PROCESS_QUERY_INFORMATION = 0x00000400 + SYNCHRONIZE = 0x00100000 + + FILE_MAP_COPY = 0x01 + FILE_MAP_WRITE = 0x02 + FILE_MAP_READ = 0x04 + FILE_MAP_EXECUTE = 0x20 + + CTRL_C_EVENT = 0 + CTRL_BREAK_EVENT = 1 + + // Windows reserves errors >= 1<<29 for application use. + APPLICATION_ERROR = 1 << 29 +) + +const ( + // Process creation flags. 
+ CREATE_BREAKAWAY_FROM_JOB = 0x01000000 + CREATE_DEFAULT_ERROR_MODE = 0x04000000 + CREATE_NEW_CONSOLE = 0x00000010 + CREATE_NEW_PROCESS_GROUP = 0x00000200 + CREATE_NO_WINDOW = 0x08000000 + CREATE_PROTECTED_PROCESS = 0x00040000 + CREATE_PRESERVE_CODE_AUTHZ_LEVEL = 0x02000000 + CREATE_SEPARATE_WOW_VDM = 0x00000800 + CREATE_SHARED_WOW_VDM = 0x00001000 + CREATE_SUSPENDED = 0x00000004 + CREATE_UNICODE_ENVIRONMENT = 0x00000400 + DEBUG_ONLY_THIS_PROCESS = 0x00000002 + DEBUG_PROCESS = 0x00000001 + DETACHED_PROCESS = 0x00000008 + EXTENDED_STARTUPINFO_PRESENT = 0x00080000 + INHERIT_PARENT_AFFINITY = 0x00010000 +) + +const ( + // flags for CreateToolhelp32Snapshot + TH32CS_SNAPHEAPLIST = 0x01 + TH32CS_SNAPPROCESS = 0x02 + TH32CS_SNAPTHREAD = 0x04 + TH32CS_SNAPMODULE = 0x08 + TH32CS_SNAPMODULE32 = 0x10 + TH32CS_SNAPALL = TH32CS_SNAPHEAPLIST | TH32CS_SNAPMODULE | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD + TH32CS_INHERIT = 0x80000000 +) + +const ( + // filters for ReadDirectoryChangesW + FILE_NOTIFY_CHANGE_FILE_NAME = 0x001 + FILE_NOTIFY_CHANGE_DIR_NAME = 0x002 + FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x004 + FILE_NOTIFY_CHANGE_SIZE = 0x008 + FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010 + FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020 + FILE_NOTIFY_CHANGE_CREATION = 0x040 + FILE_NOTIFY_CHANGE_SECURITY = 0x100 +) + +const ( + // do not reorder + FILE_ACTION_ADDED = iota + 1 + FILE_ACTION_REMOVED + FILE_ACTION_MODIFIED + FILE_ACTION_RENAMED_OLD_NAME + FILE_ACTION_RENAMED_NEW_NAME +) + +const ( + // wincrypt.h + PROV_RSA_FULL = 1 + PROV_RSA_SIG = 2 + PROV_DSS = 3 + PROV_FORTEZZA = 4 + PROV_MS_EXCHANGE = 5 + PROV_SSL = 6 + PROV_RSA_SCHANNEL = 12 + PROV_DSS_DH = 13 + PROV_EC_ECDSA_SIG = 14 + PROV_EC_ECNRA_SIG = 15 + PROV_EC_ECDSA_FULL = 16 + PROV_EC_ECNRA_FULL = 17 + PROV_DH_SCHANNEL = 18 + PROV_SPYRUS_LYNKS = 20 + PROV_RNG = 21 + PROV_INTEL_SEC = 22 + PROV_REPLACE_OWF = 23 + PROV_RSA_AES = 24 + CRYPT_VERIFYCONTEXT = 0xF0000000 + CRYPT_NEWKEYSET = 0x00000008 + CRYPT_DELETEKEYSET = 0x00000010 + CRYPT_MACHINE_KEYSET = 0x00000020 + CRYPT_SILENT = 0x00000040 + CRYPT_DEFAULT_CONTAINER_OPTIONAL = 0x00000080 + + USAGE_MATCH_TYPE_AND = 0 + USAGE_MATCH_TYPE_OR = 1 + + X509_ASN_ENCODING = 0x00000001 + PKCS_7_ASN_ENCODING = 0x00010000 + + CERT_STORE_PROV_MEMORY = 2 + + CERT_STORE_ADD_ALWAYS = 4 + + CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG = 0x00000004 + + CERT_TRUST_NO_ERROR = 0x00000000 + CERT_TRUST_IS_NOT_TIME_VALID = 0x00000001 + CERT_TRUST_IS_REVOKED = 0x00000004 + CERT_TRUST_IS_NOT_SIGNATURE_VALID = 0x00000008 + CERT_TRUST_IS_NOT_VALID_FOR_USAGE = 0x00000010 + CERT_TRUST_IS_UNTRUSTED_ROOT = 0x00000020 + CERT_TRUST_REVOCATION_STATUS_UNKNOWN = 0x00000040 + CERT_TRUST_IS_CYCLIC = 0x00000080 + CERT_TRUST_INVALID_EXTENSION = 0x00000100 + CERT_TRUST_INVALID_POLICY_CONSTRAINTS = 0x00000200 + CERT_TRUST_INVALID_BASIC_CONSTRAINTS = 0x00000400 + CERT_TRUST_INVALID_NAME_CONSTRAINTS = 0x00000800 + CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT = 0x00001000 + CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT = 0x00002000 + CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT = 0x00004000 + CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT = 0x00008000 + CERT_TRUST_IS_OFFLINE_REVOCATION = 0x01000000 + CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY = 0x02000000 + CERT_TRUST_IS_EXPLICIT_DISTRUST = 0x04000000 + CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT = 0x08000000 + + CERT_CHAIN_POLICY_BASE = 1 + CERT_CHAIN_POLICY_AUTHENTICODE = 2 + CERT_CHAIN_POLICY_AUTHENTICODE_TS = 3 + CERT_CHAIN_POLICY_SSL = 4 + CERT_CHAIN_POLICY_BASIC_CONSTRAINTS = 5 + CERT_CHAIN_POLICY_NT_AUTH = 6 + 
CERT_CHAIN_POLICY_MICROSOFT_ROOT = 7 + CERT_CHAIN_POLICY_EV = 8 + + CERT_E_EXPIRED = 0x800B0101 + CERT_E_ROLE = 0x800B0103 + CERT_E_PURPOSE = 0x800B0106 + CERT_E_UNTRUSTEDROOT = 0x800B0109 + CERT_E_CN_NO_MATCH = 0x800B010F + + AUTHTYPE_CLIENT = 1 + AUTHTYPE_SERVER = 2 +) + +var ( + OID_PKIX_KP_SERVER_AUTH = []byte("1.3.6.1.5.5.7.3.1\x00") + OID_SERVER_GATED_CRYPTO = []byte("1.3.6.1.4.1.311.10.3.3\x00") + OID_SGC_NETSCAPE = []byte("2.16.840.1.113730.4.1\x00") +) + +// Pointer represents a pointer to an arbitrary Windows type. +// +// Pointer-typed fields may point to one of many different types. It's +// up to the caller to provide a pointer to the appropriate type, cast +// to Pointer. The caller must obey the unsafe.Pointer rules while +// doing so. +type Pointer *struct{} + +// Invented values to support what package os expects. +type Timeval struct { + Sec int32 + Usec int32 +} + +func (tv *Timeval) Nanoseconds() int64 { + return (int64(tv.Sec)*1e6 + int64(tv.Usec)) * 1e3 +} + +func NsecToTimeval(nsec int64) (tv Timeval) { + tv.Sec = int32(nsec / 1e9) + tv.Usec = int32(nsec % 1e9 / 1e3) + return +} + +type SecurityAttributes struct { + Length uint32 + SecurityDescriptor uintptr + InheritHandle uint32 +} + +type Overlapped struct { + Internal uintptr + InternalHigh uintptr + Offset uint32 + OffsetHigh uint32 + HEvent Handle +} + +type FileNotifyInformation struct { + NextEntryOffset uint32 + Action uint32 + FileNameLength uint32 + FileName uint16 +} + +type Filetime struct { + LowDateTime uint32 + HighDateTime uint32 +} + +// Nanoseconds returns Filetime ft in nanoseconds +// since Epoch (00:00:00 UTC, January 1, 1970). +func (ft *Filetime) Nanoseconds() int64 { + // 100-nanosecond intervals since January 1, 1601 + nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) + // change starting time to the Epoch (00:00:00 UTC, January 1, 1970) + nsec -= 116444736000000000 + // convert into nanoseconds + nsec *= 100 + return nsec +} + +func NsecToFiletime(nsec int64) (ft Filetime) { + // convert into 100-nanosecond + nsec /= 100 + // change starting time to January 1, 1601 + nsec += 116444736000000000 + // split into high / low + ft.LowDateTime = uint32(nsec & 0xffffffff) + ft.HighDateTime = uint32(nsec >> 32 & 0xffffffff) + return ft +} + +type Win32finddata struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 + Reserved0 uint32 + Reserved1 uint32 + FileName [MAX_PATH - 1]uint16 + AlternateFileName [13]uint16 +} + +// This is the actual system call structure. +// Win32finddata is what we committed to in Go 1. +type win32finddata1 struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 + Reserved0 uint32 + Reserved1 uint32 + FileName [MAX_PATH]uint16 + AlternateFileName [14]uint16 +} + +func copyFindData(dst *Win32finddata, src *win32finddata1) { + dst.FileAttributes = src.FileAttributes + dst.CreationTime = src.CreationTime + dst.LastAccessTime = src.LastAccessTime + dst.LastWriteTime = src.LastWriteTime + dst.FileSizeHigh = src.FileSizeHigh + dst.FileSizeLow = src.FileSizeLow + dst.Reserved0 = src.Reserved0 + dst.Reserved1 = src.Reserved1 + + // The src is 1 element bigger than dst, but it must be NUL. 
+ copy(dst.FileName[:], src.FileName[:]) + copy(dst.AlternateFileName[:], src.AlternateFileName[:]) +} + +type ByHandleFileInformation struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + VolumeSerialNumber uint32 + FileSizeHigh uint32 + FileSizeLow uint32 + NumberOfLinks uint32 + FileIndexHigh uint32 + FileIndexLow uint32 +} + +const ( + GetFileExInfoStandard = 0 + GetFileExMaxInfoLevel = 1 +) + +type Win32FileAttributeData struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 +} + +// ShowWindow constants +const ( + // winuser.h + SW_HIDE = 0 + SW_NORMAL = 1 + SW_SHOWNORMAL = 1 + SW_SHOWMINIMIZED = 2 + SW_SHOWMAXIMIZED = 3 + SW_MAXIMIZE = 3 + SW_SHOWNOACTIVATE = 4 + SW_SHOW = 5 + SW_MINIMIZE = 6 + SW_SHOWMINNOACTIVE = 7 + SW_SHOWNA = 8 + SW_RESTORE = 9 + SW_SHOWDEFAULT = 10 + SW_FORCEMINIMIZE = 11 +) + +type StartupInfo struct { + Cb uint32 + _ *uint16 + Desktop *uint16 + Title *uint16 + X uint32 + Y uint32 + XSize uint32 + YSize uint32 + XCountChars uint32 + YCountChars uint32 + FillAttribute uint32 + Flags uint32 + ShowWindow uint16 + _ uint16 + _ *byte + StdInput Handle + StdOutput Handle + StdErr Handle +} + +type ProcessInformation struct { + Process Handle + Thread Handle + ProcessId uint32 + ThreadId uint32 +} + +type ProcessEntry32 struct { + Size uint32 + Usage uint32 + ProcessID uint32 + DefaultHeapID uintptr + ModuleID uint32 + Threads uint32 + ParentProcessID uint32 + PriClassBase int32 + Flags uint32 + ExeFile [MAX_PATH]uint16 +} + +type Systemtime struct { + Year uint16 + Month uint16 + DayOfWeek uint16 + Day uint16 + Hour uint16 + Minute uint16 + Second uint16 + Milliseconds uint16 +} + +type Timezoneinformation struct { + Bias int32 + StandardName [32]uint16 + StandardDate Systemtime + StandardBias int32 + DaylightName [32]uint16 + DaylightDate Systemtime + DaylightBias int32 +} + +// Socket related. + +const ( + AF_UNSPEC = 0 + AF_UNIX = 1 + AF_INET = 2 + AF_INET6 = 23 + AF_NETBIOS = 17 + + SOCK_STREAM = 1 + SOCK_DGRAM = 2 + SOCK_RAW = 3 + SOCK_SEQPACKET = 5 + + IPPROTO_IP = 0 + IPPROTO_IPV6 = 0x29 + IPPROTO_TCP = 6 + IPPROTO_UDP = 17 + + SOL_SOCKET = 0xffff + SO_REUSEADDR = 4 + SO_KEEPALIVE = 8 + SO_DONTROUTE = 16 + SO_BROADCAST = 32 + SO_LINGER = 128 + SO_RCVBUF = 0x1002 + SO_SNDBUF = 0x1001 + SO_UPDATE_ACCEPT_CONTEXT = 0x700b + SO_UPDATE_CONNECT_CONTEXT = 0x7010 + + IOC_OUT = 0x40000000 + IOC_IN = 0x80000000 + IOC_VENDOR = 0x18000000 + IOC_INOUT = IOC_IN | IOC_OUT + IOC_WS2 = 0x08000000 + SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 + SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 + SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + + // cf. 
http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 + + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_LOOP = 0xb + IP_ADD_MEMBERSHIP = 0xc + IP_DROP_MEMBERSHIP = 0xd + + IPV6_V6ONLY = 0x1b + IPV6_UNICAST_HOPS = 0x4 + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_LOOP = 0xb + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_DONTROUTE = 0x4 + MSG_WAITALL = 0x8 + + MSG_TRUNC = 0x0100 + MSG_CTRUNC = 0x0200 + MSG_BCAST = 0x0400 + MSG_MCAST = 0x0800 + + SOMAXCONN = 0x7fffffff + + TCP_NODELAY = 1 + + SHUT_RD = 0 + SHUT_WR = 1 + SHUT_RDWR = 2 + + WSADESCRIPTION_LEN = 256 + WSASYS_STATUS_LEN = 128 +) + +type WSABuf struct { + Len uint32 + Buf *byte +} + +type WSAMsg struct { + Name *syscall.RawSockaddrAny + Namelen int32 + Buffers *WSABuf + BufferCount uint32 + Control WSABuf + Flags uint32 +} + +// Invented values to support what package os expects. +const ( + S_IFMT = 0x1f000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +const ( + FILE_TYPE_CHAR = 0x0002 + FILE_TYPE_DISK = 0x0001 + FILE_TYPE_PIPE = 0x0003 + FILE_TYPE_REMOTE = 0x8000 + FILE_TYPE_UNKNOWN = 0x0000 +) + +type Hostent struct { + Name *byte + Aliases **byte + AddrType uint16 + Length uint16 + AddrList **byte +} + +type Protoent struct { + Name *byte + Aliases **byte + Proto uint16 +} + +const ( + DNS_TYPE_A = 0x0001 + DNS_TYPE_NS = 0x0002 + DNS_TYPE_MD = 0x0003 + DNS_TYPE_MF = 0x0004 + DNS_TYPE_CNAME = 0x0005 + DNS_TYPE_SOA = 0x0006 + DNS_TYPE_MB = 0x0007 + DNS_TYPE_MG = 0x0008 + DNS_TYPE_MR = 0x0009 + DNS_TYPE_NULL = 0x000a + DNS_TYPE_WKS = 0x000b + DNS_TYPE_PTR = 0x000c + DNS_TYPE_HINFO = 0x000d + DNS_TYPE_MINFO = 0x000e + DNS_TYPE_MX = 0x000f + DNS_TYPE_TEXT = 0x0010 + DNS_TYPE_RP = 0x0011 + DNS_TYPE_AFSDB = 0x0012 + DNS_TYPE_X25 = 0x0013 + DNS_TYPE_ISDN = 0x0014 + DNS_TYPE_RT = 0x0015 + DNS_TYPE_NSAP = 0x0016 + DNS_TYPE_NSAPPTR = 0x0017 + DNS_TYPE_SIG = 0x0018 + DNS_TYPE_KEY = 0x0019 + DNS_TYPE_PX = 0x001a + DNS_TYPE_GPOS = 0x001b + DNS_TYPE_AAAA = 0x001c + DNS_TYPE_LOC = 0x001d + DNS_TYPE_NXT = 0x001e + DNS_TYPE_EID = 0x001f + DNS_TYPE_NIMLOC = 0x0020 + DNS_TYPE_SRV = 0x0021 + DNS_TYPE_ATMA = 0x0022 + DNS_TYPE_NAPTR = 0x0023 + DNS_TYPE_KX = 0x0024 + DNS_TYPE_CERT = 0x0025 + DNS_TYPE_A6 = 0x0026 + DNS_TYPE_DNAME = 0x0027 + DNS_TYPE_SINK = 0x0028 + DNS_TYPE_OPT = 0x0029 + DNS_TYPE_DS = 0x002B + DNS_TYPE_RRSIG = 0x002E + DNS_TYPE_NSEC = 0x002F + DNS_TYPE_DNSKEY = 0x0030 + DNS_TYPE_DHCID = 0x0031 + DNS_TYPE_UINFO = 0x0064 + DNS_TYPE_UID = 0x0065 + DNS_TYPE_GID = 0x0066 + DNS_TYPE_UNSPEC = 0x0067 + DNS_TYPE_ADDRS = 0x00f8 + DNS_TYPE_TKEY = 0x00f9 + DNS_TYPE_TSIG = 0x00fa + DNS_TYPE_IXFR = 0x00fb + DNS_TYPE_AXFR = 0x00fc + DNS_TYPE_MAILB = 0x00fd + DNS_TYPE_MAILA = 0x00fe + DNS_TYPE_ALL = 0x00ff + DNS_TYPE_ANY = 0x00ff + DNS_TYPE_WINS = 0xff01 + DNS_TYPE_WINSR = 0xff02 + DNS_TYPE_NBSTAT = 0xff01 +) + +const ( + DNS_INFO_NO_RECORDS = 0x251D +) + +const ( + // flags inside DNSRecord.Dw + DnsSectionQuestion = 0x0000 + DnsSectionAnswer = 0x0001 + DnsSectionAuthority = 0x0002 + DnsSectionAdditional = 0x0003 +) + +type DNSSRVData struct { + Target *uint16 + Priority uint16 + Weight uint16 + Port uint16 + Pad uint16 +} + +type DNSPTRData struct { + Host *uint16 +} + +type DNSMXData struct { + NameExchange *uint16 
+ Preference uint16 + Pad uint16 +} + +type DNSTXTData struct { + StringCount uint16 + StringArray [1]*uint16 +} + +type DNSRecord struct { + Next *DNSRecord + Name *uint16 + Type uint16 + Length uint16 + Dw uint32 + Ttl uint32 + Reserved uint32 + Data [40]byte +} + +const ( + TF_DISCONNECT = 1 + TF_REUSE_SOCKET = 2 + TF_WRITE_BEHIND = 4 + TF_USE_DEFAULT_WORKER = 0 + TF_USE_SYSTEM_THREAD = 16 + TF_USE_KERNEL_APC = 32 +) + +type TransmitFileBuffers struct { + Head uintptr + HeadLength uint32 + Tail uintptr + TailLength uint32 +} + +const ( + IFF_UP = 1 + IFF_BROADCAST = 2 + IFF_LOOPBACK = 4 + IFF_POINTTOPOINT = 8 + IFF_MULTICAST = 16 +) + +const SIO_GET_INTERFACE_LIST = 0x4004747F + +// TODO(mattn): SockaddrGen is union of sockaddr/sockaddr_in/sockaddr_in6_old. +// will be fixed to change variable type as suitable. + +type SockaddrGen [24]byte + +type InterfaceInfo struct { + Flags uint32 + Address SockaddrGen + BroadcastAddress SockaddrGen + Netmask SockaddrGen +} + +type IpAddressString struct { + String [16]byte +} + +type IpMaskString IpAddressString + +type IpAddrString struct { + Next *IpAddrString + IpAddress IpAddressString + IpMask IpMaskString + Context uint32 +} + +const MAX_ADAPTER_NAME_LENGTH = 256 +const MAX_ADAPTER_DESCRIPTION_LENGTH = 128 +const MAX_ADAPTER_ADDRESS_LENGTH = 8 + +type IpAdapterInfo struct { + Next *IpAdapterInfo + ComboIndex uint32 + AdapterName [MAX_ADAPTER_NAME_LENGTH + 4]byte + Description [MAX_ADAPTER_DESCRIPTION_LENGTH + 4]byte + AddressLength uint32 + Address [MAX_ADAPTER_ADDRESS_LENGTH]byte + Index uint32 + Type uint32 + DhcpEnabled uint32 + CurrentIpAddress *IpAddrString + IpAddressList IpAddrString + GatewayList IpAddrString + DhcpServer IpAddrString + HaveWins bool + PrimaryWinsServer IpAddrString + SecondaryWinsServer IpAddrString + LeaseObtained int64 + LeaseExpires int64 +} + +const MAXLEN_PHYSADDR = 8 +const MAX_INTERFACE_NAME_LEN = 256 +const MAXLEN_IFDESCR = 256 + +type MibIfRow struct { + Name [MAX_INTERFACE_NAME_LEN]uint16 + Index uint32 + Type uint32 + Mtu uint32 + Speed uint32 + PhysAddrLen uint32 + PhysAddr [MAXLEN_PHYSADDR]byte + AdminStatus uint32 + OperStatus uint32 + LastChange uint32 + InOctets uint32 + InUcastPkts uint32 + InNUcastPkts uint32 + InDiscards uint32 + InErrors uint32 + InUnknownProtos uint32 + OutOctets uint32 + OutUcastPkts uint32 + OutNUcastPkts uint32 + OutDiscards uint32 + OutErrors uint32 + OutQLen uint32 + DescrLen uint32 + Descr [MAXLEN_IFDESCR]byte +} + +type CertInfo struct { + // Not implemented +} + +type CertContext struct { + EncodingType uint32 + EncodedCert *byte + Length uint32 + CertInfo *CertInfo + Store Handle +} + +type CertChainContext struct { + Size uint32 + TrustStatus CertTrustStatus + ChainCount uint32 + Chains **CertSimpleChain + LowerQualityChainCount uint32 + LowerQualityChains **CertChainContext + HasRevocationFreshnessTime uint32 + RevocationFreshnessTime uint32 +} + +type CertTrustListInfo struct { + // Not implemented +} + +type CertSimpleChain struct { + Size uint32 + TrustStatus CertTrustStatus + NumElements uint32 + Elements **CertChainElement + TrustListInfo *CertTrustListInfo + HasRevocationFreshnessTime uint32 + RevocationFreshnessTime uint32 +} + +type CertChainElement struct { + Size uint32 + CertContext *CertContext + TrustStatus CertTrustStatus + RevocationInfo *CertRevocationInfo + IssuanceUsage *CertEnhKeyUsage + ApplicationUsage *CertEnhKeyUsage + ExtendedErrorInfo *uint16 +} + +type CertRevocationCrlInfo struct { + // Not implemented +} + +type CertRevocationInfo struct { + 
Size uint32 + RevocationResult uint32 + RevocationOid *byte + OidSpecificInfo Pointer + HasFreshnessTime uint32 + FreshnessTime uint32 + CrlInfo *CertRevocationCrlInfo +} + +type CertTrustStatus struct { + ErrorStatus uint32 + InfoStatus uint32 +} + +type CertUsageMatch struct { + Type uint32 + Usage CertEnhKeyUsage +} + +type CertEnhKeyUsage struct { + Length uint32 + UsageIdentifiers **byte +} + +type CertChainPara struct { + Size uint32 + RequestedUsage CertUsageMatch + RequstedIssuancePolicy CertUsageMatch + URLRetrievalTimeout uint32 + CheckRevocationFreshnessTime uint32 + RevocationFreshnessTime uint32 + CacheResync *Filetime +} + +type CertChainPolicyPara struct { + Size uint32 + Flags uint32 + ExtraPolicyPara Pointer +} + +type SSLExtraCertChainPolicyPara struct { + Size uint32 + AuthType uint32 + Checks uint32 + ServerName *uint16 +} + +type CertChainPolicyStatus struct { + Size uint32 + Error uint32 + ChainIndex uint32 + ElementIndex uint32 + ExtraPolicyStatus Pointer +} + +const ( + // do not reorder + HKEY_CLASSES_ROOT = 0x80000000 + iota + HKEY_CURRENT_USER + HKEY_LOCAL_MACHINE + HKEY_USERS + HKEY_PERFORMANCE_DATA + HKEY_CURRENT_CONFIG + HKEY_DYN_DATA + + KEY_QUERY_VALUE = 1 + KEY_SET_VALUE = 2 + KEY_CREATE_SUB_KEY = 4 + KEY_ENUMERATE_SUB_KEYS = 8 + KEY_NOTIFY = 16 + KEY_CREATE_LINK = 32 + KEY_WRITE = 0x20006 + KEY_EXECUTE = 0x20019 + KEY_READ = 0x20019 + KEY_WOW64_64KEY = 0x0100 + KEY_WOW64_32KEY = 0x0200 + KEY_ALL_ACCESS = 0xf003f +) + +const ( + // do not reorder + REG_NONE = iota + REG_SZ + REG_EXPAND_SZ + REG_BINARY + REG_DWORD_LITTLE_ENDIAN + REG_DWORD_BIG_ENDIAN + REG_LINK + REG_MULTI_SZ + REG_RESOURCE_LIST + REG_FULL_RESOURCE_DESCRIPTOR + REG_RESOURCE_REQUIREMENTS_LIST + REG_QWORD_LITTLE_ENDIAN + REG_DWORD = REG_DWORD_LITTLE_ENDIAN + REG_QWORD = REG_QWORD_LITTLE_ENDIAN +) + +type AddrinfoW struct { + Flags int32 + Family int32 + Socktype int32 + Protocol int32 + Addrlen uintptr + Canonname *uint16 + Addr uintptr + Next *AddrinfoW +} + +const ( + AI_PASSIVE = 1 + AI_CANONNAME = 2 + AI_NUMERICHOST = 4 +) + +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +var WSAID_CONNECTEX = GUID{ + 0x25a207b9, + 0xddf3, + 0x4660, + [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, +} + +var WSAID_WSASENDMSG = GUID{ + 0xa441e712, + 0x754f, + 0x43ca, + [8]byte{0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d}, +} + +var WSAID_WSARECVMSG = GUID{ + 0xf689d7c8, + 0x6f1f, + 0x436b, + [8]byte{0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22}, +} + +const ( + FILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + FILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +const ( + WSAPROTOCOL_LEN = 255 + MAX_PROTOCOL_CHAIN = 7 + BASE_PROTOCOL = 1 + LAYERED_PROTOCOL = 0 + + XP1_CONNECTIONLESS = 0x00000001 + XP1_GUARANTEED_DELIVERY = 0x00000002 + XP1_GUARANTEED_ORDER = 0x00000004 + XP1_MESSAGE_ORIENTED = 0x00000008 + XP1_PSEUDO_STREAM = 0x00000010 + XP1_GRACEFUL_CLOSE = 0x00000020 + XP1_EXPEDITED_DATA = 0x00000040 + XP1_CONNECT_DATA = 0x00000080 + XP1_DISCONNECT_DATA = 0x00000100 + XP1_SUPPORT_BROADCAST = 0x00000200 + XP1_SUPPORT_MULTIPOINT = 0x00000400 + XP1_MULTIPOINT_CONTROL_PLANE = 0x00000800 + XP1_MULTIPOINT_DATA_PLANE = 0x00001000 + XP1_QOS_SUPPORTED = 0x00002000 + XP1_UNI_SEND = 0x00008000 + XP1_UNI_RECV = 0x00010000 + XP1_IFS_HANDLES = 0x00020000 + XP1_PARTIAL_MESSAGE = 0x00040000 + XP1_SAN_SUPPORT_SDP = 0x00080000 + + PFL_MULTIPLE_PROTO_ENTRIES = 0x00000001 + PFL_RECOMMENDED_PROTO_ENTRY = 0x00000002 + PFL_HIDDEN = 0x00000004 + PFL_MATCHES_PROTOCOL_ZERO = 0x00000008 + 
PFL_NETWORKDIRECT_PROVIDER = 0x00000010 +) + +type WSAProtocolInfo struct { + ServiceFlags1 uint32 + ServiceFlags2 uint32 + ServiceFlags3 uint32 + ServiceFlags4 uint32 + ProviderFlags uint32 + ProviderId GUID + CatalogEntryId uint32 + ProtocolChain WSAProtocolChain + Version int32 + AddressFamily int32 + MaxSockAddr int32 + MinSockAddr int32 + SocketType int32 + Protocol int32 + ProtocolMaxOffset int32 + NetworkByteOrder int32 + SecurityScheme int32 + MessageSize uint32 + ProviderReserved uint32 + ProtocolName [WSAPROTOCOL_LEN + 1]uint16 +} + +type WSAProtocolChain struct { + ChainLen int32 + ChainEntries [MAX_PROTOCOL_CHAIN]uint32 +} + +type TCPKeepalive struct { + OnOff uint32 + Time uint32 + Interval uint32 +} + +type symbolicLinkReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + Flags uint32 + PathBuffer [1]uint16 +} + +type mountPointReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + PathBuffer [1]uint16 +} + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + + // GenericReparseBuffer + reparseBuffer byte +} + +const ( + FSCTL_GET_REPARSE_POINT = 0x900A8 + MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024 + IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 + IO_REPARSE_TAG_SYMLINK = 0xA000000C + SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 +) + +const ( + ComputerNameNetBIOS = 0 + ComputerNameDnsHostname = 1 + ComputerNameDnsDomain = 2 + ComputerNameDnsFullyQualified = 3 + ComputerNamePhysicalNetBIOS = 4 + ComputerNamePhysicalDnsHostname = 5 + ComputerNamePhysicalDnsDomain = 6 + ComputerNamePhysicalDnsFullyQualified = 7 + ComputerNameMax = 8 +) + +const ( + MOVEFILE_REPLACE_EXISTING = 0x1 + MOVEFILE_COPY_ALLOWED = 0x2 + MOVEFILE_DELAY_UNTIL_REBOOT = 0x4 + MOVEFILE_WRITE_THROUGH = 0x8 + MOVEFILE_CREATE_HARDLINK = 0x10 + MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 +) + +const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 + +const ( + IF_TYPE_OTHER = 1 + IF_TYPE_ETHERNET_CSMACD = 6 + IF_TYPE_ISO88025_TOKENRING = 9 + IF_TYPE_PPP = 23 + IF_TYPE_SOFTWARE_LOOPBACK = 24 + IF_TYPE_ATM = 37 + IF_TYPE_IEEE80211 = 71 + IF_TYPE_TUNNEL = 131 + IF_TYPE_IEEE1394 = 144 +) + +type SocketAddress struct { + Sockaddr *syscall.RawSockaddrAny + SockaddrLength int32 +} + +type IpAdapterUnicastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterUnicastAddress + Address SocketAddress + PrefixOrigin int32 + SuffixOrigin int32 + DadState int32 + ValidLifetime uint32 + PreferredLifetime uint32 + LeaseLifetime uint32 + OnLinkPrefixLength uint8 +} + +type IpAdapterAnycastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterAnycastAddress + Address SocketAddress +} + +type IpAdapterMulticastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterMulticastAddress + Address SocketAddress +} + +type IpAdapterDnsServerAdapter struct { + Length uint32 + Reserved uint32 + Next *IpAdapterDnsServerAdapter + Address SocketAddress +} + +type IpAdapterPrefix struct { + Length uint32 + Flags uint32 + Next *IpAdapterPrefix + Address SocketAddress + PrefixLength uint32 +} + +type IpAdapterAddresses struct { + Length uint32 + IfIndex uint32 + Next *IpAdapterAddresses + AdapterName *byte + FirstUnicastAddress *IpAdapterUnicastAddress + FirstAnycastAddress *IpAdapterAnycastAddress + FirstMulticastAddress *IpAdapterMulticastAddress + FirstDnsServerAddress *IpAdapterDnsServerAdapter + DnsSuffix *uint16 + Description *uint16 + 
FriendlyName *uint16 + PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte + PhysicalAddressLength uint32 + Flags uint32 + Mtu uint32 + IfType uint32 + OperStatus uint32 + Ipv6IfIndex uint32 + ZoneIndices [16]uint32 + FirstPrefix *IpAdapterPrefix + /* more fields might be present here. */ +} + +const ( + IfOperStatusUp = 1 + IfOperStatusDown = 2 + IfOperStatusTesting = 3 + IfOperStatusUnknown = 4 + IfOperStatusDormant = 5 + IfOperStatusNotPresent = 6 + IfOperStatusLowerLayerDown = 7 +) + +// Console related constants used for the mode parameter to SetConsoleMode. See +// https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. + +const ( + ENABLE_PROCESSED_INPUT = 0x1 + ENABLE_LINE_INPUT = 0x2 + ENABLE_ECHO_INPUT = 0x4 + ENABLE_WINDOW_INPUT = 0x8 + ENABLE_MOUSE_INPUT = 0x10 + ENABLE_INSERT_MODE = 0x20 + ENABLE_QUICK_EDIT_MODE = 0x40 + ENABLE_EXTENDED_FLAGS = 0x80 + ENABLE_AUTO_POSITION = 0x100 + ENABLE_VIRTUAL_TERMINAL_INPUT = 0x200 + + ENABLE_PROCESSED_OUTPUT = 0x1 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x2 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 + DISABLE_NEWLINE_AUTO_RETURN = 0x8 + ENABLE_LVB_GRID_WORLDWIDE = 0x10 +) + +type Coord struct { + X int16 + Y int16 +} + +type SmallRect struct { + Left int16 + Top int16 + Right int16 + Bottom int16 +} + +// Used with GetConsoleScreenBuffer to retreive information about a console +// screen buffer. See +// https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str +// for details. + +type ConsoleScreenBufferInfo struct { + Size Coord + CursorPosition Coord + Attributes uint16 + Window SmallRect + MaximumWindowSize Coord +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_386.go b/vendor/golang.org/x/sys/windows/types_windows_386.go new file mode 100644 index 000000000..fe0ddd031 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows_386.go @@ -0,0 +1,22 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +type WSAData struct { + Version uint16 + HighVersion uint16 + Description [WSADESCRIPTION_LEN + 1]byte + SystemStatus [WSASYS_STATUS_LEN + 1]byte + MaxSockets uint16 + MaxUdpDg uint16 + VendorInfo *byte +} + +type Servent struct { + Name *byte + Aliases **byte + Port uint16 + Proto *byte +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_amd64.go b/vendor/golang.org/x/sys/windows/types_windows_amd64.go new file mode 100644 index 000000000..7e154c2df --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows_amd64.go @@ -0,0 +1,22 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +type WSAData struct { + Version uint16 + HighVersion uint16 + MaxSockets uint16 + MaxUdpDg uint16 + VendorInfo *byte + Description [WSADESCRIPTION_LEN + 1]byte + SystemStatus [WSASYS_STATUS_LEN + 1]byte +} + +type Servent struct { + Name *byte + Aliases **byte + Proto *byte + Port uint16 +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go new file mode 100644 index 000000000..fc56aec03 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -0,0 +1,2700 @@ +// Code generated by 'go generate'; DO NOT EDIT. 
+ +package windows + +import ( + "syscall" + "unsafe" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = NewLazySystemDLL("advapi32.dll") + modkernel32 = NewLazySystemDLL("kernel32.dll") + modshell32 = NewLazySystemDLL("shell32.dll") + modmswsock = NewLazySystemDLL("mswsock.dll") + modcrypt32 = NewLazySystemDLL("crypt32.dll") + modws2_32 = NewLazySystemDLL("ws2_32.dll") + moddnsapi = NewLazySystemDLL("dnsapi.dll") + modiphlpapi = NewLazySystemDLL("iphlpapi.dll") + modsecur32 = NewLazySystemDLL("secur32.dll") + modnetapi32 = NewLazySystemDLL("netapi32.dll") + moduserenv = NewLazySystemDLL("userenv.dll") + + procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") + procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") + procReportEventW = modadvapi32.NewProc("ReportEventW") + procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") + procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") + procCreateServiceW = modadvapi32.NewProc("CreateServiceW") + procOpenServiceW = modadvapi32.NewProc("OpenServiceW") + procDeleteService = modadvapi32.NewProc("DeleteService") + procStartServiceW = modadvapi32.NewProc("StartServiceW") + procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") + procControlService = modadvapi32.NewProc("ControlService") + procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") + procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") + procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") + procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") + procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") + procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") + procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") + procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") + procGetLastError = modkernel32.NewProc("GetLastError") + procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") + procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") + procFreeLibrary = modkernel32.NewProc("FreeLibrary") + procGetProcAddress = modkernel32.NewProc("GetProcAddress") + procGetVersion = modkernel32.NewProc("GetVersion") + procFormatMessageW = modkernel32.NewProc("FormatMessageW") + procExitProcess = modkernel32.NewProc("ExitProcess") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procReadFile = modkernel32.NewProc("ReadFile") + procWriteFile = modkernel32.NewProc("WriteFile") + procSetFilePointer = modkernel32.NewProc("SetFilePointer") + procCloseHandle = modkernel32.NewProc("CloseHandle") + procGetStdHandle = modkernel32.NewProc("GetStdHandle") + procSetStdHandle = modkernel32.NewProc("SetStdHandle") + procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") + procFindNextFileW = modkernel32.NewProc("FindNextFileW") + procFindClose = modkernel32.NewProc("FindClose") + procGetFileInformationByHandle = 
modkernel32.NewProc("GetFileInformationByHandle") + procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") + procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") + procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") + procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procDeleteFileW = modkernel32.NewProc("DeleteFileW") + procMoveFileW = modkernel32.NewProc("MoveFileW") + procMoveFileExW = modkernel32.NewProc("MoveFileExW") + procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") + procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") + procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") + procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") + procCancelIo = modkernel32.NewProc("CancelIo") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCreateProcessW = modkernel32.NewProc("CreateProcessW") + procOpenProcess = modkernel32.NewProc("OpenProcess") + procTerminateProcess = modkernel32.NewProc("TerminateProcess") + procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") + procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") + procGetCurrentProcess = modkernel32.NewProc("GetCurrentProcess") + procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") + procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") + procGetTempPathW = modkernel32.NewProc("GetTempPathW") + procCreatePipe = modkernel32.NewProc("CreatePipe") + procGetFileType = modkernel32.NewProc("GetFileType") + procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") + procCryptReleaseContext = modadvapi32.NewProc("CryptReleaseContext") + procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") + procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") + procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") + procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") + procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") + procSetFileTime = modkernel32.NewProc("SetFileTime") + procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") + procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") + procGetFileAttributesExW = modkernel32.NewProc("GetFileAttributesExW") + procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") + procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") + procLocalFree = modkernel32.NewProc("LocalFree") + procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") + procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") + procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") + procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") + procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") + procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") + procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") + procFlushViewOfFile = 
modkernel32.NewProc("FlushViewOfFile") + procVirtualLock = modkernel32.NewProc("VirtualLock") + procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") + procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") + procVirtualFree = modkernel32.NewProc("VirtualFree") + procVirtualProtect = modkernel32.NewProc("VirtualProtect") + procTransmitFile = modmswsock.NewProc("TransmitFile") + procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") + procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") + procCertOpenStore = modcrypt32.NewProc("CertOpenStore") + procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") + procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore") + procCertCloseStore = modcrypt32.NewProc("CertCloseStore") + procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") + procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") + procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext") + procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") + procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") + procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") + procRegCloseKey = modadvapi32.NewProc("RegCloseKey") + procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") + procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") + procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") + procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") + procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") + procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") + procReadConsoleW = modkernel32.NewProc("ReadConsoleW") + procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") + procProcess32FirstW = modkernel32.NewProc("Process32FirstW") + procProcess32NextW = modkernel32.NewProc("Process32NextW") + procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") + procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") + procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") + procCreateEventW = modkernel32.NewProc("CreateEventW") + procCreateEventExW = modkernel32.NewProc("CreateEventExW") + procOpenEventW = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") + procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") + procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") + procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") + procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") + procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") + procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") + procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") + procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") + procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") + procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") + 
procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") + procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW") + procGetVolumeNameForVolumeMountPointW = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") + procGetVolumePathNameW = modkernel32.NewProc("GetVolumePathNameW") + procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW") + procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") + procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") + procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procWSAStartup = modws2_32.NewProc("WSAStartup") + procWSACleanup = modws2_32.NewProc("WSACleanup") + procWSAIoctl = modws2_32.NewProc("WSAIoctl") + procsocket = modws2_32.NewProc("socket") + procsetsockopt = modws2_32.NewProc("setsockopt") + procgetsockopt = modws2_32.NewProc("getsockopt") + procbind = modws2_32.NewProc("bind") + procconnect = modws2_32.NewProc("connect") + procgetsockname = modws2_32.NewProc("getsockname") + procgetpeername = modws2_32.NewProc("getpeername") + proclisten = modws2_32.NewProc("listen") + procshutdown = modws2_32.NewProc("shutdown") + procclosesocket = modws2_32.NewProc("closesocket") + procAcceptEx = modmswsock.NewProc("AcceptEx") + procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") + procWSARecv = modws2_32.NewProc("WSARecv") + procWSASend = modws2_32.NewProc("WSASend") + procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") + procWSASendTo = modws2_32.NewProc("WSASendTo") + procgethostbyname = modws2_32.NewProc("gethostbyname") + procgetservbyname = modws2_32.NewProc("getservbyname") + procntohs = modws2_32.NewProc("ntohs") + procgetprotobyname = modws2_32.NewProc("getprotobyname") + procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") + procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") + procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") + procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") + procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") + procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") + procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") + procGetACP = modkernel32.NewProc("GetACP") + procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procTranslateNameW = modsecur32.NewProc("TranslateNameW") + procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") + procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") + procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procGetLengthSid = modadvapi32.NewProc("GetLengthSid") + procCopySid = modadvapi32.NewProc("CopySid") + procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") + procFreeSid = modadvapi32.NewProc("FreeSid") + procEqualSid = modadvapi32.NewProc("EqualSid") + procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") + procOpenProcessToken = 
modadvapi32.NewProc("OpenProcessToken") + procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") + procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") +) + +func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeregisterEventSource(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CloseServiceHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeleteService(service Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func StartService(service Handle, numArgs uint32, argVectors 
**uint16) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func EnumServicesStatusEx(mgr Handle, infoLevel 
uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetLastError() (lasterr error) { + r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + if r0 != 0 { + lasterr = syscall.Errno(r0) + } + return +} + +func LoadLibrary(libname string) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return + } + return _LoadLibrary(_p0) +} + +func _LoadLibrary(libname *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return + } + return _LoadLibraryEx(_p0, zero, flags) +} + +func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FreeLibrary(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(procname) + if err != nil { + return + } + return _GetProcAddress(module, _p0) +} + +func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + proc = uintptr(r0) + if proc == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVersion() (ver uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + ver = uint32(r0) + if ver == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) { + var _p0 *uint16 + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 
:= syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ExitProcess(exitcode uint32) { + syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) + return +} + +func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { + r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + newlowoffset = uint32(r0) + if newlowoffset == 0xffffffff { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CloseHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetStdHandle(stdhandle uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetStdHandle(stdhandle uint32, handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func findNextFile1(handle Handle, data *win32finddata1) (err error) { + r1, 
_, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindClose(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetCurrentDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { + r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func RemoveDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeleteFile(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func MoveFile(from *uint16, to *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetComputerName(buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetEndOfFile(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) 
+ } else { + err = syscall.EINVAL + } + } + return +} + +func GetSystemTimeAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + return +} + +func GetSystemTimePreciseAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + return +} + +func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + rc = uint32(r0) + if rc == 0xffffffff { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CancelIo(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CancelIoEx(s Handle, o *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { + var _p0 uint32 + if inheritHandles { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(da), uintptr(_p0), uintptr(pid)) + handle = 
Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func TerminateProcess(handle Handle, exitcode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetStartupInfo(startupInfo *StartupInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCurrentProcess() (pseudoHandle Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentProcess.Addr(), 0, 0, 0, 0) + pseudoHandle = Handle(r0) + if pseudoHandle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { + var _p0 uint32 + if bInheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { + r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + event = uint32(r0) + if event == 0xffffffff { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileType(filehandle Handle) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 
0, 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { + r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetEnvironmentStrings() (envs *uint16, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + envs = (*uint16)(unsafe.Pointer(r0)) + if envs == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FreeEnvironmentStrings(envs *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileAttributes(name *uint16) (attrs uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + attrs = uint32(r0) + if attrs == INVALID_FILE_ATTRIBUTES { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetFileAttributes(name *uint16, attrs uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { + r1, _, e1 := 
syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCommandLine() (cmd *uint16) { + r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + cmd = (*uint16)(unsafe.Pointer(r0)) + return +} + +func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { + r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) + if argv == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LocalFree(hmem Handle) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + handle = Handle(r0) + if handle != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FlushFileBuffers(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + addr = uintptr(r0) + if addr == 0 { + 
if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func UnmapViewOfFile(addr uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FlushViewOfFile(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualLock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualUnlock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + value = uintptr(r0) + if value == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + var _p0 uint32 + if watchSubTree { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { + r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + store = Handle(r0) + if store == 0 
{ + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { + r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertCloseStore(store Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { + r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertFreeCertificateChain(ctx *CertChainContext) { + syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + return +} + +func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertFreeCertificateContext(ctx *CertContext) (err error) { + r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { + r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) + if r1 == 0 { + if e1 != 
0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegCloseKey(key Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func getCurrentProcessId() (pid uint32) { + r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + pid = uint32(r0) + return +} + +func GetConsoleMode(console Handle, mode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetConsoleMode(console Handle, mode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) 
(err error) { + r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCurrentThreadId() (id uint32) { + r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) + id = uint32(r0) + return +} + +func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 
0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ResetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func PulseEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength 
uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindVolumeClose(findVolume Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetDriveType(rootPathName *uint16) (driveType uint32) { + r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + driveType = uint32(r0) + return +} + +func GetLogicalDrives() (drivesBitMask uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + drivesBitMask = uint32(r0) + if drivesBitMask == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = 
syscall.EINVAL + } + } + return +} + +func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { + r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) + if r0 != 0 { + sockerr = syscall.Errno(r0) + } + return +} + +func WSACleanup() (err error) { + r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { + r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + if r1 == socket_error { + if e1 != 0 { + 
err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { + r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func listen(s Handle, backlog int32) (err error) { + r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func shutdown(s Handle, how int32) (err error) { + r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Closesocket(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { + syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) + return +} + +func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, 
e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetHostByName(name string) (h *Hostent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + return _GetHostByName(_p0) +} + +func _GetHostByName(name *byte) (h *Hostent, err error) { + r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + h = (*Hostent)(unsafe.Pointer(r0)) + if h == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetServByName(name string, proto string) (s *Servent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(proto) + if err != nil { + return + } + return _GetServByName(_p0, _p1) +} + +func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { + r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + s = (*Servent)(unsafe.Pointer(r0)) + if s == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Ntohs(netshort uint16) (u uint16) { + r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + u = uint16(r0) + return +} + +func GetProtoByName(name string) (p *Protoent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + return _GetProtoByName(_p0) +} + +func _GetProtoByName(name *byte) (p 
*Protoent, err error) { + r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + p = (*Protoent)(unsafe.Pointer(r0)) + if p == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + var _p0 *uint16 + _p0, status = syscall.UTF16PtrFromString(name) + if status != nil { + return + } + return _DnsQuery(_p0, qtype, options, extra, qrs, pr) +} + +func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + if r0 != 0 { + status = syscall.Errno(r0) + } + return +} + +func DnsRecordListFree(rl *DNSRecord, freetype uint32) { + syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) + return +} + +func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { + r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + same = r0 != 0 + return +} + +func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { + r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + if r0 != 0 { + sockerr = syscall.Errno(r0) + } + return +} + +func FreeAddrInfoW(addrinfo *AddrinfoW) { + syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) + return +} + +func GetIfEntry(pIfRow *MibIfRow) (errcode error) { + r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { + r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { + r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + n = int32(r0) + if n == -1 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { + r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetACP() (acp uint32) { + r0, _, _ := syscall.Syscall(procGetACP.Addr(), 
0, 0, 0, 0) + acp = uint32(r0) + return +} + +func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { + r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + nwrite = int32(r0) + if nwrite == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { + r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { + r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetApiBufferFree(buf *byte) (neterr error) { + r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, 
uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetLengthSid(sid *SID) (len uint32) { + r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + len = uint32(r0) + return +} + +func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FreeSid(sid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + if r1 != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { + r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + isEqual = r0 != 0 + return +} + +func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { + r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenProcessToken(h Handle, access uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(h), uintptr(access), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(t), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/k8s.io/api/LICENSE 
b/vendor/k8s.io/api/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go new file mode 100644 index 000000000..8a5d1fbbb --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// Package v1alpha1 is the v1alpha1 version of the API. +// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration +// InitializerConfiguration and validatingWebhookConfiguration is for the +// new dynamic admission controller configuration. +// +groupName=admissionregistration.k8s.io +package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1" diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go new file mode 100644 index 000000000..542d8268a --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -0,0 +1,1028 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto + + It has these top-level messages: + Initializer + InitializerConfiguration + InitializerConfigurationList + Rule +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Initializer) Reset() { *m = Initializer{} } +func (*Initializer) ProtoMessage() {} +func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *InitializerConfiguration) Reset() { *m = InitializerConfiguration{} } +func (*InitializerConfiguration) ProtoMessage() {} +func (*InitializerConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *InitializerConfigurationList) Reset() { *m = InitializerConfigurationList{} } +func (*InitializerConfigurationList) ProtoMessage() {} +func (*InitializerConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func init() { + proto.RegisterType((*Initializer)(nil), "k8s.io.api.admissionregistration.v1alpha1.Initializer") + proto.RegisterType((*InitializerConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfiguration") + proto.RegisterType((*InitializerConfigurationList)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfigurationList") + proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1alpha1.Rule") +} +func (m *Initializer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *InitializerConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InitializerConfiguration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.Initializers) > 0 { + for _, msg := range m.Initializers { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *InitializerConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InitializerConfigurationList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Rule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Initializer) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *InitializerConfiguration) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Initializers) > 0 { + for _, e := range m.Initializers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *InitializerConfigurationList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Rule) Size() (n int) { + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Initializer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Initializer{`, + 
`Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "Rule", "Rule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *InitializerConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InitializerConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Initializers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *InitializerConfigurationList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InitializerConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "InitializerConfiguration", "InitializerConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Rule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Rule{`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Initializer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Initializer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, 
Rule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InitializerConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitializerConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitializerConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Initializers = append(m.Initializers, Initializer{}) + if err := m.Initializers[len(m.Initializers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InitializerConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitializerConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitializerConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, InitializerConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 545 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x51, 0x4d, 0x8b, 0x13, 0x3f, + 0x18, 0x6f, 0xfe, 0xdb, 0x42, 0x9b, 0x76, 0xf9, 0xcb, 0xe0, 0xa1, 0x14, 0x99, 0x96, 0x9e, 0x2a, + 0x62, 0x62, 0x57, 0x59, 0xbc, 0xee, 0xec, 0x41, 0x0a, 0xbe, 0x2c, 0x41, 0x3c, 0x88, 0x07, 
0xd3, + 0xf6, 0xd9, 0x69, 0x6c, 0x27, 0x19, 0x92, 0x4c, 0x41, 0x4f, 0x5e, 0xbc, 0x0b, 0x7e, 0xa9, 0x1e, + 0xf7, 0xb8, 0xa7, 0x62, 0x47, 0xf0, 0xe8, 0x67, 0x90, 0x99, 0xe9, 0xec, 0xcc, 0x5a, 0x8b, 0xab, + 0xb7, 0x3c, 0xbf, 0x27, 0xbf, 0xb7, 0x04, 0xb3, 0xf9, 0x63, 0x43, 0x84, 0xa2, 0xf3, 0x68, 0x0c, + 0x5a, 0x82, 0x05, 0x43, 0x97, 0x20, 0xa7, 0x4a, 0xd3, 0xed, 0x82, 0x87, 0x82, 0xf2, 0x69, 0x20, + 0x8c, 0x11, 0x4a, 0x6a, 0xf0, 0x85, 0xb1, 0x9a, 0x5b, 0xa1, 0x24, 0x5d, 0x0e, 0xf9, 0x22, 0x9c, + 0xf1, 0x21, 0xf5, 0x41, 0x82, 0xe6, 0x16, 0xa6, 0x24, 0xd4, 0xca, 0x2a, 0xe7, 0x6e, 0x46, 0x25, + 0x3c, 0x14, 0xe4, 0xb7, 0x54, 0x92, 0x53, 0x3b, 0xf7, 0x7d, 0x61, 0x67, 0xd1, 0x98, 0x4c, 0x54, + 0x40, 0x7d, 0xe5, 0x2b, 0x9a, 0x2a, 0x8c, 0xa3, 0xf3, 0x74, 0x4a, 0x87, 0xf4, 0x94, 0x29, 0x77, + 0x1e, 0x15, 0xa1, 0x02, 0x3e, 0x99, 0x09, 0x09, 0xfa, 0x3d, 0x0d, 0xe7, 0x7e, 0x02, 0x18, 0x1a, + 0x80, 0xe5, 0x74, 0xb9, 0x93, 0xa7, 0x43, 0xf7, 0xb1, 0x74, 0x24, 0xad, 0x08, 0x60, 0x87, 0x70, + 0xfc, 0x27, 0x82, 0x99, 0xcc, 0x20, 0xe0, 0x3b, 0xbc, 0x87, 0xfb, 0x78, 0x91, 0x15, 0x0b, 0x2a, + 0xa4, 0x35, 0x56, 0xff, 0x4a, 0xea, 0x7f, 0x42, 0xb8, 0x39, 0x92, 0xc2, 0x0a, 0xbe, 0x10, 0x1f, + 0x40, 0x3b, 0x3d, 0x5c, 0x95, 0x3c, 0x80, 0x36, 0xea, 0xa1, 0x41, 0xc3, 0x6b, 0xad, 0xd6, 0xdd, + 0x4a, 0xbc, 0xee, 0x56, 0x9f, 0xf3, 0x00, 0x58, 0xba, 0x71, 0x5e, 0xe2, 0x9a, 0x8e, 0x16, 0x60, + 0xda, 0xff, 0xf5, 0x0e, 0x06, 0xcd, 0x23, 0x4a, 0x6e, 0xfc, 0xde, 0x84, 0x45, 0x0b, 0xf0, 0x0e, + 0xb7, 0x9a, 0xb5, 0x64, 0x32, 0x2c, 0x13, 0xeb, 0xff, 0x40, 0xb8, 0x5d, 0xca, 0x71, 0xaa, 0xe4, + 0xb9, 0xf0, 0xa3, 0x4c, 0xc0, 0x79, 0x8b, 0xeb, 0xc9, 0xeb, 0x4e, 0xb9, 0xe5, 0x69, 0xb0, 0xe6, + 0xd1, 0x83, 0x92, 0xeb, 0x55, 0x59, 0x12, 0xce, 0xfd, 0x04, 0x30, 0x24, 0xb9, 0x4d, 0x96, 0x43, + 0xf2, 0x62, 0xfc, 0x0e, 0x26, 0xf6, 0x19, 0x58, 0xee, 0x39, 0x5b, 0x5b, 0x5c, 0x60, 0xec, 0x4a, + 0xd5, 0x09, 0x71, 0x4b, 0x14, 0xee, 0x79, 0xb7, 0xe3, 0xbf, 0xe8, 0x56, 0x0a, 0xef, 0xdd, 0xde, + 0x7a, 0xb5, 0x4a, 0xa0, 0x61, 0xd7, 0x1c, 0xfa, 0xdf, 0x11, 0xbe, 0xb3, 0xaf, 0xf0, 0x53, 0x61, + 0xac, 0xf3, 0x66, 0xa7, 0x34, 0xb9, 0x59, 0xe9, 0x84, 0x9d, 0x56, 0xbe, 0xb5, 0x8d, 0x51, 0xcf, + 0x91, 0x52, 0xe1, 0x19, 0xae, 0x09, 0x0b, 0x41, 0xde, 0xf4, 0xf4, 0xdf, 0x9a, 0x5e, 0x4b, 0x5d, + 0xfc, 0xec, 0x28, 0x51, 0x66, 0x99, 0x41, 0xff, 0x0b, 0xc2, 0xd5, 0xe4, 0xab, 0x9d, 0x7b, 0xb8, + 0xc1, 0x43, 0xf1, 0x44, 0xab, 0x28, 0x34, 0x6d, 0xd4, 0x3b, 0x18, 0x34, 0xbc, 0xc3, 0x78, 0xdd, + 0x6d, 0x9c, 0x9c, 0x8d, 0x32, 0x90, 0x15, 0x7b, 0x67, 0x88, 0x9b, 0x3c, 0x14, 0xaf, 0x40, 0x27, + 0x39, 0xb2, 0x94, 0x0d, 0xef, 0xff, 0x78, 0xdd, 0x6d, 0x9e, 0x9c, 0x8d, 0x72, 0x98, 0x95, 0xef, + 0x24, 0xfa, 0x1a, 0x8c, 0x8a, 0xf4, 0x04, 0x4c, 0xfb, 0xa0, 0xd0, 0x67, 0x39, 0xc8, 0x8a, 0xbd, + 0x47, 0x56, 0x1b, 0xb7, 0x72, 0xb1, 0x71, 0x2b, 0x97, 0x1b, 0xb7, 0xf2, 0x31, 0x76, 0xd1, 0x2a, + 0x76, 0xd1, 0x45, 0xec, 0xa2, 0xcb, 0xd8, 0x45, 0x5f, 0x63, 0x17, 0x7d, 0xfe, 0xe6, 0x56, 0x5e, + 0xd7, 0xf3, 0xd2, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x1d, 0xfb, 0x23, 0x89, 0xaa, 0x04, 0x00, + 0x00, +} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto new file mode 100644 index 000000000..4f0e400e7 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -0,0 +1,108 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.admissionregistration.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// Initializer describes the name and the failure policy of an initializer, and +// what resources it applies to. +message Initializer { + // Name is the identifier of the initializer. It will be added to the + // object that needs to be initialized. + // Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where + // "alwayspullimages" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required + optional string name = 1; + + // Rules describes what resources/subresources the initializer cares about. + // The initializer cares about an operation if it matches _any_ Rule. + // Rule.Resources must not include subresources. + repeated Rule rules = 2; +} + +// InitializerConfiguration describes the configuration of initializers. +message InitializerConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Initializers is a list of resources and their default initializers + // Order-sensitive. + // When merging multiple InitializerConfigurations, we sort the initializers + // from different InitializerConfigurations by the name of the + // InitializerConfigurations; the order of the initializers from the same + // InitializerConfiguration is preserved. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + repeated Initializer initializers = 2; +} + +// InitializerConfigurationList is a list of InitializerConfiguration. +message InitializerConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of InitializerConfiguration. + repeated InitializerConfiguration items = 2; +} + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +message Rule { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiGroups = 1; + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiVersions = 2; + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. 
+ // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + repeated string resources = 3; +} + diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go new file mode 100644 index 000000000..e9a4164c3 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "admissionregistration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &InitializerConfiguration{}, + &InitializerConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go new file mode 100644 index 000000000..a245f1e85 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -0,0 +1,106 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
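The register.go hunk above is the standard k8s.io/api registration shim: addKnownTypes adds InitializerConfiguration and InitializerConfigurationList to a runtime.Scheme under admissionregistration.k8s.io/v1alpha1. As a minimal sketch of how downstream code typically consumes AddToScheme — the scheme construction and printed GroupVersionKind below are illustrative only, not part of this patch:

package main

import (
	"fmt"

	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Build an empty scheme and register the v1alpha1 admissionregistration
	// types into it, much like a generated clientset scheme would.
	scheme := runtime.NewScheme()
	if err := admissionregistrationv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The scheme can now resolve the GroupVersionKind for these objects.
	gvks, _, err := scheme.ObjectKinds(&admissionregistrationv1alpha1.InitializerConfiguration{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // e.g. [admissionregistration.k8s.io/v1alpha1, Kind=InitializerConfiguration]
}
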
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InitializerConfiguration describes the configuration of initializers. +type InitializerConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Initializers is a list of resources and their default initializers + // Order-sensitive. + // When merging multiple InitializerConfigurations, we sort the initializers + // from different InitializerConfigurations by the name of the + // InitializerConfigurations; the order of the initializers from the same + // InitializerConfiguration is preserved. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + Initializers []Initializer `json:"initializers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=initializers"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InitializerConfigurationList is a list of InitializerConfiguration. +type InitializerConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of InitializerConfiguration. + Items []InitializerConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Initializer describes the name and the failure policy of an initializer, and +// what resources it applies to. +type Initializer struct { + // Name is the identifier of the initializer. It will be added to the + // object that needs to be initialized. + // Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where + // "alwayspullimages" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Rules describes what resources/subresources the initializer cares about. + // The initializer cares about an operation if it matches _any_ Rule. + // Rule.Resources must not include subresources. + Rules []Rule `json:"rules,omitempty" protobuf:"bytes,2,rep,name=rules"` +} + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +type Rule struct { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. 
+ // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..69e4b7c64 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Initializer = map[string]string{ + "": "Initializer describes the name and the failure policy of an initializer, and what resources it applies to.", + "name": "Name is the identifier of the initializer. It will be added to the object that needs to be initialized. Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where \"alwayspullimages\" is the name of the webhook, and kubernetes.io is the name of the organization. Required", + "rules": "Rules describes what resources/subresources the initializer cares about. The initializer cares about an operation if it matches _any_ Rule. Rule.Resources must not include subresources.", +} + +func (Initializer) SwaggerDoc() map[string]string { + return map_Initializer +} + +var map_InitializerConfiguration = map[string]string{ + "": "InitializerConfiguration describes the configuration of initializers.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "initializers": "Initializers is a list of resources and their default initializers Order-sensitive. When merging multiple InitializerConfigurations, we sort the initializers from different InitializerConfigurations by the name of the InitializerConfigurations; the order of the initializers from the same InitializerConfiguration is preserved.", +} + +func (InitializerConfiguration) SwaggerDoc() map[string]string { + return map_InitializerConfiguration +} + +var map_InitializerConfigurationList = map[string]string{ + "": "InitializerConfigurationList is a list of InitializerConfiguration.", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of InitializerConfiguration.", +} + +func (InitializerConfigurationList) SwaggerDoc() map[string]string { + return map_InitializerConfigurationList +} + +var map_Rule = map[string]string{ + "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", + "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", +} + +func (Rule) SwaggerDoc() map[string]string { + return map_Rule +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..9f636b484 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,145 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Initializer) DeepCopyInto(out *Initializer) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]Rule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. +func (in *Initializer) DeepCopy() *Initializer { + if in == nil { + return nil + } + out := new(Initializer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
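With the generated DeepCopyInto/DeepCopy for Initializer complete above, a minimal sketch (not part of the vendored file) of why these helpers matter: DeepCopy duplicates the nested Rules slices, so a copied object can be mutated without aliasing the original. The initializer name and rule values below are made up for illustration.

package main

import (
	"fmt"

	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
)

func main() {
	orig := admissionregistrationv1alpha1.Initializer{
		Name: "example.initializer.kubernetes.io", // hypothetical name, illustration only
		Rules: []admissionregistrationv1alpha1.Rule{
			{APIGroups: []string{"*"}, APIVersions: []string{"*"}, Resources: []string{"pods"}},
		},
	}

	// DeepCopy also duplicates the Rules slice and its string slices,
	// so mutating the copy does not touch the original's backing arrays.
	cp := orig.DeepCopy()
	cp.Rules[0].Resources[0] = "deployments"

	fmt.Println(orig.Rules[0].Resources[0]) // still "pods"
	fmt.Println(cp.Rules[0].Resources[0])   // "deployments"
}
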
+func (in *InitializerConfiguration) DeepCopyInto(out *InitializerConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Initializers != nil { + in, out := &in.Initializers, &out.Initializers + *out = make([]Initializer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfiguration. +func (in *InitializerConfiguration) DeepCopy() *InitializerConfiguration { + if in == nil { + return nil + } + out := new(InitializerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InitializerConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitializerConfigurationList) DeepCopyInto(out *InitializerConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InitializerConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfigurationList. +func (in *InitializerConfigurationList) DeepCopy() *InitializerConfigurationList { + if in == nil { + return nil + } + out := new(InitializerConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InitializerConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rule) DeepCopyInto(out *Rule) { + *out = *in + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIVersions != nil { + in, out := &in.APIVersions, &out.APIVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. +func (in *Rule) DeepCopy() *Rule { + if in == nil { + return nil + } + out := new(Rule) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go new file mode 100644 index 000000000..afbb3d6d3 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// Package v1beta1 is the v1beta1 version of the API. +// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration +// InitializerConfiguration and validatingWebhookConfiguration is for the +// new dynamic admission controller configuration. +// +groupName=admissionregistration.k8s.io +package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1" diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go new file mode 100644 index 000000000..af35e1e25 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -0,0 +1,2150 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto + + It has these top-level messages: + MutatingWebhookConfiguration + MutatingWebhookConfigurationList + Rule + RuleWithOperations + ServiceReference + ValidatingWebhookConfiguration + ValidatingWebhookConfigurationList + Webhook + WebhookClientConfig +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
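The header above lists the nine top-level v1beta1 messages for which this file provides gogo-generated Marshal, Unmarshal, Size and String methods. A minimal round-trip sketch using the generated Rule marshaler follows; ordinarily these methods are driven by apimachinery's protobuf serializer rather than called directly, and the field values here are illustrative assumptions:

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	in := admissionregistrationv1beta1.Rule{
		APIGroups:   []string{"apps"},
		APIVersions: []string{"v1"},
		Resources:   []string{"deployments"},
	}

	// The generated marshaler writes the proto2 wire format directly,
	// without reflection.
	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	var out admissionregistrationv1beta1.Rule
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {APIGroups:[apps] APIVersions:[v1] Resources:[deployments]}
}
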
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } +func (*MutatingWebhookConfiguration) ProtoMessage() {} +func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } +func (*MutatingWebhookConfigurationList) ProtoMessage() {} +func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } +func (*RuleWithOperations) ProtoMessage() {} +func (*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } +func (*ValidatingWebhookConfiguration) ProtoMessage() {} +func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{5} +} + +func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } +func (*ValidatingWebhookConfigurationList) ProtoMessage() {} +func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{6} +} + +func (m *Webhook) Reset() { *m = Webhook{} } +func (*Webhook) ProtoMessage() {} +func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func init() { + proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") + proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") + proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1beta1.Rule") + proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.RuleWithOperations") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference") + proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration") + proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") + proto.RegisterType((*Webhook)(nil), "k8s.io.api.admissionregistration.v1beta1.Webhook") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") +} +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ 
= i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.Webhooks) > 0 { + for _, msg := range m.Webhooks { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Rule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Operations) > 0 { + for _, s := range m.Operations { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Rule.Size())) + n3, err := m.Rule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], 
m.Name) + if m.Path != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i += copy(dAtA[i:], *m.Path) + } + return i, nil +} + +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Webhooks) > 0 { + for _, msg := range m.Webhooks { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Webhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) + n6, err := m.ClientConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.FailurePolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i += copy(dAtA[i:], *m.FailurePolicy) + } + if m.NamespaceSelector != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) + n7, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) + n8, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.CABundle != nil { + dAtA[i] = 0x12 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i += copy(dAtA[i:], m.CABundle) + } + if m.URL != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i += copy(dAtA[i:], *m.URL) + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *MutatingWebhookConfiguration) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MutatingWebhookConfigurationList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Rule) Size() (n int) { + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RuleWithOperations) Size() (n int) { + var l int + _ = l + if len(m.Operations) > 0 { + for _, s := range m.Operations { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Rule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceReference) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Webhook) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != 
nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *MutatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Rule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Rule{`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `}`, + }, "") + return s +} +func (this *RuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuleWithOperations{`, + `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, + `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), 
"ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Webhook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Webhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, Webhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + 
} + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, Webhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Webhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Webhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := 
m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) + if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: 
negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 916 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0xcf, 0x6f, 0x1b, 0x45, + 0x14, 0xf6, 0xd6, 0x8e, 0x6c, 0x8f, 0x6d, 0xd1, 0x0c, 0x20, 0x99, 0xa8, 0xda, 0xb5, 0x7c, 0x40, + 0x96, 0x50, 0x76, 0x71, 0x8a, 0x10, 0x42, 0x20, 0x94, 0x8d, 0x54, 0x88, 0x94, 0xb4, 0x66, 0x02, + 0xad, 0x84, 0x38, 0x30, 0x5e, 0xbf, 0xd8, 0x83, 0xf7, 0x97, 0x66, 0x66, 0xdd, 0xe6, 0x86, 0xc4, + 0x3f, 0x80, 0xc4, 0x1f, 0xc1, 0x5f, 0xc1, 0x3d, 0x37, 0x7a, 0x41, 0xf4, 0xb4, 0x22, 0xcb, 0x99, + 0x03, 0xd7, 0x9e, 0xd0, 0xce, 0xae, 0xbd, 0x76, 0x1c, 0xa7, 0xee, 0x85, 0x03, 0x37, 0xcf, 0xf7, + 0xde, 0xf7, 0xbd, 0xf7, 0x3d, 0xbf, 0xb7, 0xe8, 0xcb, 0xe9, 0x47, 0xc2, 0x64, 0x81, 0x35, 0x8d, + 0x86, 0xc0, 0x7d, 0x90, 0x20, 0xac, 0x19, 0xf8, 0xa3, 0x80, 0x5b, 0x79, 0x80, 0x86, 0xcc, 0xa2, + 0x23, 0x8f, 0x09, 0xc1, 0x02, 0x9f, 0xc3, 0x98, 0x09, 0xc9, 0xa9, 0x64, 0x81, 0x6f, 0xcd, 0xfa, + 0x43, 0x90, 0xb4, 0x6f, 0x8d, 0xc1, 0x07, 0x4e, 0x25, 0x8c, 0xcc, 0x90, 0x07, 0x32, 0xc0, 0xbd, + 0x8c, 0x69, 0xd2, 0x90, 0x99, 0x37, 0x32, 0xcd, 0x9c, 0xb9, 0xb7, 0x3f, 0x66, 0x72, 0x12, 0x0d, + 0x4d, 0x27, 0xf0, 0xac, 0x71, 0x30, 0x0e, 0x2c, 0x25, 0x30, 0x8c, 0xce, 0xd5, 0x4b, 0x3d, 0xd4, + 0xaf, 0x4c, 0x78, 0xaf, 0xbb, 0xd4, 0x92, 0x13, 0x70, 0xb0, 0x66, 0x6b, 0xc5, 0xf7, 0x4e, 0x8b, + 0x1c, 0x78, 0x26, 0xc1, 0x4f, 0x6b, 0x8b, 0x7d, 0x1a, 0x32, 0x01, 0x7c, 0x06, 0xdc, 0x0a, 0xa7, + 0xe3, 0x34, 0x26, 0x56, 0x13, 0x36, 0x79, 0xd9, 0xfb, 0xa0, 0x90, 0xf3, 0xa8, 0x33, 0x61, 0x3e, + 0xf0, 0x8b, 0x42, 0xc3, 0x03, 0x49, 0x6f, 0x6a, 0xc2, 0xda, 0xc4, 0xe2, 0x91, 0x2f, 0x99, 0x07, + 0x6b, 0x84, 0x0f, 0x5f, 0x45, 0x10, 0xce, 0x04, 0x3c, 0xba, 0xc6, 0xbb, 0xbf, 0x89, 0x17, 0x49, + 0xe6, 0x5a, 0xcc, 0x97, 0x42, 0xf2, 0xeb, 0xa4, 0xee, 0xef, 0x1a, 0xba, 0x77, 0x1a, 0x49, 0x2a, + 0x99, 0x3f, 0x7e, 0x02, 0xc3, 0x49, 0x10, 0x4c, 0x8f, 0x02, 0xff, 0x9c, 0x8d, 0xa3, 0xec, 0xef, + 0xc1, 0xdf, 0xa1, 0x5a, 0xea, 0x6c, 0x44, 0x25, 0x6d, 0x6b, 0x1d, 0xad, 0xd7, 0x38, 0x78, 0xdf, + 0x2c, 0xfe, 0xd3, 0x45, 0x21, 0x33, 0x9c, 0x8e, 0x53, 0x40, 0x98, 0x69, 0xb6, 0x39, 0xeb, 0x9b, + 0x8f, 0x86, 0xdf, 0x83, 0x23, 0x4f, 0x41, 0x52, 0x1b, 0x5f, 0xc6, 0x46, 0x29, 0x89, 0x0d, 0x54, + 0x60, 0x64, 0xa1, 0x8a, 0xcf, 0x50, 0x2d, 0xaf, 0x2c, 0xda, 0x77, 0x3a, 0xe5, 0x5e, 0xe3, 0xa0, + 0x6f, 0x6e, 0xbb, 0x35, 0x66, 0xce, 0xb4, 0x2b, 0x69, 0x09, 0x52, 0x7b, 0x9a, 0x0b, 0x75, 0xff, + 0xd6, 0x50, 0xe7, 0x36, 0x5f, 0x27, 0x4c, 0x48, 0xfc, 0xed, 0x9a, 0x37, 0x73, 0x3b, 0x6f, 0x29, + 0x5b, 0x39, 0xbb, 0x9b, 0x3b, 0xab, 0xcd, 0x91, 0x25, 0x5f, 0x53, 0xb4, 0xc3, 0x24, 0x78, 0x73, + 0x53, 0x0f, 0xb6, 0x37, 0x75, 0x5b, 0xe3, 0x76, 0x2b, 0x2f, 0xb9, 0x73, 0x9c, 0x8a, 0x93, 0xac, + 0x46, 0xf7, 0x67, 0x0d, 0x55, 0x48, 0xe4, 0x02, 0x7e, 0x0f, 0xd5, 0x69, 0xc8, 0x3e, 0xe7, 0x41, + 0x14, 0x8a, 0xb6, 0xd6, 0x29, 0xf7, 0xea, 0x76, 0x2b, 0x89, 0x8d, 0xfa, 0xe1, 0xe0, 0x38, 0x03, + 0x49, 0x11, 0xc7, 0x7d, 0xd4, 0xa0, 0x21, 0x7b, 0x0c, 0x5c, 0x2d, 0xbe, 0x6a, 0xb4, 0x6e, 0xbf, + 0x91, 0xc4, 0x46, 0xe3, 0x70, 0x70, 0x3c, 0x87, 0xc9, 0x72, 0x4e, 0xaa, 0xcf, 0x41, 0x04, 0x11, + 0x77, 0x40, 0xb4, 0xcb, 0x85, 0x3e, 0x99, 0x83, 0xa4, 0x88, 0x77, 0x7f, 0xd1, 0x10, 0x4e, 0xbb, + 0x7a, 0xc2, 0xe4, 0xe4, 0x51, 0x08, 0x99, 0x03, 0x81, 0x3f, 
0x43, 0x28, 0x58, 0xbc, 0xf2, 0x26, + 0x0d, 0xb5, 0x1f, 0x0b, 0xf4, 0x65, 0x6c, 0xb4, 0x16, 0xaf, 0xaf, 0x2e, 0x42, 0x20, 0x4b, 0x14, + 0x3c, 0x40, 0x15, 0x1e, 0xb9, 0xd0, 0xbe, 0xb3, 0xf6, 0xa7, 0xbd, 0x62, 0xb2, 0x69, 0x33, 0x76, + 0x33, 0x9f, 0xa0, 0x1a, 0x18, 0x51, 0x4a, 0xdd, 0x1f, 0x35, 0x74, 0xf7, 0x0c, 0xf8, 0x8c, 0x39, + 0x40, 0xe0, 0x1c, 0x38, 0xf8, 0x0e, 0x60, 0x0b, 0xd5, 0x7d, 0xea, 0x81, 0x08, 0xa9, 0x03, 0x6a, + 0x41, 0xea, 0xf6, 0x6e, 0xce, 0xad, 0x3f, 0x9c, 0x07, 0x48, 0x91, 0x83, 0x3b, 0xa8, 0x92, 0x3e, + 0x54, 0x5f, 0xf5, 0xa2, 0x4e, 0x9a, 0x4b, 0x54, 0x04, 0xdf, 0x43, 0x95, 0x90, 0xca, 0x49, 0xbb, + 0xac, 0x32, 0x6a, 0x69, 0x74, 0x40, 0xe5, 0x84, 0x28, 0xb4, 0xfb, 0x87, 0x86, 0xf4, 0xc7, 0xd4, + 0x65, 0xa3, 0xff, 0xdd, 0x3d, 0xfe, 0xa3, 0xa1, 0xee, 0xed, 0xce, 0xfe, 0x83, 0x8b, 0xf4, 0x56, + 0x2f, 0xf2, 0x8b, 0xed, 0x6d, 0xdd, 0xde, 0xfa, 0x86, 0x9b, 0xfc, 0xad, 0x8c, 0xaa, 0x79, 0xfa, + 0x62, 0x33, 0xb4, 0x8d, 0x9b, 0xf1, 0x14, 0x35, 0x1d, 0x97, 0x81, 0x2f, 0x33, 0xe9, 0x7c, 0xb7, + 0x3f, 0x7d, 0xed, 0xd1, 0x1f, 0x2d, 0x89, 0xd8, 0x6f, 0xe5, 0x85, 0x9a, 0xcb, 0x28, 0x59, 0x29, + 0x84, 0x29, 0xda, 0x49, 0x4f, 0x20, 0xbb, 0xe6, 0xc6, 0xc1, 0x27, 0xaf, 0x77, 0x4d, 0xab, 0xa7, + 0x5d, 0x4c, 0x22, 0x8d, 0x09, 0x92, 0x29, 0xe3, 0x13, 0xd4, 0x3a, 0xa7, 0xcc, 0x8d, 0x38, 0x0c, + 0x02, 0x97, 0x39, 0x17, 0xed, 0x8a, 0x1a, 0xc3, 0xbb, 0x49, 0x6c, 0xb4, 0x1e, 0x2c, 0x07, 0x5e, + 0xc6, 0xc6, 0xee, 0x0a, 0xa0, 0x4e, 0x7f, 0x95, 0x8c, 0x9f, 0xa1, 0xdd, 0xc5, 0xc9, 0x9d, 0x81, + 0x0b, 0x8e, 0x0c, 0x78, 0x7b, 0x47, 0x8d, 0xeb, 0xfe, 0x96, 0xdb, 0x42, 0x87, 0xe0, 0xce, 0xa9, + 0xf6, 0xdb, 0x49, 0x6c, 0xec, 0x3e, 0xbc, 0xae, 0x48, 0xd6, 0x8b, 0x74, 0x7f, 0xd5, 0xd0, 0x9b, + 0x37, 0x8c, 0x19, 0x53, 0x54, 0x15, 0xd9, 0xc7, 0x23, 0xdf, 0xda, 0x8f, 0xb7, 0x1f, 0xe2, 0xf5, + 0xaf, 0x8e, 0xdd, 0x48, 0x62, 0xa3, 0x3a, 0x47, 0xe7, 0xba, 0xb8, 0x87, 0x6a, 0x0e, 0xb5, 0x23, + 0x7f, 0x94, 0x7f, 0xf6, 0x9a, 0x76, 0x33, 0xdd, 0xf2, 0xa3, 0xc3, 0x0c, 0x23, 0x8b, 0x28, 0x7e, + 0x07, 0x95, 0x23, 0xee, 0xe6, 0x5f, 0x98, 0x6a, 0x12, 0x1b, 0xe5, 0xaf, 0xc9, 0x09, 0x49, 0x31, + 0x7b, 0xff, 0xf2, 0x4a, 0x2f, 0x3d, 0xbf, 0xd2, 0x4b, 0x2f, 0xae, 0xf4, 0xd2, 0x0f, 0x89, 0xae, + 0x5d, 0x26, 0xba, 0xf6, 0x3c, 0xd1, 0xb5, 0x17, 0x89, 0xae, 0xfd, 0x99, 0xe8, 0xda, 0x4f, 0x7f, + 0xe9, 0xa5, 0x6f, 0xaa, 0x79, 0x6b, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x78, 0x57, 0x76, 0x28, + 0x10, 0x0a, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto new file mode 100644 index 000000000..118be06f8 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -0,0 +1,263 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
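The Unmarshal and skipGenerated functions above repeat the same shift-and-accumulate varint loop at every field boundary. A minimal standalone sketch of that decoding pattern — kept separate from the vendored file, with a made-up helper name and made-up input bytes — could look like this:

    package main

    import (
        "errors"
        "fmt"
    )

    // decodeVarint reads one base-128 varint: seven payload bits per byte,
    // with the high bit set on every byte except the last.
    func decodeVarint(data []byte) (value uint64, n int, err error) {
        for shift := uint(0); ; shift += 7 {
            if shift >= 64 {
                return 0, 0, errors.New("varint overflows uint64")
            }
            if n >= len(data) {
                return 0, 0, errors.New("unexpected end of input")
            }
            b := data[n]
            n++
            value |= (uint64(b) & 0x7F) << shift
            if b < 0x80 {
                return value, n, nil
            }
        }
    }

    func main() {
        // 0xAC 0x02 encodes 300; for tag bytes the generated code then splits
        // the decoded value into fieldNum (value >> 3) and wireType (value & 0x7).
        v, n, err := decodeVarint([]byte{0xAC, 0x02})
        fmt.Println(v, n, err) // 300 2 <nil>
    }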
+ +syntax = 'proto2'; + +package k8s.io.api.admissionregistration.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +message MutatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated Webhook Webhooks = 2; +} + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +message MutatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of MutatingWebhookConfiguration. + repeated MutatingWebhookConfiguration items = 2; +} + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +message Rule { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiGroups = 1; + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiVersions = 2; + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + repeated string resources = 3; +} + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +message RuleWithOperations { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string operations = 1; + + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + optional Rule rule = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // `namespace` is the namespace of the service. + // Required + optional string namespace = 1; + + // `name` is the name of the service. 
+ // Required + optional string name = 2; + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + optional string path = 3; +} + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +message ValidatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated Webhook Webhooks = 2; +} + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +message ValidatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingWebhookConfiguration. + repeated ValidatingWebhookConfiguration items = 2; +} + +// Webhook describes an admission webhook and the resources and operations it applies to. +message Webhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + optional string failurePolicy = 4; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. 
+ // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; +} + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +message WebhookClientConfig { + // `url` gives the location of the webhook, in standard URL form + // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + optional string url = 3; + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // Port 443 will be used if it is open, otherwise it is an error. + // + // +optional + optional ServiceReference service = 1; + + // `caBundle` is a PEM encoded CA bundle which will be used to validate + // the webhook's server certificate. + // Required. + optional bytes caBundle = 2; +} + diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go new file mode 100644 index 000000000..d126da9fb --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "admissionregistration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ValidatingWebhookConfiguration{}, + &ValidatingWebhookConfigurationList{}, + &MutatingWebhookConfiguration{}, + &MutatingWebhookConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go new file mode 100644 index 000000000..f209e7acc --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -0,0 +1,281 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +type Rule struct { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. 
+ // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` +} + +type FailurePolicyType string + +const ( + // Ignore means that an error calling the webhook is ignored. + Ignore FailurePolicyType = "Ignore" + // Fail means that an error calling the webhook causes the admission to fail. + Fail FailurePolicyType = "Fail" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +type ValidatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +type ValidatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingWebhookConfiguration. + Items []ValidatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +type MutatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +type MutatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of MutatingWebhookConfiguration. 
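The register.go added earlier in this patch exposes the conventional SchemeBuilder/AddToScheme pair for this API group. A rough usage sketch, assuming the vendored import path k8s.io/api/admissionregistration/v1beta1 and the apimachinery runtime package that register.go itself imports:

    package main

    import (
        "fmt"

        admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
        "k8s.io/apimachinery/pkg/runtime"
    )

    func main() {
        scheme := runtime.NewScheme()
        // AddToScheme registers the webhook configuration kinds (and their List
        // types) under admissionregistration.k8s.io/v1beta1.
        if err := admissionregistrationv1beta1.AddToScheme(scheme); err != nil {
            panic(err)
        }
        gvk := admissionregistrationv1beta1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration")
        fmt.Println(scheme.Recognizes(gvk)) // expected: true
    }

client-go performs the same registration for every vendored group when its clientset scheme is built, which is how decoded objects of these kinds get mapped back to the Go types below.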
+ Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Webhook describes an admission webhook and the resources and operations it applies to. +type Webhook struct { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` +} + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +type RuleWithOperations struct { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. 
+ Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"` +} + +type OperationType string + +// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. +const ( + OperationAll OperationType = "*" + Create OperationType = "CREATE" + Update OperationType = "UPDATE" + Delete OperationType = "DELETE" + Connect OperationType = "CONNECT" +) + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // Port 443 will be used if it is open, otherwise it is an error. + // + // +optional + Service *ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"` + + // `caBundle` is a PEM encoded CA bundle which will be used to validate + // the webhook's server certificate. + // Required. + CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // `path` is an optional URL path which will be sent in any request to + // this service. 
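Taken together, the Go types above are typically populated as in the following sketch; every concrete value here (webhook name, namespace, service name, CA bundle) is made up for illustration:

    package main

    import (
        "fmt"

        admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        failurePolicy := admissionregistrationv1beta1.Fail
        cfg := admissionregistrationv1beta1.ValidatingWebhookConfiguration{
            ObjectMeta: metav1.ObjectMeta{Name: "example-webhook-config"},
            Webhooks: []admissionregistrationv1beta1.Webhook{{
                // Name must be fully qualified, e.g. "<plugin>.<organization>".
                Name: "pods.example.com",
                ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
                    // Exactly one of Service or URL may be set.
                    Service: &admissionregistrationv1beta1.ServiceReference{
                        Namespace: "default",
                        Name:      "example-webhook",
                    },
                    CABundle: []byte("-----BEGIN CERTIFICATE-----\n..."),
                },
                Rules: []admissionregistrationv1beta1.RuleWithOperations{{
                    Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create},
                    Rule: admissionregistrationv1beta1.Rule{
                        APIGroups:   []string{""},
                        APIVersions: []string{"v1"},
                        Resources:   []string{"pods"},
                    },
                }},
                FailurePolicy: &failurePolicy,
            }},
        }
        fmt.Println(cfg.Webhooks[0].Name)
    }

FailurePolicy is a pointer so that an unset field can fall back to the documented Ignore default instead of being indistinguishable from an explicit value.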
+ // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..e90bdc911 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,125 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MutatingWebhookConfiguration = map[string]string{ + "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfiguration +} + +var map_MutatingWebhookConfigurationList = map[string]string{ + "": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of MutatingWebhookConfiguration.", +} + +func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfigurationList +} + +var map_Rule = map[string]string{ + "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", + "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. 
'*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", +} + +func (Rule) SwaggerDoc() map[string]string { + return map_Rule +} + +var map_RuleWithOperations = map[string]string{ + "": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", + "operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. Required.", +} + +func (RuleWithOperations) SwaggerDoc() map[string]string { + return map_RuleWithOperations +} + +var map_ServiceReference = map[string]string{ + "": "ServiceReference holds a reference to Service.legacy.k8s.io", + "namespace": "`namespace` is the namespace of the service. Required", + "name": "`name` is the name of the service. Required", + "path": "`path` is an optional URL path which will be sent in any request to this service.", +} + +func (ServiceReference) SwaggerDoc() map[string]string { + return map_ServiceReference +} + +var map_ValidatingWebhookConfiguration = map[string]string{ + "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfiguration +} + +var map_ValidatingWebhookConfigurationList = map[string]string{ + "": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of ValidatingWebhookConfiguration.", +} + +func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfigurationList +} + +var map_Webhook = map[string]string{ + "": "Webhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. 
Defaults to Ignore.", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", +} + +func (Webhook) SwaggerDoc() map[string]string { + return map_Webhook +} + +var map_WebhookClientConfig = map[string]string{ + "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", + "url": "`url` gives the location of the webhook, in standard URL form (`[scheme://]host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. Required.", +} + +func (WebhookClientConfig) SwaggerDoc() map[string]string { + return map_WebhookClientConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..d8c1e4f62 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,317 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]Webhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfiguration. +func (in *MutatingWebhookConfiguration) DeepCopy() *MutatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(MutatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfigurationList. +func (in *MutatingWebhookConfigurationList) DeepCopy() *MutatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(MutatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rule) DeepCopyInto(out *Rule) { + *out = *in + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIVersions != nil { + in, out := &in.APIVersions, &out.APIVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. 
+func (in *Rule) DeepCopy() *Rule { + if in == nil { + return nil + } + out := new(Rule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { + *out = *in + if in.Operations != nil { + in, out := &in.Operations, &out.Operations + *out = make([]OperationType, len(*in)) + copy(*out, *in) + } + in.Rule.DeepCopyInto(&out.Rule) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. +func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { + if in == nil { + return nil + } + out := new(RuleWithOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]Webhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfiguration. +func (in *ValidatingWebhookConfiguration) DeepCopy() *ValidatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfigurationList. +func (in *ValidatingWebhookConfigurationList) DeepCopy() *ValidatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Webhook) DeepCopyInto(out *Webhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + if *in == nil { + *out = nil + } else { + *out = new(FailurePolicyType) + **out = **in + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. +func (in *Webhook) DeepCopy() *Webhook { + if in == nil { + return nil + } + out := new(Webhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.Service != nil { + in, out := &in.Service, &out.Service + if *in == nil { + *out = nil + } else { + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go similarity index 85% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1/doc.go rename to vendor/k8s.io/api/apps/v1/doc.go index 8140e47c5..1d66c2223 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/doc.go +++ b/vendor/k8s.io/api/apps/v1/doc.go @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +groupName=authentication.k8s.io -package v1 +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +package v1 // import "k8s.io/api/apps/v1" diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go new file mode 100644 index 000000000..1823a8440 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -0,0 +1,6945 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
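The zz_generated.deepcopy.go helpers that close out the admissionregistration package are what make these structs safe to share: DeepCopy duplicates every nested pointer and slice, and DeepCopyObject is what satisfies runtime.Object. A small sketch of the intent, with made-up field values:

    package main

    import (
        "fmt"

        admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        original := &admissionregistrationv1beta1.ValidatingWebhookConfiguration{
            ObjectMeta: metav1.ObjectMeta{Name: "example"},
            Webhooks:   []admissionregistrationv1beta1.Webhook{{Name: "pods.example.com"}},
        }

        // Mutations on the copy must not leak into the original, e.g. when the
        // original was read from a shared informer cache.
        ignore := admissionregistrationv1beta1.Ignore
        modified := original.DeepCopy()
        modified.Webhooks[0].FailurePolicy = &ignore

        fmt.Println(original.Webhooks[0].FailurePolicy == nil) // expected: true
    }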
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto + + It has these top-level messages: + ControllerRevision + ControllerRevisionList + DaemonSet + DaemonSetCondition + DaemonSetList + DaemonSetSpec + DaemonSetStatus + DaemonSetUpdateStrategy + Deployment + DeploymentCondition + DeploymentList + DeploymentSpec + DeploymentStatus + DeploymentStrategy + ReplicaSet + ReplicaSetCondition + ReplicaSetList + ReplicaSetSpec + ReplicaSetStatus + RollingUpdateDaemonSet + RollingUpdateDeployment + RollingUpdateStatefulSetStrategy + StatefulSet + StatefulSetCondition + StatefulSetList + StatefulSetSpec + StatefulSetStatus + StatefulSetUpdateStrategy +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m 
*Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{20} +} + +func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } +func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} +func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{21} +} + +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func 
(*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } + +func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } +func (*StatefulSetUpdateStrategy) ProtoMessage() {} +func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{27} +} + +func init() { + proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1.ControllerRevision") + proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1.ControllerRevisionList") + proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1.DaemonSetCondition") + proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1.DaemonSetList") + proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1.DaemonSetSpec") + proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1.DaemonSetStatus") + proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.DaemonSetUpdateStrategy") + proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1.DeploymentList") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1.DeploymentStrategy") + proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.apps.v1.ReplicaSet") + proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.apps.v1.ReplicaSetCondition") + proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.apps.v1.ReplicaSetList") + proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.apps.v1.ReplicaSetSpec") + proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.apps.v1.ReplicaSetStatus") + proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.apps.v1.RollingUpdateDaemonSet") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1.RollingUpdateDeployment") + proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1.RollingUpdateStatefulSetStrategy") + proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1.StatefulSet") + proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1.StatefulSetCondition") + proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1.StatefulSetList") + proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1.StatefulSetSpec") + proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus") + proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy") +} +func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var 
l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) + n2, err := m.Data.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + return i, nil +} + +func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil +} + +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n8, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += 
n8 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Selector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n9, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n10, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + return i, nil +} + +func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} + +func (m *Deployment) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n14, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n15, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + return i, nil +} + +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *DeploymentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n18, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n19, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n20, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + dAtA[i] = 0x22 + 
i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + dAtA[i] = 0x38 + i++ + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.ProgressDeadlineSeconds != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + } + return i, nil +} + +func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + return i, nil +} + +func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + return i, nil +} + +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n24, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n25, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + return i, nil +} + +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n27, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n28, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n29, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + return i, nil +} + +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + return i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if m.MaxSurge != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) + n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + return i, nil +} + +func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Partition != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + } + return i, nil +} + +func (m *StatefulSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n34, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n35, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + return i, nil +} + +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n36, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n37, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n38, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n39, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + if len(m.VolumeClaimTemplates) > 0 { + for _, msg := range m.VolumeClaimTemplates { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i += copy(dAtA[i:], m.ServiceName) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i += copy(dAtA[i:], m.PodManagementPolicy) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n40, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + return i, nil +} + +func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i += copy(dAtA[i:], m.CurrentRevision) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i += copy(dAtA[i:], m.UpdateRevision) + if 
m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n41, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ControllerRevision) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *ControllerRevisionList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSetSpec) Size() (n int) { + var l int + _ = l + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n 
+= 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + return n +} + +func (m *DaemonSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberAvailable)) + n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSetUpdateStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Deployment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n +} + +func (m *DeploymentStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicaSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicaSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicaSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RollingUpdateDaemonSet) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingUpdateDeployment) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + var l int + _ = l + if m.Partition != nil { + n += 1 + sovGenerated(uint64(*m.Partition)) + } + return n +} + +func (m *StatefulSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeClaimTemplates) > 0 { + for _, e := range m.VolumeClaimTemplates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodManagementPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + return n +} + +func (m *StatefulSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + l = len(m.CurrentRevision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UpdateRevision) + n += 1 + l + sovGenerated(uint64(l)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetUpdateStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ControllerRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRevision{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerRevisionList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRevisionList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` 
+ strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetStatus{`, + `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, + `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, + `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, + `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, + `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, + `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Deployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), 
"ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDaemonSet{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateStatefulSetStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`, + `Partition:` + valueToStringGenerated(this.Partition) + `,`, + `}`, + }, "") + return 
s +} +func (this *StatefulSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, + `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + 
}, "") + return s +} +func (this *StatefulSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ControllerRevision) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevisionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevisionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ControllerRevision{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d 
for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) + } + m.CurrentNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) + } + m.NumberMisscheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) + } + m.DesiredNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) + } + m.NumberReady = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberReady |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) + } + m.UpdatedNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) + } + m.NumberAvailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberAvailable |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) + } + m.NumberUnavailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberUnavailable |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDaemonSet{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Deployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } 
+ } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Partition = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StatefulSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodManagementPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodManagementPolicy = PodManagementPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 
{ + return fmt.Errorf("proto: wrong wireType = %d for field CurrentRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UpdateRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StatefulSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetUpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateStatefulSetStrategy{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + 
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 2051 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, + 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, + 0x60, 0xe3, 0xcd, 0x66, 0x7b, 0xf0, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, + 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0x35, 0x33, 0xb5, 0xe3, 0x8e, 0xfb, 0x4b, 0xdd, 0xd5, + 0xc3, 0x8e, 0xb8, 0x20, 0x24, 0x38, 0x71, 0xe0, 0x3f, 0x41, 0x08, 0xc1, 0x0d, 0x45, 0x88, 0xcb, + 0x5e, 0x90, 0x22, 0x2e, 0xe4, 0x64, 0xb1, 0x93, 0x13, 0x42, 0x39, 0x72, 0xc9, 0x05, 0x54, 0xd5, + 0xd5, 0xdf, 0xd5, 0x9e, 0xb1, 0x37, 0xeb, 0xa0, 0x68, 0x6f, 0x9e, 0xaa, 0xf7, 0x7b, 0xfd, 0xab, + 0xaa, 0x5f, 0xd5, 0x7b, 0x5d, 0x6d, 0x70, 0xef, 0xf8, 0xdb, 0xae, 0xaa, 0x59, 0xcd, 0x63, 0xaf, + 0x4b, 0x1c, 0x93, 0x50, 0xe2, 0x36, 0x87, 0xc4, 0xec, 0x5b, 0x4e, 0x53, 0x74, 0x60, 0x5b, 0x6b, + 0x62, 0xdb, 0x76, 0x9b, 0xc3, 0xcd, 0xe6, 0x80, 0x98, 0xc4, 0xc1, 0x94, 0xf4, 0x55, 0xdb, 0xb1, + 0xa8, 0x05, 0xa1, 0x8f, 0x51, 0xb1, 0xad, 0xa9, 0x0c, 0xa3, 0x0e, 0x37, 0xaf, 0xde, 0x1e, 0x68, + 0xf4, 0xc8, 0xeb, 0xaa, 0x3d, 0xcb, 0x68, 0x0e, 0xac, 0x81, 0xd5, 0xe4, 0xd0, 0xae, 0xf7, 0x88, + 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x3e, 0xc5, 0xd5, 0x46, 0xec, 0x31, 0x3d, 0xcb, 0x21, 0x92, 0xc7, + 0x5c, 0xbd, 0x19, 0xc3, 0xd8, 0x96, 0xae, 0xf5, 0x46, 0xcd, 0xe1, 0x66, 0x97, 0x50, 0x9c, 0x85, + 0xde, 0x8d, 0xa0, 0x06, 0xee, 0x1d, 0x69, 0x26, 0x71, 0x46, 0x4d, 0xfb, 0x78, 0xc0, 0x1a, 0xdc, + 0xa6, 0x41, 0x28, 0x96, 0x3d, 0xa0, 0x99, 0x17, 0xe5, 0x78, 0x26, 0xd5, 0x0c, 0x92, 0x09, 0x78, + 0x73, 0x52, 0x80, 0xdb, 0x3b, 0x22, 0x06, 0xce, 0xc4, 0xbd, 0x9e, 0x17, 0xe7, 0x51, 0x4d, 0x6f, + 0x6a, 0x26, 0x75, 0xa9, 0x93, 0x0e, 0x6a, 0xfc, 0x47, 0x01, 0xb0, 0x6d, 0x99, 0xd4, 0xb1, 0x74, + 0x9d, 0x38, 0x88, 0x0c, 0x35, 0x57, 0xb3, 0x4c, 0xf8, 0x53, 0x50, 0x61, 0xe3, 0xe9, 0x63, 0x8a, + 0xab, 0xca, 0x75, 0x65, 0x63, 0xe1, 0xce, 0xb7, 0xd4, 0x68, 0x3d, 0x42, 0x7a, 0xd5, 0x3e, 0x1e, + 0xb0, 0x06, 0x57, 0x65, 0x68, 0x75, 0xb8, 0xa9, 0xee, 0x75, 0x3f, 0x20, 0x3d, 0xda, 0x21, 0x14, + 0xb7, 0xe0, 0x93, 0x93, 0xfa, 0xcc, 0xf8, 0xa4, 0x0e, 0xa2, 0x36, 0x14, 0xb2, 0xc2, 0x3d, 0x50, + 0xe2, 0xec, 0x05, 0xce, 0x7e, 0x3b, 0x97, 0x5d, 0x0c, 0x5a, 0x45, 0xf8, 0x67, 0x6f, 0x3f, 0xa6, + 0xc4, 0x64, 0xe9, 0xb5, 0x2e, 0x09, 0xea, 0xd2, 0x36, 0xa6, 0x18, 0x71, 0x22, 0xf8, 0x1a, 0xa8, + 0x38, 0x22, 0xfd, 0x6a, 0xf1, 0xba, 0xb2, 0x51, 0x6c, 0x5d, 0x16, 0xa8, 0x4a, 0x30, 0x2c, 0x14, + 0x22, 0x1a, 0x7f, 0x55, 0xc0, 0x5a, 0x76, 0xdc, 0x3b, 0x9a, 0x4b, 0xe1, 0xfb, 0x99, 0xb1, 0xab, + 0xd3, 0x8d, 0x9d, 0x45, 0xf3, 0x91, 0x87, 0x0f, 0x0e, 0x5a, 0x62, 0xe3, 0x7e, 0x0f, 0x94, 0x35, + 0x4a, 0x0c, 0xb7, 0x5a, 0xb8, 0x5e, 0xdc, 0x58, 0xb8, 0x73, 0x43, 0xcd, 0x96, 0xb9, 0x9a, 0x4d, + 0xac, 0xb5, 0x28, 0x28, 0xcb, 0xef, 0xb2, 0x60, 0xe4, 0x73, 0x34, 0xfe, 0xab, 0x80, 0xf9, 0x6d, + 0x4c, 0x0c, 0xcb, 0x3c, 0x20, 0xf4, 0x02, 0x16, 0xad, 0x0d, 0x4a, 0xae, 0x4d, 0x7a, 0x62, 0xd1, + 0xbe, 0x26, 0xcb, 0x3d, 0x4c, 0xe7, 0xc0, 0x26, 0xbd, 0x68, 0xa1, 0xd8, 0x2f, 0xc4, 0x83, 0xe1, + 0x7b, 0x60, 0xd6, 0xa5, 0x98, 0x7a, 0x2e, 0x5f, 0xa6, 0x85, 0x3b, 0x5f, 0x3f, 0x9d, 0x86, 0x43, + 0x5b, 0x4b, 0x82, 0x68, 0xd6, 0xff, 0x8d, 0x04, 0x45, 0xe3, 0x5f, 0x05, 0x00, 0x43, 0x6c, 0xdb, + 0x32, 0xfb, 0x1a, 0x65, 0xf5, 0xfb, 0x16, 0x28, 0xd1, 0x91, 0x4d, 0xf8, 0x34, 0xcc, 0xb7, 0x6e, + 0x04, 0x59, 0xdc, 
0x1f, 0xd9, 0xe4, 0xb3, 0x93, 0xfa, 0x5a, 0x36, 0x82, 0xf5, 0x20, 0x1e, 0x03, + 0x77, 0xc2, 0xfc, 0x0a, 0x3c, 0xfa, 0x6e, 0xf2, 0xd1, 0x9f, 0x9d, 0xd4, 0x25, 0xe7, 0x8a, 0x1a, + 0x32, 0x25, 0x13, 0x84, 0x43, 0x00, 0x75, 0xec, 0xd2, 0xfb, 0x0e, 0x36, 0x5d, 0xff, 0x49, 0x9a, + 0x41, 0xc4, 0xc8, 0x5f, 0x9d, 0x6e, 0x79, 0x58, 0x44, 0xeb, 0xaa, 0xc8, 0x02, 0xee, 0x64, 0xd8, + 0x90, 0xe4, 0x09, 0xf0, 0x06, 0x98, 0x75, 0x08, 0x76, 0x2d, 0xb3, 0x5a, 0xe2, 0xa3, 0x08, 0x27, + 0x10, 0xf1, 0x56, 0x24, 0x7a, 0xe1, 0x4d, 0x30, 0x67, 0x10, 0xd7, 0xc5, 0x03, 0x52, 0x2d, 0x73, + 0xe0, 0xb2, 0x00, 0xce, 0x75, 0xfc, 0x66, 0x14, 0xf4, 0x37, 0x7e, 0xaf, 0x80, 0xc5, 0x70, 0xe6, + 0x2e, 0x60, 0xab, 0xb4, 0x92, 0x5b, 0xe5, 0xe5, 0x53, 0xeb, 0x24, 0x67, 0x87, 0x7c, 0x58, 0x8c, + 0xe5, 0xcc, 0x8a, 0x10, 0xfe, 0x04, 0x54, 0x5c, 0xa2, 0x93, 0x1e, 0xb5, 0x1c, 0x91, 0xf3, 0xeb, + 0x53, 0xe6, 0x8c, 0xbb, 0x44, 0x3f, 0x10, 0xa1, 0xad, 0x4b, 0x2c, 0xe9, 0xe0, 0x17, 0x0a, 0x29, + 0xe1, 0x8f, 0x40, 0x85, 0x12, 0xc3, 0xd6, 0x31, 0x25, 0x62, 0x9b, 0x24, 0xea, 0x9b, 0x95, 0x0b, + 0x23, 0xdb, 0xb7, 0xfa, 0xf7, 0x05, 0x8c, 0x6f, 0x94, 0x70, 0x1e, 0x82, 0x56, 0x14, 0xd2, 0xc0, + 0x63, 0xb0, 0xe4, 0xd9, 0x7d, 0x86, 0xa4, 0xec, 0xe8, 0x1e, 0x8c, 0x44, 0xf9, 0xdc, 0x3a, 0x75, + 0x42, 0x0e, 0x13, 0x21, 0xad, 0x35, 0xf1, 0x80, 0xa5, 0x64, 0x3b, 0x4a, 0x51, 0xc3, 0x2d, 0xb0, + 0x6c, 0x68, 0x26, 0x22, 0xb8, 0x3f, 0x3a, 0x20, 0x3d, 0xcb, 0xec, 0xbb, 0xbc, 0x80, 0xca, 0xad, + 0x75, 0x41, 0xb0, 0xdc, 0x49, 0x76, 0xa3, 0x34, 0x1e, 0xee, 0x80, 0xd5, 0xe0, 0x9c, 0xfd, 0x81, + 0xe6, 0x52, 0xcb, 0x19, 0xed, 0x68, 0x86, 0x46, 0xab, 0xb3, 0x9c, 0xa7, 0x3a, 0x3e, 0xa9, 0xaf, + 0x22, 0x49, 0x3f, 0x92, 0x46, 0x35, 0x7e, 0x33, 0x0b, 0x96, 0x53, 0xa7, 0x01, 0x7c, 0x00, 0xd6, + 0x7a, 0x9e, 0xe3, 0x10, 0x93, 0xee, 0x7a, 0x46, 0x97, 0x38, 0x07, 0xbd, 0x23, 0xd2, 0xf7, 0x74, + 0xd2, 0xe7, 0x2b, 0x5a, 0x6e, 0xd5, 0x44, 0xae, 0x6b, 0x6d, 0x29, 0x0a, 0xe5, 0x44, 0xc3, 0x1f, + 0x02, 0x68, 0xf2, 0xa6, 0x8e, 0xe6, 0xba, 0x21, 0x67, 0x81, 0x73, 0x86, 0x1b, 0x70, 0x37, 0x83, + 0x40, 0x92, 0x28, 0x96, 0x63, 0x9f, 0xb8, 0x9a, 0x43, 0xfa, 0xe9, 0x1c, 0x8b, 0xc9, 0x1c, 0xb7, + 0xa5, 0x28, 0x94, 0x13, 0x0d, 0xdf, 0x00, 0x0b, 0xfe, 0xd3, 0xf8, 0x9c, 0x8b, 0xc5, 0x59, 0x11, + 0x64, 0x0b, 0xbb, 0x51, 0x17, 0x8a, 0xe3, 0xd8, 0xd0, 0xac, 0xae, 0x4b, 0x9c, 0x21, 0xe9, 0xbf, + 0xe3, 0x7b, 0x00, 0x26, 0x94, 0x65, 0x2e, 0x94, 0xe1, 0xd0, 0xf6, 0x32, 0x08, 0x24, 0x89, 0x62, + 0x43, 0xf3, 0xab, 0x26, 0x33, 0xb4, 0xd9, 0xe4, 0xd0, 0x0e, 0xa5, 0x28, 0x94, 0x13, 0xcd, 0x6a, + 0xcf, 0x4f, 0x79, 0x6b, 0x88, 0x35, 0x1d, 0x77, 0x75, 0x52, 0x9d, 0x4b, 0xd6, 0xde, 0x6e, 0xb2, + 0x1b, 0xa5, 0xf1, 0xf0, 0x1d, 0x70, 0xc5, 0x6f, 0x3a, 0x34, 0x71, 0x48, 0x52, 0xe1, 0x24, 0x2f, + 0x09, 0x92, 0x2b, 0xbb, 0x69, 0x00, 0xca, 0xc6, 0xc0, 0xb7, 0xc0, 0x52, 0xcf, 0xd2, 0x75, 0x5e, + 0x8f, 0x6d, 0xcb, 0x33, 0x69, 0x75, 0x9e, 0xb3, 0x40, 0xb6, 0x87, 0xda, 0x89, 0x1e, 0x94, 0x42, + 0xc2, 0x87, 0x00, 0xf4, 0x02, 0x39, 0x70, 0xab, 0x20, 0x5f, 0xe8, 0xb3, 0x3a, 0x14, 0x09, 0x70, + 0xd8, 0xe4, 0xa2, 0x18, 0x5b, 0xe3, 0x43, 0x05, 0xac, 0xe7, 0xec, 0x71, 0xf8, 0xbd, 0x84, 0xea, + 0xdd, 0x4a, 0xa9, 0xde, 0xb5, 0x9c, 0xb0, 0x98, 0xf4, 0xf5, 0xc0, 0x22, 0xf3, 0x1d, 0x9a, 0x39, + 0xf0, 0x21, 0xe2, 0x04, 0x7b, 0x55, 0x96, 0x3b, 0x8a, 0x03, 0xa3, 0x63, 0xf8, 0xca, 0xf8, 0xa4, + 0xbe, 0x98, 0xe8, 0x43, 0x49, 0xce, 0xc6, 0x2f, 0x0b, 0x00, 0x6c, 0x13, 0x5b, 0xb7, 0x46, 0x06, + 0x31, 0x2f, 0xc2, 0xb5, 0x6c, 0x27, 0x5c, 0x4b, 0x43, 0xba, 0x10, 0x61, 0x3e, 0xb9, 0xb6, 0x65, + 0x27, 0x65, 0x5b, 0xbe, 0x31, 0x81, 0xe7, 
0x74, 0xdf, 0xf2, 0x8f, 0x22, 0x58, 0x89, 0xc0, 0x91, + 0x71, 0xb9, 0x97, 0x58, 0xc2, 0x57, 0x52, 0x4b, 0xb8, 0x2e, 0x09, 0x79, 0x6e, 0xce, 0xe5, 0xf3, + 0x77, 0x10, 0xf0, 0x03, 0xb0, 0xc4, 0xac, 0x8a, 0x5f, 0x08, 0xdc, 0x08, 0xcd, 0x9e, 0xd9, 0x08, + 0x85, 0x42, 0xb6, 0x93, 0x60, 0x42, 0x29, 0xe6, 0x1c, 0xe3, 0x35, 0xf7, 0xbc, 0x8d, 0x57, 0xe3, + 0x0f, 0x0a, 0x58, 0x8a, 0x96, 0xe9, 0x02, 0x6c, 0x52, 0x3b, 0x69, 0x93, 0x6a, 0xa7, 0xd7, 0x65, + 0x8e, 0x4f, 0xfa, 0x7b, 0x29, 0x9e, 0x35, 0x37, 0x4a, 0x1b, 0xec, 0x85, 0xca, 0xd6, 0xb5, 0x1e, + 0x76, 0x85, 0xac, 0x5e, 0xf2, 0x5f, 0xa6, 0xfc, 0x36, 0x14, 0xf6, 0x26, 0x2c, 0x55, 0xe1, 0xf9, + 0x5a, 0xaa, 0xe2, 0xe7, 0x63, 0xa9, 0xee, 0x83, 0x8a, 0x1b, 0x98, 0xa9, 0x12, 0xa7, 0xbc, 0x31, + 0x69, 0x3b, 0x0b, 0x1f, 0x15, 0xb2, 0x86, 0x0e, 0x2a, 0x64, 0x92, 0x79, 0xa7, 0xf2, 0x17, 0xe9, + 0x9d, 0xd8, 0x16, 0xb6, 0xb1, 0xe7, 0x92, 0x3e, 0xaf, 0xfb, 0x4a, 0xb4, 0x85, 0xf7, 0x79, 0x2b, + 0x12, 0xbd, 0xf0, 0x10, 0xac, 0xdb, 0x8e, 0x35, 0x70, 0x88, 0xeb, 0x6e, 0x13, 0xdc, 0xd7, 0x35, + 0x93, 0x04, 0x03, 0xf0, 0x55, 0xef, 0xda, 0xf8, 0xa4, 0xbe, 0xbe, 0x2f, 0x87, 0xa0, 0xbc, 0xd8, + 0xc6, 0x9f, 0x4b, 0xe0, 0x72, 0xfa, 0x44, 0xcc, 0x31, 0x22, 0xca, 0xb9, 0x8c, 0xc8, 0x6b, 0xb1, + 0x12, 0xf5, 0x5d, 0x5a, 0xec, 0x9d, 0x3f, 0x53, 0xa6, 0x5b, 0x60, 0x59, 0x18, 0x8f, 0xa0, 0x53, + 0x58, 0xb1, 0x70, 0x79, 0x0e, 0x93, 0xdd, 0x28, 0x8d, 0x67, 0xf6, 0x22, 0x72, 0x0d, 0x01, 0x49, + 0x29, 0x69, 0x2f, 0xb6, 0xd2, 0x00, 0x94, 0x8d, 0x81, 0x1d, 0xb0, 0xe2, 0x99, 0x59, 0x2a, 0xbf, + 0x5c, 0xae, 0x09, 0xaa, 0x95, 0xc3, 0x2c, 0x04, 0xc9, 0xe2, 0xe0, 0x8f, 0x13, 0x8e, 0x63, 0x96, + 0x1f, 0x04, 0xaf, 0x9c, 0x5e, 0xd1, 0x53, 0x5b, 0x0e, 0x78, 0x0f, 0x2c, 0x3a, 0xdc, 0x50, 0x06, + 0x59, 0xfa, 0xa6, 0xec, 0x2b, 0x22, 0x6c, 0x11, 0xc5, 0x3b, 0x51, 0x12, 0x2b, 0xf1, 0x51, 0x95, + 0x69, 0x7d, 0x54, 0xe3, 0x4f, 0x0a, 0x80, 0xd9, 0x2d, 0x38, 0xf1, 0xe5, 0x3e, 0x13, 0x11, 0x93, + 0xc8, 0xbe, 0xdc, 0xe1, 0xdc, 0x9a, 0xec, 0x70, 0xa2, 0x13, 0x74, 0x3a, 0x8b, 0x23, 0x66, 0xe0, + 0x62, 0x2e, 0x66, 0xa6, 0xb0, 0x38, 0x51, 0x3e, 0xcf, 0x66, 0x71, 0x62, 0x3c, 0xa7, 0x5b, 0x9c, + 0x7f, 0x17, 0xc0, 0x4a, 0x04, 0x9e, 0xda, 0xe2, 0x48, 0x42, 0x5e, 0x5c, 0xce, 0x4c, 0xbe, 0x9c, + 0x61, 0xb6, 0x23, 0x9a, 0xba, 0xff, 0x13, 0xdb, 0x11, 0x25, 0x94, 0x63, 0x3b, 0x7e, 0x57, 0x88, + 0x67, 0xfd, 0xa5, 0xb7, 0x1d, 0xcf, 0x7e, 0xb9, 0xd2, 0xf8, 0x4b, 0x11, 0x5c, 0x4e, 0x6f, 0xc1, + 0x84, 0x0e, 0x2a, 0x13, 0x75, 0x70, 0x1f, 0xac, 0x3e, 0xf2, 0x74, 0x7d, 0xc4, 0xa7, 0x21, 0x26, + 0x86, 0xbe, 0x82, 0x7e, 0x55, 0x44, 0xae, 0x7e, 0x5f, 0x82, 0x41, 0xd2, 0xc8, 0x1c, 0x4d, 0x2f, + 0x9e, 0x4b, 0xd3, 0x33, 0x6a, 0x53, 0x3a, 0x83, 0xda, 0x48, 0xf5, 0xb9, 0x7c, 0x0e, 0x7d, 0x9e, + 0x5a, 0x50, 0x25, 0xc7, 0xd5, 0xc4, 0x77, 0xf8, 0x5f, 0x2b, 0x60, 0x4d, 0xfe, 0xfa, 0x0c, 0x75, + 0xb0, 0x64, 0xe0, 0xc7, 0xf1, 0xcb, 0x8b, 0x49, 0x82, 0xe1, 0x51, 0x4d, 0x57, 0xfd, 0xaf, 0x3b, + 0xea, 0xbb, 0x26, 0xdd, 0x73, 0x0e, 0xa8, 0xa3, 0x99, 0x03, 0x5f, 0x60, 0x3b, 0x09, 0x2e, 0x94, + 0xe2, 0x6e, 0x7c, 0xa2, 0x80, 0xf5, 0x1c, 0x95, 0xbb, 0xd8, 0x4c, 0xe0, 0x43, 0x50, 0x31, 0xf0, + 0xe3, 0x03, 0xcf, 0x19, 0x04, 0x92, 0x7c, 0xf6, 0xe7, 0xf0, 0x8d, 0xdc, 0x11, 0x2c, 0x28, 0xe4, + 0x6b, 0xec, 0x81, 0xeb, 0x89, 0x41, 0xb2, 0x4d, 0x43, 0x1e, 0x79, 0x3a, 0xdf, 0x3f, 0xc2, 0x53, + 0xdc, 0x02, 0xf3, 0x36, 0x76, 0xa8, 0x16, 0x9a, 0xd1, 0x72, 0x6b, 0x71, 0x7c, 0x52, 0x9f, 0xdf, + 0x0f, 0x1a, 0x51, 0xd4, 0xdf, 0xf8, 0x55, 0x01, 0x2c, 0xc4, 0x48, 0x2e, 0x40, 0xdf, 0xdf, 0x4e, + 0xe8, 0xbb, 0xf4, 0x8b, 0x49, 0x7c, 0x54, 0x79, 0x02, 0xdf, 0x49, 
0x09, 0xfc, 0x37, 0x27, 0x11, + 0x9d, 0xae, 0xf0, 0x9f, 0x16, 0xc0, 0x6a, 0x0c, 0x1d, 0x49, 0xfc, 0x77, 0x12, 0x12, 0xbf, 0x91, + 0x92, 0xf8, 0xaa, 0x2c, 0xe6, 0x85, 0xc6, 0x4f, 0xd6, 0xf8, 0x3f, 0x2a, 0x60, 0x39, 0x36, 0x77, + 0x17, 0x20, 0xf2, 0xdb, 0x49, 0x91, 0xaf, 0x4f, 0xa8, 0x97, 0x1c, 0x95, 0x7f, 0x52, 0x4e, 0xe4, + 0xfd, 0xa5, 0x97, 0xf9, 0x9f, 0x83, 0xd5, 0xa1, 0xa5, 0x7b, 0x06, 0x69, 0xeb, 0x58, 0x33, 0x02, + 0x00, 0x53, 0x32, 0x36, 0x89, 0x37, 0xa5, 0xf4, 0xc4, 0x71, 0x35, 0x97, 0x12, 0x93, 0x3e, 0x88, + 0x22, 0x23, 0x2d, 0x7e, 0x20, 0xa1, 0x43, 0xd2, 0x87, 0xc0, 0x37, 0xc0, 0x02, 0xd3, 0x54, 0xad, + 0x47, 0x76, 0xb1, 0x11, 0xd4, 0x54, 0xf8, 0x7d, 0xe0, 0x20, 0xea, 0x42, 0x71, 0x1c, 0x3c, 0x02, + 0x2b, 0xb6, 0xd5, 0xef, 0x60, 0x13, 0x0f, 0x08, 0x3b, 0xff, 0xf7, 0xf9, 0xff, 0x42, 0xf0, 0x7b, + 0x87, 0xf9, 0xd6, 0x9b, 0xc1, 0x0b, 0xe9, 0x7e, 0x16, 0xc2, 0x3c, 0xbb, 0xa4, 0x99, 0xef, 0x67, + 0x19, 0x25, 0x34, 0x32, 0x9f, 0xb3, 0xe6, 0x32, 0xff, 0x03, 0x20, 0x2b, 0xae, 0x73, 0x7e, 0xd0, + 0xca, 0xbb, 0x51, 0xa9, 0x9c, 0xeb, 0x6b, 0xd4, 0xa7, 0x25, 0x70, 0x25, 0x73, 0x40, 0x7e, 0x81, + 0x77, 0x1a, 0x19, 0xb7, 0x54, 0x3c, 0x83, 0x5b, 0xda, 0x02, 0xcb, 0xe2, 0x43, 0x58, 0xca, 0x6c, + 0x85, 0x76, 0xb4, 0x9d, 0xec, 0x46, 0x69, 0xbc, 0xec, 0x4e, 0xa5, 0x7c, 0xc6, 0x3b, 0x95, 0x78, + 0x16, 0xe2, 0xff, 0x37, 0xfc, 0xaa, 0xcb, 0x66, 0x21, 0xfe, 0x8d, 0x23, 0x8d, 0x87, 0xdf, 0x0d, + 0x4a, 0x2a, 0x64, 0x98, 0xe3, 0x0c, 0xa9, 0x1a, 0x09, 0x09, 0x52, 0xe8, 0x67, 0xfa, 0xd8, 0xf3, + 0xbe, 0xe4, 0x63, 0xcf, 0xc6, 0x84, 0x52, 0x9e, 0xde, 0x2a, 0xfe, 0x4d, 0x01, 0x2f, 0xe5, 0xee, + 0x01, 0xb8, 0x95, 0xd0, 0xd9, 0xdb, 0x29, 0x9d, 0x7d, 0x39, 0x37, 0x30, 0x26, 0xb6, 0x86, 0xfc, + 0x42, 0xe4, 0xee, 0xc4, 0x0b, 0x11, 0x89, 0x8b, 0x9a, 0x7c, 0x33, 0xd2, 0xda, 0x78, 0xf2, 0xb4, + 0x36, 0xf3, 0xd1, 0xd3, 0xda, 0xcc, 0xc7, 0x4f, 0x6b, 0x33, 0xbf, 0x18, 0xd7, 0x94, 0x27, 0xe3, + 0x9a, 0xf2, 0xd1, 0xb8, 0xa6, 0x7c, 0x3c, 0xae, 0x29, 0xff, 0x1c, 0xd7, 0x94, 0xdf, 0x7e, 0x52, + 0x9b, 0x79, 0x58, 0x18, 0x6e, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x70, 0x5f, 0xbf, 0x58, 0x3d, + 0x26, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto new file mode 100644 index 000000000..38d7c08cc --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -0,0 +1,701 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.apps.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/api/policy/v1beta1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". 
+option go_package = "v1"; + +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +message ControllerRevision { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Data is the serialized representation of the state. + optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + + // Revision indicates the revision of the state represented by Data. + optional int64 revision = 3; +} + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +message ControllerRevisionList { + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ControllerRevisions + repeated ControllerRevision items = 2; +} + +// DaemonSet represents the configuration of a daemon set. +message DaemonSet { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The desired behavior of this daemon set. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional DaemonSetSpec spec = 2; + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional DaemonSetStatus status = 3; +} + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +message DaemonSetCondition { + // Type of DaemonSet condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// DaemonSetList is a collection of daemon sets. +message DaemonSetList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // A list of daemon sets. + repeated DaemonSet items = 2; +} + +// DaemonSetSpec is the specification of a daemon set. 
+message DaemonSetSpec { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + optional DaemonSetUpdateStrategy updateStrategy = 3; + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + optional int32 minReadySeconds = 4; + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + optional int32 revisionHistoryLimit = 6; +} + +// DaemonSetStatus represents the current status of a daemon set. +message DaemonSetStatus { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 currentNumberScheduled = 1; + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 numberMisscheduled = 2; + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 desiredNumberScheduled = 3; + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + optional int32 numberReady = 4; + + // The most recent generation observed by the daemon set controller. + // +optional + optional int64 observedGeneration = 5; + + // The total number of nodes that are running updated daemon pod + // +optional + optional int32 updatedNumberScheduled = 6; + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberAvailable = 7; + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberUnavailable = 8; + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a DaemonSet's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated DaemonSetCondition conditions = 10; +} + +// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. +message DaemonSetUpdateStrategy { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if type = "RollingUpdate". + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as Deployment `strategy.rollingUpdate`. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + optional RollingUpdateDaemonSet rollingUpdate = 2; +} + +// Deployment enables declarative updates for Pods and ReplicaSets. +message Deployment { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Deployment. + // +optional + optional DeploymentSpec spec = 2; + + // Most recently observed status of the Deployment. + // +optional + optional DeploymentStatus status = 3; +} + +// DeploymentCondition describes the state of a deployment at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // Last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// DeploymentList is a list of Deployments. +message DeploymentList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Deployments. + repeated Deployment items = 2; +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +message DeploymentSpec { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + optional int32 replicas = 1; + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // It must match the pod template's labels. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template describes the pods that will be created. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + optional DeploymentStrategy strategy = 4; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 5; + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + optional int32 revisionHistoryLimit = 6; + + // Indicates that the deployment is paused. 
+ // +optional + optional bool paused = 7; + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + optional int32 progressDeadlineSeconds = 9; +} + +// DeploymentStatus is the most recently observed status of the Deployment. +message DeploymentStatus { + // The generation observed by the deployment controller. + // +optional + optional int64 observedGeneration = 1; + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + optional int32 replicas = 2; + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + optional int32 updatedReplicas = 3; + + // Total number of ready pods targeted by this deployment. + // +optional + optional int32 readyReplicas = 7; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + optional int32 availableReplicas = 4; + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + optional int32 unavailableReplicas = 5; + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated DeploymentCondition conditions = 6; + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + optional int32 collisionCount = 8; +} + +// DeploymentStrategy describes how to replace existing pods with new ones. +message DeploymentStrategy { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + optional RollingUpdateDeployment rollingUpdate = 2; +} + +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +message ReplicaSet { + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetSpec spec = 2; + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+ // +optional
+ optional ReplicaSetStatus status = 3;
+}
+
+// ReplicaSetCondition describes the state of a replica set at a certain point.
+message ReplicaSetCondition {
+ // Type of replica set condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // The last time the condition transitioned from one status to another.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // The reason for the condition's last transition.
+ // +optional
+ optional string reason = 4;
+
+ // A human readable message indicating details about the transition.
+ // +optional
+ optional string message = 5;
+}
+
+// ReplicaSetList is a collection of ReplicaSets.
+message ReplicaSetList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of ReplicaSets.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+ repeated ReplicaSet items = 2;
+}
+
+// ReplicaSetSpec is the specification of a ReplicaSet.
+message ReplicaSetSpec {
+ // Replicas is the number of desired replicas.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // +optional
+ optional int32 replicas = 1;
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ optional int32 minReadySeconds = 4;
+
+ // Selector is a label query over pods that should match the replica count.
+ // Label keys and values that must match in order to be controlled by this replica set.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ // +optional
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+message ReplicaSetStatus {
+ // Replicas is the most recently observed number of replicas.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ optional int32 replicas = 1;
+
+ // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ // +optional
+ optional int32 fullyLabeledReplicas = 2;
+
+ // The number of ready replicas for this replica set.
+ // +optional
+ optional int32 readyReplicas = 4;
+
+ // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+ // +optional
+ optional int32 availableReplicas = 5;
+
+ // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+ // +optional + optional int64 observedGeneration = 3; + + // Represents the latest available observations of a replica set's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated ReplicaSetCondition conditions = 6; +} + +// Spec to control the desired behavior of daemon set rolling update. +message RollingUpdateDaemonSet { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; +} + +// Spec to control the desired behavior of rolling update. +message RollingUpdateDeployment { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new ReplicaSet can be scaled up further, ensuring that total number of pods running + // at any time during the update is at most 130% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; +} + +// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. +message RollingUpdateStatefulSetStrategy { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + // Default value is 0. + // +optional + optional int32 partition = 1; +} + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. 
+// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +message StatefulSet { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired identities of pods in this set. + // +optional + optional StatefulSetSpec spec = 2; + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + optional StatefulSetStatus status = 3; +} + +// StatefulSetCondition describes the state of a statefulset at a certain point. +message StatefulSetCondition { + // Type of statefulset condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// StatefulSetList is a collection of StatefulSets. +message StatefulSetList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated StatefulSet items = 2; +} + +// A StatefulSetSpec is the specification of a StatefulSet. +message StatefulSetSpec { + // replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + optional int32 replicas = 1; + + // selector is a label query over pods that should match the replica count. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // volumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + + // serviceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. 
+ optional string serviceName = 5; + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + optional string podManagementPolicy = 6; + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + optional StatefulSetUpdateStrategy updateStrategy = 7; + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. + optional int32 revisionHistoryLimit = 8; +} + +// StatefulSetStatus represents the current state of a StatefulSet. +message StatefulSetStatus { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. + // +optional + optional int64 observedGeneration = 1; + + // replicas is the number of Pods created by the StatefulSet controller. + optional int32 replicas = 2; + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + optional int32 readyReplicas = 3; + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + optional int32 currentReplicas = 4; + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + optional int32 updatedReplicas = 5; + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). + optional string currentRevision = 6; + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + optional string updateRevision = 7; + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated StatefulSetCondition conditions = 10; +} + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +message StatefulSetUpdateStrategy { + // Type indicates the type of the StatefulSetUpdateStrategy. + // Default is RollingUpdate. + // +optional + optional string type = 1; + + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. 
+ // +optional + optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; +} + diff --git a/vendor/k8s.io/api/apps/v1/register.go b/vendor/k8s.io/api/apps/v1/register.go new file mode 100644 index 000000000..027101046 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/register.go @@ -0,0 +1,60 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "apps" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &StatefulSet{}, + &StatefulSetList{}, + &DaemonSet{}, + &DaemonSetList{}, + &ReplicaSet{}, + &ReplicaSetList{}, + &ControllerRevision{}, + &ControllerRevisionList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go new file mode 100644 index 000000000..4431ca2c3 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -0,0 +1,819 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + ControllerRevisionHashLabelKey = "controller-revision-hash" + StatefulSetRevisionLabel = ControllerRevisionHashLabelKey + DeprecatedRollbackTo = "deprecated.deployment.rollback.to" + DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation" + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +type StatefulSet struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired identities of pods in this set. + // +optional + Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodManagementPolicyType defines the policy for creating pods under a stateful set. +type PodManagementPolicyType string + +const ( + // OrderedReadyPodManagement will create pods in strictly increasing order on + // scale up and strictly decreasing order on scale down, progressing only when + // the previous pod is ready or terminated. At most one pod will be changed + // at any time. + OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady" + // ParallelPodManagement will create and delete pods as soon as the stateful set + // replica count is changed, and will not wait for pods to be ready or complete + // termination. + ParallelPodManagement = "Parallel" +) + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +type StatefulSetUpdateStrategy struct { + // Type indicates the type of the StatefulSetUpdateStrategy. + // Default is RollingUpdate. + // +optional + Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"` + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. + // +optional + RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +// StatefulSetUpdateStrategyType is a string enumeration type that enumerates +// all possible update strategies for the StatefulSet controller. +type StatefulSetUpdateStrategyType string + +const ( + // RollingUpdateStatefulSetStrategyType indicates that update will be + // applied to all Pods in the StatefulSet with respect to the StatefulSet + // ordering constraints. When a scale operation is performed with this + // strategy, new Pods will be created from the specification version indicated + // by the StatefulSet's updateRevision. 
+ RollingUpdateStatefulSetStrategyType = "RollingUpdate"
+ // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version
+ // tracking and ordered rolling restarts are disabled. Pods are recreated
+ // from the StatefulSetSpec when they are manually deleted. When a scale
+ // operation is performed with this strategy, new Pods will be created from
+ // the specification version indicated by the StatefulSet's currentRevision.
+ OnDeleteStatefulSetStrategyType = "OnDelete"
+)
+
+// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
+type RollingUpdateStatefulSetStrategy struct {
+ // Partition indicates the ordinal at which the StatefulSet should be
+ // partitioned.
+ // Default value is 0.
+ // +optional
+ Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"`
+}
+
+// A StatefulSetSpec is the specification of a StatefulSet.
+type StatefulSetSpec struct {
+ // replicas is the desired number of replicas of the given Template.
+ // These are replicas in the sense that they are instantiations of the
+ // same Template, but individual replicas also have a consistent identity.
+ // If unspecified, defaults to 1.
+ // TODO: Consider a rename of this field.
+ // +optional
+ Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+ // selector is a label query over pods that should match the replica count.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"`
+
+ // template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Each pod stamped out by the StatefulSet
+ // will fulfill this Template, but have a unique identity from the rest
+ // of the StatefulSet.
+ Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
+
+ // volumeClaimTemplates is a list of claims that pods are allowed to reference.
+ // The StatefulSet controller is responsible for mapping network identities to
+ // claims in a way that maintains the identity of a pod. Every claim in
+ // this list must have at least one matching (by name) volumeMount in one
+ // container in the template. A claim in this list takes precedence over
+ // any volumes in the template, with the same name.
+ // TODO: Define the behavior if a claim already exists with the same name.
+ // +optional
+ VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"`
+
+ // serviceName is the name of the service that governs this StatefulSet.
+ // This service must exist before the StatefulSet, and is responsible for
+ // the network identity of the set. Pods get DNS/hostnames that follow the
+ // pattern: pod-specific-string.serviceName.default.svc.cluster.local
+ // where "pod-specific-string" is managed by the StatefulSet controller.
+ ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
+
+ // podManagementPolicy controls how pods are created during initial scale up,
+ // when replacing pods on nodes, or when scaling down. The default policy is
+ // `OrderedReady`, where pods are created in increasing order (pod-0, then
+ // pod-1, etc) and the controller will wait until each pod is ready before
+ // continuing. When scaling down, the pods are removed in the opposite order.
+ // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"` + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"` + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` +} + +// StatefulSetStatus represents the current state of a StatefulSet. +type StatefulSetStatus struct { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // replicas is the number of Pods created by the StatefulSet controller. + Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"` + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"` + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). + CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"` + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"` + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a statefulset's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type StatefulSetConditionType string + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StatefulSetList is a collection of StatefulSets. +type StatefulSetList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Deployment enables declarative updates for Pods and ReplicaSets. +type Deployment struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // It must match the pod template's labels. + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // Template describes the pods that will be created. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. 
+ // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` + + // Indicates that the deployment is paused. + // +optional + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing ReplicaSets (and label key that is added to its pods) to prevent the existing ReplicaSets + // to select new pods (and old pods being select by new ReplicaSet). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +// DeploymentStrategy describes how to replace existing pods with new ones. +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old ReplicaSets by new one using rolling update i.e gradually scale down the old ReplicaSets and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. +type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. 
+ // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new ReplicaSet can be scaled up further, ensuring that total number of pods running + // at any time during the update is at most 130% of desired pods. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` +} + +// DeploymentStatus is the most recently observed status of the Deployment. +type DeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"` +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. 
+ // Progress for a deployment is
+ // considered when a new replica set is created or adopted, and when new pods scale
+ // up or old pods scale down. Progress is not estimated for paused deployments or
+ // when progressDeadlineSeconds is not specified.
+ DeploymentProgressing DeploymentConditionType = "Progressing"
+ // ReplicaFailure is added in a deployment when one of its pods fails to be created
+ // or deleted.
+ DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
+)
+
+// DeploymentCondition describes the state of a deployment at a certain point.
+type DeploymentCondition struct {
+ // Type of deployment condition.
+ Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+ // The last time this condition was updated.
+ LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"`
+ // The reason for the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // A human readable message indicating details about the transition.
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DeploymentList is a list of Deployments.
+type DeploymentList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of Deployments.
+ Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
+type DaemonSetUpdateStrategy struct {
+ // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
+ // +optional
+ Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
+
+ // Rolling update config params. Present only if type = "RollingUpdate".
+ //---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be. Same as Deployment `strategy.rollingUpdate`.
+ // See https://github.com/kubernetes/kubernetes/issues/35345
+ // +optional
+ RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DaemonSetUpdateStrategyType string
+
+const (
+ // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other.
+ RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
+
+ // Replace the old daemons only when it's killed
+ OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
+)
+
+// Spec to control the desired behavior of daemon set rolling update.
+type RollingUpdateDaemonSet struct {
+ // The maximum number of DaemonSet pods that can be unavailable during the
+ // update. Value can be an absolute number (ex: 5) or a percentage of total
+ // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ // This cannot be 0.
+ // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` +} + +// DaemonSetSpec is the specification of a daemon set. +type DaemonSetSpec struct { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"` + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` +} + +// DaemonSetStatus represents the current status of a daemon set. +type DaemonSetStatus struct { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). 
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` + + // The most recent generation observed by the daemon set controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"` + + // The total number of nodes that are running updated daemon pod + // +optional + UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"` + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"` + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"` + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DaemonSet represents the configuration of a daemon set. +type DaemonSet struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The desired behavior of this daemon set. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +const ( + // DefaultDaemonSetUniqueLabelKey is the default label key that is added + // to existing DaemonSet pods to distinguish between old and new + // DaemonSet pods during DaemonSet template updates. + DefaultDaemonSetUniqueLabelKey = ControllerRevisionHashLabelKey +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // A list of daemon sets. + Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +type ReplicaSet struct { + metav1.TypeMeta `json:",inline"` + + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSetList is a collection of ReplicaSets. +type ReplicaSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ReplicaSets. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ReplicaSetSpec is the specification of a ReplicaSet. 
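Note for reviewers (illustrative aside, not part of the vendored files; the vendored ReplicaSetSpec definition continues directly below): the workload types above all repeat the rule that a selector must match the pod template's labels, and the rolling-update bounds accept either absolute counts or percentages via intstr.IntOrString. A hedged Go sketch with hypothetical names and labels:

// Sketch only: selector and template labels must agree, and maxUnavailable
// may be a percentage or an absolute count. Values here are made up.
package main

import (
    "fmt"

    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
    labels := map[string]string{"app": "node-exporter"}
    maxUnavailable := intstr.FromString("10%") // intstr.FromInt(1) would set an absolute count

    ds := appsv1.DaemonSet{
        ObjectMeta: metav1.ObjectMeta{Name: "node-exporter", Namespace: "monitoring"},
        Spec: appsv1.DaemonSetSpec{
            // The selector must match the pod template's labels, per the field comments above.
            Selector: &metav1.LabelSelector{MatchLabels: labels},
            Template: corev1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{Labels: labels},
            },
            UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
                Type:          appsv1.RollingUpdateDaemonSetStrategyType,
                RollingUpdate: &appsv1.RollingUpdateDaemonSet{MaxUnavailable: &maxUnavailable},
            },
        },
    }
    fmt.Println(ds.Name, ds.Spec.UpdateStrategy.Type, maxUnavailable.String())
}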
+type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // Selector is a label query over pods that should match the replica count. + // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // +optional + Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +type ReplicaSetStatus struct { + // Replicas is the most recently oberved number of replicas. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` + + // The number of ready replicas for this replica set. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + + // Represents the latest available observations of a replica set's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type ReplicaSetConditionType string + +// These are valid conditions of a replica set. +const ( + // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created + // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted + // due to kubelet being down or finalizers are failing. + ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" +) + +// ReplicaSetCondition describes the state of a replica set at a certain point. 
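The Replicas field in the ReplicaSetSpec above is deliberately a *int32 so that an explicit zero can be told apart from an unset value (which defaults to 1), and the selector is required to match the pod template's labels. A minimal sketch of how a caller might populate this spec with the apps/v1 types from this file; the int32Ptr helper, the "demo" names and the busybox image are illustrative, not part of the vendored package:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// int32Ptr is a small illustrative helper; returning a pointer lets an
// explicit value (including zero) be distinguished from "unspecified".
func int32Ptr(i int32) *int32 { return &i }

func main() {
	labels := map[string]string{"app": "demo"}

	spec := appsv1.ReplicaSetSpec{
		// Explicitly ask for 0 replicas; leaving Replicas nil would instead
		// mean "use the default of 1".
		Replicas: int32Ptr(0),
		// Selector must match the pod template's labels below.
		Selector: &metav1.LabelSelector{MatchLabels: labels},
		Template: corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: labels},
			Spec: corev1.PodSpec{
				Containers: []corev1.Container{{Name: "demo", Image: "busybox"}},
			},
		},
	}
	fmt.Println(*spec.Replicas, spec.Selector.MatchLabels)
}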
+type ReplicaSetCondition struct { + // Type of replica set condition. + Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +type ControllerRevision struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data is the serialized representation of the state. + Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` + + // Revision indicates the revision of the state represented by Data. + Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +type ControllerRevisionList struct { + metav1.TypeMeta `json:",inline"` + + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ControllerRevisions + Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..85fb159dd --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -0,0 +1,365 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ControllerRevision = map[string]string{ + "": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "data": "Data is the serialized representation of the state.", + "revision": "Revision indicates the revision of the state represented by Data.", +} + +func (ControllerRevision) SwaggerDoc() map[string]string { + return map_ControllerRevision +} + +var map_ControllerRevisionList = map[string]string{ + "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of ControllerRevisions", +} + +func (ControllerRevisionList) SwaggerDoc() map[string]string { + return map_ControllerRevisionList +} + +var map_DaemonSet = map[string]string{ + "": "DaemonSet represents the configuration of a daemon set.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (DaemonSet) SwaggerDoc() map[string]string { + return map_DaemonSet +} + +var map_DaemonSetCondition = map[string]string{ + "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "Type of DaemonSet condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DaemonSetCondition) SwaggerDoc() map[string]string { + return map_DaemonSetCondition +} + +var map_DaemonSetList = map[string]string{ + "": "DaemonSetList is a collection of daemon sets.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "A list of daemon sets.", +} + +func (DaemonSetList) SwaggerDoc() map[string]string { + return map_DaemonSetList +} + +var map_DaemonSetSpec = map[string]string{ + "": "DaemonSetSpec is the specification of a daemon set.", + "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", + "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", + "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", +} + +func (DaemonSetSpec) SwaggerDoc() map[string]string { + return map_DaemonSetSpec +} + +var map_DaemonSetStatus = map[string]string{ + "": "DaemonSetStatus represents the current status of a daemon set.", + "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + "observedGeneration": "The most recent generation observed by the daemon set controller.", + "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", + "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a DaemonSet's current state.", +} + +func (DaemonSetStatus) SwaggerDoc() map[string]string { + return map_DaemonSetStatus +} + +var map_DaemonSetUpdateStrategy = map[string]string{ + "": "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.", + "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".", +} + +func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_DaemonSetUpdateStrategy +} + +var map_Deployment = map[string]string{ + "": "Deployment enables declarative updates for Pods and ReplicaSets.", + "metadata": "Standard object metadata.", + "spec": "Specification of the desired behavior of the Deployment.", + "status": "Most recently observed status of the Deployment.", +} + +func (Deployment) SwaggerDoc() map[string]string { + return map_Deployment +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentList = map[string]string{ + "": "DeploymentList is a list of Deployments.", + "metadata": "Standard list metadata.", + "items": "Items is the list of Deployments.", +} + +func (DeploymentList) SwaggerDoc() map[string]string { + return map_DeploymentList +} + +var map_DeploymentSpec = map[string]string{ + "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. 
It must match the pod template's labels.", + "template": "Template describes the pods that will be created.", + "strategy": "The deployment strategy to use to replace existing pods with new ones.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", + "paused": "Indicates that the deployment is paused.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", +} + +func (DeploymentSpec) SwaggerDoc() map[string]string { + return map_DeploymentSpec +} + +var map_DeploymentStatus = map[string]string{ + "": "DeploymentStatus is the most recently observed status of the Deployment.", + "observedGeneration": "The generation observed by the deployment controller.", + "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "conditions": "Represents the latest available observations of a deployment's current state.", + "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", +} + +func (DeploymentStatus) SwaggerDoc() map[string]string { + return map_DeploymentStatus +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to replace existing pods with new ones.", + "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_ReplicaSet = map[string]string{ + "": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (ReplicaSet) SwaggerDoc() map[string]string { + return map_ReplicaSet +} + +var map_ReplicaSetCondition = map[string]string{ + "": "ReplicaSetCondition describes the state of a replica set at a certain point.", + "type": "Type of replica set condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (ReplicaSetCondition) SwaggerDoc() map[string]string { + return map_ReplicaSetCondition +} + +var map_ReplicaSetList = map[string]string{ + "": "ReplicaSetList is a collection of ReplicaSets.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", +} + +func (ReplicaSetList) SwaggerDoc() map[string]string { + return map_ReplicaSetList +} + +var map_ReplicaSetSpec = map[string]string{ + "": "ReplicaSetSpec is the specification of a ReplicaSet.", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "selector": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", +} + +func (ReplicaSetSpec) SwaggerDoc() map[string]string { + return map_ReplicaSetSpec +} + +var map_ReplicaSetStatus = map[string]string{ + "": "ReplicaSetStatus represents the current status of a ReplicaSet.", + "replicas": "Replicas is the most recently oberved number of replicas. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", + "readyReplicas": "The number of ready replicas for this replica set.", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", + "conditions": "Represents the latest available observations of a replica set's current state.", +} + +func (ReplicaSetStatus) SwaggerDoc() map[string]string { + return map_ReplicaSetStatus +} + +var map_RollingUpdateDaemonSet = map[string]string{ + "": "Spec to control the desired behavior of daemon set rolling update.", + "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", +} + +func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { + return map_RollingUpdateDaemonSet +} + +var map_RollingUpdateDeployment = map[string]string{ + "": "Spec to control the desired behavior of rolling update.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", +} + +func (RollingUpdateDeployment) SwaggerDoc() map[string]string { + return map_RollingUpdateDeployment +} + +var map_RollingUpdateStatefulSetStrategy = map[string]string{ + "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", + "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.", +} + +func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { + return map_RollingUpdateStatefulSetStrategy +} + +var map_StatefulSet = map[string]string{ + "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "spec": "Spec defines the desired identities of pods in this set.", + "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", +} + +func (StatefulSet) SwaggerDoc() map[string]string { + return map_StatefulSet +} + +var map_StatefulSetCondition = map[string]string{ + "": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "Type of statefulset condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StatefulSetCondition) SwaggerDoc() map[string]string { + return map_StatefulSetCondition +} + +var map_StatefulSetList = map[string]string{ + "": "StatefulSetList is a collection of StatefulSets.", +} + +func (StatefulSetList) SwaggerDoc() map[string]string { + return map_StatefulSetList +} + +var map_StatefulSetSpec = map[string]string{ + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "serviceName is the name of the service that governs this StatefulSet. 
This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", +} + +func (StatefulSetSpec) SwaggerDoc() map[string]string { + return map_StatefulSetSpec +} + +var map_StatefulSetStatus = map[string]string{ + "": "StatefulSetStatus represents the current state of a StatefulSet.", + "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", + "replicas": "replicas is the number of Pods created by the StatefulSet controller.", + "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", + "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", + "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", + "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a statefulset's current state.", +} + +func (StatefulSetStatus) SwaggerDoc() map[string]string { + return map_StatefulSetStatus +} + +var map_StatefulSetUpdateStrategy = map[string]string{ + "": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", + "type": "Type indicates the type of the StatefulSetUpdateStrategy. 
Default is RollingUpdate.", + "rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", +} + +func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_StatefulSetUpdateStrategy +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..ccbb29061 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go @@ -0,0 +1,856 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Data.DeepCopyInto(&out.Data) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision. +func (in *ControllerRevision) DeepCopy() *ControllerRevision { + if in == nil { + return nil + } + out := new(ControllerRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevision) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerRevision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList. +func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList { + if in == nil { + return nil + } + out := new(ControllerRevisionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevisionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
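The generated DeepCopy, DeepCopyInto and DeepCopyObject methods in this file all follow the same pattern: copy the value fields, then allocate fresh storage for every pointer and slice so the copy shares nothing with the original. A short sketch of the usual reason to call them, assuming an object handed out by a shared informer cache that must not be mutated in place; the annotation key is illustrative:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

// annotateDaemonSet returns a modified copy of ds. Objects returned by shared
// caches are shared between callers, so we deep-copy before writing.
func annotateDaemonSet(ds *appsv1.DaemonSet) *appsv1.DaemonSet {
	out := ds.DeepCopy() // copies ObjectMeta, Spec, Status and all nested pointers/slices
	if out.Annotations == nil {
		out.Annotations = map[string]string{}
	}
	out.Annotations["example.com/touched"] = "true" // illustrative key
	return out
}

func main() {
	orig := &appsv1.DaemonSet{}
	copied := annotateDaemonSet(orig)
	fmt.Println(len(orig.Annotations), len(copied.Annotations)) // 0 1
}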
+func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet. +func (in *DaemonSet) DeepCopy() *DaemonSet { + if in == nil { + return nil + } + out := new(DaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList. +func (in *DaemonSetList) DeepCopy() *DaemonSetList { + if in == nil { + return nil + } + out := new(DaemonSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec. +func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec { + if in == nil { + return nil + } + out := new(DaemonSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus. +func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus { + if in == nil { + return nil + } + out := new(DaemonSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateDaemonSet) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy. +func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { + if in == nil { + return nil + } + out := new(DaemonSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Deployment) DeepCopyInto(out *Deployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. +func (in *Deployment) DeepCopy() *Deployment { + if in == nil { + return nil + } + out := new(Deployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Deployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. 
+func (in *DeploymentList) DeepCopy() *DeploymentList { + if in == nil { + return nil + } + out := new(DeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. +func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. +func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { + if in == nil { + return nil + } + out := new(DeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateDeployment) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. +func (in *ReplicaSet) DeepCopy() *ReplicaSet { + if in == nil { + return nil + } + out := new(ReplicaSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. +func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { + if in == nil { + return nil + } + out := new(ReplicaSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. +func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { + if in == nil { + return nil + } + out := new(ReplicaSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. +func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { + if in == nil { + return nil + } + out := new(ReplicaSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. +func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { + if in == nil { + return nil + } + out := new(ReplicaSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet. +func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { + if in == nil { + return nil + } + out := new(RollingUpdateDaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. +func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { + if in == nil { + return nil + } + out := new(RollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { + *out = *in + if in.Partition != nil { + in, out := &in.Partition, &out.Partition + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. +func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { + if in == nil { + return nil + } + out := new(RollingUpdateStatefulSetStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet. 
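RollingUpdateDaemonSet and RollingUpdateDeployment above carry their maxUnavailable/maxSurge knobs as *intstr.IntOrString, which is why the deepcopy code allocates a fresh IntOrString instead of copying the pointer. A hedged sketch of how such a strategy is typically built; the 1 and "25%"/"50%" values are just examples:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// IntOrString lets the same field hold either an absolute count or a percentage.
	maxUnavailable := intstr.FromInt(1)  // at most one pod down at a time
	maxSurge := intstr.FromString("25%") // up to 25% extra pods during the update

	strategy := appsv1.DeploymentStrategy{
		Type: appsv1.RollingUpdateDeploymentStrategyType,
		RollingUpdate: &appsv1.RollingUpdateDeployment{
			MaxUnavailable: &maxUnavailable,
			MaxSurge:       &maxSurge,
		},
	}

	// DeepCopy gives the copy its own IntOrString values, so editing the copy
	// does not affect the original strategy.
	copied := strategy.DeepCopy()
	copied.RollingUpdate.MaxSurge.StrVal = "50%"
	fmt.Println(strategy.RollingUpdate.MaxSurge.StrVal, copied.RollingUpdate.MaxSurge.StrVal) // 25% 50%
}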
+func (in *StatefulSet) DeepCopy() *StatefulSet { + if in == nil { + return nil + } + out := new(StatefulSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. +func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList. +func (in *StatefulSetList) DeepCopy() *StatefulSetList { + if in == nil { + return nil + } + out := new(StatefulSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]core_v1.PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec. +func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec { + if in == nil { + return nil + } + out := new(StatefulSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus. +func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus { + if in == nil { + return nil + } + out := new(StatefulSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateStatefulSetStrategy) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. +func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy { + if in == nil { + return nil + } + out := new(StatefulSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/doc.go b/vendor/k8s.io/api/apps/v1beta1/doc.go similarity index 84% rename from vendor/k8s.io/client-go/pkg/apis/authentication/doc.go rename to vendor/k8s.io/api/apps/v1beta1/doc.go index 194de434d..6047ed501 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/doc.go +++ b/vendor/k8s.io/api/apps/v1beta1/doc.go @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +groupName=authentication.k8s.io -package authentication +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +package v1beta1 // import "k8s.io/api/apps/v1beta1" diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go similarity index 53% rename from vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.pb.go rename to vendor/k8s.io/api/apps/v1beta1/generated.pb.go index 3e215241b..608308259 100644 --- a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,16 +15,18 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto // DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.proto + k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto It has these top-level messages: + ControllerRevision + ControllerRevisionList Deployment DeploymentCondition DeploymentList @@ -34,13 +36,16 @@ limitations under the License. 
DeploymentStrategy RollbackConfig RollingUpdateDeployment + RollingUpdateStatefulSetStrategy Scale ScaleSpec ScaleStatus StatefulSet + StatefulSetCondition StatefulSetList StatefulSetSpec StatefulSetStatus + StatefulSetUpdateStrategy */ package v1beta1 @@ -48,15 +53,16 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" import strings "strings" import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" import io "io" @@ -67,211 +73,189 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m 
*DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } -func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } -func (*RollingUpdateDeployment) ProtoMessage() {} -func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{10} +} + +func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } +func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} +func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{11} +} func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *StatefulSet) Reset() { *m = StatefulSet{} } func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{19} } + +func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } +func (*StatefulSetUpdateStrategy) ProtoMessage() {} +func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{20} +} func init() { - proto.RegisterType((*Deployment)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.Deployment") - proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentCondition") - proto.RegisterType((*DeploymentList)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentList") - proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentRollback") - proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentSpec") - proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentStatus") - proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentStrategy") - proto.RegisterType((*RollbackConfig)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.RollbackConfig") - proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.RollingUpdateDeployment") - proto.RegisterType((*Scale)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.ScaleStatus") - proto.RegisterType((*StatefulSet)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.StatefulSet") - proto.RegisterType((*StatefulSetList)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.StatefulSetList") - proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.StatefulSetSpec") - proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.StatefulSetStatus") + proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta1.ControllerRevision") + proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta1.ControllerRevisionList") + proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1beta1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1beta1.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1beta1.DeploymentList") + proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1beta1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1beta1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1beta1.DeploymentStrategy") + proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.apps.v1beta1.RollbackConfig") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1beta1.RollingUpdateDeployment") + proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1beta1.RollingUpdateStatefulSetStrategy") + proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus") + proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") + proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") + proto.RegisterType((*StatefulSetList)(nil), 
"k8s.io.api.apps.v1beta1.StatefulSetList") + proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta1.StatefulSetSpec") + proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") + proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetUpdateStrategy") } -func (m *Deployment) Marshal() (data []byte, err error) { +func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Deployment) MarshalTo(data []byte) (int, error) { +func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) + n2, err := m.Data.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n2 - data[i] = 0x1a + dAtA[i] = 0x18 i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + return i, nil +} + +func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n3 - return i, nil -} - -func (m *DeploymentCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DeploymentCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastUpdateTime.Size())) - n4, err := m.LastUpdateTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n5, err := m.LastTransitionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - return i, nil -} - -func (m *DeploymentList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - 
-func (m *DeploymentList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -281,412 +265,470 @@ func (m *DeploymentList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *DeploymentRollback) Marshal() (data []byte, err error) { +func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *DeploymentRollback) MarshalTo(data []byte) (int, error) { +func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - if len(m.UpdatedAnnotations) > 0 { - for k := range m.UpdatedAnnotations { - data[i] = 0x12 - i++ - v := m.UpdatedAnnotations[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - data[i] = 0x1a + i += n4 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) - n7, err := m.RollbackTo.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil +} + +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n7, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n7 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n8, err := 
m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 return i, nil } -func (m *DeploymentSpec) Marshal() (data []byte, err error) { +func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *DeploymentSpec) MarshalTo(data []byte) (int, error) { +func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if len(m.UpdatedAnnotations) > 0 { + keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) + for k := range m.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + for _, k := range keysForUpdatedAnnotations { + dAtA[i] = 0x12 + i++ + v := m.UpdatedAnnotations[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) + n10, err := m.RollbackTo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + return i, nil +} + +func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Replicas != nil { - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) } if m.Selector != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n8 - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n9, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n9 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Strategy.Size())) - n10, err := m.Strategy.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n10 - data[i] = 0x28 - i++ - i = 
encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - data[i] = 0x30 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.RevisionHistoryLimit)) - } - data[i] = 0x38 - i++ - if m.Paused { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - if m.RollbackTo != nil { - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) - n11, err := m.RollbackTo.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n11, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n11 } - if m.ProgressDeadlineSeconds != nil { - data[i] = 0x48 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n12, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n13, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 i++ - i = encodeVarintGenerated(data, i, uint64(*m.ProgressDeadlineSeconds)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + dAtA[i] = 0x38 + i++ + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.RollbackTo != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) + n14, err := m.RollbackTo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.ProgressDeadlineSeconds != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) } return i, nil } -func (m *DeploymentStatus) Marshal() (data []byte, err error) { +func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *DeploymentStatus) MarshalTo(data []byte) (int, error) { +func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) - data[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - data[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 i++ - i = encodeVarintGenerated(data, i, uint64(m.UpdatedReplicas)) - data[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x20 i++ - i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) - data[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + dAtA[i] = 0x28 i++ - i = encodeVarintGenerated(data, i, uint64(m.UnavailableReplicas)) + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) if len(m.Conditions) > 0 { for _, msg := range m.Conditions { - data[i] = 0x32 + dAtA[i] = 0x32 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - data[i] = 0x38 + dAtA[i] = 0x38 i++ - i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) + i = 
encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } return i, nil } -func (m *DeploymentStrategy) Marshal() (data []byte, err error) { +func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *DeploymentStrategy) MarshalTo(data []byte) (int, error) { +func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.RollingUpdate.Size())) - n12, err := m.RollingUpdate.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n15, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n15 } return i, nil } -func (m *RollbackConfig) Marshal() (data []byte, err error) { +func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *RollbackConfig) MarshalTo(data []byte) (int, error) { +func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(m.Revision)) + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) return i, nil } -func (m *RollingUpdateDeployment) Marshal() (data []byte, err error) { +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *RollingUpdateDeployment) MarshalTo(data []byte) (int, error) { +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.MaxUnavailable != nil { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) - n13, err := m.MaxUnavailable.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n16, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n13 + i += n16 } if m.MaxSurge != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.MaxSurge.Size())) - n14, err := m.MaxSurge.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) + n17, err := m.MaxSurge.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n17 } return i, nil } -func (m *Scale) Marshal() (data []byte, err error) { +func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return 
dAtA[:n], nil } -func (m *Scale) MarshalTo(data []byte) (int, error) { +func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n15, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err + if m.Partition != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) } - i += n15 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n16, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n16 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n17, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n17 return i, nil } -func (m *ScaleSpec) Marshal() (data []byte, err error) { +func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - return i, nil -} - -func (m *ScaleStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - if len(m.Selector) > 0 { - for k := range m.Selector { - data[i] = 0x12 - i++ - v := m.Selector[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.TargetSelector))) - i += copy(data[i:], m.TargetSelector) - return i, nil -} - -func (m *StatefulSet) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *StatefulSet) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n18, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n18, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n18 - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n19, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n19, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n19 - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n20, err := m.Status.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n20, err := m.Status.MarshalTo(dAtA[i:]) if 
err != nil { return 0, err } @@ -694,35 +736,187 @@ func (m *StatefulSet) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *StatefulSetList) Marshal() (data []byte, err error) { +func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *StatefulSetList) MarshalTo(data []byte) (int, error) { +func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n21, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + return i, nil +} + +func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for _, k := range keysForSelector { + dAtA[i] = 0x12 + i++ + v := m.Selector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i += copy(dAtA[i:], m.TargetSelector) + return i, nil +} + +func (m *StatefulSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n21, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n21 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n22, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n23, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + return i, nil +} + +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.LastTransitionTime.Size())) + n24, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n25, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -732,116 +926,224 @@ func (m *StatefulSetList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *StatefulSetSpec) Marshal() (data []byte, err error) { +func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *StatefulSetSpec) MarshalTo(data []byte) (int, error) { +func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Replicas != nil { - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) } if m.Selector != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n22, err := m.Selector.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n26, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n22 + i += n26 } - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n23, err := m.Template.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n27, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n27 if len(m.VolumeClaimTemplates) > 0 { for _, msg := range m.VolumeClaimTemplates { - data[i] = 0x22 + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - data[i] = 0x2a + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName))) - i += copy(data[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i += copy(dAtA[i:], m.ServiceName) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i += copy(dAtA[i:], m.PodManagementPolicy) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n28, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + 
if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } return i, nil } -func (m *StatefulSetStatus) Marshal() (data []byte, err error) { +func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *StatefulSetStatus) MarshalTo(data []byte) (int, error) { +func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.ObservedGeneration != nil { - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) } - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i += copy(dAtA[i:], m.CurrentRevision) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i += copy(dAtA[i:], m.UpdateRevision) + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n29, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) 
int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintGenerated(data []byte, offset int, v uint64) int { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } +func (m *ControllerRevision) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *ControllerRevisionList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *Deployment) Size() (n int) { var l int _ = l @@ -948,6 +1250,9 @@ func (m *DeploymentStatus) Size() (n int) { } } n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } return n } @@ -984,6 +1289,15 @@ func (m *RollingUpdateDeployment) Size() (n int) { return n } +func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + var l int + _ = l + if m.Partition != nil { + n += 1 + sovGenerated(uint64(*m.Partition)) + } + return n +} + func (m *Scale) Size() (n int) { var l int _ = l @@ -1032,6 +1346,22 @@ func (m *StatefulSet) Size() (n int) { return n } +func (m *StatefulSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *StatefulSetList) Size() (n int) { var l int _ = l @@ -1066,6 +1396,13 @@ func (m *StatefulSetSpec) Size() (n int) { } l = len(m.ServiceName) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodManagementPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } return n } @@ -1076,6 +1413,34 @@ func (m *StatefulSetStatus) Size() (n int) { n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) } n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + l = len(m.CurrentRevision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UpdateRevision) + n += 1 + l + sovGenerated(uint64(l)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetUpdateStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1092,6 +1457,29 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) 
} +func (this *ControllerRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRevision{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerRevisionList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRevisionList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *Deployment) String() string { if this == nil { return "nil" @@ -1159,7 +1547,7 @@ func (this *DeploymentSpec) String() string { s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -1182,6 +1570,7 @@ func (this *DeploymentStatus) String() string { `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, }, "") return s @@ -1218,6 +1607,16 @@ func (this *RollingUpdateDeployment) String() string { }, "") return s } +func (this *RollingUpdateStatefulSetStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`, + `Partition:` + valueToStringGenerated(this.Partition) + `,`, + `}`, + }, "") + return s +} func (this *Scale) String() string { if this == nil { return "nil" @@ -1274,6 +1673,20 @@ func (this *StatefulSet) String() string { }, "") return s } +func (this *StatefulSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this 
*StatefulSetList) String() string { if this == nil { return "nil" @@ -1292,9 +1705,12 @@ func (this *StatefulSetSpec) String() string { s := strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `}`, }, "") return s @@ -1306,6 +1722,24 @@ func (this *StatefulSetStatus) String() string { s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, + `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -1318,8 +1752,8 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *Deployment) Unmarshal(data []byte) error { - l := len(data) +func (m *ControllerRevision) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1331,7 +1765,247 @@ func (m *Deployment) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
ControllerRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevisionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevisionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ControllerRevision{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Deployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1359,7 +2033,7 @@ func (m *Deployment) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1373,7 +2047,7 @@ func (m *Deployment) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1389,7 +2063,7 @@ func (m *Deployment) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1403,7 +2077,7 @@ func (m *Deployment) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1419,7 +2093,7 @@ func (m *Deployment) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1433,13 +2107,13 @@ func (m *Deployment) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1458,8 +2132,8 @@ func (m *Deployment) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentCondition) Unmarshal(data []byte) error { - l := len(data) +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1471,7 +2145,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1499,7 +2173,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= 
(uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1514,7 +2188,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = DeploymentConditionType(data[iNdEx:postIndex]) + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -1528,7 +2202,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1543,7 +2217,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -1557,7 +2231,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1572,7 +2246,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -1586,7 +2260,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1601,7 +2275,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { @@ -1615,7 +2289,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1629,7 +2303,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastUpdateTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1645,7 +2319,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1659,13 +2333,13 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1684,8 +2358,8 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentList) Unmarshal(data []byte) error { - l := len(data) +func (m *DeploymentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1697,7 +2371,7 @@ func (m *DeploymentList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := 
dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1725,7 +2399,7 @@ func (m *DeploymentList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1739,7 +2413,7 @@ func (m *DeploymentList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1755,7 +2429,7 @@ func (m *DeploymentList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1770,13 +2444,13 @@ func (m *DeploymentList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, Deployment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1795,8 +2469,8 @@ func (m *DeploymentList) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentRollback) Unmarshal(data []byte) error { - l := len(data) +func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1808,7 +2482,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1836,7 +2510,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1851,7 +2525,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -1865,7 +2539,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1887,7 +2561,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1902,7 +2576,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1917,52 +2591,57 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue if m.UpdatedAnnotations == nil { m.UpdatedAnnotations = make(map[string]string) } - m.UpdatedAnnotations[mapkey] = mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.UpdatedAnnotations[mapkey] = mapvalue + } else { + var mapvalue string + m.UpdatedAnnotations[mapkey] = mapvalue + } iNdEx = postIndex case 3: if wireType != 2 { @@ -1976,7 +2655,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1990,13 +2669,13 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2015,8 +2694,8 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2028,7 +2707,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2056,7 +2735,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -2076,7 +2755,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2093,7 +2772,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if m.Selector == nil { m.Selector = 
&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2109,7 +2788,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2123,7 +2802,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2139,7 +2818,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2153,7 +2832,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Strategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2169,7 +2848,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -2188,7 +2867,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -2208,7 +2887,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2228,7 +2907,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2245,7 +2924,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if m.RollbackTo == nil { m.RollbackTo = &RollbackConfig{} } - if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2261,7 +2940,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -2271,7 +2950,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { m.ProgressDeadlineSeconds = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2290,8 +2969,8 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2303,7 +2982,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2331,7 +3010,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - 
b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -2350,7 +3029,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -2369,7 +3048,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.UpdatedReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -2388,7 +3067,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.AvailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -2407,7 +3086,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.UnavailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -2426,7 +3105,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2441,7 +3120,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Conditions = append(m.Conditions, DeploymentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2457,16 +3136,36 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2485,8 +3184,8 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentStrategy) Unmarshal(data []byte) error { - l := len(data) +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2498,7 +3197,7 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2526,7 +3225,7 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2541,7 +3240,7 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = DeploymentStrategyType(data[iNdEx:postIndex]) + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2555,7 +3254,7 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { if 
iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2572,13 +3271,13 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { if m.RollingUpdate == nil { m.RollingUpdate = &RollingUpdateDeployment{} } - if err := m.RollingUpdate.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2597,8 +3296,8 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { } return nil } -func (m *RollbackConfig) Unmarshal(data []byte) error { - l := len(data) +func (m *RollbackConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2610,7 +3309,7 @@ func (m *RollbackConfig) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2638,7 +3337,7 @@ func (m *RollbackConfig) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -2647,7 +3346,7 @@ func (m *RollbackConfig) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2666,8 +3365,8 @@ func (m *RollbackConfig) Unmarshal(data []byte) error { } return nil } -func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { - l := len(data) +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2679,7 +3378,7 @@ func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2707,7 +3406,7 @@ func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2724,7 +3423,7 @@ func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { if m.MaxUnavailable == nil { m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } - if err := m.MaxUnavailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2740,7 +3439,7 @@ func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2757,13 +3456,13 @@ func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { if m.MaxSurge == nil { m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } - if err := m.MaxSurge.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2782,8 +3481,8 @@ func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { } return nil } -func (m *Scale) 
Unmarshal(data []byte) error { - l := len(data) +func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2795,7 +3494,77 @@ func (m *Scale) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Partition = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2823,7 +3592,7 @@ func (m *Scale) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2837,7 +3606,7 @@ func (m *Scale) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2853,7 +3622,7 @@ func (m *Scale) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2867,7 +3636,7 @@ func (m *Scale) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2883,7 +3652,7 @@ func (m *Scale) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2897,13 +3666,13 @@ func (m *Scale) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2922,8 +3691,8 @@ func (m *Scale) Unmarshal(data []byte) error { } return nil } -func (m 
*ScaleSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *ScaleSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2935,7 +3704,7 @@ func (m *ScaleSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2963,7 +3732,7 @@ func (m *ScaleSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -2972,7 +3741,7 @@ func (m *ScaleSpec) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2991,8 +3760,8 @@ func (m *ScaleSpec) Unmarshal(data []byte) error { } return nil } -func (m *ScaleStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *ScaleStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3004,7 +3773,7 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3032,7 +3801,7 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -3051,7 +3820,7 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3073,7 +3842,7 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3088,7 +3857,7 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3103,52 +3872,57 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue if m.Selector == nil { m.Selector = make(map[string]string) } - m.Selector[mapkey] = mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue + } iNdEx = postIndex case 3: if wireType != 2 { @@ -3162,7 +3936,7 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3177,11 +3951,11 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TargetSelector = string(data[iNdEx:postIndex]) + m.TargetSelector = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3200,8 +3974,8 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { } return nil } -func (m *StatefulSet) Unmarshal(data []byte) error { - l := len(data) +func (m *StatefulSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3213,7 +3987,7 @@ func (m *StatefulSet) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3241,7 +4015,7 @@ func (m *StatefulSet) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3255,7 +4029,7 @@ func (m *StatefulSet) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3271,7 +4045,7 @@ func (m *StatefulSet) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3285,7 +4059,7 @@ func (m *StatefulSet) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3301,7 +4075,7 @@ func (m *StatefulSet) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3315,13 +4089,13 @@ func (m *StatefulSet) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := 
m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3340,8 +4114,8 @@ func (m *StatefulSet) Unmarshal(data []byte) error { } return nil } -func (m *StatefulSetList) Unmarshal(data []byte) error { - l := len(data) +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3353,7 +4127,203 @@ func (m *StatefulSetList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3381,7 +4351,7 @@ func (m *StatefulSetList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3395,7 +4365,7 @@ func (m *StatefulSetList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3411,7 +4381,7 @@ func (m *StatefulSetList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3426,13 +4396,13 @@ func (m *StatefulSetList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, StatefulSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3451,8 +4421,8 @@ func (m *StatefulSetList) Unmarshal(data []byte) error { } return nil } -func (m *StatefulSetSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3464,7 +4434,7 @@ func (m *StatefulSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3492,7 +4462,7 @@ func (m *StatefulSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -3512,7 +4482,7 @@ func (m *StatefulSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] 
iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3529,7 +4499,7 @@ func (m *StatefulSetSpec) Unmarshal(data []byte) error { if m.Selector == nil { m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3545,7 +4515,7 @@ func (m *StatefulSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3559,7 +4529,7 @@ func (m *StatefulSetSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3575,7 +4545,7 @@ func (m *StatefulSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3589,8 +4559,8 @@ func (m *StatefulSetSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim{}) - if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3606,7 +4576,7 @@ func (m *StatefulSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3621,11 +4591,90 @@ func (m *StatefulSetSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ServiceName = string(data[iNdEx:postIndex]) + m.ServiceName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodManagementPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodManagementPolicy = PodManagementPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3644,8 +4693,8 @@ func (m *StatefulSetSpec) Unmarshal(data []byte) error { } return nil } -func (m *StatefulSetStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3657,7 +4706,7 @@ func (m *StatefulSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3685,7 +4734,7 @@ func (m *StatefulSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -3705,16 +4754,182 @@ func (m *StatefulSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UpdateRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StatefulSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3733,8 +4948,120 @@ func (m *StatefulSetStatus) Unmarshal(data []byte) error { } return nil } -func skipGenerated(data []byte) (n int, err error) { - l := len(data) +func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetUpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateStatefulSetStrategy{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -3745,7 +5072,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3763,7 +5090,7 @@ func skipGenerated(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -3780,7 +5107,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3803,7 +5130,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3814,7 +5141,7 @@ func skipGenerated(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipGenerated(data[start:]) + next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } @@ -3838,102 +5165,127 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -var fileDescriptorGenerated = []byte{ - // 1525 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe4, 0x58, 0xcb, 0x6f, 0x5b, 0xc5, - 0x17, 0xce, 0x4d, 0xec, 0xc4, 0x99, 0x34, 0x4e, 0x33, 0xc9, 0xaf, 0xf1, 0x2f, 0x45, 0x4e, 0xe5, - 0x45, 0x1f, 0xa8, 0xbd, 0xa6, 0x69, 0xa1, 0x8f, 0x40, 0x45, 0xdc, 0x96, 0x52, 0x94, 0xd0, 0x6a, - 0xec, 0x54, 0xb4, 0x14, 0x89, 0xb1, 0x3d, 0xbd, 0x9d, 0xfa, 0xbe, 0x74, 0x67, 0x6c, 0xc5, 0x3b, - 0x36, 0x2c, 0x90, 0x58, 0xb0, 0x62, 0x87, 0xd8, 0x23, 0x24, 0x76, 0xfc, 0x0d, 0x11, 0x6c, 0xba, - 0x44, 0x2c, 0x22, 0xe2, 0xfe, 0x17, 0x5d, 0xa1, 0x99, 0x3b, 0xf7, 0xe5, 0x6b, 0x27, 0x8e, 0x11, - 0xdd, 0xb0, 0xf3, 0x9d, 0x39, 0xdf, 0x77, 0xce, 0xcc, 0x7c, 0xe7, 0xcc, 0x19, 0x83, 0x6b, 0xad, - 0xeb, 0x4c, 0xa7, 0x4e, 0xb9, 0xd5, 0xae, 0x13, 0xcf, 0x26, 0x9c, 0xb0, 0xb2, 0xdb, 0x32, 0xca, - 0xd8, 0xa5, 0xac, 0x8c, 0x5d, 0x97, 0x95, 0x3b, 0x97, 0xeb, 0x84, 0xe3, 0xcb, 0x65, 0x83, 0xd8, - 0xc4, 0xc3, 0x9c, 0x34, 0x75, 0xd7, 0x73, 0xb8, 0x03, 0xcf, 0xf9, 0x40, 0x3d, 0x02, 0xea, 0x6e, - 0xcb, 0xd0, 0x05, 0x50, 0x17, 0x40, 0x5d, 0x01, 0x57, 0x2f, 0x19, 0x94, 0x3f, 0x6f, 0xd7, 0xf5, - 0x86, 0x63, 0x95, 0x0d, 0xc7, 0x70, 0xca, 0x12, 0x5f, 0x6f, 0x3f, 0x93, 0x5f, 0xf2, 0x43, 0xfe, - 0xf2, 0x79, 0x57, 0xaf, 0xaa, 0x80, 0xb0, 0x4b, 0x2d, 0xdc, 0x78, 0x4e, 0x6d, 0xe2, 0x75, 0xa3, - 0x90, 0x2c, 0xc2, 0x71, 0xb9, 0x93, 0x8a, 0x66, 0xb5, 0x3c, 0x0c, 0xe5, 0xb5, 0x6d, 0x4e, 0x2d, - 0x92, 0x02, 0xbc, 0x77, 0x14, 0x80, 0x35, 0x9e, 0x13, 0x0b, 0xa7, 0x70, 0x57, 
0x86, 0xe1, 0xda, - 0x9c, 0x9a, 0x65, 0x6a, 0x73, 0xc6, 0xbd, 0x14, 0x28, 0xb6, 0x26, 0x46, 0xbc, 0x0e, 0xf1, 0xa2, - 0x05, 0x91, 0x5d, 0x6c, 0xb9, 0x26, 0x19, 0xb4, 0xa6, 0x8b, 0x43, 0x8f, 0x66, 0x90, 0xf5, 0x07, - 0x87, 0x1c, 0x24, 0xd9, 0xe5, 0xc4, 0x66, 0xd4, 0xb1, 0x87, 0x1e, 0x67, 0xe9, 0xe7, 0x49, 0x00, - 0xee, 0x10, 0xd7, 0x74, 0xba, 0x16, 0xb1, 0x39, 0xfc, 0x12, 0xe4, 0xc4, 0x56, 0x37, 0x31, 0xc7, - 0x05, 0xed, 0x8c, 0x76, 0x7e, 0x6e, 0xfd, 0x1d, 0x5d, 0x1d, 0x78, 0x7c, 0xe5, 0xd1, 0x91, 0x0b, - 0x6b, 0xbd, 0x73, 0x59, 0x7f, 0x50, 0x7f, 0x41, 0x1a, 0x7c, 0x9b, 0x70, 0x5c, 0x81, 0x7b, 0xfb, - 0x6b, 0x13, 0xbd, 0xfd, 0x35, 0x10, 0x8d, 0xa1, 0x90, 0x15, 0x3e, 0x06, 0x19, 0xe6, 0x92, 0x46, - 0x61, 0x52, 0xb2, 0x5f, 0xd3, 0x47, 0x94, 0x93, 0x1e, 0x05, 0x59, 0x75, 0x49, 0xa3, 0x72, 0x42, - 0x39, 0xc9, 0x88, 0x2f, 0x24, 0x29, 0x21, 0x06, 0xd3, 0x8c, 0x63, 0xde, 0x66, 0x85, 0x29, 0x49, - 0x7e, 0x63, 0x1c, 0x72, 0x49, 0x50, 0xc9, 0x2b, 0xfa, 0x69, 0xff, 0x1b, 0x29, 0xe2, 0xd2, 0xc1, - 0x14, 0x58, 0x8a, 0x8c, 0x6f, 0x3b, 0x76, 0x93, 0x72, 0xea, 0xd8, 0x70, 0x03, 0x64, 0x78, 0xd7, - 0x25, 0x72, 0xcf, 0x66, 0x2b, 0xe7, 0x82, 0xe0, 0x6a, 0x5d, 0x97, 0xbc, 0xde, 0x5f, 0x5b, 0x19, - 0x00, 0x11, 0x53, 0x48, 0x82, 0xe0, 0xa3, 0x30, 0xee, 0x49, 0x09, 0xbf, 0x95, 0x74, 0xfe, 0x7a, - 0x7f, 0xed, 0x50, 0x49, 0xe8, 0x21, 0x67, 0x32, 0x58, 0x78, 0x16, 0x4c, 0x7b, 0x04, 0x33, 0xc7, - 0x2e, 0x64, 0x24, 0x6f, 0xb8, 0x28, 0x24, 0x47, 0x91, 0x9a, 0x85, 0x17, 0xc0, 0x8c, 0x45, 0x18, - 0xc3, 0x06, 0x29, 0x64, 0xa5, 0xe1, 0x82, 0x32, 0x9c, 0xd9, 0xf6, 0x87, 0x51, 0x30, 0x0f, 0x5f, - 0x80, 0xbc, 0x89, 0x19, 0xdf, 0x71, 0x9b, 0x98, 0x93, 0x1a, 0xb5, 0x48, 0x61, 0x5a, 0x6e, 0xf5, - 0xdb, 0xa3, 0xa9, 0x44, 0x20, 0x2a, 0xa7, 0x14, 0x7b, 0x7e, 0x2b, 0xc1, 0x84, 0xfa, 0x98, 0x61, - 0x07, 0x40, 0x31, 0x52, 0xf3, 0xb0, 0xcd, 0xfc, 0x2d, 0x13, 0xfe, 0x66, 0x8e, 0xed, 0x6f, 0x55, - 0xf9, 0x83, 0x5b, 0x29, 0x36, 0x34, 0xc0, 0x43, 0x69, 0x4f, 0x03, 0xf9, 0xe8, 0xc0, 0xb6, 0x28, - 0xe3, 0xf0, 0x69, 0x2a, 0x2d, 0xf4, 0xd1, 0x02, 0x10, 0x68, 0x99, 0x14, 0x27, 0x55, 0x10, 0xb9, - 0x60, 0x24, 0x96, 0x12, 0x9f, 0x81, 0x2c, 0xe5, 0xc4, 0x12, 0xc7, 0x3f, 0x75, 0x7e, 0x6e, 0xfd, - 0xca, 0x18, 0xb2, 0xad, 0xcc, 0x2b, 0xfe, 0xec, 0x7d, 0xc1, 0x84, 0x7c, 0xc2, 0xd2, 0xb7, 0x53, - 0x00, 0x46, 0x46, 0xc8, 0x31, 0xcd, 0x3a, 0x6e, 0xb4, 0xe0, 0x19, 0x90, 0xb1, 0xb1, 0x15, 0xa8, - 0x35, 0x4c, 0xa5, 0x4f, 0xb1, 0x45, 0x90, 0x9c, 0x81, 0x3f, 0x6a, 0x00, 0xb6, 0xe5, 0x51, 0x34, - 0x37, 0x6d, 0xdb, 0xe1, 0x58, 0xec, 0x4e, 0x10, 0x60, 0x75, 0x8c, 0x00, 0x03, 0xdf, 0xfa, 0x4e, - 0x8a, 0xf5, 0xae, 0xcd, 0xbd, 0x6e, 0x74, 0x4a, 0x69, 0x03, 0x34, 0x20, 0x14, 0xd8, 0x02, 0xc0, - 0x53, 0x9c, 0x35, 0x47, 0x25, 0xfc, 0xe8, 0xd5, 0x24, 0x08, 0xe7, 0xb6, 0x63, 0x3f, 0xa3, 0x46, - 0x54, 0xb2, 0x50, 0x48, 0x89, 0x62, 0xf4, 0xab, 0x77, 0xc1, 0xca, 0x90, 0xb8, 0xe1, 0x49, 0x30, - 0xd5, 0x22, 0x5d, 0x7f, 0x2b, 0x91, 0xf8, 0x09, 0x97, 0x41, 0xb6, 0x83, 0xcd, 0x36, 0xf1, 0xb3, - 0x19, 0xf9, 0x1f, 0x37, 0x27, 0xaf, 0x6b, 0xa5, 0x3f, 0xb3, 0x71, 0x65, 0x89, 0xca, 0x05, 0xcf, - 0x83, 0x9c, 0x47, 0x5c, 0x93, 0x36, 0x30, 0x93, 0x1c, 0xd9, 0xca, 0x09, 0xa1, 0x12, 0xa4, 0xc6, - 0x50, 0x38, 0x0b, 0xbf, 0x00, 0x39, 0x46, 0x4c, 0xd2, 0xe0, 0x8e, 0xa7, 0x8a, 0xe7, 0x95, 0x11, - 0x35, 0x88, 0xeb, 0xc4, 0xac, 0x2a, 0xa8, 0x4f, 0x1f, 0x7c, 0xa1, 0x90, 0x12, 0x7e, 0x0e, 0x72, - 0x9c, 0x58, 0xae, 0x89, 0x39, 0x51, 0xbb, 0x79, 0x69, 0xf8, 0x6e, 0x0a, 0xda, 0x87, 0x4e, 0xb3, - 0xa6, 0x00, 0xb2, 0x22, 0x87, 0x0a, 0x0f, 0x46, 0x51, 0x48, 0x08, 0x29, 0xc8, 0x31, 0x2e, 0xae, - 0x1d, 
0xa3, 0x2b, 0x6b, 0xd1, 0xdc, 0xfa, 0xc6, 0x58, 0xb5, 0xd9, 0xa7, 0x88, 0x5c, 0x05, 0x23, - 0x28, 0xa4, 0x87, 0x9b, 0x60, 0xc1, 0xa2, 0x36, 0x22, 0xb8, 0xd9, 0xad, 0x92, 0x86, 0x63, 0x37, - 0x99, 0x2c, 0x6a, 0xd9, 0xca, 0x8a, 0x02, 0x2d, 0x6c, 0x27, 0xa7, 0x51, 0xbf, 0x3d, 0xdc, 0x02, - 0xcb, 0x1e, 0xe9, 0x50, 0x71, 0x71, 0x7e, 0x4c, 0x19, 0x77, 0xbc, 0xee, 0x16, 0xb5, 0x28, 0x97, - 0xa5, 0x2e, 0x5b, 0x29, 0xf4, 0xf6, 0xd7, 0x96, 0xd1, 0x80, 0x79, 0x34, 0x10, 0x25, 0xaa, 0xb0, - 0x8b, 0xdb, 0x8c, 0x34, 0x65, 0xe9, 0xca, 0x45, 0x55, 0xf8, 0xa1, 0x1c, 0x45, 0x6a, 0x16, 0x1a, - 0x09, 0x41, 0xe7, 0xfe, 0x99, 0xa0, 0xf3, 0xc3, 0xc5, 0x0c, 0x77, 0xc0, 0x8a, 0xeb, 0x39, 0x86, - 0x47, 0x18, 0xbb, 0x43, 0x70, 0xd3, 0xa4, 0x36, 0x09, 0x76, 0x6a, 0x56, 0xae, 0xf0, 0x74, 0x6f, - 0x7f, 0x6d, 0xe5, 0xe1, 0x60, 0x13, 0x34, 0x0c, 0x5b, 0xfa, 0x3e, 0x03, 0x4e, 0xf6, 0xdf, 0xa3, - 0xf0, 0x13, 0x00, 0x9d, 0xba, 0xec, 0x7d, 0x9a, 0xf7, 0xfc, 0xce, 0x83, 0x3a, 0xb6, 0x14, 0xfa, - 0x54, 0x94, 0xf1, 0x0f, 0x52, 0x16, 0x68, 0x00, 0x0a, 0x5e, 0x8c, 0xa5, 0xca, 0xa4, 0x0c, 0x34, - 0xd4, 0xc1, 0x80, 0x74, 0xd9, 0x04, 0x0b, 0xaa, 0x6a, 0x04, 0x93, 0x52, 0xd6, 0x31, 0x1d, 0xec, - 0x24, 0xa7, 0x51, 0xbf, 0x3d, 0xbc, 0x07, 0x16, 0x71, 0x07, 0x53, 0x13, 0xd7, 0x4d, 0x12, 0x92, - 0x64, 0x24, 0xc9, 0xff, 0x15, 0xc9, 0xe2, 0x66, 0xbf, 0x01, 0x4a, 0x63, 0xe0, 0x36, 0x58, 0x6a, - 0xdb, 0x69, 0x2a, 0x5f, 0x97, 0xa7, 0x15, 0xd5, 0xd2, 0x4e, 0xda, 0x04, 0x0d, 0xc2, 0x41, 0x17, - 0x80, 0x46, 0x70, 0xe5, 0xb3, 0xc2, 0xb4, 0xac, 0xc9, 0xef, 0x8f, 0x91, 0x4f, 0x61, 0xdf, 0x10, - 0xd5, 0xbf, 0x70, 0x88, 0xa1, 0x98, 0x0f, 0xb8, 0x01, 0xe6, 0x3d, 0x91, 0x21, 0x61, 0xe8, 0x33, - 0x32, 0xf4, 0xff, 0x29, 0xd8, 0x3c, 0x8a, 0x4f, 0xa2, 0xa4, 0x6d, 0xe9, 0x77, 0x2d, 0x7e, 0x09, - 0x05, 0x29, 0x0b, 0x6f, 0x26, 0x5a, 0xa6, 0xb3, 0x7d, 0x2d, 0xd3, 0xa9, 0x34, 0x22, 0xd6, 0x31, - 0x75, 0xc1, 0xbc, 0x10, 0x34, 0xb5, 0x0d, 0xff, 0x10, 0x55, 0x41, 0xfc, 0xf0, 0x58, 0xe9, 0x12, - 0xa2, 0x63, 0xd7, 0xe8, 0xa2, 0x5c, 0x4d, 0x7c, 0x12, 0x25, 0x3d, 0x95, 0x6e, 0x81, 0x7c, 0x32, - 0xd7, 0x7c, 0x5d, 0xfa, 0x89, 0xaf, 0x94, 0x1d, 0xd3, 0xa5, 0x3f, 0x8e, 0x42, 0x8b, 0xd2, 0x2b, - 0x0d, 0xac, 0x0c, 0xf1, 0x0e, 0x4d, 0x90, 0xb7, 0xf0, 0x6e, 0x4c, 0x07, 0x47, 0xf6, 0xe0, 0xe2, - 0xf5, 0xa1, 0xfb, 0xaf, 0x0f, 0xfd, 0xbe, 0xcd, 0x1f, 0x78, 0x55, 0xee, 0x51, 0xdb, 0xa8, 0x40, - 0xd1, 0x5f, 0x6d, 0x27, 0xb8, 0x50, 0x1f, 0x37, 0x7c, 0x02, 0x72, 0x16, 0xde, 0xad, 0xb6, 0x3d, - 0x23, 0xd8, 0xbf, 0xe3, 0xfb, 0x91, 0xb7, 0xc9, 0xb6, 0x62, 0x41, 0x21, 0x5f, 0xe9, 0x87, 0x49, - 0x90, 0xad, 0x36, 0xb0, 0x49, 0xde, 0xc0, 0x8b, 0xa2, 0x96, 0x78, 0x51, 0xac, 0x8f, 0xac, 0x01, - 0x19, 0xdf, 0xd0, 0xc7, 0xc4, 0xd3, 0xbe, 0xc7, 0xc4, 0xd5, 0x63, 0xf2, 0x1e, 0xfe, 0x8e, 0xb8, - 0x01, 0x66, 0x43, 0xf7, 0x89, 0xc2, 0xa6, 0x1d, 0x55, 0xd8, 0x4a, 0x3f, 0x4d, 0x82, 0xb9, 0x98, - 0x8b, 0xe3, 0xa1, 0xa1, 0x9b, 0xe8, 0x22, 0x44, 0xe5, 0xa8, 0x8c, 0xb3, 0x30, 0x3d, 0xe8, 0x20, - 0xfc, 0xe6, 0x2d, 0xba, 0x90, 0xd3, 0x8d, 0xc5, 0x2d, 0x90, 0xe7, 0xd8, 0x33, 0x08, 0x0f, 0xe6, - 0xe4, 0x86, 0xce, 0x46, 0xcf, 0x80, 0x5a, 0x62, 0x16, 0xf5, 0x59, 0xaf, 0x6e, 0x80, 0xf9, 0x84, - 0xb3, 0x63, 0x75, 0x5c, 0xbf, 0x88, 0xcd, 0xe2, 0x98, 0x93, 0x67, 0x6d, 0xb3, 0x4a, 0xde, 0xc4, - 0xfb, 0xf6, 0x49, 0x42, 0x8d, 0xd7, 0x47, 0xdf, 0xdc, 0x28, 0xca, 0xa1, 0x9a, 0xac, 0xf7, 0x69, - 0xf2, 0xe6, 0x58, 0xec, 0x87, 0x2b, 0xf3, 0x37, 0x0d, 0x2c, 0xc4, 0xac, 0xdf, 0xc0, 0xf3, 0xe7, - 0x71, 0xf2, 0xf9, 0x73, 0x75, 0x9c, 0x45, 0x0d, 0x79, 0xff, 0xfc, 0x3a, 0x95, 0x58, 0xcc, 0x7f, - 0xa8, 0xe3, 0xfe, 0x5a, 0x03, 
0xcb, 0x1d, 0xc7, 0x6c, 0x5b, 0xe4, 0xb6, 0x89, 0xa9, 0x15, 0x58, - 0x88, 0xfe, 0xe5, 0x88, 0x37, 0xa6, 0xf4, 0x44, 0x3c, 0x46, 0x19, 0x27, 0x36, 0x7f, 0x14, 0x71, - 0x54, 0xde, 0x52, 0xfe, 0x96, 0x1f, 0x0d, 0x20, 0x46, 0x03, 0xdd, 0xc1, 0x77, 0xc1, 0x9c, 0x68, - 0xe4, 0x68, 0x83, 0x88, 0xd7, 0xa5, 0xfa, 0x7f, 0x61, 0x49, 0x11, 0xcd, 0x55, 0xa3, 0x29, 0x14, - 0xb7, 0x2b, 0x7d, 0xa3, 0x81, 0xc5, 0x94, 0x66, 0xe1, 0x47, 0x87, 0x74, 0x93, 0xa7, 0xfe, 0xad, - 0x4e, 0xb2, 0x72, 0x61, 0xef, 0xa0, 0x38, 0xf1, 0xf2, 0xa0, 0x38, 0xf1, 0xc7, 0x41, 0x71, 0xe2, - 0xab, 0x5e, 0x51, 0xdb, 0xeb, 0x15, 0xb5, 0x97, 0xbd, 0xa2, 0xf6, 0x57, 0xaf, 0xa8, 0x7d, 0xf7, - 0xaa, 0x38, 0xf1, 0x64, 0x46, 0x29, 0xf2, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd6, 0xb9, 0xde, - 0x1a, 0x56, 0x15, 0x00, 0x00, +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 1871 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x52, 0xa4, 0x44, 0x3d, 0x45, 0x94, 0x3d, 0x52, 0x2d, 0x46, 0x69, 0x29, 0x61, 0x1b, + 0x24, 0x72, 0x12, 0x2d, 0x63, 0x25, 0x0d, 0x12, 0xbb, 0x08, 0x2a, 0xca, 0x6e, 0xe2, 0x40, 0xaa, + 0x94, 0xa1, 0x94, 0xa2, 0x69, 0x0b, 0x64, 0xb8, 0x1c, 0xd3, 0x1b, 0xed, 0x3f, 0xec, 0x0e, 0x59, + 0x13, 0xbd, 0xf4, 0x03, 0x14, 0x48, 0xcf, 0xfd, 0x14, 0x3d, 0x16, 0xed, 0xad, 0x27, 0x5f, 0x0a, + 0x04, 0xbd, 0x34, 0x27, 0xa1, 0xa6, 0xaf, 0x6d, 0x6f, 0xbd, 0x18, 0x28, 0x50, 0xcc, 0xec, 0xec, + 0xff, 0x5d, 0x89, 0x2a, 0x20, 0x1d, 0x72, 0xe3, 0xce, 0x7b, 0xef, 0xf7, 0xde, 0xcc, 0xbc, 0xf7, + 0xe6, 0xfd, 0x08, 0x3f, 0x3a, 0x7d, 0xdf, 0xd7, 0x0c, 0xa7, 0x7d, 0x3a, 0xec, 0x51, 0xcf, 0xa6, + 0x8c, 0xfa, 0xed, 0x11, 0xb5, 0xfb, 0x8e, 0xd7, 0x96, 0x02, 0xe2, 0x1a, 0x6d, 0xe2, 0xba, 0x7e, + 0x7b, 0x74, 0xa7, 0x47, 0x19, 0xb9, 0xd3, 0x1e, 0x50, 0x9b, 0x7a, 0x84, 0xd1, 0xbe, 0xe6, 0x7a, + 0x0e, 0x73, 0xd0, 0x5a, 0xa0, 0xa8, 0x11, 0xd7, 0xd0, 0xb8, 0xa2, 0x26, 0x15, 0xd7, 0xb7, 0x07, + 0x06, 0x7b, 0x3c, 0xec, 0x69, 0xba, 0x63, 0xb5, 0x07, 0xce, 0xc0, 0x69, 0x0b, 0xfd, 0xde, 0xf0, + 0x91, 0xf8, 0x12, 0x1f, 0xe2, 0x57, 0x80, 0xb3, 0xae, 0x26, 0x1c, 0xea, 0x8e, 0x47, 0xdb, 0xa3, + 0x9c, 0xaf, 0xf5, 0xdb, 0x09, 0x1d, 0xd7, 0x31, 0x0d, 0x7d, 0x5c, 0x16, 0xd6, 0xfa, 0xbb, 0xb1, + 0xaa, 0x45, 0xf4, 0xc7, 0x86, 0x4d, 0xbd, 0x71, 0xdb, 0x3d, 0x1d, 0xf0, 0x05, 0xbf, 0x6d, 0x51, + 0x46, 0x8a, 0x1c, 0xb4, 0xcb, 0xac, 0xbc, 0xa1, 0xcd, 0x0c, 0x8b, 0xe6, 0x0c, 0xde, 0xbb, 0xc8, + 0xc0, 0xd7, 0x1f, 0x53, 0x8b, 0xe4, 0xec, 0xde, 0x29, 0xb3, 0x1b, 0x32, 0xc3, 0x6c, 0x1b, 0x36, + 0xf3, 0x99, 0x97, 0x35, 0x52, 0xff, 0xa3, 0x00, 0xda, 0x73, 0x6c, 0xe6, 0x39, 0xa6, 0x49, 0x3d, + 0x4c, 0x47, 0x86, 0x6f, 0x38, 0x36, 0xfa, 0x02, 0xea, 0x7c, 0x3f, 0x7d, 0xc2, 0x48, 0x53, 0xd9, + 0x54, 0xb6, 0x16, 0x77, 0xde, 0xd6, 0xe2, 0x4b, 0x89, 0xe0, 0x35, 0xf7, 0x74, 0xc0, 0x17, 0x7c, + 0x8d, 0x6b, 0x6b, 0xa3, 0x3b, 0xda, 0x61, 0xef, 0x4b, 0xaa, 0xb3, 0x03, 0xca, 0x48, 0x07, 0x3d, + 0x3d, 0xdb, 0x98, 0x99, 0x9c, 0x6d, 0x40, 0xbc, 0x86, 0x23, 0x54, 0x74, 0x08, 0x55, 0x81, 0x5e, + 0x11, 0xe8, 0xdb, 0xa5, 0xe8, 0x72, 0xd3, 0x1a, 0x26, 0xbf, 0x7a, 0xf0, 0x84, 0x51, 0x9b, 0x87, + 0xd7, 0x79, 0x49, 0x42, 0x57, 0xef, 0x13, 0x46, 0xb0, 0x00, 0x42, 0x6f, 0x41, 0xdd, 0x93, 0xe1, + 0x37, 0x67, 0x37, 0x95, 0xad, 0xd9, 0xce, 0x0d, 0xa9, 0x55, 0x0f, 0xb7, 0x85, 0x23, 0x0d, 0xf5, + 0xa9, 0x02, 0xb7, 0xf2, 0xfb, 0xde, 0x37, 0x7c, 0x86, 0x7e, 0x91, 0xdb, 0xbb, 0x36, 0xdd, 0xde, 
+ 0xb9, 0xb5, 0xd8, 0x79, 0xe4, 0x38, 0x5c, 0x49, 0xec, 0xfb, 0x08, 0x6a, 0x06, 0xa3, 0x96, 0xdf, + 0xac, 0x6c, 0xce, 0x6e, 0x2d, 0xee, 0xbc, 0xa9, 0x95, 0xe4, 0xba, 0x96, 0x8f, 0xae, 0xb3, 0x24, + 0x71, 0x6b, 0x0f, 0x39, 0x02, 0x0e, 0x80, 0xd4, 0xdf, 0x56, 0x00, 0xee, 0x53, 0xd7, 0x74, 0xc6, + 0x16, 0xb5, 0xd9, 0x35, 0x5c, 0xdd, 0x43, 0xa8, 0xfa, 0x2e, 0xd5, 0xe5, 0xd5, 0xbd, 0x5e, 0xba, + 0x83, 0x38, 0xa8, 0xae, 0x4b, 0xf5, 0xf8, 0xd2, 0xf8, 0x17, 0x16, 0x10, 0xe8, 0x53, 0x98, 0xf3, + 0x19, 0x61, 0x43, 0x5f, 0x5c, 0xd9, 0xe2, 0xce, 0xed, 0x69, 0xc0, 0x84, 0x41, 0xa7, 0x21, 0xe1, + 0xe6, 0x82, 0x6f, 0x2c, 0x81, 0xd4, 0xbf, 0xcf, 0xc2, 0x4a, 0xac, 0xbc, 0xe7, 0xd8, 0x7d, 0x83, + 0xf1, 0x94, 0xbe, 0x07, 0x55, 0x36, 0x76, 0xa9, 0x38, 0x93, 0x85, 0xce, 0xeb, 0x61, 0x30, 0xc7, + 0x63, 0x97, 0xbe, 0x38, 0xdb, 0x58, 0x2b, 0x30, 0xe1, 0x22, 0x2c, 0x8c, 0xd0, 0x7e, 0x14, 0x67, + 0x45, 0x98, 0xbf, 0x9b, 0x76, 0xfe, 0xe2, 0x6c, 0xa3, 0xa0, 0xd7, 0x68, 0x11, 0x52, 0x3a, 0x44, + 0xf4, 0x1a, 0xcc, 0x79, 0x94, 0xf8, 0x8e, 0xdd, 0xac, 0x0a, 0xb4, 0x68, 0x2b, 0x58, 0xac, 0x62, + 0x29, 0x45, 0xb7, 0x61, 0xde, 0xa2, 0xbe, 0x4f, 0x06, 0xb4, 0x59, 0x13, 0x8a, 0xcb, 0x52, 0x71, + 0xfe, 0x20, 0x58, 0xc6, 0xa1, 0x1c, 0x7d, 0x09, 0x0d, 0x93, 0xf8, 0xec, 0xc4, 0xed, 0x13, 0x46, + 0x8f, 0x0d, 0x8b, 0x36, 0xe7, 0xc4, 0x81, 0xbe, 0x31, 0xdd, 0xdd, 0x73, 0x8b, 0xce, 0x2d, 0x89, + 0xde, 0xd8, 0x4f, 0x21, 0xe1, 0x0c, 0x32, 0x1a, 0x01, 0xe2, 0x2b, 0xc7, 0x1e, 0xb1, 0xfd, 0xe0, + 0xa0, 0xb8, 0xbf, 0xf9, 0x4b, 0xfb, 0x5b, 0x97, 0xfe, 0xd0, 0x7e, 0x0e, 0x0d, 0x17, 0x78, 0x50, + 0xff, 0xa8, 0x40, 0x23, 0xbe, 0xa6, 0x6b, 0xa8, 0xd5, 0x8f, 0xd3, 0xb5, 0xfa, 0xfd, 0x29, 0x92, + 0xb3, 0xa4, 0x46, 0xff, 0x59, 0x01, 0x14, 0x2b, 0x61, 0xc7, 0x34, 0x7b, 0x44, 0x3f, 0x45, 0x9b, + 0x50, 0xb5, 0x89, 0x15, 0xe6, 0x64, 0x54, 0x20, 0x3f, 0x21, 0x16, 0xc5, 0x42, 0x82, 0xbe, 0x52, + 0x00, 0x0d, 0xc5, 0xd1, 0xf7, 0x77, 0x6d, 0xdb, 0x61, 0x84, 0x9f, 0x46, 0x18, 0xd0, 0xde, 0x14, + 0x01, 0x85, 0xbe, 0xb4, 0x93, 0x1c, 0xca, 0x03, 0x9b, 0x79, 0xe3, 0xf8, 0x16, 0xf2, 0x0a, 0xb8, + 0xc0, 0x35, 0xfa, 0x39, 0x80, 0x27, 0x31, 0x8f, 0x1d, 0x59, 0xb6, 0xe5, 0x3d, 0x20, 0x74, 0xbf, + 0xe7, 0xd8, 0x8f, 0x8c, 0x41, 0xdc, 0x58, 0x70, 0x04, 0x81, 0x13, 0x70, 0xeb, 0x0f, 0x60, 0xad, + 0x24, 0x4e, 0x74, 0x03, 0x66, 0x4f, 0xe9, 0x38, 0x38, 0x2a, 0xcc, 0x7f, 0xa2, 0x55, 0xa8, 0x8d, + 0x88, 0x39, 0xa4, 0x41, 0x4d, 0xe2, 0xe0, 0xe3, 0x6e, 0xe5, 0x7d, 0x45, 0xfd, 0x43, 0x2d, 0x99, + 0x29, 0xbc, 0xdf, 0xa0, 0x2d, 0xfe, 0x3c, 0xb8, 0xa6, 0xa1, 0x13, 0x5f, 0x60, 0xd4, 0x3a, 0x2f, + 0x05, 0x4f, 0x43, 0xb0, 0x86, 0x23, 0x29, 0xfa, 0x25, 0xd4, 0x7d, 0x6a, 0x52, 0x9d, 0x39, 0x9e, + 0x6c, 0x71, 0xef, 0x4c, 0x99, 0x53, 0xa4, 0x47, 0xcd, 0xae, 0x34, 0x0d, 0xe0, 0xc3, 0x2f, 0x1c, + 0x41, 0xa2, 0x4f, 0xa1, 0xce, 0xa8, 0xe5, 0x9a, 0x84, 0x51, 0x79, 0x7a, 0xa9, 0xbc, 0xe2, 0xbd, + 0x83, 0x83, 0x1d, 0x39, 0xfd, 0x63, 0xa9, 0x26, 0xba, 0x67, 0x94, 0xa7, 0xe1, 0x2a, 0x8e, 0x60, + 0xd0, 0xcf, 0xa0, 0xee, 0x33, 0xfe, 0xaa, 0x0f, 0xc6, 0xa2, 0xa3, 0x9c, 0xf7, 0xac, 0x24, 0xfb, + 0x68, 0x60, 0x12, 0x43, 0x87, 0x2b, 0x38, 0x82, 0x43, 0xbb, 0xb0, 0x6c, 0x19, 0x36, 0xa6, 0xa4, + 0x3f, 0xee, 0x52, 0xdd, 0xb1, 0xfb, 0xbe, 0x68, 0x45, 0xb5, 0xce, 0x9a, 0x34, 0x5a, 0x3e, 0x48, + 0x8b, 0x71, 0x56, 0x1f, 0xed, 0xc3, 0x6a, 0xf8, 0xec, 0x7e, 0x6c, 0xf8, 0xcc, 0xf1, 0xc6, 0xfb, + 0x86, 0x65, 0x30, 0xd1, 0xa0, 0x6a, 0x9d, 0xe6, 0xe4, 0x6c, 0x63, 0x15, 0x17, 0xc8, 0x71, 0xa1, + 0x15, 0xef, 0x9d, 0x2e, 0x19, 0xfa, 0xb4, 0x2f, 0x1a, 0x4e, 0x3d, 0xee, 0x9d, 0x47, 0x62, 0x15, + 0x4b, 0x29, 0xfa, 0x69, 
0x2a, 0x4d, 0xeb, 0x97, 0x4b, 0xd3, 0x46, 0x79, 0x8a, 0xa2, 0x13, 0x58, + 0x73, 0x3d, 0x67, 0xe0, 0x51, 0xdf, 0xbf, 0x4f, 0x49, 0xdf, 0x34, 0x6c, 0x1a, 0x9e, 0xcc, 0x82, + 0xd8, 0xd1, 0x2b, 0x93, 0xb3, 0x8d, 0xb5, 0xa3, 0x62, 0x15, 0x5c, 0x66, 0xab, 0xfe, 0xa5, 0x0a, + 0x37, 0xb2, 0x6f, 0x1c, 0xfa, 0x04, 0x90, 0xd3, 0xf3, 0xa9, 0x37, 0xa2, 0xfd, 0x8f, 0x82, 0xc1, + 0x8d, 0x4f, 0x37, 0x8a, 0x98, 0x6e, 0xa2, 0xba, 0x3d, 0xcc, 0x69, 0xe0, 0x02, 0xab, 0x60, 0x3e, + 0x92, 0x05, 0x50, 0x11, 0x81, 0x26, 0xe6, 0xa3, 0x5c, 0x11, 0xec, 0xc2, 0xb2, 0xac, 0xfd, 0x50, + 0x28, 0x92, 0x35, 0x71, 0xef, 0x27, 0x69, 0x31, 0xce, 0xea, 0xa3, 0x8f, 0xe0, 0x26, 0x19, 0x11, + 0xc3, 0x24, 0x3d, 0x93, 0x46, 0x20, 0x55, 0x01, 0xf2, 0xb2, 0x04, 0xb9, 0xb9, 0x9b, 0x55, 0xc0, + 0x79, 0x1b, 0x74, 0x00, 0x2b, 0x43, 0x3b, 0x0f, 0x15, 0xe4, 0xe1, 0x2b, 0x12, 0x6a, 0xe5, 0x24, + 0xaf, 0x82, 0x8b, 0xec, 0xd0, 0x17, 0x00, 0x7a, 0xf8, 0x30, 0xfb, 0xcd, 0x39, 0xd1, 0x49, 0xdf, + 0x9a, 0xa2, 0x5e, 0xa2, 0xd7, 0x3c, 0xee, 0x62, 0xd1, 0x92, 0x8f, 0x13, 0x98, 0xe8, 0x1e, 0x2c, + 0x79, 0xbc, 0x02, 0xa2, 0x50, 0xe7, 0x45, 0xa8, 0xdf, 0x91, 0x66, 0x4b, 0x38, 0x29, 0xc4, 0x69, + 0x5d, 0x74, 0x17, 0x1a, 0xba, 0x63, 0x9a, 0x22, 0xf3, 0xf7, 0x9c, 0xa1, 0xcd, 0x44, 0xf2, 0xd6, + 0x3a, 0x88, 0xbf, 0xcc, 0x7b, 0x29, 0x09, 0xce, 0x68, 0xaa, 0x7f, 0x56, 0x92, 0xcf, 0x4c, 0x58, + 0xce, 0xe8, 0x6e, 0x6a, 0xf4, 0x79, 0x2d, 0x33, 0xfa, 0xdc, 0xca, 0x5b, 0x24, 0x26, 0x1f, 0x03, + 0x96, 0x78, 0xf2, 0x1b, 0xf6, 0x20, 0xb8, 0x70, 0xd9, 0x12, 0xdf, 0x3e, 0xb7, 0x94, 0x22, 0xed, + 0xc4, 0xc3, 0x78, 0x53, 0xec, 0x3c, 0x29, 0xc4, 0x69, 0x64, 0xf5, 0x43, 0x68, 0xa4, 0xeb, 0x30, + 0x35, 0xd3, 0x2b, 0x17, 0xce, 0xf4, 0xcf, 0x15, 0x58, 0x2b, 0xf1, 0x8e, 0x4c, 0x68, 0x58, 0xe4, + 0x49, 0x22, 0x47, 0x2e, 0x9c, 0x8d, 0x39, 0x6b, 0xd2, 0x02, 0xd6, 0xa4, 0x3d, 0xb4, 0xd9, 0xa1, + 0xd7, 0x65, 0x9e, 0x61, 0x0f, 0x82, 0x7b, 0x38, 0x48, 0x61, 0xe1, 0x0c, 0x36, 0xfa, 0x1c, 0xea, + 0x16, 0x79, 0xd2, 0x1d, 0x7a, 0x83, 0xa2, 0xf3, 0x9a, 0xce, 0x8f, 0x78, 0x3f, 0x0e, 0x24, 0x0a, + 0x8e, 0xf0, 0xd4, 0x43, 0xd8, 0x4c, 0x6d, 0x92, 0xb7, 0x0a, 0xfa, 0x68, 0x68, 0x76, 0x69, 0x7c, + 0xe1, 0x6f, 0xc2, 0x82, 0x4b, 0x3c, 0x66, 0x44, 0xed, 0xa2, 0xd6, 0x59, 0x9a, 0x9c, 0x6d, 0x2c, + 0x1c, 0x85, 0x8b, 0x38, 0x96, 0xab, 0xff, 0x55, 0xa0, 0xd6, 0xd5, 0x89, 0x49, 0xaf, 0x81, 0x3a, + 0xdc, 0x4f, 0x51, 0x07, 0xb5, 0x34, 0x89, 0x44, 0x3c, 0xa5, 0xac, 0x61, 0x3f, 0xc3, 0x1a, 0x5e, + 0xbd, 0x00, 0xe7, 0x7c, 0xc2, 0xf0, 0x01, 0x2c, 0x44, 0xee, 0x52, 0x5d, 0x52, 0xb9, 0xa8, 0x4b, + 0xaa, 0xbf, 0xaf, 0xc0, 0x62, 0xc2, 0xc5, 0xe5, 0xac, 0xf9, 0x71, 0x27, 0x06, 0x0d, 0xde, 0x86, + 0x76, 0xa6, 0xd9, 0x88, 0x16, 0x0e, 0x15, 0xc1, 0xfc, 0x16, 0xbf, 0xde, 0xf9, 0x59, 0xe3, 0x43, + 0x68, 0x30, 0xe2, 0x0d, 0x28, 0x0b, 0x65, 0xe2, 0xc0, 0x16, 0xe2, 0x49, 0xff, 0x38, 0x25, 0xc5, + 0x19, 0xed, 0xf5, 0x7b, 0xb0, 0x94, 0x72, 0x76, 0xa9, 0x21, 0xec, 0x2b, 0x7e, 0x38, 0x71, 0x72, + 0x5e, 0x43, 0x76, 0x7d, 0x92, 0xca, 0xae, 0xad, 0xf2, 0xc3, 0x4c, 0x94, 0x4c, 0x59, 0x8e, 0xe1, + 0x4c, 0x8e, 0xbd, 0x31, 0x15, 0xda, 0xf9, 0x99, 0xf6, 0xaf, 0x0a, 0xac, 0x26, 0xb4, 0x63, 0x6e, + 0xfa, 0xc3, 0x54, 0x83, 0xde, 0xca, 0x34, 0xe8, 0x66, 0x91, 0xcd, 0x95, 0x91, 0xd3, 0x62, 0x76, + 0x37, 0x7b, 0xd5, 0xec, 0xee, 0x0a, 0x48, 0xb1, 0xfa, 0x27, 0x05, 0x96, 0x13, 0x67, 0x77, 0x0d, + 0x8c, 0xf1, 0x61, 0x9a, 0x31, 0xbe, 0x3a, 0x4d, 0xd2, 0x94, 0x50, 0xc6, 0xbf, 0xd6, 0x52, 0xc1, + 0x7f, 0xeb, 0x49, 0xcc, 0xaf, 0x61, 0x75, 0xe4, 0x98, 0x43, 0x8b, 0xee, 0x99, 0xc4, 0xb0, 0x42, + 0x05, 0x3e, 0x31, 0xce, 0x66, 0xff, 0x18, 0x8a, 
0xe0, 0xa9, 0xe7, 0x1b, 0x3e, 0xa3, 0x36, 0xfb, + 0x2c, 0xb6, 0xec, 0x7c, 0x57, 0x3a, 0x59, 0xfd, 0xac, 0x00, 0x0e, 0x17, 0x3a, 0x41, 0x3f, 0x80, + 0x45, 0x3e, 0x30, 0x1b, 0x3a, 0xe5, 0xdc, 0x5b, 0x26, 0xd6, 0x8a, 0x04, 0x5a, 0xec, 0xc6, 0x22, + 0x9c, 0xd4, 0x43, 0x8f, 0x61, 0xc5, 0x75, 0xfa, 0x07, 0xc4, 0x26, 0x03, 0xca, 0xc7, 0x8c, 0x23, + 0xf1, 0x07, 0xb2, 0x60, 0x36, 0x0b, 0x9d, 0xf7, 0xc2, 0xc9, 0xf4, 0x28, 0xaf, 0xf2, 0x82, 0x53, + 0x84, 0xfc, 0xb2, 0x28, 0xea, 0x22, 0x48, 0xe4, 0x41, 0x63, 0x28, 0x9f, 0x7b, 0x49, 0xf4, 0x82, + 0xff, 0x5b, 0x76, 0xa6, 0xc9, 0xb0, 0x93, 0x94, 0x65, 0xdc, 0xfd, 0xd3, 0xeb, 0x38, 0xe3, 0xa1, + 0x94, 0xb8, 0xd5, 0xff, 0x1f, 0xe2, 0xa6, 0xfe, 0xbb, 0x0a, 0x37, 0x73, 0xad, 0x12, 0xfd, 0xf8, + 0x1c, 0x86, 0x73, 0xeb, 0xca, 0xd8, 0x4d, 0x6e, 0x40, 0x9f, 0xbd, 0xc4, 0x80, 0xbe, 0x0b, 0xcb, + 0xfa, 0xd0, 0xf3, 0xa8, 0xcd, 0x32, 0xac, 0x26, 0xa2, 0x46, 0x7b, 0x69, 0x31, 0xce, 0xea, 0x17, + 0xb1, 0xab, 0xda, 0x25, 0xd9, 0x55, 0x32, 0x0a, 0x39, 0x21, 0x07, 0x69, 0x97, 0x8f, 0x42, 0x0e, + 0xca, 0x59, 0x7d, 0x3e, 0x1d, 0x04, 0xa8, 0x11, 0xc2, 0x7c, 0x7a, 0x3a, 0x38, 0x49, 0x49, 0x71, + 0x46, 0xbb, 0x80, 0xa9, 0x2c, 0x4c, 0xcb, 0x54, 0x10, 0x49, 0x91, 0x30, 0x10, 0x35, 0xbe, 0x3d, + 0x4d, 0x2e, 0x4f, 0xcd, 0xc2, 0xd4, 0xbf, 0x29, 0xf0, 0x72, 0x69, 0x11, 0xa0, 0xdd, 0xd4, 0x93, + 0xbb, 0x9d, 0x79, 0x72, 0xbf, 0x57, 0x6a, 0x98, 0x78, 0x77, 0xbd, 0x62, 0x6a, 0xf4, 0xc1, 0x74, + 0xd4, 0xa8, 0x60, 0x6e, 0xbf, 0x98, 0x23, 0x75, 0xb6, 0x9f, 0x3e, 0x6b, 0xcd, 0x7c, 0xfd, 0xac, + 0x35, 0xf3, 0xcd, 0xb3, 0xd6, 0xcc, 0x6f, 0x26, 0x2d, 0xe5, 0xe9, 0xa4, 0xa5, 0x7c, 0x3d, 0x69, + 0x29, 0xdf, 0x4c, 0x5a, 0xca, 0x3f, 0x26, 0x2d, 0xe5, 0x77, 0xcf, 0x5b, 0x33, 0x9f, 0xcf, 0x4b, + 0x8f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x5d, 0x9e, 0x04, 0x8c, 0x1b, 0x00, 0x00, } diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto similarity index 56% rename from vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.proto rename to vendor/k8s.io/api/apps/v1beta1/generated.proto index 8ca77b2e6..ba6134d53 100644 --- a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,19 +19,54 @@ limitations under the License. syntax = 'proto2'; -package k8s.io.kubernetes.pkg.apis.apps.v1beta1; +package k8s.io.api.apps.v1beta1; +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/api/policy/v1beta1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the +// release notes for more information. +// ControllerRevision implements an immutable snapshot of state data. 
Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +message ControllerRevision { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Data is the serialized representation of the state. + optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + + // Revision indicates the revision of the state represented by Data. + optional int64 revision = 3; +} + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +message ControllerRevisionList { + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ControllerRevisions + repeated ControllerRevision items = 2; +} + +// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for +// more information. // Deployment enables declarative updates for Pods and ReplicaSets. message Deployment { // Standard object metadata. @@ -78,6 +113,7 @@ message DeploymentList { repeated Deployment items = 2; } +// DEPRECATED. // DeploymentRollback stores the information required to rollback a deployment. message DeploymentRollback { // Required: This must match the Name of a deployment. @@ -104,7 +140,7 @@ message DeploymentSpec { optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional @@ -126,6 +162,7 @@ message DeploymentSpec { // +optional optional bool paused = 7; + // DEPRECATED. // The config this deployment is rolling back to. Will be cleared after rollback is done. // +optional optional RollbackConfig rollbackTo = 8; @@ -133,10 +170,9 @@ message DeploymentSpec { // The maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Once autoRollback is - // implemented, the deployment controller will automatically rollback failed - // deployments. Note that progress will not be estimated during the time a - // deployment is paused. Defaults to 600s. + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. 
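
The ControllerRevision message introduced above is mirrored by a Go type later in this same patch (vendor/k8s.io/api/apps/v1beta1/types.go). As a rough sketch of the intended shape, not anything taken from the Kubernetes controllers themselves, a revision wraps an opaque runtime.RawExtension payload plus an int64 revision number; the name, namespace, hash value and JSON payload below are made-up placeholders:

package main

import (
	"fmt"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Hypothetical snapshot of controller state. The Data field is opaque to the
	// API server; per the comment above it only becomes immutable after creation.
	rev := appsv1beta1.ControllerRevision{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "web-6b8f5c9d7", // placeholder; the owning controller picks the name format
			Namespace: "default",
			Labels:    map[string]string{appsv1beta1.ControllerRevisionHashLabelKey: "6b8f5c9d7"},
		},
		Data:     runtime.RawExtension{Raw: []byte(`{"spec":{"template":{"metadata":{"labels":{"app":"web"}}}}}`)},
		Revision: 3,
	}
	fmt.Println(rev.Name, rev.Revision)
}
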
+ // +optional optional int32 progressDeadlineSeconds = 9; } @@ -162,12 +198,22 @@ message DeploymentStatus { // +optional optional int32 availableReplicas = 4; - // Total number of unavailable pods targeted by this deployment. + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. // +optional optional int32 unavailableReplicas = 5; // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge repeated DeploymentCondition conditions = 6; + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + optional int32 collisionCount = 8; } // DeploymentStrategy describes how to replace existing pods with new ones. @@ -185,8 +231,9 @@ message DeploymentStrategy { optional RollingUpdateDeployment rollingUpdate = 2; } +// DEPRECATED. message RollbackConfig { - // The revision to rollback to. If set to 0, rollbck to the last revision. + // The revision to rollback to. If set to 0, rollback to the last revision. // +optional optional int64 revision = 1; } @@ -198,9 +245,9 @@ message RollingUpdateDeployment { // Absolute number is calculated from percentage by rounding down. // This can not be 0 if MaxSurge is 0. // Defaults to 25%. - // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods - // immediately when the rolling update starts. Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring + // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional @@ -212,26 +259,33 @@ message RollingUpdateDeployment { // This can not be 0 if MaxUnavailable is 0. // Absolute number is calculated from percentage by rounding up. // Defaults to 25%. - // Example: when this is set to 30%, the new RC can be scaled up immediately when + // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, - // new RC can be scaled up further, ensuring that total number of pods running + // new ReplicaSet can be scaled up further, ensuring that total number of pods running // at any time during the update is atmost 130% of desired pods. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } +// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. +message RollingUpdateStatefulSetStrategy { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + optional int32 partition = 1; +} + // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. 
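
The reworded maxUnavailable/maxSurge comments above keep the same 30% example, now phrased in terms of ReplicaSets. A small sketch of the arithmetic those comments describe, assuming a hypothetical Deployment with 10 desired pods; the real controller resolves IntOrString values with its own helpers, and the manual floor/ceil here only mirrors the rounding rules stated in the comments:

package main

import (
	"fmt"
	"math"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	maxUnavailable := intstr.FromString("30%")
	maxSurge := intstr.FromString("30%")
	ru := appsv1beta1.RollingUpdateDeployment{
		MaxUnavailable: &maxUnavailable,
		MaxSurge:       &maxSurge,
	}

	desired := 10 // hypothetical replica count
	// Per the comments above: absolute maxUnavailable rounds down, maxSurge rounds up.
	minAvailable := desired - int(math.Floor(0.30*float64(desired))) // 10 - 3 = 7 pods (70% of desired)
	maxTotal := desired + int(math.Ceil(0.30*float64(desired)))      // 10 + 3 = 13 pods (130% of desired)

	fmt.Println(ru.MaxUnavailable.String(), ru.MaxSurge.String(), minAvailable, maxTotal)
}
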
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } @@ -257,11 +311,13 @@ message ScaleStatus { // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this // field and map-based selector field are populated. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional optional string targetSelector = 3; } +// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for +// more information. // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: // - Network: A single stable DNS and hostname. @@ -282,6 +338,27 @@ message StatefulSet { optional StatefulSetStatus status = 3; } +// StatefulSetCondition describes the state of a statefulset at a certain point. +message StatefulSetCondition { + // Type of statefulset condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // StatefulSetList is a collection of StatefulSets. message StatefulSetList { // +optional @@ -292,7 +369,7 @@ message StatefulSetList { // A StatefulSetSpec is the specification of a StatefulSet. message StatefulSetSpec { - // Replicas is the desired number of replicas of the given Template. + // replicas is the desired number of replicas of the given Template. // These are replicas in the sense that they are instantiations of the // same Template, but individual replicas also have a consistent identity. // If unspecified, defaults to 1. @@ -300,19 +377,19 @@ message StatefulSetSpec { // +optional optional int32 replicas = 1; - // Selector is a label query over pods that should match the replica count. + // selector is a label query over pods that should match the replica count. // If empty, defaulted to labels on the pod template. 
- // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; - // Template is the object that describes the pod that will be created if + // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest // of the StatefulSet. - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; - // VolumeClaimTemplates is a list of claims that pods are allowed to reference. + // volumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to // claims in a way that maintains the identity of a pod. Every claim in // this list must have at least one matching (by name) volumeMount in one @@ -320,23 +397,88 @@ message StatefulSetSpec { // any volumes in the template, with the same name. // TODO: Define the behavior if a claim already exists with the same name. // +optional - repeated k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; - // ServiceName is the name of the service that governs this StatefulSet. + // serviceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for // the network identity of the set. Pods get DNS/hostnames that follow the // pattern: pod-specific-string.serviceName.default.svc.cluster.local // where "pod-specific-string" is managed by the StatefulSet controller. optional string serviceName = 5; + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + optional string podManagementPolicy = 6; + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + optional StatefulSetUpdateStrategy updateStrategy = 7; + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. + optional int32 revisionHistoryLimit = 8; } // StatefulSetStatus represents the current state of a StatefulSet. message StatefulSetStatus { - // most recent generation observed by this StatefulSet. + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. 
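
The new StatefulSetSpec fields above (podManagementPolicy, updateStrategy, revisionHistoryLimit) correspond to the Go fields added further down in this patch. A hedged sketch of a spec that opts into Parallel pod management and a partitioned rolling update; the service name, labels, image and replica counts are illustrative only, and the partition semantics noted in the comment (ordinals at or above the partition are updated first) follow the StatefulSet documentation rather than anything stated in this hunk:

package main

import (
	"fmt"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	replicas := int32(3)
	partition := int32(2) // only web-2 (ordinals >= 2) picks up template changes first

	spec := appsv1beta1.StatefulSetSpec{
		Replicas:    &replicas,
		ServiceName: "web", // hypothetical headless Service governing the set
		Selector:    &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		Template: v1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{Name: "web", Image: "nginx:1.15"}},
			},
		},
		PodManagementPolicy: appsv1beta1.ParallelPodManagement,
		UpdateStrategy: appsv1beta1.StatefulSetUpdateStrategy{
			Type:          appsv1beta1.RollingUpdateStatefulSetStrategyType,
			RollingUpdate: &appsv1beta1.RollingUpdateStatefulSetStrategy{Partition: &partition},
		},
	}

	fmt.Println(spec.ServiceName, *spec.UpdateStrategy.RollingUpdate.Partition)
}
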
// +optional optional int64 observedGeneration = 1; - // Replicas is the number of actual replicas. + // replicas is the number of Pods created by the StatefulSet controller. optional int32 replicas = 2; + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + optional int32 readyReplicas = 3; + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + optional int32 currentReplicas = 4; + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + optional int32 updatedReplicas = 5; + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). + optional string currentRevision = 6; + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + optional string updateRevision = 7; + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated StatefulSetCondition conditions = 10; +} + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +message StatefulSetUpdateStrategy { + // Type indicates the type of the StatefulSetUpdateStrategy. + optional string type = 1; + + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. + optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; } diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/register.go b/vendor/k8s.io/api/apps/v1beta1/register.go similarity index 78% rename from vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/register.go rename to vendor/k8s.io/api/apps/v1beta1/register.go index 6e618e1d8..5b16819f2 100644 --- a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/register.go +++ b/vendor/k8s.io/api/apps/v1beta1/register.go @@ -34,11 +34,14 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Deployment{}, @@ -47,6 +50,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &Scale{}, &StatefulSet{}, &StatefulSetList{}, + &ControllerRevision{}, + &ControllerRevisionList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go similarity index 54% rename from vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.go rename to vendor/k8s.io/api/apps/v1beta1/types.go index a5675d5d3..d462604d7 100644 --- a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -17,14 +17,16 @@ limitations under the License. package v1beta1 import ( + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/pkg/api/v1" ) const ( - // StatefulSetInitAnnotation if present, and set to false, indicates that a Pod's readiness should be ignored. - StatefulSetInitAnnotation = "pod.alpha.kubernetes.io/initialized" + ControllerRevisionHashLabelKey = "controller-revision-hash" + StatefulSetRevisionLabel = ControllerRevisionHashLabelKey + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" ) // ScaleSpec describes the attributes of a scale subresource @@ -48,32 +50,36 @@ type ScaleStatus struct { // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this // field and map-based selector field are populated. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } -// +genclient=true -// +noMethods=true +// +genclient +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Scale represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for +// more information. 
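
With the register.go hunk above, the group now exposes a plain SchemeBuilder and AddToScheme that also register the ControllerRevision kinds. A minimal sketch of how a consumer wires these types into a runtime.Scheme; the panic-style error handling is illustrative only, not how this repository handles registration:

package main

import (
	"fmt"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme is backed by the localSchemeBuilder added above; it registers
	// Deployment, StatefulSet, Scale, ControllerRevision and their list types.
	if err := appsv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	gvks, _, err := scheme.ObjectKinds(&appsv1beta1.ControllerRevision{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // expect apps/v1beta1, Kind=ControllerRevision
}
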
// StatefulSet represents a set of pods with consistent identities. // Identities are defined as: // - Network: A single stable DNS and hostname. @@ -95,9 +101,60 @@ type StatefulSet struct { Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// PodManagementPolicyType defines the policy for creating pods under a stateful set. +type PodManagementPolicyType string + +const ( + // OrderedReadyPodManagement will create pods in strictly increasing order on + // scale up and strictly decreasing order on scale down, progressing only when + // the previous pod is ready or terminated. At most one pod will be changed + // at any time. + OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady" + // ParallelPodManagement will create and delete pods as soon as the stateful set + // replica count is changed, and will not wait for pods to be ready or complete + // termination. + ParallelPodManagement = "Parallel" +) + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +type StatefulSetUpdateStrategy struct { + // Type indicates the type of the StatefulSetUpdateStrategy. + Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"` + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. + RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +// StatefulSetUpdateStrategyType is a string enumeration type that enumerates +// all possible update strategies for the StatefulSet controller. +type StatefulSetUpdateStrategyType string + +const ( + // RollingUpdateStatefulSetStrategyType indicates that update will be + // applied to all Pods in the StatefulSet with respect to the StatefulSet + // ordering constraints. When a scale operation is performed with this + // strategy, new Pods will be created from the specification version indicated + // by the StatefulSet's updateRevision. + RollingUpdateStatefulSetStrategyType = "RollingUpdate" + // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version + // tracking and ordered rolling restarts are disabled. Pods are recreated + // from the StatefulSetSpec when they are manually deleted. When a scale + // operation is performed with this strategy,specification version indicated + // by the StatefulSet's currentRevision. + OnDeleteStatefulSetStrategyType = "OnDelete" +) + +// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. +type RollingUpdateStatefulSetStrategy struct { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` +} + // A StatefulSetSpec is the specification of a StatefulSet. type StatefulSetSpec struct { - // Replicas is the desired number of replicas of the given Template. + // replicas is the desired number of replicas of the given Template. // These are replicas in the sense that they are instantiations of the // same Template, but individual replicas also have a consistent identity. // If unspecified, defaults to 1. 
@@ -105,19 +162,19 @@ type StatefulSetSpec struct { // +optional Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - // Selector is a label query over pods that should match the replica count. + // selector is a label query over pods that should match the replica count. // If empty, defaulted to labels on the pod template. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` - // Template is the object that describes the pod that will be created if + // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest // of the StatefulSet. Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` - // VolumeClaimTemplates is a list of claims that pods are allowed to reference. + // volumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to // claims in a way that maintains the identity of a pod. Every claim in // this list must have at least one matching (by name) volumeMount in one @@ -127,24 +184,99 @@ type StatefulSetSpec struct { // +optional VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` - // ServiceName is the name of the service that governs this StatefulSet. + // serviceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for // the network identity of the set. Pods get DNS/hostnames that follow the // pattern: pod-specific-string.serviceName.default.svc.cluster.local // where "pod-specific-string" is managed by the StatefulSet controller. ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"` + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"` + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. 
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` } // StatefulSetStatus represents the current state of a StatefulSet. type StatefulSetStatus struct { - // most recent generation observed by this StatefulSet. + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. // +optional ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - // Replicas is the number of actual replicas. + // replicas is the number of Pods created by the StatefulSet controller. Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"` + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"` + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). + CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"` + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"` + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` } +type StatefulSetConditionType string + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. 
+ // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // StatefulSetList is a collection of StatefulSets. type StatefulSetList struct { metav1.TypeMeta `json:",inline"` @@ -153,8 +285,11 @@ type StatefulSetList struct { Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for +// more information. // Deployment enables declarative updates for Pods and ReplicaSets. type Deployment struct { metav1.TypeMeta `json:",inline"` @@ -206,6 +341,7 @@ type DeploymentSpec struct { // +optional Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + // DEPRECATED. // The config this deployment is rolling back to. Will be cleared after rollback is done. // +optional RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` @@ -213,13 +349,15 @@ type DeploymentSpec struct { // The maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Once autoRollback is - // implemented, the deployment controller will automatically rollback failed - // deployments. Note that progress will not be estimated during the time a - // deployment is paused. Defaults to 600s. + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + // +optional ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED. // DeploymentRollback stores the information required to rollback a deployment. type DeploymentRollback struct { metav1.TypeMeta `json:",inline"` @@ -232,16 +370,17 @@ type DeploymentRollback struct { RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"` } +// DEPRECATED. type RollbackConfig struct { - // The revision to rollback to. If set to 0, rollbck to the last revision. + // The revision to rollback to. If set to 0, rollback to the last revision. // +optional Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"` } const ( // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added - // to existing RCs (and label key that is added to its pods) to prevent the existing RCs - // to select new pods (and old pods being select by new RC). + // to existing ReplicaSets (and label key that is added to its pods) to prevent the existing ReplicaSets + // to select new pods (and old pods being select by new ReplicaSet). DefaultDeploymentUniqueLabelKey string = "pod-template-hash" ) @@ -266,7 +405,7 @@ const ( // Kill all existing pods before creating new ones. 
RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" - // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. + // Replace the old ReplicaSets by new one using rolling update i.e gradually scale down the old ReplicaSets and scale up the new one. RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" ) @@ -277,9 +416,9 @@ type RollingUpdateDeployment struct { // Absolute number is calculated from percentage by rounding down. // This can not be 0 if MaxSurge is 0. // Defaults to 25%. - // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods - // immediately when the rolling update starts. Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring + // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional @@ -291,10 +430,10 @@ type RollingUpdateDeployment struct { // This can not be 0 if MaxUnavailable is 0. // Absolute number is calculated from percentage by rounding up. // Defaults to 25%. - // Example: when this is set to 30%, the new RC can be scaled up immediately when + // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, - // new RC can be scaled up further, ensuring that total number of pods running + // new ReplicaSet can be scaled up further, ensuring that total number of pods running // at any time during the update is atmost 130% of desired pods. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` @@ -322,12 +461,22 @@ type DeploymentStatus struct { // +optional AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` - // Total number of unavailable pods targeted by this deployment. + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. // +optional UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"` } type DeploymentConditionType string @@ -352,7 +501,7 @@ type DeploymentCondition struct { // Type of deployment condition. 
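
Given the clarified unavailableReplicas wording and the new collisionCount and condition patch keys carried by this patch, a rough sketch of how a caller might read a DeploymentStatus; the sample numbers are hypothetical and the zero-unavailable check is only a heuristic reading of the comment above, not an official readiness rule:

package main

import (
	"fmt"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
)

func main() {
	// Hypothetical snapshot during a rolling update of a 4-replica Deployment.
	st := appsv1beta1.DeploymentStatus{
		Replicas:            5, // includes the surged Pod
		UpdatedReplicas:     2,
		ReadyReplicas:       3,
		AvailableReplicas:   3,
		UnavailableReplicas: 1, // per the comment above: pods still needed for 100% available capacity
	}

	// One way to read the clarified unavailableReplicas semantics: the Deployment
	// only has full available capacity once this counter drops to zero.
	fmt.Println("at full capacity:", st.UnavailableReplicas == 0) // false

	// If the controller later records a Progressing condition with reason
	// ProgressDeadlineExceeded (see progressDeadlineSeconds above), it shows up here.
	for _, c := range st.Conditions {
		fmt.Println(c.Type, c.Status, c.Reason)
	}
}
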
Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` // The last time this condition was updated. LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` // Last time the condition transitioned from one status to another. @@ -363,6 +512,8 @@ type DeploymentCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // DeploymentList is a list of Deployments. type DeploymentList struct { metav1.TypeMeta `json:",inline"` @@ -373,3 +524,45 @@ type DeploymentList struct { // Items is the list of Deployments. Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the +// release notes for more information. +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +type ControllerRevision struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data is the serialized representation of the state. + Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` + + // Revision indicates the revision of the state represented by Data. + Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. 
+type ControllerRevisionList struct { + metav1.TypeMeta `json:",inline"` + + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ControllerRevisions + Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go similarity index 52% rename from vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 44e9f3e45..68ebef348 100644 --- a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,9 +26,30 @@ package v1beta1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ControllerRevision = map[string]string{ + "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "data": "Data is the serialized representation of the state.", + "revision": "Revision indicates the revision of the state represented by Data.", +} + +func (ControllerRevision) SwaggerDoc() map[string]string { + return map_ControllerRevision +} + +var map_ControllerRevisionList = map[string]string{ + "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of ControllerRevisions", +} + +func (ControllerRevisionList) SwaggerDoc() map[string]string { + return map_ControllerRevisionList +} + var map_Deployment = map[string]string{ - "": "Deployment enables declarative updates for Pods and ReplicaSets.", + "": "DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for more information. 
Deployment enables declarative updates for Pods and ReplicaSets.", "metadata": "Standard object metadata.", "spec": "Specification of the desired behavior of the Deployment.", "status": "Most recently observed status of the Deployment.", @@ -63,7 +84,7 @@ func (DeploymentList) SwaggerDoc() map[string]string { } var map_DeploymentRollback = map[string]string{ - "": "DeploymentRollback stores the information required to rollback a deployment.", + "": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.", "name": "Required: This must match the Name of a deployment.", "updatedAnnotations": "The annotations to be updated to a deployment", "rollbackTo": "The config of this deployment rollback.", @@ -82,8 +103,8 @@ var map_DeploymentSpec = map[string]string{ "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", "paused": "Indicates that the deployment is paused.", - "rollbackTo": "The config this deployment is rolling back to. Will be cleared after rollback is done.", - "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Once autoRollback is implemented, the deployment controller will automatically rollback failed deployments. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", + "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", } func (DeploymentSpec) SwaggerDoc() map[string]string { @@ -97,8 +118,9 @@ var map_DeploymentStatus = map[string]string{ "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", "readyReplicas": "Total number of ready pods targeted by this deployment.", "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", - "unavailableReplicas": "Total number of unavailable pods targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", "conditions": "Represents the latest available observations of a deployment's current state.", + "collisionCount": "Count of hash collisions for the Deployment. 
The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", } func (DeploymentStatus) SwaggerDoc() map[string]string { @@ -116,7 +138,8 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { } var map_RollbackConfig = map[string]string{ - "revision": "The revision to rollback to. If set to 0, rollbck to the last revision.", + "": "DEPRECATED.", + "revision": "The revision to rollback to. If set to 0, rollback to the last revision.", } func (RollbackConfig) SwaggerDoc() map[string]string { @@ -125,19 +148,28 @@ func (RollbackConfig) SwaggerDoc() map[string]string { var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", - "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", } func (RollingUpdateDeployment) SwaggerDoc() map[string]string { return map_RollingUpdateDeployment } +var map_RollingUpdateStatefulSetStrategy = map[string]string{ + "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", + "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned.", +} + +func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { + return map_RollingUpdateStatefulSetStrategy +} + var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -157,7 +189,7 @@ var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } func (ScaleStatus) SwaggerDoc() map[string]string { @@ -165,7 +197,7 @@ func (ScaleStatus) SwaggerDoc() map[string]string { } var map_StatefulSet = map[string]string{ - "": "StatefulSet represents a set of pods with consistent identities. 
Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", "spec": "Spec defines the desired identities of pods in this set.", "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", } @@ -174,6 +206,19 @@ func (StatefulSet) SwaggerDoc() map[string]string { return map_StatefulSet } +var map_StatefulSetCondition = map[string]string{ + "": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "Type of statefulset condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StatefulSetCondition) SwaggerDoc() map[string]string { + return map_StatefulSetCondition +} + var map_StatefulSetList = map[string]string{ "": "StatefulSetList is a collection of StatefulSets.", } @@ -184,11 +229,14 @@ func (StatefulSetList) SwaggerDoc() map[string]string { var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", - "replicas": "Replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", - "selector": "Selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", - "volumeClaimTemplates": "VolumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", - "serviceName": "ServiceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "replicas": "replicas is the desired number of replicas of the given Template. 
These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { @@ -197,12 +245,29 @@ func (StatefulSetSpec) SwaggerDoc() map[string]string { var map_StatefulSetStatus = map[string]string{ "": "StatefulSetStatus represents the current state of a StatefulSet.", - "observedGeneration": "most recent generation observed by this StatefulSet.", - "replicas": "Replicas is the number of actual replicas.", + "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. 
It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", + "replicas": "replicas is the number of Pods created by the StatefulSet controller.", + "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", + "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", + "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", + "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a statefulset's current state.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { return map_StatefulSetStatus } +var map_StatefulSetUpdateStrategy = map[string]string{ + "": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", + "type": "Type indicates the type of the StatefulSetUpdateStrategy.", + "rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", +} + +func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_StatefulSetUpdateStrategy +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..dd37f8dd8 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,658 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Data.DeepCopyInto(&out.Data) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision. +func (in *ControllerRevision) DeepCopy() *ControllerRevision { + if in == nil { + return nil + } + out := new(ControllerRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevision) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerRevision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList. +func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList { + if in == nil { + return nil + } + out := new(ControllerRevisionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevisionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Deployment) DeepCopyInto(out *Deployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. +func (in *Deployment) DeepCopy() *Deployment { + if in == nil { + return nil + } + out := new(Deployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Deployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. +func (in *DeploymentList) DeepCopy() *DeploymentList { + if in == nil { + return nil + } + out := new(DeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.RollbackTo = in.RollbackTo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback. +func (in *DeploymentRollback) DeepCopy() *DeploymentRollback { + if in == nil { + return nil + } + out := new(DeploymentRollback) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentRollback) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.RollbackTo != nil { + in, out := &in.RollbackTo, &out.RollbackTo + if *in == nil { + *out = nil + } else { + *out = new(RollbackConfig) + **out = **in + } + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. +func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. +func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { + if in == nil { + return nil + } + out := new(DeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateDeployment) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig. +func (in *RollbackConfig) DeepCopy() *RollbackConfig { + if in == nil { + return nil + } + out := new(RollbackConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. +func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { + if in == nil { + return nil + } + out := new(RollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { + *out = *in + if in.Partition != nil { + in, out := &in.Partition, &out.Partition + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. 
+func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { + if in == nil { + return nil + } + out := new(RollingUpdateStatefulSetStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scale) DeepCopyInto(out *Scale) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. +func (in *Scale) DeepCopy() *Scale { + if in == nil { + return nil + } + out := new(Scale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scale) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. +func (in *ScaleSpec) DeepCopy() *ScaleSpec { + if in == nil { + return nil + } + out := new(ScaleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. +func (in *ScaleStatus) DeepCopy() *ScaleStatus { + if in == nil { + return nil + } + out := new(ScaleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet. +func (in *StatefulSet) DeepCopy() *StatefulSet { + if in == nil { + return nil + } + out := new(StatefulSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. 
+func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList. +func (in *StatefulSetList) DeepCopy() *StatefulSetList { + if in == nil { + return nil + } + out := new(StatefulSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]core_v1.PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec. +func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec { + if in == nil { + return nil + } + out := new(StatefulSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus. 
+func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus { + if in == nil { + return nil + } + out := new(StatefulSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateStatefulSetStrategy) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. +func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy { + if in == nil { + return nil + } + out := new(StatefulSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/doc.go b/vendor/k8s.io/api/apps/v1beta2/doc.go similarity index 84% rename from vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/doc.go rename to vendor/k8s.io/api/apps/v1beta2/doc.go index a397b30e9..e93e164e1 100644 --- a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/doc.go +++ b/vendor/k8s.io/api/apps/v1beta2/doc.go @@ -14,4 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +package v1beta2 // import "k8s.io/api/apps/v1beta2" diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go new file mode 100644 index 000000000..49f9f8771 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -0,0 +1,7586 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto +// DO NOT EDIT! + +/* + Package v1beta2 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto + + It has these top-level messages: + ControllerRevision + ControllerRevisionList + DaemonSet + DaemonSetCondition + DaemonSetList + DaemonSetSpec + DaemonSetStatus + DaemonSetUpdateStrategy + Deployment + DeploymentCondition + DeploymentList + DeploymentSpec + DeploymentStatus + DeploymentStrategy + ReplicaSet + ReplicaSetCondition + ReplicaSetList + ReplicaSetSpec + ReplicaSetStatus + RollingUpdateDaemonSet + RollingUpdateDeployment + RollingUpdateStatefulSetStrategy + Scale + ScaleSpec + ScaleStatus + StatefulSet + StatefulSetCondition + StatefulSetList + StatefulSetSpec + StatefulSetStatus + StatefulSetUpdateStrategy +*/ +package v1beta2 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{8} } + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{20} +} + +func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } +func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} +func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{21} +} + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } + +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{25} } + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } + +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } + +func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } +func (*StatefulSetUpdateStrategy) ProtoMessage() {} +func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{30} +} + +func init() { + proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta2.ControllerRevision") + proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta2.ControllerRevisionList") + proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1beta2.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1beta2.DaemonSetCondition") + proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1beta2.DaemonSetList") + proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1beta2.DaemonSetSpec") + proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1beta2.DaemonSetStatus") + proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.DaemonSetUpdateStrategy") + proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1beta2.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1beta2.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1beta2.DeploymentList") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1beta2.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1beta2.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1beta2.DeploymentStrategy") + proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.apps.v1beta2.ReplicaSet") + proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetCondition") + proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetList") + proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetSpec") + proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetStatus") + proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.apps.v1beta2.RollingUpdateDaemonSet") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1beta2.RollingUpdateDeployment") + proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1beta2.RollingUpdateStatefulSetStrategy") + proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta2.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta2.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus") + proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet") + 
proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition") + proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList") + proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta2.StatefulSetSpec") + proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus") + proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetUpdateStrategy") +} +func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) + n2, err := m.Data.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + return i, nil +} + +func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil +} + +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) 
+ if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n8, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Selector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n9, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n10, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + return i, nil +} + +func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n 
+ } + } + return i, nil +} + +func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} + +func (m *Deployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n14, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n15, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + return i, nil +} + +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *DeploymentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n18, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeploymentSpec) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n19, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n20, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + dAtA[i] = 0x38 + i++ + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.ProgressDeadlineSeconds != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + } + return i, nil +} + +func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + return i, nil +} + +func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + return i, nil +} + +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSet) MarshalTo(dAtA 
[]byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n24, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n25, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + return i, nil +} + +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n27, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n28, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n29, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + return i, nil +} + +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { 
+ var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + return i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if m.MaxSurge != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) + n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + return i, nil +} + +func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Partition != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + } + return i, nil +} + +func (m *Scale) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n34, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n35, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + return i, nil +} + +func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + return i, nil +} + +func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for _, k := range keysForSelector { + dAtA[i] = 0x12 + i++ + v := m.Selector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i += copy(dAtA[i:], m.TargetSelector) + return i, nil +} + +func (m *StatefulSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n37, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n38, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + return i, nil +} + +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n39, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n40, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n41, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n42, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + if len(m.VolumeClaimTemplates) > 0 { + for _, msg := range m.VolumeClaimTemplates { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i += copy(dAtA[i:], m.ServiceName) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i += copy(dAtA[i:], m.PodManagementPolicy) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n43, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + return i, nil +} + +func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i += copy(dAtA[i:], m.CurrentRevision) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i += copy(dAtA[i:], m.UpdateRevision) + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n44, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ControllerRevision) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *ControllerRevisionList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSetSpec) Size() (n int) { + var l int + _ = l + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + return n +} + +func (m *DaemonSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + 
sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberAvailable)) + n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSetUpdateStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Deployment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n +} + +func (m *DeploymentStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicaSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() 
+ n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicaSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicaSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RollingUpdateDaemonSet) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingUpdateDeployment) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + var l int + _ = l + if m.Partition != nil { + n += 1 + sovGenerated(uint64(*m.Partition)) + } + return n +} + +func (m *Scale) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.TargetSelector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeClaimTemplates) > 0 { + for _, e := range m.VolumeClaimTemplates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodManagementPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + return n +} + +func (m *StatefulSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + l = len(m.CurrentRevision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UpdateRevision) + n += 1 + l + sovGenerated(uint64(l)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetUpdateStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ControllerRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRevision{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerRevisionList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRevisionList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), 
`&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetStatus{`, + `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, + `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, + `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, + `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, + `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, + `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Deployment) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDaemonSet{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateStatefulSetStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`, + `Partition:` + valueToStringGenerated(this.Partition) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, + `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ControllerRevision) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen 
int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevisionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevisionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ControllerRevision{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 
+ for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if 
b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum 
:= int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) + } + m.CurrentNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) + } + m.NumberMisscheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) + } + m.DesiredNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) + } + m.NumberReady = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberReady |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) + } + m.UpdatedNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) + } + m.NumberAvailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberAvailable |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) + } + m.NumberUnavailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberUnavailable |= (int32(b) & 0x7F) << shift + 
if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDaemonSet{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + 
iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Deployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break 
+ } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx 
= preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + 
if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Partition = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Selector == nil { + m.Selector = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StatefulSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodManagementPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodManagementPolicy = PodManagementPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UpdateRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StatefulSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetUpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateStatefulSetStrategy{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: 
negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 2186 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1c, 0xb7, + 0x19, 0xf7, 0xec, 0x43, 0x5a, 0x51, 0x96, 0x64, 0x53, 0xaa, 0xb4, 0x91, 0xdb, 0x95, 0xb1, 0x09, + 0x1c, 0x39, 0xb6, 0x66, 0x6d, 0xe5, 0x81, 0xc4, 0x2e, 0xda, 0x6a, 0xa5, 0xd4, 0x76, 0xa0, 0x57, + 0x28, 0xcb, 0x40, 0x83, 0x16, 0x35, 0xb5, 0x4b, 0xaf, 0x26, 0x9a, 0x17, 0x66, 0x38, 0x5b, 0x2f, + 0x7a, 0xe9, 0xa9, 0x40, 0x81, 0x02, 0x6d, 0xaf, 0xfd, 0x27, 0x7a, 0x2b, 0x8a, 0xf6, 0x56, 0x04, + 0x85, 0x2f, 0x05, 0x82, 0x5e, 0x92, 0x93, 0x50, 0x6f, 0x4e, 0x45, 0xd1, 0x4b, 0x81, 0x5e, 0x02, + 0x14, 0x28, 0xc8, 0xe1, 0x3c, 0x38, 0x0f, 0xef, 0x48, 0xb1, 0x95, 0x22, 0xc8, 0x6d, 0x87, 0xfc, + 0x7d, 0x3f, 0x7e, 0x24, 0xbf, 0x8f, 0xdf, 0x6f, 0x38, 0x0b, 0xbe, 0x77, 0xf4, 0xb6, 0xab, 0x6a, + 0x56, 0xeb, 0xc8, 0x3b, 0x20, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, 0x25, + 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 0x6d, 0xbb, 0xad, 0xfe, 0xcd, 0x03, 0x42, 0xf1, 0x6a, 0xab, 0x47, + 0x4c, 0xe2, 0x60, 0x4a, 0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x0b, 0x3e, 0x50, 0xc5, 0xb6, 0xa6, + 0x32, 0xa0, 0x2a, 0x80, 0x8b, 0x2b, 0x3d, 0x8d, 0x1e, 0x7a, 0x07, 0x6a, 0xc7, 0x32, 0x5a, 0x3d, + 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xc0, 0x7b, 0xc4, 0x9f, 0xf8, 0x03, 0xff, 0xe5, 0xf3, 0x2c, 0x36, + 0x63, 0x03, 0x76, 0x2c, 0x87, 0xb4, 0xfa, 0x37, 0x93, 0x63, 0x2d, 0x5e, 0x8d, 0x61, 0x6c, 0x4b, + 0xd7, 0x3a, 0x03, 0xe1, 0x56, 0x1a, 0xfa, 0x46, 0x04, 0x35, 0x70, 0xe7, 0x50, 0x33, 0x89, 0x33, + 0x68, 0xd9, 0x47, 0x3d, 0xd6, 0xe0, 0xb6, 0x0c, 0x42, 0x71, 0xd6, 0x00, 0xad, 0x3c, 0x2b, 0xc7, + 0x33, 0xa9, 0x66, 0x90, 0x94, 0xc1, 0x5b, 0xa3, 0x0c, 0xdc, 0xce, 0x21, 0x31, 0x70, 0xca, 0xee, + 0xf5, 0x3c, 0x3b, 0x8f, 0x6a, 0x7a, 0x4b, 0x33, 0xa9, 0x4b, 0x9d, 0xa4, 0x51, 0xf3, 0x3f, 0x0a, + 0x80, 0xeb, 0x96, 0x49, 0x1d, 0x4b, 0xd7, 0x89, 0x83, 0x48, 0x5f, 0x73, 0x35, 0xcb, 0x84, 0x0f, + 0x41, 0x8d, 0xcd, 0xa7, 0x8b, 0x29, 0xae, 0x2b, 0x97, 0x95, 0xe5, 0xc9, 0xd5, 0x1b, 0x6a, 0xb4, + 0x29, 0x21, 0xbd, 0x6a, 0x1f, 0xf5, 0x58, 0x83, 0xab, 0x32, 0xb4, 0xda, 0xbf, 0xa9, 0xee, 0x1c, + 0x7c, 0x48, 0x3a, 0x74, 0x8b, 0x50, 0xdc, 0x86, 0x4f, 0x8e, 0x97, 0xce, 0x0d, 0x8f, 0x97, 0x40, + 0xd4, 0x86, 0x42, 0x56, 0xb8, 0x03, 0x2a, 0x9c, 0xbd, 0xc4, 0xd9, 0x57, 0x72, 0xd9, 0xc5, 0xa4, + 0x55, 0x84, 0x7f, 0xf2, 0xee, 0x63, 0x4a, 0x4c, 0xe6, 0x5e, 0xfb, 0xbc, 0xa0, 0xae, 0x6c, 0x60, + 0x8a, 0x11, 0x27, 0x82, 0xd7, 0x41, 0xcd, 0x11, 0xee, 0xd7, 0xcb, 0x97, 0x95, 0xe5, 0x72, 0xfb, + 0x82, 0x40, 0xd5, 0x82, 0x69, 0xa1, 0x10, 0xd1, 0x7c, 0xa2, 0x80, 0xf9, 0xf4, 0xbc, 0x37, 0x35, + 0x97, 0xc2, 0x1f, 0xa6, 0xe6, 0xae, 0x16, 0x9b, 0x3b, 0xb3, 0xe6, 0x33, 0x0f, 0x07, 0x0e, 0x5a, + 0x62, 0xf3, 0xde, 0x05, 0x55, 0x8d, 0x12, 0xc3, 0xad, 0x97, 0x2e, 0x97, 0x97, 0x27, 0x57, 0xaf, + 0xa9, 0x39, 0xb1, 0xae, 0xa6, 0xbd, 0x6b, 0x4f, 0x09, 0xde, 0xea, 0x3d, 0xc6, 0x80, 0x7c, 0xa2, + 0xe6, 0x2f, 0x4a, 0x60, 0x62, 0x03, 0x13, 0xc3, 0x32, 0xf7, 0x08, 0x3d, 0x83, 0x9d, 0xbb, 0x0b, + 0x2a, 0xae, 0x4d, 0x3a, 0x62, 0xe7, 0xae, 0xe4, 0x4e, 0x20, 0xf4, 0x69, 0xcf, 0x26, 0x9d, 0x68, + 0xcb, 0xd8, 0x13, 0xe2, 0x0c, 0x70, 0x17, 0x8c, 0xb9, 0x14, 0x53, 0xcf, 0xe5, 0x1b, 0x36, 0xb9, + 0xba, 0x5c, 0x80, 0x8b, 0xe3, 0xdb, 0xd3, 0x82, 0x6d, 0xcc, 0x7f, 0x46, 0x82, 
0xa7, 0xf9, 0x8f, + 0x12, 0x80, 0x21, 0x76, 0xdd, 0x32, 0xbb, 0x1a, 0x65, 0xe1, 0x7c, 0x0b, 0x54, 0xe8, 0xc0, 0x26, + 0x7c, 0x41, 0x26, 0xda, 0x57, 0x02, 0x57, 0xee, 0x0f, 0x6c, 0xf2, 0xf9, 0xf1, 0xd2, 0x7c, 0xda, + 0x82, 0xf5, 0x20, 0x6e, 0x03, 0x37, 0x43, 0x27, 0x4b, 0xdc, 0xfa, 0x0d, 0x79, 0xe8, 0xcf, 0x8f, + 0x97, 0x32, 0x8e, 0x19, 0x35, 0x64, 0x92, 0x1d, 0x84, 0x7d, 0x00, 0x75, 0xec, 0xd2, 0xfb, 0x0e, + 0x36, 0x5d, 0x7f, 0x24, 0xcd, 0x20, 0x62, 0xfa, 0xaf, 0x15, 0xdb, 0x28, 0x66, 0xd1, 0x5e, 0x14, + 0x5e, 0xc0, 0xcd, 0x14, 0x1b, 0xca, 0x18, 0x01, 0x5e, 0x01, 0x63, 0x0e, 0xc1, 0xae, 0x65, 0xd6, + 0x2b, 0x7c, 0x16, 0xe1, 0x02, 0x22, 0xde, 0x8a, 0x44, 0x2f, 0xbc, 0x0a, 0xc6, 0x0d, 0xe2, 0xba, + 0xb8, 0x47, 0xea, 0x55, 0x0e, 0x9c, 0x11, 0xc0, 0xf1, 0x2d, 0xbf, 0x19, 0x05, 0xfd, 0xcd, 0xdf, + 0x2b, 0x60, 0x2a, 0x5c, 0xb9, 0x33, 0xc8, 0x9c, 0x3b, 0x72, 0xe6, 0x34, 0x47, 0x07, 0x4b, 0x4e, + 0xc2, 0x7c, 0x54, 0x8e, 0x39, 0xce, 0xc2, 0x11, 0xfe, 0x08, 0xd4, 0x5c, 0xa2, 0x93, 0x0e, 0xb5, + 0x1c, 0xe1, 0xf8, 0xeb, 0x05, 0x1d, 0xc7, 0x07, 0x44, 0xdf, 0x13, 0xa6, 0xed, 0xf3, 0xcc, 0xf3, + 0xe0, 0x09, 0x85, 0x94, 0xf0, 0x7d, 0x50, 0xa3, 0xc4, 0xb0, 0x75, 0x4c, 0x89, 0xc8, 0x9a, 0x97, + 0xe3, 0xce, 0xb3, 0x98, 0x61, 0x64, 0xbb, 0x56, 0xf7, 0xbe, 0x80, 0xf1, 0x94, 0x09, 0x17, 0x23, + 0x68, 0x45, 0x21, 0x0d, 0xb4, 0xc1, 0xb4, 0x67, 0x77, 0x19, 0x92, 0xb2, 0xe3, 0xbc, 0x37, 0x10, + 0x31, 0x74, 0x63, 0xf4, 0xaa, 0xec, 0x4b, 0x76, 0xed, 0x79, 0x31, 0xca, 0xb4, 0xdc, 0x8e, 0x12, + 0xfc, 0x70, 0x0d, 0xcc, 0x18, 0x9a, 0x89, 0x08, 0xee, 0x0e, 0xf6, 0x48, 0xc7, 0x32, 0xbb, 0x2e, + 0x0f, 0xa5, 0x6a, 0x7b, 0x41, 0x10, 0xcc, 0x6c, 0xc9, 0xdd, 0x28, 0x89, 0x87, 0x9b, 0x60, 0x2e, + 0x38, 0x80, 0xef, 0x6a, 0x2e, 0xb5, 0x9c, 0xc1, 0xa6, 0x66, 0x68, 0xb4, 0x3e, 0xc6, 0x79, 0xea, + 0xc3, 0xe3, 0xa5, 0x39, 0x94, 0xd1, 0x8f, 0x32, 0xad, 0x9a, 0xbf, 0x19, 0x03, 0x33, 0x89, 0x73, + 0x01, 0x3e, 0x00, 0xf3, 0x1d, 0xcf, 0x71, 0x88, 0x49, 0xb7, 0x3d, 0xe3, 0x80, 0x38, 0x7b, 0x9d, + 0x43, 0xd2, 0xf5, 0x74, 0xd2, 0xe5, 0xdb, 0x5a, 0x6d, 0x37, 0x84, 0xaf, 0xf3, 0xeb, 0x99, 0x28, + 0x94, 0x63, 0x0d, 0xdf, 0x03, 0xd0, 0xe4, 0x4d, 0x5b, 0x9a, 0xeb, 0x86, 0x9c, 0x25, 0xce, 0x19, + 0xa6, 0xe2, 0x76, 0x0a, 0x81, 0x32, 0xac, 0x98, 0x8f, 0x5d, 0xe2, 0x6a, 0x0e, 0xe9, 0x26, 0x7d, + 0x2c, 0xcb, 0x3e, 0x6e, 0x64, 0xa2, 0x50, 0x8e, 0x35, 0x7c, 0x13, 0x4c, 0xfa, 0xa3, 0xf1, 0x35, + 0x17, 0x9b, 0x33, 0x2b, 0xc8, 0x26, 0xb7, 0xa3, 0x2e, 0x14, 0xc7, 0xb1, 0xa9, 0x59, 0x07, 0x2e, + 0x71, 0xfa, 0xa4, 0x7b, 0xc7, 0x17, 0x07, 0xac, 0x82, 0x56, 0x79, 0x05, 0x0d, 0xa7, 0xb6, 0x93, + 0x42, 0xa0, 0x0c, 0x2b, 0x36, 0x35, 0x3f, 0x6a, 0x52, 0x53, 0x1b, 0x93, 0xa7, 0xb6, 0x9f, 0x89, + 0x42, 0x39, 0xd6, 0x2c, 0xf6, 0x7c, 0x97, 0xd7, 0xfa, 0x58, 0xd3, 0xf1, 0x81, 0x4e, 0xea, 0xe3, + 0x72, 0xec, 0x6d, 0xcb, 0xdd, 0x28, 0x89, 0x87, 0x77, 0xc0, 0x45, 0xbf, 0x69, 0xdf, 0xc4, 0x21, + 0x49, 0x8d, 0x93, 0xbc, 0x24, 0x48, 0x2e, 0x6e, 0x27, 0x01, 0x28, 0x6d, 0x03, 0x6f, 0x81, 0xe9, + 0x8e, 0xa5, 0xeb, 0x3c, 0x1e, 0xd7, 0x2d, 0xcf, 0xa4, 0xf5, 0x09, 0xce, 0x02, 0x59, 0x0e, 0xad, + 0x4b, 0x3d, 0x28, 0x81, 0x84, 0x3f, 0x06, 0xa0, 0x13, 0x14, 0x06, 0xb7, 0x0e, 0x46, 0x28, 0x80, + 0x74, 0x59, 0x8a, 0x2a, 0x73, 0xd8, 0xe4, 0xa2, 0x18, 0x65, 0xf3, 0x23, 0x05, 0x2c, 0xe4, 0x24, + 0x3a, 0xfc, 0xae, 0x54, 0x04, 0xaf, 0x25, 0x8a, 0xe0, 0xa5, 0x1c, 0xb3, 0x58, 0x25, 0x3c, 0x04, + 0x53, 0x4c, 0x90, 0x68, 0x66, 0xcf, 0x87, 0x88, 0xb3, 0xac, 0x95, 0x3b, 0x01, 0x14, 0x47, 0x47, + 0xa7, 0xf2, 0xc5, 0xe1, 0xf1, 0xd2, 0x94, 0xd4, 0x87, 0x64, 0xe2, 0xe6, 0x2f, 0x4b, 0x00, 0x6c, + 0x10, 
0x5b, 0xb7, 0x06, 0x06, 0x31, 0xcf, 0x42, 0xd3, 0xdc, 0x93, 0x34, 0xcd, 0xab, 0xf9, 0x5b, + 0x12, 0x3a, 0x95, 0x2b, 0x6a, 0xde, 0x4f, 0x88, 0x9a, 0xab, 0x45, 0xc8, 0x9e, 0xad, 0x6a, 0x3e, + 0x29, 0x83, 0xd9, 0x08, 0x1c, 0xc9, 0x9a, 0xdb, 0xd2, 0x8e, 0xbe, 0x9a, 0xd8, 0xd1, 0x85, 0x0c, + 0x93, 0x17, 0xa6, 0x6b, 0x9e, 0xbf, 0xbe, 0x80, 0x1f, 0x82, 0x69, 0x26, 0x64, 0xfc, 0x90, 0xe0, + 0x32, 0x69, 0xec, 0xc4, 0x32, 0x29, 0x2c, 0x6e, 0x9b, 0x12, 0x13, 0x4a, 0x30, 0xe7, 0xc8, 0xb2, + 0xf1, 0x17, 0x2d, 0xcb, 0x9a, 0x7f, 0x50, 0xc0, 0x74, 0xb4, 0x4d, 0x67, 0x20, 0xa2, 0xee, 0xca, + 0x22, 0xea, 0xe5, 0x02, 0xc1, 0x99, 0xa3, 0xa2, 0x3e, 0xa9, 0xc4, 0x5d, 0xe7, 0x32, 0x6a, 0x99, + 0xbd, 0x82, 0xd9, 0xba, 0xd6, 0xc1, 0xae, 0xa8, 0xb7, 0xe7, 0xfd, 0xd7, 0x2f, 0xbf, 0x0d, 0x85, + 0xbd, 0x92, 0xe0, 0x2a, 0xbd, 0x58, 0xc1, 0x55, 0x7e, 0x3e, 0x82, 0xeb, 0x07, 0xa0, 0xe6, 0x06, + 0x52, 0xab, 0xc2, 0x29, 0xaf, 0x15, 0x4a, 0x6c, 0xa1, 0xb2, 0x42, 0xea, 0x50, 0x5f, 0x85, 0x74, + 0x59, 0xca, 0xaa, 0xfa, 0x65, 0x2a, 0x2b, 0x96, 0xcc, 0x36, 0xf6, 0x5c, 0xd2, 0xe5, 0x19, 0x50, + 0x8b, 0x92, 0x79, 0x97, 0xb7, 0x22, 0xd1, 0x0b, 0xf7, 0xc1, 0x82, 0xed, 0x58, 0x3d, 0x87, 0xb8, + 0xee, 0x06, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x30, 0x01, 0xbf, 0x26, 0x5e, 0x1a, 0x1e, 0x2f, 0x2d, + 0xec, 0x66, 0x43, 0x50, 0x9e, 0x6d, 0xf3, 0xcf, 0x15, 0x70, 0x21, 0x79, 0x36, 0xe6, 0xc8, 0x14, + 0xe5, 0x54, 0x32, 0xe5, 0x7a, 0x2c, 0x4e, 0x7d, 0x0d, 0x17, 0xbb, 0x2a, 0x48, 0xc5, 0xea, 0x1a, + 0x98, 0x11, 0xb2, 0x24, 0xe8, 0x14, 0x42, 0x2d, 0xdc, 0x9e, 0x7d, 0xb9, 0x1b, 0x25, 0xf1, 0x4c, + 0x7c, 0x44, 0x9a, 0x22, 0x20, 0xa9, 0xc8, 0xe2, 0x63, 0x2d, 0x09, 0x40, 0x69, 0x1b, 0xb8, 0x05, + 0x66, 0x3d, 0x33, 0x4d, 0xe5, 0x87, 0xcb, 0x25, 0x41, 0x35, 0xbb, 0x9f, 0x86, 0xa0, 0x2c, 0x3b, + 0xf8, 0x50, 0xd2, 0x23, 0x63, 0xfc, 0x48, 0xb8, 0x5e, 0x20, 0xac, 0x0b, 0x0b, 0x12, 0x78, 0x1b, + 0x4c, 0x39, 0x5c, 0x73, 0x06, 0xae, 0xfa, 0xba, 0xed, 0x1b, 0xc2, 0x6c, 0x0a, 0xc5, 0x3b, 0x91, + 0x8c, 0xcd, 0x90, 0x5a, 0xb5, 0xa2, 0x52, 0xab, 0xf9, 0x27, 0x05, 0xc0, 0x74, 0x1e, 0x8e, 0xbc, + 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0x6e, 0x14, 0xd4, 0x3f, 0xd1, 0x81, 0x5a, + 0x4c, 0x00, 0x89, 0x65, 0x38, 0x9b, 0x4b, 0x9d, 0xa2, 0x02, 0x28, 0x72, 0xea, 0x39, 0x08, 0xa0, + 0x18, 0xd9, 0xb3, 0x05, 0xd0, 0x3f, 0x4b, 0x60, 0x36, 0x02, 0x17, 0x16, 0x40, 0x19, 0x26, 0x5f, + 0x5f, 0xec, 0x8c, 0xbe, 0xd8, 0x61, 0xa2, 0x24, 0x5a, 0xba, 0xff, 0x27, 0x51, 0x12, 0x79, 0x95, + 0x23, 0x4a, 0x7e, 0x57, 0x8a, 0xbb, 0xfe, 0x95, 0x17, 0x25, 0x5f, 0xfc, 0x4e, 0xa6, 0xf9, 0x97, + 0x32, 0xb8, 0x90, 0xcc, 0x43, 0xa9, 0x40, 0x2a, 0x23, 0x0b, 0xe4, 0x2e, 0x98, 0x7b, 0xe4, 0xe9, + 0xfa, 0x80, 0x2f, 0x43, 0xac, 0x4a, 0xfa, 0xa5, 0xf5, 0x9b, 0xc2, 0x72, 0xee, 0xfb, 0x19, 0x18, + 0x94, 0x69, 0x99, 0x53, 0xec, 0xcb, 0xa7, 0x2a, 0xf6, 0xa9, 0x0a, 0x54, 0x39, 0x41, 0x05, 0xca, + 0x2c, 0xdc, 0xd5, 0x53, 0x14, 0xee, 0x93, 0x55, 0xda, 0x8c, 0x83, 0x6b, 0xe4, 0xab, 0xff, 0xcf, + 0x15, 0x30, 0x9f, 0xfd, 0xc2, 0x0d, 0x75, 0x30, 0x6d, 0xe0, 0xc7, 0xf1, 0x8b, 0x8f, 0x51, 0x45, + 0xc4, 0xa3, 0x9a, 0xae, 0xfa, 0x9f, 0x8c, 0xd4, 0x7b, 0x26, 0xdd, 0x71, 0xf6, 0xa8, 0xa3, 0x99, + 0x3d, 0xbf, 0xf2, 0x6e, 0x49, 0x5c, 0x28, 0xc1, 0xdd, 0xfc, 0x4c, 0x01, 0x0b, 0x39, 0x95, 0xef, + 0x6c, 0x3d, 0x81, 0x1f, 0x80, 0x9a, 0x81, 0x1f, 0xef, 0x79, 0x4e, 0x2f, 0xab, 0x56, 0x17, 0x1b, + 0x87, 0x67, 0xf3, 0x96, 0x60, 0x41, 0x21, 0x5f, 0x73, 0x07, 0x5c, 0x96, 0x26, 0xc9, 0x32, 0x87, + 0x3c, 0xf2, 0x74, 0x9e, 0x44, 0x42, 0x6c, 0x5c, 0x03, 0x13, 0x36, 0x76, 0xa8, 0x16, 0x4a, 0xd5, + 0x6a, 0x7b, 0x6a, 0x78, 0xbc, 
0x34, 0xb1, 0x1b, 0x34, 0xa2, 0xa8, 0xbf, 0xf9, 0x5f, 0x05, 0x54, + 0xf7, 0x3a, 0x58, 0x27, 0x67, 0x50, 0xed, 0x37, 0xa4, 0x6a, 0x9f, 0x7f, 0x93, 0xce, 0xfd, 0xc9, + 0x2d, 0xf4, 0x9b, 0x89, 0x42, 0xff, 0xca, 0x08, 0x9e, 0x67, 0xd7, 0xf8, 0x77, 0xc0, 0x44, 0x38, + 0xdc, 0xc9, 0x0e, 0xa0, 0xe6, 0x6f, 0x4b, 0x60, 0x32, 0x36, 0xc4, 0x09, 0x8f, 0xaf, 0x87, 0xd2, + 0xb1, 0xcf, 0x12, 0x73, 0xb5, 0xc8, 0x44, 0xd4, 0xe0, 0x88, 0x7f, 0xd7, 0xa4, 0x4e, 0xfc, 0x05, + 0x2f, 0x7d, 0xf2, 0x7f, 0x07, 0x4c, 0x53, 0xec, 0xf4, 0x08, 0x0d, 0xfa, 0xf8, 0x82, 0x4d, 0x44, + 0xb7, 0x13, 0xf7, 0xa5, 0x5e, 0x94, 0x40, 0x2f, 0xde, 0x06, 0x53, 0xd2, 0x60, 0xf0, 0x02, 0x28, + 0x1f, 0x91, 0x81, 0x2f, 0x7b, 0x10, 0xfb, 0x09, 0xe7, 0x40, 0xb5, 0x8f, 0x75, 0xcf, 0x8f, 0xf3, + 0x09, 0xe4, 0x3f, 0xdc, 0x2a, 0xbd, 0xad, 0x34, 0x7f, 0xc5, 0x16, 0x27, 0x0a, 0xce, 0x33, 0x88, + 0xae, 0xf7, 0xa4, 0xe8, 0xca, 0xff, 0xa8, 0x17, 0x4f, 0x99, 0xbc, 0x18, 0x43, 0x89, 0x18, 0x7b, + 0xad, 0x10, 0xdb, 0xb3, 0x23, 0xed, 0x5f, 0x25, 0x30, 0x17, 0x43, 0x47, 0x72, 0xf2, 0xdb, 0x92, + 0x9c, 0x5c, 0x4e, 0xc8, 0xc9, 0x7a, 0x96, 0xcd, 0xd7, 0x7a, 0x72, 0xb4, 0x9e, 0xfc, 0xa3, 0x02, + 0x66, 0x62, 0x6b, 0x77, 0x06, 0x82, 0xf2, 0x9e, 0x2c, 0x28, 0x5f, 0x29, 0x12, 0x34, 0x39, 0x8a, + 0xf2, 0xaf, 0x55, 0xc9, 0xf9, 0xaf, 0xbc, 0xa4, 0xfc, 0x29, 0x98, 0xeb, 0x5b, 0xba, 0x67, 0x90, + 0x75, 0x1d, 0x6b, 0x46, 0x00, 0x60, 0xaa, 0xa9, 0x9c, 0x7c, 0x97, 0x0b, 0xe9, 0x89, 0xe3, 0x6a, + 0x2e, 0x25, 0x26, 0x7d, 0x10, 0x59, 0x46, 0xba, 0xef, 0x41, 0x06, 0x1d, 0xca, 0x1c, 0x04, 0xbe, + 0x09, 0x26, 0x99, 0x7e, 0xd3, 0x3a, 0x64, 0x1b, 0x1b, 0x41, 0x60, 0x85, 0x9f, 0xb0, 0xf6, 0xa2, + 0x2e, 0x14, 0xc7, 0xc1, 0x43, 0x30, 0x6b, 0x5b, 0xdd, 0x2d, 0x6c, 0xe2, 0x1e, 0x61, 0x32, 0x63, + 0x97, 0xff, 0x8f, 0x87, 0x5f, 0x7e, 0x4d, 0xb4, 0xdf, 0x0a, 0x6e, 0x45, 0x76, 0xd3, 0x10, 0xf6, + 0x92, 0x98, 0xd1, 0xcc, 0x93, 0x3a, 0x8b, 0x12, 0x3a, 0xa9, 0xcf, 0xae, 0xfe, 0x1d, 0xf1, 0x6a, + 0x91, 0x08, 0x3b, 0xe5, 0x87, 0xd7, 0xbc, 0xbb, 0xbd, 0xda, 0xa9, 0xbe, 0x9a, 0xfe, 0xbb, 0x02, + 0x2e, 0xa6, 0x8e, 0xca, 0x2f, 0xf1, 0x76, 0x2d, 0x25, 0xcf, 0xcb, 0x27, 0x90, 0xe7, 0x6b, 0x60, + 0x46, 0x7c, 0xb0, 0x4d, 0xa8, 0xfb, 0xf0, 0xfd, 0x67, 0x5d, 0xee, 0x46, 0x49, 0x7c, 0xd6, 0xed, + 0x5e, 0xf5, 0x84, 0xb7, 0x7b, 0x71, 0x2f, 0xc4, 0x1f, 0x90, 0xfc, 0xd0, 0x4b, 0x7b, 0x21, 0xfe, + 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0xe3, 0xb2, 0x42, 0xd8, 0x97, 0x7a, 0x51, + 0x02, 0xfd, 0x85, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0x2b, 0x45, 0xe2, 0xb9, 0xf8, 0xbb, 0xc9, + 0xdf, 0x14, 0xf0, 0x52, 0x6e, 0x22, 0xc0, 0x35, 0xa9, 0xec, 0xae, 0x24, 0xca, 0xee, 0xb7, 0x72, + 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0xbe, 0x9a, 0x7b, 0xa7, 0xd8, 0xd5, 0x5c, 0x86, 0x76, 0x1f, 0x7d, + 0x47, 0xd7, 0x5e, 0x79, 0xf2, 0xb4, 0x71, 0xee, 0xe3, 0xa7, 0x8d, 0x73, 0x9f, 0x3e, 0x6d, 0x9c, + 0xfb, 0xd9, 0xb0, 0xa1, 0x3c, 0x19, 0x36, 0x94, 0x8f, 0x87, 0x0d, 0xe5, 0xd3, 0x61, 0x43, 0xf9, + 0xfb, 0xb0, 0xa1, 0xfc, 0xfa, 0xb3, 0xc6, 0xb9, 0x0f, 0xc6, 0xc5, 0x88, 0xff, 0x0b, 0x00, 0x00, + 0xff, 0xff, 0xe4, 0x8f, 0x6a, 0x57, 0x17, 0x29, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto new file mode 100644 index 000000000..7327c3369 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -0,0 +1,752 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.apps.v1beta2; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/api/policy/v1beta1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta2"; + +// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the +// release notes for more information. +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +message ControllerRevision { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Data is the serialized representation of the state. + optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + + // Revision indicates the revision of the state represented by Data. + optional int64 revision = 3; +} + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +message ControllerRevisionList { + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ControllerRevisions + repeated ControllerRevision items = 2; +} + +// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for +// more information. +// DaemonSet represents the configuration of a daemon set. +message DaemonSet { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The desired behavior of this daemon set. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional DaemonSetSpec spec = 2; + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional DaemonSetStatus status = 3; +} + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +message DaemonSetCondition { + // Type of DaemonSet condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// DaemonSetList is a collection of daemon sets. +message DaemonSetList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // A list of daemon sets. + repeated DaemonSet items = 2; +} + +// DaemonSetSpec is the specification of a daemon set. +message DaemonSetSpec { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + optional DaemonSetUpdateStrategy updateStrategy = 3; + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + optional int32 minReadySeconds = 4; + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + optional int32 revisionHistoryLimit = 6; +} + +// DaemonSetStatus represents the current status of a daemon set. +message DaemonSetStatus { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 currentNumberScheduled = 1; + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 numberMisscheduled = 2; + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). 
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 desiredNumberScheduled = 3; + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + optional int32 numberReady = 4; + + // The most recent generation observed by the daemon set controller. + // +optional + optional int64 observedGeneration = 5; + + // The total number of nodes that are running updated daemon pod + // +optional + optional int32 updatedNumberScheduled = 6; + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberAvailable = 7; + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberUnavailable = 8; + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated DaemonSetCondition conditions = 10; +} + +// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. +message DaemonSetUpdateStrategy { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if type = "RollingUpdate". + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as Deployment `strategy.rollingUpdate`. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + optional RollingUpdateDaemonSet rollingUpdate = 2; +} + +// DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for +// more information. +// Deployment enables declarative updates for Pods and ReplicaSets. +message Deployment { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Deployment. + // +optional + optional DeploymentSpec spec = 2; + + // Most recently observed status of the Deployment. + // +optional + optional DeploymentStatus status = 3; +} + +// DeploymentCondition describes the state of a deployment at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // Last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// DeploymentList is a list of Deployments. +message DeploymentList { + // Standard list metadata. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Deployments. + repeated Deployment items = 2; +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +message DeploymentSpec { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + optional int32 replicas = 1; + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // It must match the pod template's labels. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template describes the pods that will be created. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + optional DeploymentStrategy strategy = 4; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 5; + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + optional int32 revisionHistoryLimit = 6; + + // Indicates that the deployment is paused. + // +optional + optional bool paused = 7; + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + optional int32 progressDeadlineSeconds = 9; +} + +// DeploymentStatus is the most recently observed status of the Deployment. +message DeploymentStatus { + // The generation observed by the deployment controller. + // +optional + optional int64 observedGeneration = 1; + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + optional int32 replicas = 2; + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + optional int32 updatedReplicas = 3; + + // Total number of ready pods targeted by this deployment. + // +optional + optional int32 readyReplicas = 7; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + optional int32 availableReplicas = 4; + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + optional int32 unavailableReplicas = 5; + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated DeploymentCondition conditions = 6; + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. 
+ // +optional + optional int32 collisionCount = 8; +} + +// DeploymentStrategy describes how to replace existing pods with new ones. +message DeploymentStrategy { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + optional RollingUpdateDeployment rollingUpdate = 2; +} + +// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for +// more information. +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +message ReplicaSet { + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetSpec spec = 2; + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetStatus status = 3; +} + +// ReplicaSetCondition describes the state of a replica set at a certain point. +message ReplicaSetCondition { + // Type of replica set condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// ReplicaSetList is a collection of ReplicaSets. +message ReplicaSetList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ReplicaSets. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + repeated ReplicaSet items = 2; +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +message ReplicaSetSpec { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // +optional + optional int32 replicas = 1; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. 
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ optional int32 minReadySeconds = 4;
+
+ // Selector is a label query over pods that should match the replica count.
+ // Label keys and values that must match in order to be controlled by this replica set.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ // +optional
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+message ReplicaSetStatus {
+ // Replicas is the most recently observed number of replicas.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ optional int32 replicas = 1;
+
+ // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ // +optional
+ optional int32 fullyLabeledReplicas = 2;
+
+ // The number of ready replicas for this replica set.
+ // +optional
+ optional int32 readyReplicas = 4;
+
+ // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+ // +optional
+ optional int32 availableReplicas = 5;
+
+ // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+ // +optional
+ optional int64 observedGeneration = 3;
+
+ // Represents the latest available observations of a replica set's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated ReplicaSetCondition conditions = 6;
+}
+
+// Spec to control the desired behavior of daemon set rolling update.
+message RollingUpdateDaemonSet {
+ // The maximum number of DaemonSet pods that can be unavailable during the
+ // update. Value can be an absolute number (ex: 5) or a percentage of total
+ // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ // This cannot be 0.
+ // Default value is 1.
+ // Example: when this is set to 30%, at most 30% of the total number of nodes
+ // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ // can have their pods stopped for an update at any given
+ // time. The update starts by stopping at most 30% of those DaemonSet pods
+ // and then brings up new DaemonSet pods in their place. Once the new pods
+ // are available, it then proceeds onto other DaemonSet pods, thus ensuring
+ // that at least 70% of original number of DaemonSet pods are available at
+ // all times during the update.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+}
+
+// Spec to control the desired behavior of rolling update.
+message RollingUpdateDeployment {
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding down.
+ // This can not be 0 if MaxSurge is 0.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+ // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+ // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+ // that the total number of pods available at all times during the update is at
+ // least 70% of desired pods.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
+
+ // The maximum number of pods that can be scheduled above the desired number of
+ // pods.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+ // the rolling update starts, such that the total number of old and new pods do not exceed
+ // 130% of desired pods. Once old pods have been killed,
+ // new ReplicaSet can be scaled up further, ensuring that total number of pods running
+ // at any time during the update is at most 130% of desired pods.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
+}
+
+// RollingUpdateStatefulSetStrategy is used to communicate parameters for RollingUpdateStatefulSetStrategyType.
+message RollingUpdateStatefulSetStrategy {
+ // Partition indicates the ordinal at which the StatefulSet should be
+ // partitioned.
+ // Default value is 0.
+ // +optional
+ optional int32 partition = 1;
+}
+
+// Scale represents a scaling request for a resource.
+message Scale {
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+ // +optional
+ optional ScaleSpec spec = 2;
+
+ // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
+ // +optional
+ optional ScaleStatus status = 3;
+}
+
+// ScaleSpec describes the attributes of a scale subresource
+message ScaleSpec {
+ // desired number of instances for the scaled object.
+ // +optional
+ optional int32 replicas = 1;
+}
+
+// ScaleStatus represents the current status of a scale subresource.
+message ScaleStatus {
+ // actual number of observed instances of the scaled object.
+ optional int32 replicas = 1;
+
+ // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
+ // +optional
+ map<string, string> selector = 2;
+
+ // label selector for pods that should match the replicas count. This is a serialized
+ // version of both map-based and more expressive set-based selectors. This is done to
+ // avoid introspection in the clients. The string will be in the same format as the
+ // query-param syntax. If the target type only supports map-based selectors, both this
+ // field and map-based selector field are populated.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ // +optional
+ optional string targetSelector = 3;
+}
+
+// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for
+// more information.
+// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +message StatefulSet { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired identities of pods in this set. + // +optional + optional StatefulSetSpec spec = 2; + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + optional StatefulSetStatus status = 3; +} + +// StatefulSetCondition describes the state of a statefulset at a certain point. +message StatefulSetCondition { + // Type of statefulset condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// StatefulSetList is a collection of StatefulSets. +message StatefulSetList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated StatefulSet items = 2; +} + +// A StatefulSetSpec is the specification of a StatefulSet. +message StatefulSetSpec { + // replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + optional int32 replicas = 1; + + // selector is a label query over pods that should match the replica count. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // volumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + + // serviceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. 
Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + optional string serviceName = 5; + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + optional string podManagementPolicy = 6; + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + optional StatefulSetUpdateStrategy updateStrategy = 7; + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. + optional int32 revisionHistoryLimit = 8; +} + +// StatefulSetStatus represents the current state of a StatefulSet. +message StatefulSetStatus { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. + // +optional + optional int64 observedGeneration = 1; + + // replicas is the number of Pods created by the StatefulSet controller. + optional int32 replicas = 2; + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + optional int32 readyReplicas = 3; + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + optional int32 currentReplicas = 4; + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + optional int32 updatedReplicas = 5; + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). + optional string currentRevision = 6; + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + optional string updateRevision = 7; + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated StatefulSetCondition conditions = 10; +} + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +message StatefulSetUpdateStrategy { + // Type indicates the type of the StatefulSetUpdateStrategy. 
+ // Default is RollingUpdate. + // +optional + optional string type = 1; + + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. + // +optional + optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; +} + diff --git a/vendor/k8s.io/api/apps/v1beta2/register.go b/vendor/k8s.io/api/apps/v1beta2/register.go new file mode 100644 index 000000000..2784ee377 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta2/register.go @@ -0,0 +1,61 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "apps" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &Scale{}, + &StatefulSet{}, + &StatefulSetList{}, + &DaemonSet{}, + &DaemonSetList{}, + &ReplicaSet{}, + &ReplicaSetList{}, + &ControllerRevision{}, + &ControllerRevisionList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go new file mode 100644 index 000000000..e5525222a --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -0,0 +1,877 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package v1beta2
+
+import (
+ "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+ ControllerRevisionHashLabelKey = "controller-revision-hash"
+ StatefulSetRevisionLabel = ControllerRevisionHashLabelKey
+ DeprecatedRollbackTo = "deprecated.deployment.rollback.to"
+ DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation"
+ StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name"
+)
+
+// ScaleSpec describes the attributes of a scale subresource
+type ScaleSpec struct {
+ // desired number of instances for the scaled object.
+ // +optional
+ Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+}
+
+// ScaleStatus represents the current status of a scale subresource.
+type ScaleStatus struct {
+ // actual number of observed instances of the scaled object.
+ Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+ // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
+ // +optional
+ Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
+
+ // label selector for pods that should match the replicas count. This is a serialized
+ // version of both map-based and more expressive set-based selectors. This is done to
+ // avoid introspection in the clients. The string will be in the same format as the
+ // query-param syntax. If the target type only supports map-based selectors, both this
+ // field and map-based selector field are populated.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ // +optional
+ TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"`
+}
+
+// +genclient
+// +genclient:noVerbs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Scale represents a scaling request for a resource.
+type Scale struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+ // +optional
+ Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
+ // +optional
+ Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for
+// more information.
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+// - Network: A single stable DNS and hostname.
+// - Storage: As many VolumeClaims as requested.
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
+type StatefulSet struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the desired identities of pods in this set.
+ // +optional
+ Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is the current status of Pods in this StatefulSet. This data
+ // may be out of date by some window of time.
+ // +optional
+ Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PodManagementPolicyType defines the policy for creating pods under a stateful set.
+type PodManagementPolicyType string
+
+const (
+ // OrderedReadyPodManagement will create pods in strictly increasing order on
+ // scale up and strictly decreasing order on scale down, progressing only when
+ // the previous pod is ready or terminated. At most one pod will be changed
+ // at any time.
+ OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady"
+ // ParallelPodManagement will create and delete pods as soon as the stateful set
+ // replica count is changed, and will not wait for pods to be ready or complete
+ // termination.
+ ParallelPodManagement = "Parallel"
+)
+
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
+type StatefulSetUpdateStrategy struct {
+ // Type indicates the type of the StatefulSetUpdateStrategy.
+ // Default is RollingUpdate.
+ // +optional
+ Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"`
+ // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
+ // +optional
+ RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+// StatefulSetUpdateStrategyType is a string enumeration type that enumerates
+// all possible update strategies for the StatefulSet controller.
+type StatefulSetUpdateStrategyType string
+
+const (
+ // RollingUpdateStatefulSetStrategyType indicates that update will be
+ // applied to all Pods in the StatefulSet with respect to the StatefulSet
+ // ordering constraints. When a scale operation is performed with this
+ // strategy, new Pods will be created from the specification version indicated
+ // by the StatefulSet's updateRevision.
+ RollingUpdateStatefulSetStrategyType = "RollingUpdate"
+ // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version
+ // tracking and ordered rolling restarts are disabled. Pods are recreated
+ // from the StatefulSetSpec when they are manually deleted. When a scale
+ // operation is performed with this strategy, new Pods will be created from the
+ // specification version indicated by the StatefulSet's currentRevision.
+ OnDeleteStatefulSetStrategyType = "OnDelete"
+)
+
+// RollingUpdateStatefulSetStrategy is used to communicate parameters for RollingUpdateStatefulSetStrategyType.
+type RollingUpdateStatefulSetStrategy struct {
+ // Partition indicates the ordinal at which the StatefulSet should be
+ // partitioned.
+ // Default value is 0.
+ // +optional
+ Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"`
+}
+
+// A StatefulSetSpec is the specification of a StatefulSet.
+type StatefulSetSpec struct {
+ // replicas is the desired number of replicas of the given Template.
+ // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // selector is a label query over pods that should match the replica count. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // volumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` + + // serviceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"` + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"` + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. 
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` +} + +// StatefulSetStatus represents the current state of a StatefulSet. +type StatefulSetStatus struct { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // replicas is the number of Pods created by the StatefulSet controller. + Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"` + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"` + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). + CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"` + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"` + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type StatefulSetConditionType string + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. 
+ // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StatefulSetList is a collection of StatefulSets. +type StatefulSetList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for +// more information. +// Deployment enables declarative updates for Pods and ReplicaSets. +type Deployment struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // It must match the pod template's labels. + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // Template describes the pods that will be created. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` + + // Indicates that the deployment is paused. + // +optional + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. 
+ ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"`
+}
+
+const (
+ // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added
+ // to existing ReplicaSets (and label key that is added to its pods) to prevent the existing ReplicaSets
+ // to select new pods (and old pods being selected by new ReplicaSet).
+ DefaultDeploymentUniqueLabelKey string = "pod-template-hash"
+)
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+type DeploymentStrategy struct {
+ // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+ // +optional
+ Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"`
+
+ // Rolling update config params. Present only if DeploymentStrategyType =
+ // RollingUpdate.
+ //---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be.
+ // +optional
+ RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DeploymentStrategyType string
+
+const (
+ // Kill all existing pods before creating new ones.
+ RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate"
+
+ // Replace the old ReplicaSets by new one using rolling update i.e gradually scale down the old ReplicaSets and scale up the new one.
+ RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate"
+)
+
+// Spec to control the desired behavior of rolling update.
+type RollingUpdateDeployment struct {
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding down.
+ // This can not be 0 if MaxSurge is 0.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+ // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+ // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+ // that the total number of pods available at all times during the update is at
+ // least 70% of desired pods.
+ // +optional
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+
+ // The maximum number of pods that can be scheduled above the desired number of
+ // pods.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+ // the rolling update starts, such that the total number of old and new pods do not exceed
+ // 130% of desired pods. Once old pods have been killed,
+ // new ReplicaSet can be scaled up further, ensuring that total number of pods running
+ // at any time during the update is at most 130% of desired pods.
+ // +optional
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
+}
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+type DeploymentStatus struct {
+ // The generation observed by the deployment controller.
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"` +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // Last time the condition transitioned from one status to another. 
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentList is a list of Deployments. +type DeploymentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Deployments. + Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. +type DaemonSetUpdateStrategy struct { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. + // +optional + Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + + // Rolling update config params. Present only if type = "RollingUpdate". + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as Deployment `strategy.rollingUpdate`. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DaemonSetUpdateStrategyType string + +const ( + // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. + RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" + + // Replace the old daemons only when it's killed + OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" +) + +// Spec to control the desired behavior of daemon set rolling update. +type RollingUpdateDaemonSet struct { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` +} + +// DaemonSetSpec is the specification of a daemon set. +type DaemonSetSpec struct { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // It must match the pod template's labels. 
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"` + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` +} + +// DaemonSetStatus represents the current status of a daemon set. +type DaemonSetStatus struct { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` + + // The most recent generation observed by the daemon set controller. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"` + + // The total number of nodes that are running updated daemon pod + // +optional + UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"` + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"` + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"` + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for +// more information. +// DaemonSet represents the configuration of a daemon set. +type DaemonSet struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The desired behavior of this daemon set. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. 
+ // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +const ( + // DefaultDaemonSetUniqueLabelKey is the default label key that is added + // to existing DaemonSet pods to distinguish between old and new + // DaemonSet pods during DaemonSet template updates. + DefaultDaemonSetUniqueLabelKey = ControllerRevisionHashLabelKey +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // A list of daemon sets. + Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for +// more information. +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +type ReplicaSet struct { + metav1.TypeMeta `json:",inline"` + + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSetList is a collection of ReplicaSets. +type ReplicaSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ReplicaSets. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ // +optional
+ Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+ // Selector is a label query over pods that should match the replica count.
+ // Label keys and values that must match in order to be controlled by this replica set.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"`
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ // +optional
+ Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+type ReplicaSetStatus struct {
+ // Replicas is the most recently observed number of replicas.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+ Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+ // The number of pods that have labels matching the labels of the pod template of the replicaset.
+ // +optional
+ FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
+
+ // The number of ready replicas for this replica set.
+ // +optional
+ ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
+
+ // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+ // +optional
+ AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
+
+ // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+
+ // Represents the latest available observations of a replica set's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
+}
+
+type ReplicaSetConditionType string
+
+// These are valid conditions of a replica set.
+const (
+ // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created
+ // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted
+ // due to kubelet being down or finalizers are failing.
+ ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure"
+)
+
+// ReplicaSetCondition describes the state of a replica set at a certain point.
+type ReplicaSetCondition struct {
+ // Type of replica set condition.
+ Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the +// release notes for more information. +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +type ControllerRevision struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data is the serialized representation of the state. + Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` + + // Revision indicates the revision of the state represented by Data. + Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +type ControllerRevisionList struct { + metav1.TypeMeta `json:",inline"` + + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ControllerRevisions + Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go new file mode 100644 index 000000000..627df3ab7 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -0,0 +1,396 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ControllerRevision = map[string]string{ + "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "data": "Data is the serialized representation of the state.", + "revision": "Revision indicates the revision of the state represented by Data.", +} + +func (ControllerRevision) SwaggerDoc() map[string]string { + return map_ControllerRevision +} + +var map_ControllerRevisionList = map[string]string{ + "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of ControllerRevisions", +} + +func (ControllerRevisionList) SwaggerDoc() map[string]string { + return map_ControllerRevisionList +} + +var map_DaemonSet = map[string]string{ + "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (DaemonSet) SwaggerDoc() map[string]string { + return map_DaemonSet +} + +var map_DaemonSetCondition = map[string]string{ + "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "Type of DaemonSet condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DaemonSetCondition) SwaggerDoc() map[string]string { + return map_DaemonSetCondition +} + +var map_DaemonSetList = map[string]string{ + "": "DaemonSetList is a collection of daemon sets.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "A list of daemon sets.", +} + +func (DaemonSetList) SwaggerDoc() map[string]string { + return map_DaemonSetList +} + +var map_DaemonSetSpec = map[string]string{ + "": "DaemonSetSpec is the specification of a daemon set.", + "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", + "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", + "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", +} + +func (DaemonSetSpec) SwaggerDoc() map[string]string { + return map_DaemonSetSpec +} + +var map_DaemonSetStatus = map[string]string{ + "": "DaemonSetStatus represents the current status of a daemon set.", + "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + "observedGeneration": "The most recent generation observed by the daemon set controller.", + "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", + "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a DaemonSet's current state.", +} + +func (DaemonSetStatus) SwaggerDoc() map[string]string { + return map_DaemonSetStatus +} + +var map_DaemonSetUpdateStrategy = map[string]string{ + "": "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.", + "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".", +} + +func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_DaemonSetUpdateStrategy +} + +var map_Deployment = map[string]string{ + "": "DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for more information. Deployment enables declarative updates for Pods and ReplicaSets.", + "metadata": "Standard object metadata.", + "spec": "Specification of the desired behavior of the Deployment.", + "status": "Most recently observed status of the Deployment.", +} + +func (Deployment) SwaggerDoc() map[string]string { + return map_Deployment +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentList = map[string]string{ + "": "DeploymentList is a list of Deployments.", + "metadata": "Standard list metadata.", + "items": "Items is the list of Deployments.", +} + +func (DeploymentList) SwaggerDoc() map[string]string { + return map_DeploymentList +} + +var map_DeploymentSpec = map[string]string{ + "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. 
It must match the pod template's labels.", + "template": "Template describes the pods that will be created.", + "strategy": "The deployment strategy to use to replace existing pods with new ones.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", + "paused": "Indicates that the deployment is paused.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", +} + +func (DeploymentSpec) SwaggerDoc() map[string]string { + return map_DeploymentSpec +} + +var map_DeploymentStatus = map[string]string{ + "": "DeploymentStatus is the most recently observed status of the Deployment.", + "observedGeneration": "The generation observed by the deployment controller.", + "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "conditions": "Represents the latest available observations of a deployment's current state.", + "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", +} + +func (DeploymentStatus) SwaggerDoc() map[string]string { + return map_DeploymentStatus +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to replace existing pods with new ones.", + "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_ReplicaSet = map[string]string{ + "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+ "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+}
+
+func (ReplicaSet) SwaggerDoc() map[string]string {
+ return map_ReplicaSet
+}
+
+var map_ReplicaSetCondition = map[string]string{
+ "": "ReplicaSetCondition describes the state of a replica set at a certain point.",
+ "type": "Type of replica set condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastTransitionTime": "The last time the condition transitioned from one status to another.",
+ "reason": "The reason for the condition's last transition.",
+ "message": "A human readable message indicating details about the transition.",
+}
+
+func (ReplicaSetCondition) SwaggerDoc() map[string]string {
+ return map_ReplicaSetCondition
+}
+
+var map_ReplicaSetList = map[string]string{
+ "": "ReplicaSetList is a collection of ReplicaSets.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+ "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
+}
+
+func (ReplicaSetList) SwaggerDoc() map[string]string {
+ return map_ReplicaSetList
+}
+
+var map_ReplicaSetSpec = map[string]string{
+ "": "ReplicaSetSpec is the specification of a ReplicaSet.",
+ "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+ "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+ "selector": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+ "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+}
+
+func (ReplicaSetSpec) SwaggerDoc() map[string]string {
+ return map_ReplicaSetSpec
+}
+
+var map_ReplicaSetStatus = map[string]string{
+ "": "ReplicaSetStatus represents the current status of a ReplicaSet.",
+ "replicas": "Replicas is the most recently observed number of replicas. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", + "readyReplicas": "The number of ready replicas for this replica set.", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", + "conditions": "Represents the latest available observations of a replica set's current state.", +} + +func (ReplicaSetStatus) SwaggerDoc() map[string]string { + return map_ReplicaSetStatus +} + +var map_RollingUpdateDaemonSet = map[string]string{ + "": "Spec to control the desired behavior of daemon set rolling update.", + "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", +} + +func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { + return map_RollingUpdateDaemonSet +} + +var map_RollingUpdateDeployment = map[string]string{ + "": "Spec to control the desired behavior of rolling update.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.",
+}
+
+func (RollingUpdateDeployment) SwaggerDoc() map[string]string {
+ return map_RollingUpdateDeployment
+}
+
+var map_RollingUpdateStatefulSetStrategy = map[string]string{
+ "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.",
+ "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.",
+}
+
+func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string {
+ return map_RollingUpdateStatefulSetStrategy
+}
+
+var map_Scale = map[string]string{
+ "": "Scale represents a scaling request for a resource.",
+ "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.",
+ "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.",
+ "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.",
+}
+
+func (Scale) SwaggerDoc() map[string]string {
+ return map_Scale
+}
+
+var map_ScaleSpec = map[string]string{
+ "": "ScaleSpec describes the attributes of a scale subresource",
+ "replicas": "desired number of instances for the scaled object.",
+}
+
+func (ScaleSpec) SwaggerDoc() map[string]string {
+ return map_ScaleSpec
+}
+
+var map_ScaleStatus = map[string]string{
+ "": "ScaleStatus represents the current status of a scale subresource.",
+ "replicas": "actual number of observed instances of the scaled object.",
+ "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors",
+ "targetSelector": "label selector for pods that should match the replicas count. This is a serialized version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+}
+
+func (ScaleStatus) SwaggerDoc() map[string]string {
+ return map_ScaleStatus
+}
+
+var map_StatefulSet = map[string]string{
+ "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.",
+ "spec": "Spec defines the desired identities of pods in this set.",
+ "status": "Status is the current status of Pods in this StatefulSet. 
This data may be out of date by some window of time.", +} + +func (StatefulSet) SwaggerDoc() map[string]string { + return map_StatefulSet +} + +var map_StatefulSetCondition = map[string]string{ + "": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "Type of statefulset condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StatefulSetCondition) SwaggerDoc() map[string]string { + return map_StatefulSetCondition +} + +var map_StatefulSetList = map[string]string{ + "": "StatefulSetList is a collection of StatefulSets.", +} + +func (StatefulSetList) SwaggerDoc() map[string]string { + return map_StatefulSetList +} + +var map_StatefulSetSpec = map[string]string{ + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. 
The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", +} + +func (StatefulSetSpec) SwaggerDoc() map[string]string { + return map_StatefulSetSpec +} + +var map_StatefulSetStatus = map[string]string{ + "": "StatefulSetStatus represents the current state of a StatefulSet.", + "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", + "replicas": "replicas is the number of Pods created by the StatefulSet controller.", + "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", + "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", + "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", + "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a statefulset's current state.", +} + +func (StatefulSetStatus) SwaggerDoc() map[string]string { + return map_StatefulSetStatus +} + +var map_StatefulSetUpdateStrategy = map[string]string{ + "": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", + "type": "Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.", + "rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", +} + +func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_StatefulSetUpdateStrategy +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..a3bd8afdc --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,923 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Data.DeepCopyInto(&out.Data) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision. +func (in *ControllerRevision) DeepCopy() *ControllerRevision { + if in == nil { + return nil + } + out := new(ControllerRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevision) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerRevision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList. +func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList { + if in == nil { + return nil + } + out := new(ControllerRevisionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevisionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet. +func (in *DaemonSet) DeepCopy() *DaemonSet { + if in == nil { + return nil + } + out := new(DaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList. +func (in *DaemonSetList) DeepCopy() *DaemonSetList { + if in == nil { + return nil + } + out := new(DaemonSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec. +func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec { + if in == nil { + return nil + } + out := new(DaemonSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus. +func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus { + if in == nil { + return nil + } + out := new(DaemonSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateDaemonSet) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy. +func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { + if in == nil { + return nil + } + out := new(DaemonSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Deployment) DeepCopyInto(out *Deployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. +func (in *Deployment) DeepCopy() *Deployment { + if in == nil { + return nil + } + out := new(Deployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Deployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. +func (in *DeploymentList) DeepCopy() *DeploymentList { + if in == nil { + return nil + } + out := new(DeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. 
+func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. +func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { + if in == nil { + return nil + } + out := new(DeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateDeployment) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. +func (in *ReplicaSet) DeepCopy() *ReplicaSet { + if in == nil { + return nil + } + out := new(ReplicaSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. +func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { + if in == nil { + return nil + } + out := new(ReplicaSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. +func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { + if in == nil { + return nil + } + out := new(ReplicaSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. +func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { + if in == nil { + return nil + } + out := new(ReplicaSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. +func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { + if in == nil { + return nil + } + out := new(ReplicaSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet. +func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { + if in == nil { + return nil + } + out := new(RollingUpdateDaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. +func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { + if in == nil { + return nil + } + out := new(RollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { + *out = *in + if in.Partition != nil { + in, out := &in.Partition, &out.Partition + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. +func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { + if in == nil { + return nil + } + out := new(RollingUpdateStatefulSetStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scale) DeepCopyInto(out *Scale) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. +func (in *Scale) DeepCopy() *Scale { + if in == nil { + return nil + } + out := new(Scale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scale) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. +func (in *ScaleSpec) DeepCopy() *ScaleSpec { + if in == nil { + return nil + } + out := new(ScaleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. +func (in *ScaleStatus) DeepCopy() *ScaleStatus { + if in == nil { + return nil + } + out := new(ScaleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet. +func (in *StatefulSet) DeepCopy() *StatefulSet { + if in == nil { + return nil + } + out := new(StatefulSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. +func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList. +func (in *StatefulSetList) DeepCopy() *StatefulSetList { + if in == nil { + return nil + } + out := new(StatefulSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]core_v1.PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec. 
+func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec { + if in == nil { + return nil + } + out := new(StatefulSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus. +func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus { + if in == nil { + return nil + } + out := new(StatefulSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateStatefulSetStrategy) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. +func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy { + if in == nil { + return nil + } + out := new(StatefulSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go new file mode 100644 index 000000000..2d2ed2ee8 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=authentication.k8s.io +// +k8s:openapi-gen=true +package v1 // import "k8s.io/api/authentication/v1" diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go new file mode 100644 index 000000000..29697280a --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -0,0 +1,2148 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto + + It has these top-level messages: + BoundObjectReference + ExtraValue + TokenRequest + TokenRequestSpec + TokenRequestStatus + TokenReview + TokenReviewSpec + TokenReviewStatus + UserInfo +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } +func (*BoundObjectReference) ProtoMessage() {} +func (*BoundObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *TokenRequest) Reset() { *m = TokenRequest{} } +func (*TokenRequest) ProtoMessage() {} +func (*TokenRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } +func (*TokenRequestSpec) ProtoMessage() {} +func (*TokenRequestSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } +func (*TokenRequestStatus) ProtoMessage() {} +func (*TokenRequestStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func init() { + proto.RegisterType((*BoundObjectReference)(nil), "k8s.io.api.authentication.v1.BoundObjectReference") + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1.ExtraValue") + proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.authentication.v1.TokenRequest") + proto.RegisterType((*TokenRequestSpec)(nil), "k8s.io.api.authentication.v1.TokenRequestSpec") + 
proto.RegisterType((*TokenRequestStatus)(nil), "k8s.io.api.authentication.v1.TokenRequestStatus") + proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1.TokenReview") + proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1.TokenReviewSpec") + proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1.TokenReviewStatus") + proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1.UserInfo") +} +func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoundObjectReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + return i, nil +} + +func (m ExtraValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *TokenRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenRequestSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.BoundObjectRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.BoundObjectRef.Size())) + n4, err := m.BoundObjectRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.ExpirationSeconds != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) 
+ } + return i, nil +} + +func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenRequestStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpirationTimestamp.Size())) + n5, err := m.ExpirationTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *TokenReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n6, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n7, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n8, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) + return i, nil +} + +func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Authenticated { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) + n9, err := m.User.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + return i, nil +} + +func (m *UserInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i += copy(dAtA[i:], m.Username) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra 
{ + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for _, k := range keysForExtra { + dAtA[i] = 0x22 + i++ + v := m.Extra[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n10, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *BoundObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TokenRequest) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenRequestSpec) Size() (n int) { + var l int + _ = l + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.BoundObjectRef != nil { + l = m.BoundObjectRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + return n +} + +func (m *TokenRequestStatus) Size() (n int) { + var l int + _ = l + l = len(m.Token) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ExpirationTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewSpec) Size() (n int) { + var l int + _ = l + l = len(m.Token) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewStatus) Size() (n int) { + var l int + _ = l + n += 2 + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *UserInfo) Size() (n int) { + var l int + _ = l + l = len(m.Username) 
+ n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BoundObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BoundObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *TokenRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenRequestSpec", "TokenRequestSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenRequestStatus", "TokenRequestStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TokenRequestSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenRequestSpec{`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, + `BoundObjectRef:` + strings.Replace(fmt.Sprintf("%v", this.BoundObjectRef), "BoundObjectReference", "BoundObjectReference", 1) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *TokenRequestStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenRequestStatus{`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `ExpirationTimestamp:` + strings.Replace(strings.Replace(this.ExpirationTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewSpec{`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewStatus{`, + `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, + `User:` + strings.Replace(strings.Replace(this.User.String(), 
"UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *UserInfo) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&UserInfo{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoundObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoundObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExtraValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenRequestSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BoundObjectRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BoundObjectRef == nil { + m.BoundObjectRef = &BoundObjectReference{} + } + if err := m.BoundObjectRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ExpirationSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenRequestStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ExpirationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return 
nil +} +func (m *TokenReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Authenticated", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Authenticated = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return 
iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 905 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0x1b, 0x45, + 0x14, 0xf6, 0xfa, 0x47, 0x64, 0x8f, 0x9b, 0x90, 0x4c, 0xa9, 0x64, 0x05, 0xb0, 0x8d, 0x91, 0x90, + 0x05, 0x74, 0xb7, 0x36, 0xa8, 0x54, 0x45, 0x42, 0xca, 0x12, 0x0b, 0x2c, 0x04, 0xad, 0xa6, 0x4d, + 0x40, 0x9c, 0x18, 0xdb, 0x2f, 0xce, 0xe0, 0xee, 0xec, 0x32, 0x3b, 0x6b, 0xea, 0x5b, 0xff, 0x04, + 0x8e, 0x20, 0x71, 0xe0, 0x8f, 0x40, 0xe2, 0xc8, 0x35, 0xc7, 0x8a, 0x53, 0x0f, 0xc8, 0x22, 0xcb, + 0xbf, 0xc0, 0x89, 0x13, 0x9a, 0xd9, 0x89, 0xd7, 0x3f, 0x12, 0xc7, 0xa7, 0xde, 0x3c, 0xef, 0x7d, + 0xef, 0x9b, 0xf7, 0xbe, 0xf9, 0xfc, 0x16, 0x75, 0x46, 0xf7, 0x42, 0x9b, 0xf9, 0xce, 0x28, 0xea, + 0x81, 0xe0, 0x20, 0x21, 0x74, 0xc6, 0xc0, 0x07, 0xbe, 0x70, 0x4c, 0x82, 0x06, 0xcc, 0xa1, 0x91, + 0x3c, 0x05, 0x2e, 0x59, 0x9f, 0x4a, 0xe6, 0x73, 0x67, 0xdc, 0x72, 0x86, 0xc0, 0x41, 0x50, 0x09, + 0x03, 0x3b, 0x10, 0xbe, 0xf4, 0xf1, 0xeb, 0x09, 0xda, 0xa6, 0x01, 0xb3, 0x17, 0xd1, 0xf6, 0xb8, + 0xb5, 0x7f, 0x7b, 0xc8, 0xe4, 0x69, 0xd4, 0xb3, 0xfb, 0xbe, 0xe7, 0x0c, 0xfd, 0xa1, 0xef, 0xe8, + 0xa2, 0x5e, 0x74, 0xa2, 0x4f, 0xfa, 0xa0, 0x7f, 0x25, 0x64, 0xfb, 0x1f, 0xa4, 0x57, 0x7b, 0xb4, + 0x7f, 0xca, 0x38, 0x88, 0x89, 0x13, 0x8c, 0x86, 0x2a, 0x10, 0x3a, 0x1e, 0x48, 0x7a, 0x49, 0x0b, + 0xfb, 0xce, 0x55, 0x55, 0x22, 0xe2, 0x92, 0x79, 0xb0, 0x52, 0x70, 0xf7, 0xba, 0x82, 0xb0, 0x7f, + 0x0a, 0x1e, 0x5d, 0xa9, 0x7b, 0xff, 0xaa, 0xba, 0x48, 0xb2, 0x27, 0x0e, 0xe3, 0x32, 0x94, 0x62, + 0xb9, 0xa8, 0xf1, 0xa7, 0x85, 0x5e, 0x75, 0xfd, 0x88, 0x0f, 0x1e, 0xf4, 0xbe, 0x83, 0xbe, 0x24, + 0x70, 0x02, 0x02, 0x78, 0x1f, 0x70, 0x1d, 0xe5, 0x47, 0x8c, 0x0f, 0x2a, 0x56, 0xdd, 0x6a, 0x96, + 0xdc, 0x1b, 0x67, 0xd3, 0x5a, 0x26, 0x9e, 0xd6, 0xf2, 0x9f, 0x33, 0x3e, 0x20, 0x3a, 0x83, 0xdb, + 0x08, 0xd1, 0x87, 0xdd, 0x63, 0x10, 0x21, 0xf3, 0x79, 0x25, 0xab, 0x71, 0xd8, 0xe0, 0xd0, 0xc1, + 0x2c, 0x43, 0xe6, 0x50, 0x8a, 0x95, 0x53, 0x0f, 0x2a, 0xb9, 0x45, 0xd6, 0x2f, 0xa9, 0x07, 0x44, + 0x67, 0xb0, 0x8b, 0x72, 0x51, 0xf7, 0xb0, 0x92, 0xd7, 0x80, 0x3b, 0x06, 0x90, 0x3b, 0xea, 0x1e, + 0xfe, 0x37, 0xad, 0xbd, 0x79, 0xd5, 0x84, 0x72, 0x12, 0x40, 0x68, 0x1f, 0x75, 0x0f, 0x89, 0x2a, + 0x6e, 0x7c, 0x88, 0x50, 0xe7, 0xa9, 0x14, 0xf4, 0x98, 0x3e, 0x89, 0x00, 0xd7, 0x50, 0x81, 0x49, + 0xf0, 0xc2, 0x8a, 0x55, 0xcf, 0x35, 0x4b, 0x6e, 0x29, 0x9e, 0xd6, 0x0a, 0x5d, 0x15, 0x20, 0x49, + 0xfc, 0x7e, 0xf1, 0xa7, 0x5f, 0x6b, 0x99, 0x67, 0x7f, 0xd5, 0x33, 0x8d, 0x5f, 0xb2, 0xe8, 0xc6, + 0x63, 0x7f, 0x04, 0x9c, 0xc0, 0xf7, 0x11, 0x84, 0x12, 0x7f, 0x8b, 0x8a, 0xea, 0x5d, 0x07, 0x54, + 0x52, 0xad, 0x44, 0xb9, 0x7d, 0xc7, 0x4e, 0x2d, 0x35, 0x6b, 0xc2, 0x0e, 0x46, 0x43, 0x15, 0x08, + 0x6d, 0x85, 0xb6, 0xc7, 0x2d, 0x3b, 0x91, 0xf3, 0x0b, 0x90, 0x34, 0xd5, 0x24, 0x8d, 0x91, 0x19, + 0x2b, 0x7e, 0x88, 0xf2, 0x61, 0x00, 0x7d, 0xad, 0x5f, 0xb9, 0x6d, 0xdb, 0xeb, 0x0c, 0x6b, 0xcf, + 0xf7, 0xf6, 0x28, 0x80, 0x7e, 0xaa, 0xa0, 0x3a, 0x11, 0xcd, 0x84, 0xbf, 0x46, 0x5b, 0xa1, 0xa4, + 0x32, 0x0a, 0xb5, 0xca, 0x8b, 0x1d, 0x5f, 0xc7, 
0xa9, 0xeb, 0xdc, 0x1d, 0xc3, 0xba, 0x95, 0x9c, + 0x89, 0xe1, 0x6b, 0xfc, 0x6b, 0xa1, 0xdd, 0xe5, 0x16, 0xf0, 0xbb, 0xa8, 0x44, 0xa3, 0x01, 0x53, + 0xa6, 0xb9, 0x90, 0x78, 0x3b, 0x9e, 0xd6, 0x4a, 0x07, 0x17, 0x41, 0x92, 0xe6, 0x31, 0x47, 0x3b, + 0xbd, 0x05, 0xb7, 0x99, 0x1e, 0xdb, 0xeb, 0x7b, 0xbc, 0xcc, 0xa1, 0x2e, 0x8e, 0xa7, 0xb5, 0x9d, + 0xc5, 0x0c, 0x59, 0x62, 0xc7, 0x9f, 0xa0, 0x3d, 0x78, 0x1a, 0x30, 0xa1, 0x99, 0x1e, 0x41, 0xdf, + 0xe7, 0x83, 0x50, 0x7b, 0x2b, 0xe7, 0xde, 0x8a, 0xa7, 0xb5, 0xbd, 0xce, 0x72, 0x92, 0xac, 0xe2, + 0x1b, 0xbf, 0x59, 0x08, 0xaf, 0xaa, 0x84, 0xdf, 0x42, 0x05, 0xa9, 0xa2, 0xe6, 0x2f, 0xb2, 0x6d, + 0x44, 0x2b, 0x24, 0xd0, 0x24, 0x87, 0x27, 0xe8, 0x66, 0x4a, 0xf8, 0x98, 0x79, 0x10, 0x4a, 0xea, + 0x05, 0xe6, 0xb5, 0xdf, 0xd9, 0xcc, 0x4b, 0xaa, 0xcc, 0x7d, 0xcd, 0xd0, 0xdf, 0xec, 0xac, 0xd2, + 0x91, 0xcb, 0xee, 0x68, 0xfc, 0x9c, 0x45, 0x65, 0xd3, 0xf6, 0x98, 0xc1, 0x0f, 0x2f, 0xc1, 0xcb, + 0x0f, 0x16, 0xbc, 0x7c, 0x7b, 0x23, 0xdf, 0xa9, 0xd6, 0xae, 0xb4, 0xf2, 0x57, 0x4b, 0x56, 0x76, + 0x36, 0xa7, 0x5c, 0xef, 0xe4, 0xbb, 0xe8, 0x95, 0xa5, 0xfb, 0x37, 0x7a, 0xce, 0xc6, 0x1f, 0x16, + 0xda, 0x5b, 0xb9, 0x05, 0x7f, 0x84, 0xb6, 0xe7, 0x9a, 0x81, 0x64, 0x69, 0x16, 0xdd, 0x5b, 0x86, + 0x62, 0xfb, 0x60, 0x3e, 0x49, 0x16, 0xb1, 0xf8, 0x33, 0x94, 0x8f, 0x42, 0x10, 0x46, 0xb4, 0xb7, + 0xd7, 0x4f, 0x78, 0x14, 0x82, 0xe8, 0xf2, 0x13, 0x3f, 0x55, 0x4b, 0x45, 0x88, 0x66, 0x50, 0x13, + 0x80, 0x10, 0xbe, 0x30, 0xdb, 0x75, 0x36, 0x41, 0x47, 0x05, 0x49, 0x92, 0x6b, 0xfc, 0x9e, 0x45, + 0xc5, 0x0b, 0x16, 0xfc, 0x1e, 0x2a, 0xaa, 0x4a, 0xbd, 0x92, 0x93, 0xb1, 0x77, 0x4d, 0x91, 0xc6, + 0xa8, 0x38, 0x99, 0x21, 0xf0, 0x1b, 0x28, 0x17, 0xb1, 0x81, 0xd9, 0xf4, 0xe5, 0xb9, 0xd5, 0x4c, + 0x54, 0x1c, 0x37, 0xd0, 0xd6, 0x50, 0xf8, 0x51, 0xa0, 0x1e, 0x4b, 0x6d, 0x01, 0xa4, 0x74, 0xff, + 0x54, 0x47, 0x88, 0xc9, 0xe0, 0x63, 0x54, 0x00, 0xb5, 0x99, 0x2b, 0xf9, 0x7a, 0xae, 0x59, 0x6e, + 0xb7, 0x36, 0x9b, 0xd6, 0xd6, 0xdb, 0xbc, 0xc3, 0xa5, 0x98, 0xcc, 0x4d, 0xa5, 0x62, 0x24, 0xa1, + 0xdb, 0xef, 0x99, 0x8d, 0xaf, 0x31, 0x78, 0x17, 0xe5, 0x46, 0x30, 0x49, 0x26, 0x22, 0xea, 0x27, + 0xfe, 0x18, 0x15, 0xc6, 0xea, 0x63, 0x60, 0x54, 0x6e, 0xae, 0xbf, 0x37, 0xfd, 0x78, 0x90, 0xa4, + 0xec, 0x7e, 0xf6, 0x9e, 0xe5, 0x36, 0xcf, 0xce, 0xab, 0x99, 0xe7, 0xe7, 0xd5, 0xcc, 0x8b, 0xf3, + 0x6a, 0xe6, 0x59, 0x5c, 0xb5, 0xce, 0xe2, 0xaa, 0xf5, 0x3c, 0xae, 0x5a, 0x2f, 0xe2, 0xaa, 0xf5, + 0x77, 0x5c, 0xb5, 0x7e, 0xfc, 0xa7, 0x9a, 0xf9, 0x26, 0x3b, 0x6e, 0xfd, 0x1f, 0x00, 0x00, 0xff, + 0xff, 0x5f, 0x39, 0x60, 0xb1, 0xbd, 0x08, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto similarity index 58% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.proto rename to vendor/k8s.io/api/authentication/v1/generated.proto index cbc050970..d20887f82 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,17 +19,34 @@ limitations under the License. 
syntax = 'proto2'; -package k8s.io.kubernetes.pkg.apis.authentication.v1beta1; +package k8s.io.api.authentication.v1; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "v1"; + +// BoundObjectReference is a reference to an object that a token is bound to. +message BoundObjectReference { + // Kind of the referent. Valid kinds are 'Pod' and 'Secret'. + // +optional + optional string kind = 1; + + // API version of the referent. + // +optional + optional string aPIVersion = 2; + + // Name of the referent. + // +optional + optional string name = 3; + + // UID of the referent. + // +optional + optional string uID = 4; +} // ExtraValue masks the value so protobuf can generate // +protobuf.nullable=true @@ -40,6 +57,48 @@ message ExtraValue { repeated string items = 1; } +// TokenRequest requests a token for a given service account. +message TokenRequest { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + optional TokenRequestSpec spec = 2; + + // +optional + optional TokenRequestStatus status = 3; +} + +// TokenRequestSpec contains client provided parameters of a token request. +message TokenRequestSpec { + // Audiences are the intendend audiences of the token. A recipient of a + // token must identitfy themself with an identifier in the list of + // audiences of the token, and otherwise should reject the token. A + // token issued for multiple audiences may be used to authenticate + // against any of the audiences listed but implies a high degree of + // trust between the target audiences. + repeated string audiences = 1; + + // ExpirationSeconds is the requested duration of validity of the request. The + // token issuer may return a token with a different validity duration so a + // client needs to check the 'expiration' field in a response. + // +optional + optional int64 expirationSeconds = 4; + + // BoundObjectRef is a reference to an object that the token will be bound to. + // The token will only be valid for as long as the bound objet exists. + // +optional + optional BoundObjectReference boundObjectRef = 3; +} + +// TokenRequestStatus is the result of a token request. +message TokenRequestStatus { + // Token is the opaque bearer token. + optional string token = 1; + + // ExpirationTimestamp is the time of expiration of the returned token. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2; +} + // TokenReview attempts to authenticate a token to a known user. // Note: TokenReview requests may be cached by the webhook token authenticator // plugin in the kube-apiserver. 
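
[Reviewer sketch — not part of the patch] The vendored generated.pb.go above gives the new authentication/v1 types a hand-rolled protobuf wire format (Marshal/MarshalTo/Unmarshal with explicit tag bytes 0xa, 0x12, 0x1a for fields 1-3), and types.go below defines the plain Go structs they operate on. A minimal round-trip through those generated methods looks roughly like the sketch here; the import alias and the example token value are illustrative assumptions, not taken from the patch.

    package main

    import (
        "fmt"

        authv1 "k8s.io/api/authentication/v1" // package added by this vendor upgrade
    )

    func main() {
        // Build a TokenReview using the struct fields defined in types.go.
        in := &authv1.TokenReview{
            Spec: authv1.TokenReviewSpec{Token: "opaque-bearer-token"},
        }

        // Marshal uses the generated MarshalTo in generated.pb.go, which writes
        // ObjectMeta, Spec and Status as length-delimited fields 1, 2 and 3.
        raw, err := in.Marshal()
        if err != nil {
            panic(err)
        }

        // Unmarshal walks the same wire format back into a fresh struct.
        out := &authv1.TokenReview{}
        if err := out.Unmarshal(raw); err != nil {
            panic(err)
        }

        fmt.Println(out.Spec.Token == in.Spec.Token) // true
    }
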
diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/register.go b/vendor/k8s.io/api/authentication/v1/register.go similarity index 78% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1/register.go rename to vendor/k8s.io/api/authentication/v1/register.go index 8661169af..c522e4a46 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/register.go +++ b/vendor/k8s.io/api/authentication/v1/register.go @@ -34,14 +34,18 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &TokenReview{}, + &TokenRequest{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go new file mode 100644 index 000000000..723457a3d --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/types.go @@ -0,0 +1,168 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +const ( + // ImpersonateUserHeader is used to impersonate a particular user during an API server request + ImpersonateUserHeader = "Impersonate-User" + + // ImpersonateGroupHeader is used to impersonate a particular group during an API server request. + // It can be repeated multiplied times for multiple groups. + ImpersonateGroupHeader = "Impersonate-Group" + + // ImpersonateUserExtraHeaderPrefix is a prefix for any header used to impersonate an entry in the + // extra map[string][]string for user.Info. The key will be every after the prefix. + // It can be repeated multiplied times for multiple map keys and the same key can be repeated multiple + // times to have multiple elements in the slice under a single key + ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-" +) + +// +genclient +// +genclient:nonNamespaced +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. 
+type TokenReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated + Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// TokenReviewSpec is a description of the token authentication request. +type TokenReviewSpec struct { + // Token is the opaque bearer token. + // +optional + Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` +} + +// TokenReviewStatus is the result of the token authentication request. +type TokenReviewStatus struct { + // Authenticated indicates that the token was associated with a known user. + // +optional + Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"` + // User is the UserInfo associated with the provided token. + // +optional + User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // Error indicates that the token couldn't be checked + // +optional + Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +type UserInfo struct { + // The name that uniquely identifies this user among all active users. + // +optional + Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` + // The names of groups this user is a part of. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` + // Any additional information provided by the authenticator. + // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TokenRequest requests a token for a given service account. +type TokenRequest struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec TokenRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // +optional + Status TokenRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// TokenRequestSpec contains client provided parameters of a token request. +type TokenRequestSpec struct { + // Audiences are the intendend audiences of the token. A recipient of a + // token must identitfy themself with an identifier in the list of + // audiences of the token, and otherwise should reject the token. A + // token issued for multiple audiences may be used to authenticate + // against any of the audiences listed but implies a high degree of + // trust between the target audiences. 
+ Audiences []string `json:"audiences" protobuf:"bytes,1,rep,name=audiences"` + + // ExpirationSeconds is the requested duration of validity of the request. The + // token issuer may return a token with a different validity duration so a + // client needs to check the 'expiration' field in a response. + // +optional + ExpirationSeconds *int64 `json:"expirationSeconds" protobuf:"varint,4,opt,name=expirationSeconds"` + + // BoundObjectRef is a reference to an object that the token will be bound to. + // The token will only be valid for as long as the bound objet exists. + // +optional + BoundObjectRef *BoundObjectReference `json:"boundObjectRef" protobuf:"bytes,3,opt,name=boundObjectRef"` +} + +// TokenRequestStatus is the result of a token request. +type TokenRequestStatus struct { + // Token is the opaque bearer token. + Token string `json:"token" protobuf:"bytes,1,opt,name=token"` + // ExpirationTimestamp is the time of expiration of the returned token. + ExpirationTimestamp metav1.Time `json:"expirationTimestamp" protobuf:"bytes,2,opt,name=expirationTimestamp"` +} + +// BoundObjectReference is a reference to an object that a token is bound to. +type BoundObjectReference struct { + // Kind of the referent. Valid kinds are 'Pod' and 'Secret'. + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` + // API version of the referent. + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=aPIVersion"` + + // Name of the referent. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` + // UID of the referent. + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uID,casttype=k8s.io/apimachinery/pkg/types.UID"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go similarity index 58% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go index bb235e4ea..6632a5dd5 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,7 +26,48 @@ package v1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_BoundObjectReference = map[string]string{ + "": "BoundObjectReference is a reference to an object that a token is bound to.", + "kind": "Kind of the referent. 
Valid kinds are 'Pod' and 'Secret'.", + "apiVersion": "API version of the referent.", + "name": "Name of the referent.", + "uid": "UID of the referent.", +} + +func (BoundObjectReference) SwaggerDoc() map[string]string { + return map_BoundObjectReference +} + +var map_TokenRequest = map[string]string{ + "": "TokenRequest requests a token for a given service account.", +} + +func (TokenRequest) SwaggerDoc() map[string]string { + return map_TokenRequest +} + +var map_TokenRequestSpec = map[string]string{ + "": "TokenRequestSpec contains client provided parameters of a token request.", + "audiences": "Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.", + "expirationSeconds": "ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.", + "boundObjectRef": "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound objet exists.", +} + +func (TokenRequestSpec) SwaggerDoc() map[string]string { + return map_TokenRequestSpec +} + +var map_TokenRequestStatus = map[string]string{ + "": "TokenRequestStatus is the result of a token request.", + "token": "Token is the opaque bearer token.", + "expirationTimestamp": "ExpirationTimestamp is the time of expiration of the returned token.", +} + +func (TokenRequestStatus) SwaggerDoc() map[string]string { + return map_TokenRequestStatus +} + var map_TokenReview = map[string]string{ "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", "spec": "Spec holds information about the request being evaluated", diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..4b9b94105 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -0,0 +1,239 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BoundObjectReference) DeepCopyInto(out *BoundObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BoundObjectReference. 
+func (in *BoundObjectReference) DeepCopy() *BoundObjectReference { + if in == nil { + return nil + } + out := new(BoundObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ExtraValue) DeepCopyInto(out *ExtraValue) { + { + in := &in + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. +func (in ExtraValue) DeepCopy() ExtraValue { + if in == nil { + return nil + } + out := new(ExtraValue) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequest) DeepCopyInto(out *TokenRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequest. +func (in *TokenRequest) DeepCopy() *TokenRequest { + if in == nil { + return nil + } + out := new(TokenRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TokenRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequestSpec) DeepCopyInto(out *TokenRequestSpec) { + *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExpirationSeconds != nil { + in, out := &in.ExpirationSeconds, &out.ExpirationSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.BoundObjectRef != nil { + in, out := &in.BoundObjectRef, &out.BoundObjectRef + if *in == nil { + *out = nil + } else { + *out = new(BoundObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequestSpec. +func (in *TokenRequestSpec) DeepCopy() *TokenRequestSpec { + if in == nil { + return nil + } + out := new(TokenRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequestStatus) DeepCopyInto(out *TokenRequestStatus) { + *out = *in + in.ExpirationTimestamp.DeepCopyInto(&out.ExpirationTimestamp) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequestStatus. +func (in *TokenRequestStatus) DeepCopy() *TokenRequestStatus { + if in == nil { + return nil + } + out := new(TokenRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenReview) DeepCopyInto(out *TokenReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReview. 
+func (in *TokenReview) DeepCopy() *TokenReview { + if in == nil { + return nil + } + out := new(TokenReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TokenReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewSpec. +func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { + if in == nil { + return nil + } + out := new(TokenReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { + *out = *in + in.User.DeepCopyInto(&out.User) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewStatus. +func (in *TokenReviewStatus) DeepCopy() *TokenReviewStatus { + if in == nil { + return nil + } + out := new(TokenReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserInfo) DeepCopyInto(out *UserInfo) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = make([]string, len(val)) + copy((*out)[key], val) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInfo. +func (in *UserInfo) DeepCopy() *UserInfo { + if in == nil { + return nil + } + out := new(UserInfo) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/authentication/v1beta1/doc.go b/vendor/k8s.io/api/authentication/v1beta1/doc.go new file mode 100644 index 000000000..e0de315d4 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +groupName=authentication.k8s.io +// +k8s:openapi-gen=true +package v1beta1 // import "k8s.io/api/authentication/v1beta1" diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go similarity index 65% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.pb.go rename to vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 760416e63..0efd6bf57 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,14 +15,14 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto // DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto + k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto It has these top-level messages: ExtraValue @@ -37,9 +37,10 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + import strings "strings" import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" import io "io" @@ -50,7 +51,9 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} @@ -73,80 +76,80 @@ func (*UserInfo) ProtoMessage() {} func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func init() { - proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.ExtraValue") - proto.RegisterType((*TokenReview)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.TokenReview") - proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.TokenReviewSpec") - proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.TokenReviewStatus") - proto.RegisterType((*UserInfo)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.UserInfo") + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.ExtraValue") + proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1beta1.TokenReview") + proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewSpec") + proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewStatus") + proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1beta1.UserInfo") } -func (m ExtraValue) Marshal() (data []byte, err error) { +func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m ExtraValue) MarshalTo(data []byte) (int, error) { +func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m) > 0 { for _, s := range m { - data[i] = 0xa + dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } return i, nil } -func (m *TokenReview) Marshal() (data []byte, err error) { +func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *TokenReview) MarshalTo(data []byte) (int, error) { +func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n2 - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -154,120 +157,129 @@ func (m *TokenReview) MarshalTo(data []byte) (int, 
error) { return i, nil } -func (m *TokenReviewSpec) Marshal() (data []byte, err error) { +func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *TokenReviewSpec) MarshalTo(data []byte) (int, error) { +func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Token))) - i += copy(data[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) return i, nil } -func (m *TokenReviewStatus) Marshal() (data []byte, err error) { +func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *TokenReviewStatus) MarshalTo(data []byte) (int, error) { +func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0x8 i++ if m.Authenticated { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.User.Size())) - n4, err := m.User.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) + n4, err := m.User.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n4 - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Error))) - i += copy(data[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) return i, nil } -func (m *UserInfo) Marshal() (data []byte, err error) { +func (m *UserInfo) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *UserInfo) MarshalTo(data []byte) (int, error) { +func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Username))) - i += copy(data[i:], m.Username) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i += copy(dAtA[i:], m.Username) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.UID))) - i += copy(data[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) if len(m.Groups) > 0 { for _, s := range m.Groups { - data[i] = 0x1a + dAtA[i] = 0x1a i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { - data[i] = 0x22 + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for _, k := range keysForExtra { + dAtA[i] = 0x22 i++ - v := m.Extra[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + 
sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa + v := m.Extra[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n5, err := (&v).MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n5, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -277,31 +289,31 @@ func (m *UserInfo) MarshalTo(data []byte) (int, error) { return i, nil } -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintGenerated(data []byte, offset int, v uint64) int { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } func (m ExtraValue) Size() (n int) { @@ -450,8 +462,8 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *ExtraValue) Unmarshal(data []byte) error { - l := len(data) +func (m *ExtraValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -463,7 +475,7 @@ func (m *ExtraValue) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -491,7 +503,7 @@ func (m *ExtraValue) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -506,11 +518,11 @@ func (m *ExtraValue) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - *m = append(*m, string(data[iNdEx:postIndex])) + *m = append(*m, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -529,8 +541,8 @@ func (m *ExtraValue) Unmarshal(data []byte) 
error { } return nil } -func (m *TokenReview) Unmarshal(data []byte) error { - l := len(data) +func (m *TokenReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -542,7 +554,7 @@ func (m *TokenReview) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -570,7 +582,7 @@ func (m *TokenReview) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -584,7 +596,7 @@ func (m *TokenReview) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -600,7 +612,7 @@ func (m *TokenReview) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -614,7 +626,7 @@ func (m *TokenReview) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -630,7 +642,7 @@ func (m *TokenReview) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -644,13 +656,13 @@ func (m *TokenReview) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -669,8 +681,8 @@ func (m *TokenReview) Unmarshal(data []byte) error { } return nil } -func (m *TokenReviewSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -682,7 +694,7 @@ func (m *TokenReviewSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -710,7 +722,7 @@ func (m *TokenReviewSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -725,11 +737,11 @@ func (m *TokenReviewSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Token = string(data[iNdEx:postIndex]) + m.Token = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -748,8 +760,8 @@ func (m *TokenReviewSpec) Unmarshal(data []byte) error { } return nil } -func (m *TokenReviewStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -761,7 +773,7 @@ func (m *TokenReviewStatus) Unmarshal(data []byte) error { if iNdEx 
>= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -789,7 +801,7 @@ func (m *TokenReviewStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -809,7 +821,7 @@ func (m *TokenReviewStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -823,7 +835,7 @@ func (m *TokenReviewStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.User.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -839,7 +851,7 @@ func (m *TokenReviewStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -854,11 +866,11 @@ func (m *TokenReviewStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Error = string(data[iNdEx:postIndex]) + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -877,8 +889,8 @@ func (m *TokenReviewStatus) Unmarshal(data []byte) error { } return nil } -func (m *UserInfo) Unmarshal(data []byte) error { - l := len(data) +func (m *UserInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -890,7 +902,7 @@ func (m *UserInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -918,7 +930,7 @@ func (m *UserInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -933,7 +945,7 @@ func (m *UserInfo) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Username = string(data[iNdEx:postIndex]) + m.Username = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -947,7 +959,7 @@ func (m *UserInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -962,7 +974,7 @@ func (m *UserInfo) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.UID = string(data[iNdEx:postIndex]) + m.UID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -976,7 +988,7 @@ func (m *UserInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -991,7 +1003,7 @@ func (m *UserInfo) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 4: if wireType != 2 { @@ -1005,7 +1017,7 @@ func (m *UserInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 
0x7F) << shift if b < 0x80 { @@ -1027,7 +1039,7 @@ func (m *UserInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1042,7 +1054,7 @@ func (m *UserInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1057,61 +1069,66 @@ func (m *UserInfo) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - m.Extra[mapkey] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue + } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1130,8 +1147,8 @@ func (m *UserInfo) Unmarshal(data []byte) error { } return nil } -func skipGenerated(data []byte) (n int, err error) { - l := len(data) +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -1142,7 +1159,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1160,7 +1177,7 @@ func skipGenerated(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 
0x80 { break } } @@ -1177,7 +1194,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1200,7 +1217,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1211,7 +1228,7 @@ func skipGenerated(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipGenerated(data[start:]) + next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } @@ -1235,48 +1252,51 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -var fileDescriptorGenerated = []byte{ - // 668 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x4a, - 0x14, 0x8d, 0xf3, 0xd1, 0x97, 0x4c, 0x5e, 0xdf, 0xeb, 0x1b, 0xe9, 0x49, 0x51, 0x24, 0x9c, 0x28, - 0x6c, 0x8a, 0x54, 0xc6, 0xa4, 0xa0, 0x52, 0xb5, 0x62, 0x51, 0xab, 0x05, 0x75, 0x81, 0x90, 0xa6, - 0x94, 0x05, 0x12, 0x12, 0x13, 0xe7, 0xd6, 0x31, 0x8e, 0x3f, 0x34, 0x1e, 0xa7, 0xed, 0xae, 0x3f, - 0x81, 0x25, 0x4b, 0xfe, 0x0b, 0x9b, 0x2e, 0xbb, 0x60, 0xc1, 0x02, 0x55, 0x24, 0xfc, 0x11, 0x34, - 0xe3, 0xa1, 0x76, 0x49, 0x2b, 0x44, 0xbb, 0xf3, 0x9c, 0x7b, 0xcf, 0xb9, 0xf7, 0xdc, 0xeb, 0x8b, - 0xb6, 0xfc, 0xf5, 0x84, 0x78, 0x91, 0xe5, 0xa7, 0x03, 0xe0, 0x21, 0x08, 0x48, 0xac, 0xd8, 0x77, - 0x2d, 0x16, 0x7b, 0x89, 0xc5, 0x52, 0x31, 0x82, 0x50, 0x78, 0x0e, 0x13, 0x5e, 0x14, 0x5a, 0x93, - 0xfe, 0x00, 0x04, 0xeb, 0x5b, 0x2e, 0x84, 0xc0, 0x99, 0x80, 0x21, 0x89, 0x79, 0x24, 0x22, 0xdc, - 0xcf, 0x24, 0x48, 0x2e, 0x41, 0x62, 0xdf, 0x25, 0x52, 0x82, 0x5c, 0x96, 0x20, 0x5a, 0xa2, 0x7d, - 0xdf, 0xf5, 0xc4, 0x28, 0x1d, 0x10, 0x27, 0x0a, 0x2c, 0x37, 0x72, 0x23, 0x4b, 0x29, 0x0d, 0xd2, - 0x03, 0xf5, 0x52, 0x0f, 0xf5, 0x95, 0x55, 0x68, 0x3f, 0xd2, 0x4d, 0xb2, 0xd8, 0x0b, 0x98, 0x33, - 0xf2, 0x42, 0xe0, 0xc7, 0x79, 0x9b, 0x01, 0x08, 0x66, 0x4d, 0xe6, 0xfa, 0x6a, 0x5b, 0xd7, 0xb1, - 0x78, 0x1a, 0x0a, 0x2f, 0x80, 0x39, 0xc2, 0xda, 0xef, 0x08, 0x89, 0x33, 0x82, 0x80, 0xcd, 0xf1, - 0x1e, 0x5e, 0xc7, 0x4b, 0x85, 0x37, 0xb6, 0xbc, 0x50, 0x24, 0x82, 0xcf, 0x91, 0x0a, 0x9e, 0x12, - 0xe0, 0x13, 0xe0, 0xb9, 0x21, 0x38, 0x62, 0x41, 0x3c, 0x86, 0xab, 0x3c, 0xad, 0x5c, 0xbb, 0xae, - 0x2b, 0xb2, 0x7b, 0x8f, 0x11, 0xda, 0x39, 0x12, 0x9c, 0xbd, 0x62, 0xe3, 0x14, 0x70, 0x07, 0xd5, - 0x3c, 0x01, 0x41, 0xd2, 0x32, 0xba, 0x95, 0xe5, 0x86, 0xdd, 0x98, 0x9d, 0x77, 0x6a, 0xbb, 0x12, - 0xa0, 0x19, 0xbe, 0x51, 0xff, 0xf0, 0xb1, 0x53, 0x3a, 0xf9, 0xda, 0x2d, 0xf5, 0x3e, 0x95, 0x51, - 0xf3, 0x65, 0xe4, 0x43, 0x48, 0x61, 0xe2, 0xc1, 0x21, 0x7e, 0x8b, 0xea, 0x72, 0xca, 0x43, 0x26, - 0x58, 0xcb, 0xe8, 0x1a, 0xcb, 0xcd, 0xd5, 0x07, 0x44, 0x6f, 0xbd, 0x68, 0x3a, 0xdf, 0xbb, 0xcc, - 0x26, 0x93, 0x3e, 0x79, 0x31, 0x78, 0x07, 0x8e, 0x78, 0x0e, 0x82, 0xd9, 0xf8, 0xf4, 0xbc, 0x53, - 0x9a, 0x9d, 0x77, 0x50, 0x8e, 0xd1, 0x0b, 0x55, 0x3c, 0x44, 0xd5, 0x24, 0x06, 0xa7, 0x55, 0x56, - 0xea, 0x36, 0xf9, 0xe3, 0x7f, 0x8a, 0x14, 0xfa, 0xdd, 0x8b, 0xc1, 0xb1, 0xff, 0xd6, 0xf5, 0xaa, - 0xf2, 0x45, 0x95, 0x3a, 0x1e, 0xa3, 0x85, 0x44, 0x30, 0x91, 0x26, 0xad, 0x8a, 0xaa, 0xb3, 0x7d, - 0xcb, 0x3a, 0x4a, 0xcb, 0xfe, 0x47, 0x57, 0x5a, 0xc8, 0xde, 0x54, 0xd7, 0xe8, 0xad, 0xa1, 0x7f, - 0x7f, 0x69, 0x0a, 0xdf, 0x45, 0x35, 0x21, 0x21, 0x35, 0xc5, 0x86, 0xbd, 0xa8, 0x99, 0xb5, 0x2c, - 0x2f, 0x8b, 0xf5, 0x3e, 0x1b, 0xe8, 
0xbf, 0xb9, 0x2a, 0x78, 0x13, 0x2d, 0x16, 0x3a, 0x82, 0xa1, - 0x92, 0xa8, 0xdb, 0xff, 0x6b, 0x89, 0xc5, 0xad, 0x62, 0x90, 0x5e, 0xce, 0xc5, 0x6f, 0x50, 0x35, - 0x4d, 0x80, 0xeb, 0xf1, 0x6e, 0xde, 0xc0, 0xf6, 0x7e, 0x02, 0x7c, 0x37, 0x3c, 0x88, 0xf2, 0xb9, - 0x4a, 0x84, 0x2a, 0x59, 0x69, 0x0b, 0x38, 0x8f, 0xb8, 0x1a, 0x6b, 0xc1, 0xd6, 0x8e, 0x04, 0x69, - 0x16, 0xeb, 0x4d, 0xcb, 0xa8, 0xfe, 0x53, 0x05, 0xaf, 0xa0, 0xba, 0x64, 0x86, 0x2c, 0x00, 0x3d, - 0x8b, 0x25, 0x4d, 0x52, 0x39, 0x12, 0xa7, 0x17, 0x19, 0xf8, 0x0e, 0xaa, 0xa4, 0xde, 0x50, 0x75, - 0xdf, 0xb0, 0x9b, 0x3a, 0xb1, 0xb2, 0xbf, 0xbb, 0x4d, 0x25, 0x8e, 0x7b, 0x68, 0xc1, 0xe5, 0x51, - 0x1a, 0xcb, 0xb5, 0xca, 0x5f, 0x1b, 0xc9, 0x65, 0x3c, 0x53, 0x08, 0xd5, 0x11, 0xec, 0xa3, 0x1a, - 0xc8, 0x5b, 0x68, 0x55, 0xbb, 0x95, 0xe5, 0xe6, 0xea, 0xd3, 0x5b, 0x8c, 0x80, 0xa8, 0xa3, 0xda, - 0x09, 0x05, 0x3f, 0x2e, 0x58, 0x95, 0x18, 0xcd, 0x6a, 0xb4, 0x0f, 0xf5, 0xe1, 0xa9, 0x1c, 0xbc, - 0x84, 0x2a, 0x3e, 0x1c, 0x67, 0x36, 0xa9, 0xfc, 0xc4, 0x7b, 0xa8, 0x36, 0x91, 0x37, 0xa9, 0xf7, - 0xf1, 0xe4, 0x06, 0xcd, 0xe4, 0x87, 0x4d, 0x33, 0xad, 0x8d, 0xf2, 0xba, 0x61, 0xdf, 0x3b, 0x9d, - 0x9a, 0xa5, 0xb3, 0xa9, 0x59, 0xfa, 0x32, 0x35, 0x4b, 0x27, 0x33, 0xd3, 0x38, 0x9d, 0x99, 0xc6, - 0xd9, 0xcc, 0x34, 0xbe, 0xcd, 0x4c, 0xe3, 0xfd, 0x77, 0xb3, 0xf4, 0xfa, 0x2f, 0x2d, 0xf0, 0x23, - 0x00, 0x00, 0xff, 0xff, 0xb9, 0x87, 0xc6, 0x94, 0xfa, 0x05, 0x00, 0x00, +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 650 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0x4d, 0x4f, 0x14, 0x4d, + 0x10, 0xc7, 0x67, 0xf6, 0x85, 0x67, 0xb7, 0xf7, 0x41, 0xb1, 0x13, 0x93, 0xcd, 0x26, 0xce, 0xae, + 0xeb, 0x85, 0x44, 0xe9, 0x11, 0x24, 0x48, 0xf0, 0xe4, 0x28, 0x31, 0x98, 0x10, 0x93, 0x16, 0x3c, + 0xa8, 0x07, 0x7b, 0x67, 0x8b, 0xd9, 0x76, 0x99, 0x97, 0xf4, 0xf4, 0xac, 0x72, 0xe3, 0x23, 0x78, + 0xf4, 0x68, 0xe2, 0x27, 0x31, 0xf1, 0xc0, 0x91, 0x23, 0x07, 0x43, 0x64, 0xfc, 0x22, 0xa6, 0x7b, + 0x5a, 0x76, 0x61, 0x43, 0x80, 0xdb, 0xf4, 0xbf, 0xea, 0xff, 0x9b, 0xaa, 0xea, 0x2e, 0xf4, 0x72, + 0xb8, 0x9a, 0x12, 0x1e, 0xbb, 0xc3, 0xac, 0x07, 0x22, 0x02, 0x09, 0xa9, 0x3b, 0x82, 0xa8, 0x1f, + 0x0b, 0xd7, 0x04, 0x58, 0xc2, 0x5d, 0x96, 0xc9, 0x01, 0x44, 0x92, 0xfb, 0x4c, 0xf2, 0x38, 0x72, + 0x47, 0x8b, 0x3d, 0x90, 0x6c, 0xd1, 0x0d, 0x20, 0x02, 0xc1, 0x24, 0xf4, 0x49, 0x22, 0x62, 0x19, + 0xe3, 0xbb, 0x85, 0x85, 0xb0, 0x84, 0x93, 0xb3, 0x16, 0x62, 0x2c, 0xad, 0x85, 0x80, 0xcb, 0x41, + 0xd6, 0x23, 0x7e, 0x1c, 0xba, 0x41, 0x1c, 0xc4, 0xae, 0x76, 0xf6, 0xb2, 0x1d, 0x7d, 0xd2, 0x07, + 0xfd, 0x55, 0x10, 0x5b, 0xcb, 0xe3, 0x22, 0x42, 0xe6, 0x0f, 0x78, 0x04, 0x62, 0xcf, 0x4d, 0x86, + 0x81, 0x12, 0x52, 0x37, 0x04, 0xc9, 0xdc, 0xd1, 0x54, 0x1d, 0x2d, 0xf7, 0x22, 0x97, 0xc8, 0x22, + 0xc9, 0x43, 0x98, 0x32, 0xac, 0x5c, 0x66, 0x48, 0xfd, 0x01, 0x84, 0x6c, 0xca, 0xf7, 0xe8, 0x22, + 0x5f, 0x26, 0xf9, 0xae, 0xcb, 0x23, 0x99, 0x4a, 0x71, 0xde, 0xd4, 0x7d, 0x8c, 0xd0, 0xfa, 0x67, + 0x29, 0xd8, 0x1b, 0xb6, 0x9b, 0x01, 0x6e, 0xa3, 0x2a, 0x97, 0x10, 0xa6, 0x4d, 0xbb, 0x53, 0x9e, + 0xaf, 0x7b, 0xf5, 0xfc, 0xb8, 0x5d, 0xdd, 0x50, 0x02, 0x2d, 0xf4, 0xb5, 0xda, 0xd7, 0x6f, 0x6d, + 0x6b, 0xff, 0x57, 0xc7, 0xea, 0x7e, 0x2f, 0xa1, 0xc6, 0x56, 0x3c, 0x84, 0x88, 0xc2, 0x88, 0xc3, + 0x27, 0xfc, 0x01, 0xd5, 0xd4, 0x04, 0xfa, 0x4c, 0xb2, 0xa6, 0xdd, 0xb1, 0xe7, 0x1b, 0x4b, 0x0f, + 0xc9, 0xf8, 0x06, 0x4e, 0x0b, 0x22, 0xc9, 0x30, 
0x50, 0x42, 0x4a, 0x54, 0x36, 0x19, 0x2d, 0x92, + 0x57, 0xbd, 0x8f, 0xe0, 0xcb, 0x4d, 0x90, 0xcc, 0xc3, 0x07, 0xc7, 0x6d, 0x2b, 0x3f, 0x6e, 0xa3, + 0xb1, 0x46, 0x4f, 0xa9, 0x78, 0x0b, 0x55, 0xd2, 0x04, 0xfc, 0x66, 0x49, 0xd3, 0x97, 0xc8, 0xa5, + 0xf7, 0x4b, 0x26, 0xea, 0x7b, 0x9d, 0x80, 0xef, 0xfd, 0x6f, 0xf8, 0x15, 0x75, 0xa2, 0x9a, 0x86, + 0xdf, 0xa3, 0x99, 0x54, 0x32, 0x99, 0xa5, 0xcd, 0xb2, 0xe6, 0x2e, 0x5f, 0x93, 0xab, 0xbd, 0xde, + 0x0d, 0x43, 0x9e, 0x29, 0xce, 0xd4, 0x30, 0xbb, 0x2b, 0xe8, 0xe6, 0xb9, 0x22, 0xf0, 0x3d, 0x54, + 0x95, 0x4a, 0xd2, 0x53, 0xaa, 0x7b, 0xb3, 0xc6, 0x59, 0x2d, 0xf2, 0x8a, 0x58, 0xf7, 0xa7, 0x8d, + 0x6e, 0x4d, 0xfd, 0x05, 0x3f, 0x41, 0xb3, 0x13, 0x15, 0x41, 0x5f, 0x23, 0x6a, 0xde, 0x6d, 0x83, + 0x98, 0x7d, 0x3a, 0x19, 0xa4, 0x67, 0x73, 0xf1, 0x26, 0xaa, 0x64, 0x29, 0x08, 0x33, 0xbe, 0xfb, + 0x57, 0x68, 0x73, 0x3b, 0x05, 0xb1, 0x11, 0xed, 0xc4, 0xe3, 0xb9, 0x29, 0x85, 0x6a, 0x8c, 0x6a, + 0x03, 0x84, 0x88, 0x85, 0x1e, 0xdb, 0x44, 0x1b, 0xeb, 0x4a, 0xa4, 0x45, 0xac, 0xfb, 0xa3, 0x84, + 0x6a, 0xff, 0x28, 0xf8, 0x01, 0xaa, 0x29, 0x67, 0xc4, 0x42, 0x30, 0xbd, 0xcf, 0x19, 0x93, 0xce, + 0x51, 0x3a, 0x3d, 0xcd, 0xc0, 0x77, 0x50, 0x39, 0xe3, 0x7d, 0x5d, 0x6d, 0xdd, 0x6b, 0x98, 0xc4, + 0xf2, 0xf6, 0xc6, 0x73, 0xaa, 0x74, 0xdc, 0x45, 0x33, 0x81, 0x88, 0xb3, 0x44, 0x5d, 0x9b, 0x7a, + 0xaa, 0x48, 0x0d, 0xff, 0x85, 0x56, 0xa8, 0x89, 0xe0, 0x77, 0xa8, 0x0a, 0xea, 0x6d, 0x37, 0x2b, + 0x9d, 0xf2, 0x7c, 0x63, 0x69, 0xe5, 0x1a, 0x2d, 0x13, 0xbd, 0x14, 0xeb, 0x91, 0x14, 0x7b, 0x13, + 0xad, 0x29, 0x8d, 0x16, 0xcc, 0x56, 0x60, 0x16, 0x47, 0xe7, 0xe0, 0x39, 0x54, 0x1e, 0xc2, 0x5e, + 0xd1, 0x16, 0x55, 0x9f, 0xf8, 0x19, 0xaa, 0x8e, 0xd4, 0x4e, 0x99, 0x79, 0x2f, 0x5c, 0xe1, 0xe7, + 0xe3, 0x45, 0xa4, 0x85, 0x77, 0xad, 0xb4, 0x6a, 0x7b, 0x0b, 0x07, 0x27, 0x8e, 0x75, 0x78, 0xe2, + 0x58, 0x47, 0x27, 0x8e, 0xb5, 0x9f, 0x3b, 0xf6, 0x41, 0xee, 0xd8, 0x87, 0xb9, 0x63, 0x1f, 0xe5, + 0x8e, 0xfd, 0x3b, 0x77, 0xec, 0x2f, 0x7f, 0x1c, 0xeb, 0xed, 0x7f, 0x06, 0xf2, 0x37, 0x00, 0x00, + 0xff, 0xff, 0x36, 0x0e, 0x35, 0x2a, 0x43, 0x05, 0x00, 0x00, } diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto similarity index 94% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.proto rename to vendor/k8s.io/api/authentication/v1beta1/generated.proto index ea5203d37..4fb6448f7 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,16 +19,15 @@ limitations under the License. syntax = 'proto2'; -package k8s.io.kubernetes.pkg.apis.authentication.v1; +package k8s.io.api.authentication.v1beta1; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; // Package-wide variables from generator "generated". 
-option go_package = "v1"; +option go_package = "v1beta1"; // ExtraValue masks the value so protobuf can generate // +protobuf.nullable=true diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/register.go b/vendor/k8s.io/api/authentication/v1beta1/register.go similarity index 79% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/register.go rename to vendor/k8s.io/api/authentication/v1beta1/register.go index ddaa19702..ed23e50f7 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/register.go +++ b/vendor/k8s.io/api/authentication/v1beta1/register.go @@ -34,11 +34,14 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &TokenReview{}, diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go similarity index 95% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.go rename to vendor/k8s.io/api/authentication/v1beta1/types.go index 57c96e3bc..a90949dc3 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -22,9 +22,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// +genclient=true -// +nonNamespaced=true -// +noMethods=true +// +genclient +// +genclient:nonNamespaced +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // TokenReview attempts to authenticate a token to a known user. // Note: TokenReview requests may be cached by the webhook token authenticator diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go similarity index 97% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go index f910bea6f..968999d1e 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ package v1beta1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_TokenReview = map[string]string{ "": "TokenReview attempts to authenticate a token to a known user. 
Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", "spec": "Spec holds information about the request being evaluated", diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..bb552ff63 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,139 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ExtraValue) DeepCopyInto(out *ExtraValue) { + { + in := &in + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. +func (in ExtraValue) DeepCopy() ExtraValue { + if in == nil { + return nil + } + out := new(ExtraValue) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenReview) DeepCopyInto(out *TokenReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReview. +func (in *TokenReview) DeepCopy() *TokenReview { + if in == nil { + return nil + } + out := new(TokenReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TokenReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewSpec. +func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { + if in == nil { + return nil + } + out := new(TokenReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { + *out = *in + in.User.DeepCopyInto(&out.User) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewStatus. 
+func (in *TokenReviewStatus) DeepCopy() *TokenReviewStatus { + if in == nil { + return nil + } + out := new(TokenReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserInfo) DeepCopyInto(out *UserInfo) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = make([]string, len(val)) + copy((*out)[key], val) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInfo. +func (in *UserInfo) DeepCopy() *UserInfo { + if in == nil { + return nil + } + out := new(UserInfo) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go similarity index 85% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1/doc.go rename to vendor/k8s.io/api/authorization/v1/doc.go index 41741dd53..c06b798df 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/doc.go +++ b/vendor/k8s.io/api/authorization/v1/doc.go @@ -14,5 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + // +groupName=authorization.k8s.io -package v1 +package v1 // import "k8s.io/api/authorization/v1" diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go new file mode 100644 index 000000000..508c58f1b --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -0,0 +1,3528 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto + + It has these top-level messages: + ExtraValue + LocalSubjectAccessReview + NonResourceAttributes + NonResourceRule + ResourceAttributes + ResourceRule + SelfSubjectAccessReview + SelfSubjectAccessReviewSpec + SelfSubjectRulesReview + SelfSubjectRulesReviewSpec + SubjectAccessReview + SubjectAccessReviewSpec + SubjectAccessReviewStatus + SubjectRulesReviewStatus +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } +func (*LocalSubjectAccessReview) ProtoMessage() {} +func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } +func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} +func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{7} +} + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } +func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} +func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{9} +} + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } +func (*SubjectAccessReviewSpec) ProtoMessage() {} +func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{11} +} + +func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } +func (*SubjectAccessReviewStatus) ProtoMessage() {} +func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{12} +} + +func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } +func (*SubjectRulesReviewStatus) ProtoMessage() {} +func (*SubjectRulesReviewStatus) Descriptor() 
([]byte, []int) { + return fileDescriptorGenerated, []int{13} +} + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1.ExtraValue") + proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1.LocalSubjectAccessReview") + proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.api.authorization.v1.NonResourceAttributes") + proto.RegisterType((*NonResourceRule)(nil), "k8s.io.api.authorization.v1.NonResourceRule") + proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.api.authorization.v1.ResourceAttributes") + proto.RegisterType((*ResourceRule)(nil), "k8s.io.api.authorization.v1.ResourceRule") + proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.api.authorization.v1.SelfSubjectAccessReview") + proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1.SelfSubjectAccessReviewSpec") + proto.RegisterType((*SelfSubjectRulesReview)(nil), "k8s.io.api.authorization.v1.SelfSubjectRulesReview") + proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1.SelfSubjectRulesReviewSpec") + proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1.SubjectAccessReview") + proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec") + proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewStatus") + proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectRulesReviewStatus") +} +func (m ExtraValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i += copy(dAtA[i:], m.Verb) + return i, nil +} + +func (m 
*NonResourceRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i += copy(dAtA[i:], m.Verb) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i += copy(dAtA[i:], m.Resource) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i += copy(dAtA[i:], m.Subresource) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} + +func (m *ResourceRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l 
int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil +} + +func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) + n7, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.NonResourceAttributes != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) + n8, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n11, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + return i, nil +} + +func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + return i, nil +} + +func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n13, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n14, err := 
m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + return i, nil +} + +func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) + n15, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.NonResourceAttributes != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) + n16, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for _, k := range keysForExtra { + dAtA[i] = 0x2a + i++ + v := m.Extra[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n17, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + } + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + return i, nil +} + +func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i += copy(dAtA[i:], m.EvaluationError) + dAtA[i] = 0x20 + i++ + if m.Denied { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ResourceRules) > 0 { + for _, msg := range m.ResourceRules { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if 
len(m.NonResourceRules) > 0 { + for _, msg := range m.NonResourceRules { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x18 + i++ + if m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i += copy(dAtA[i:], m.EvaluationError) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LocalSubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourceAttributes) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourceRule) Size() (n int) { + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceAttributes) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceRule) Size() (n int) { + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SelfSubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectAccessReviewSpec) Size() (n int) { + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SelfSubjectRulesReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectAccessReviewSpec) Size() (n int) { + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectAccessReviewStatus) Size() (n int) { + var l int + _ = l + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *SubjectRulesReviewStatus) Size() (n int) { + var l int + _ = l + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceRules) > 0 { + for _, e := range m.NonResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LocalSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourceAttributes{`, + `Path:` + fmt.Sprintf("%v", 
this.Path) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourceRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourceRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceAttributes{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectRulesReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectRulesReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectRulesReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectRulesReviewSpec{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), 
"ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&SubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReviewStatus{`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `Denied:` + fmt.Sprintf("%v", this.Denied) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectRulesReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectRulesReviewStatus{`, + `ResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRules), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + `,`, + `NonResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NonResourceRules), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + `,`, + `Incomplete:` + fmt.Sprintf("%v", this.Incomplete) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourceRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourceRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourceRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + 
} + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire 
|= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Denied", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Denied = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, ResourceRule{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceRules = append(m.NonResourceRules, NonResourceRule{}) + if err := m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Incomplete", 
wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Incomplete = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 1152 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 
0x14, 0xf7, 0xae, 0xed, 0xd4, 0x1e, 0x37, 0x24, 0x9d, 0x28, 0xcd, 0x36, 0x15, 0x76, 0xb4, 0x48, + 0x90, 0x8a, 0xb2, 0x4b, 0x4c, 0xdb, 0x44, 0x95, 0x2a, 0x14, 0x2b, 0x11, 0x8a, 0xd4, 0x96, 0x6a, + 0xa2, 0x44, 0xa2, 0x08, 0xc4, 0x78, 0x3d, 0xb1, 0x97, 0xd8, 0xbb, 0xcb, 0xcc, 0xac, 0x43, 0x38, + 0x55, 0xe2, 0x0b, 0x70, 0xe4, 0xc0, 0x81, 0x6f, 0xc0, 0x05, 0x89, 0x1b, 0x07, 0x0e, 0x28, 0xc7, + 0x1e, 0x8b, 0x84, 0x2c, 0xb2, 0x9c, 0xf9, 0x0e, 0x68, 0x66, 0xc7, 0xde, 0x75, 0xb2, 0x76, 0x13, + 0x0e, 0xf4, 0xd2, 0xdb, 0xee, 0xfb, 0xfd, 0xde, 0x9f, 0x79, 0x7f, 0x66, 0x1e, 0xd8, 0x3a, 0xdc, + 0x60, 0x96, 0xeb, 0xdb, 0x87, 0x61, 0x93, 0x50, 0x8f, 0x70, 0xc2, 0xec, 0x3e, 0xf1, 0x5a, 0x3e, + 0xb5, 0x15, 0x80, 0x03, 0xd7, 0xc6, 0x21, 0xef, 0xf8, 0xd4, 0xfd, 0x06, 0x73, 0xd7, 0xf7, 0xec, + 0xfe, 0x9a, 0xdd, 0x26, 0x1e, 0xa1, 0x98, 0x93, 0x96, 0x15, 0x50, 0x9f, 0xfb, 0xf0, 0x66, 0x4c, + 0xb6, 0x70, 0xe0, 0x5a, 0x63, 0x64, 0xab, 0xbf, 0xb6, 0xfc, 0x5e, 0xdb, 0xe5, 0x9d, 0xb0, 0x69, + 0x39, 0x7e, 0xcf, 0x6e, 0xfb, 0x6d, 0xdf, 0x96, 0x3a, 0xcd, 0xf0, 0x40, 0xfe, 0xc9, 0x1f, 0xf9, + 0x15, 0xdb, 0x5a, 0xbe, 0x93, 0x38, 0xee, 0x61, 0xa7, 0xe3, 0x7a, 0x84, 0x1e, 0xdb, 0xc1, 0x61, + 0x5b, 0x08, 0x98, 0xdd, 0x23, 0x1c, 0x67, 0x44, 0xb0, 0x6c, 0x4f, 0xd2, 0xa2, 0xa1, 0xc7, 0xdd, + 0x1e, 0x39, 0xa7, 0x70, 0xef, 0x65, 0x0a, 0xcc, 0xe9, 0x90, 0x1e, 0x3e, 0xa7, 0xf7, 0xc1, 0x24, + 0xbd, 0x90, 0xbb, 0x5d, 0xdb, 0xf5, 0x38, 0xe3, 0xf4, 0xac, 0x92, 0xb9, 0x0e, 0xc0, 0xf6, 0xd7, + 0x9c, 0xe2, 0x7d, 0xdc, 0x0d, 0x09, 0xac, 0x81, 0xa2, 0xcb, 0x49, 0x8f, 0x19, 0xda, 0x4a, 0x7e, + 0xb5, 0xdc, 0x28, 0x47, 0x83, 0x5a, 0x71, 0x47, 0x08, 0x50, 0x2c, 0xbf, 0x5f, 0xfa, 0xfe, 0xc7, + 0x5a, 0xee, 0xd9, 0x9f, 0x2b, 0x39, 0xf3, 0x67, 0x1d, 0x18, 0x0f, 0x7d, 0x07, 0x77, 0x77, 0xc3, + 0xe6, 0x97, 0xc4, 0xe1, 0x9b, 0x8e, 0x43, 0x18, 0x43, 0xa4, 0xef, 0x92, 0x23, 0xf8, 0x05, 0x28, + 0x89, 0x74, 0xb4, 0x30, 0xc7, 0x86, 0xb6, 0xa2, 0xad, 0x56, 0xea, 0xef, 0x5b, 0x49, 0x21, 0x46, + 0xd1, 0x59, 0xc1, 0x61, 0x5b, 0x08, 0x98, 0x25, 0xd8, 0x56, 0x7f, 0xcd, 0xfa, 0x58, 0xda, 0x7a, + 0x44, 0x38, 0x6e, 0xc0, 0x93, 0x41, 0x2d, 0x17, 0x0d, 0x6a, 0x20, 0x91, 0xa1, 0x91, 0x55, 0xb8, + 0x0f, 0x0a, 0x2c, 0x20, 0x8e, 0xa1, 0x4b, 0xeb, 0x77, 0xac, 0x29, 0x65, 0xb6, 0x32, 0x22, 0xdc, + 0x0d, 0x88, 0xd3, 0xb8, 0xaa, 0x3c, 0x14, 0xc4, 0x1f, 0x92, 0xf6, 0xe0, 0xe7, 0x60, 0x86, 0x71, + 0xcc, 0x43, 0x66, 0xe4, 0xa5, 0xe5, 0x7b, 0x97, 0xb6, 0x2c, 0xb5, 0x1b, 0x6f, 0x28, 0xdb, 0x33, + 0xf1, 0x3f, 0x52, 0x56, 0xcd, 0x4f, 0xc1, 0xe2, 0x63, 0xdf, 0x43, 0x84, 0xf9, 0x21, 0x75, 0xc8, + 0x26, 0xe7, 0xd4, 0x6d, 0x86, 0x9c, 0x30, 0xb8, 0x02, 0x0a, 0x01, 0xe6, 0x1d, 0x99, 0xae, 0x72, + 0x12, 0xda, 0x13, 0xcc, 0x3b, 0x48, 0x22, 0x82, 0xd1, 0x27, 0xb4, 0x29, 0x8f, 0x9c, 0x62, 0xec, + 0x13, 0xda, 0x44, 0x12, 0x31, 0xbf, 0x02, 0x73, 0x29, 0xe3, 0x28, 0xec, 0xca, 0x8a, 0x0a, 0x68, + 0xac, 0xa2, 0x42, 0x83, 0xa1, 0x58, 0x0e, 0x1f, 0x80, 0x39, 0x2f, 0xd1, 0xd9, 0x43, 0x0f, 0x99, + 0xa1, 0x4b, 0xea, 0x42, 0x34, 0xa8, 0xa5, 0xcd, 0x09, 0x08, 0x9d, 0xe5, 0x9a, 0xbf, 0xea, 0x00, + 0x66, 0x9c, 0xc6, 0x06, 0x65, 0x0f, 0xf7, 0x08, 0x0b, 0xb0, 0x43, 0xd4, 0x91, 0xae, 0xa9, 0x80, + 0xcb, 0x8f, 0x87, 0x00, 0x4a, 0x38, 0x2f, 0x3f, 0x1c, 0x7c, 0x0b, 0x14, 0xdb, 0xd4, 0x0f, 0x03, + 0x59, 0x98, 0x72, 0x63, 0x56, 0x51, 0x8a, 0x1f, 0x09, 0x21, 0x8a, 0x31, 0x78, 0x0b, 0x5c, 0xe9, + 0x13, 0xca, 0x5c, 0xdf, 0x33, 0x0a, 0x92, 0x36, 0xa7, 0x68, 0x57, 0xf6, 0x63, 0x31, 0x1a, 0xe2, + 0xf0, 0x36, 0x28, 0x51, 0x15, 0xb8, 0x51, 0x94, 0xdc, 0x79, 0xc5, 0x2d, 0x8d, 0x32, 0x38, 0x62, + 0xc0, 0xbb, 0xa0, 0xc2, 
0xc2, 0xe6, 0x48, 0x61, 0x46, 0x2a, 0x2c, 0x28, 0x85, 0xca, 0x6e, 0x02, + 0xa1, 0x34, 0x4f, 0x1c, 0x4b, 0x9c, 0xd1, 0xb8, 0x32, 0x7e, 0x2c, 0x91, 0x02, 0x24, 0x11, 0xf3, + 0x37, 0x0d, 0x5c, 0xbd, 0x5c, 0xc5, 0xde, 0x05, 0x65, 0x1c, 0xb8, 0xf2, 0xd8, 0xc3, 0x5a, 0xcd, + 0x8a, 0xbc, 0x6e, 0x3e, 0xd9, 0x89, 0x85, 0x28, 0xc1, 0x05, 0x79, 0x18, 0x8c, 0x68, 0xe9, 0x11, + 0x79, 0xe8, 0x92, 0xa1, 0x04, 0x87, 0xeb, 0x60, 0x76, 0xf8, 0x23, 0x8b, 0x64, 0x14, 0xa4, 0xc2, + 0xb5, 0x68, 0x50, 0x9b, 0x45, 0x69, 0x00, 0x8d, 0xf3, 0xcc, 0x5f, 0x74, 0xb0, 0xb4, 0x4b, 0xba, + 0x07, 0xaf, 0xe6, 0x2e, 0x78, 0x3a, 0x76, 0x17, 0x6c, 0x4c, 0x9f, 0xd8, 0xec, 0x28, 0x5f, 0xd9, + 0x7d, 0xf0, 0x83, 0x0e, 0x6e, 0x4e, 0x89, 0x09, 0x1e, 0x01, 0x48, 0xcf, 0x8d, 0x97, 0xca, 0xa3, + 0x3d, 0x35, 0x96, 0xf3, 0x53, 0xd9, 0xb8, 0x1e, 0x0d, 0x6a, 0x19, 0xd3, 0x8a, 0x32, 0x5c, 0xc0, + 0x6f, 0x35, 0xb0, 0xe8, 0x65, 0xdd, 0x54, 0x2a, 0xcd, 0xf5, 0xa9, 0xce, 0x33, 0xef, 0xb8, 0xc6, + 0x8d, 0x68, 0x50, 0xcb, 0xbe, 0xfe, 0x50, 0xb6, 0x2f, 0xf1, 0xca, 0x5c, 0x4f, 0xa5, 0x47, 0x0c, + 0xc8, 0xff, 0xd7, 0x57, 0x9f, 0x8c, 0xf5, 0xd5, 0xfa, 0x45, 0xfb, 0x2a, 0x15, 0xe4, 0xc4, 0xb6, + 0xfa, 0xec, 0x4c, 0x5b, 0xdd, 0xbd, 0x48, 0x5b, 0xa5, 0x0d, 0x4f, 0xef, 0xaa, 0x47, 0x60, 0x79, + 0x72, 0x40, 0x97, 0xbe, 0x9c, 0xcd, 0x9f, 0x74, 0xb0, 0xf0, 0xfa, 0x99, 0xbf, 0xcc, 0x58, 0xff, + 0x5e, 0x00, 0x4b, 0xaf, 0x47, 0x7a, 0xd2, 0xa2, 0x13, 0x32, 0x42, 0xd5, 0x33, 0x3e, 0x2a, 0xce, + 0x1e, 0x23, 0x14, 0x49, 0x04, 0x9a, 0x60, 0xa6, 0x1d, 0xbf, 0x6e, 0xf1, 0xfb, 0x03, 0x44, 0x82, + 0xd5, 0xd3, 0xa6, 0x10, 0xd8, 0x02, 0x45, 0x22, 0xf6, 0x56, 0xa3, 0xb8, 0x92, 0x5f, 0xad, 0xd4, + 0x3f, 0xfc, 0x2f, 0x9d, 0x61, 0xc9, 0xcd, 0x77, 0xdb, 0xe3, 0xf4, 0x38, 0x59, 0x27, 0xa4, 0x0c, + 0xc5, 0xc6, 0xe1, 0x9b, 0x20, 0x1f, 0xba, 0x2d, 0xf5, 0xda, 0x57, 0x14, 0x25, 0xbf, 0xb7, 0xb3, + 0x85, 0x84, 0x7c, 0x19, 0xab, 0xe5, 0x59, 0x9a, 0x80, 0xf3, 0x20, 0x7f, 0x48, 0x8e, 0xe3, 0x81, + 0x42, 0xe2, 0x13, 0x3e, 0x00, 0xc5, 0xbe, 0xd8, 0xab, 0x55, 0x7e, 0xdf, 0x99, 0x1a, 0x64, 0xb2, + 0x86, 0xa3, 0x58, 0xeb, 0xbe, 0xbe, 0xa1, 0x99, 0x7f, 0x68, 0xe0, 0xc6, 0xc4, 0xf6, 0x13, 0xeb, + 0x0e, 0xee, 0x76, 0xfd, 0x23, 0xd2, 0x92, 0x6e, 0x4b, 0xc9, 0xba, 0xb3, 0x19, 0x8b, 0xd1, 0x10, + 0x87, 0x6f, 0x83, 0x19, 0x4a, 0x30, 0xf3, 0x3d, 0xb5, 0x62, 0x8d, 0x3a, 0x17, 0x49, 0x29, 0x52, + 0x28, 0xdc, 0x04, 0x73, 0x44, 0xb8, 0x97, 0x71, 0x6d, 0x53, 0xea, 0x0f, 0x2b, 0xb5, 0xa4, 0x14, + 0xe6, 0xb6, 0xc7, 0x61, 0x74, 0x96, 0x2f, 0x5c, 0xb5, 0x88, 0xe7, 0x92, 0x96, 0xdc, 0xc1, 0x4a, + 0x89, 0xab, 0x2d, 0x29, 0x45, 0x0a, 0x35, 0xff, 0xd1, 0x81, 0x31, 0xe9, 0x6a, 0x83, 0x07, 0xc9, + 0x2e, 0x22, 0x41, 0xb9, 0x0e, 0x55, 0xea, 0xb7, 0x2e, 0x34, 0x20, 0x42, 0xa3, 0xb1, 0xa8, 0xdc, + 0xce, 0xa6, 0xa5, 0xa9, 0xd5, 0x45, 0xfe, 0x42, 0x0a, 0xe6, 0xbd, 0xf1, 0x9d, 0x39, 0x5e, 0xaa, + 0x2a, 0xf5, 0xdb, 0x17, 0x1d, 0x07, 0xe9, 0xcd, 0x50, 0xde, 0xe6, 0xcf, 0x00, 0x0c, 0x9d, 0xb3, + 0x0f, 0xeb, 0x00, 0xb8, 0x9e, 0xe3, 0xf7, 0x82, 0x2e, 0xe1, 0x44, 0xa6, 0xb7, 0x94, 0xdc, 0x83, + 0x3b, 0x23, 0x04, 0xa5, 0x58, 0x59, 0x75, 0x29, 0x5c, 0xae, 0x2e, 0x8d, 0xd5, 0x93, 0xd3, 0x6a, + 0xee, 0xf9, 0x69, 0x35, 0xf7, 0xe2, 0xb4, 0x9a, 0x7b, 0x16, 0x55, 0xb5, 0x93, 0xa8, 0xaa, 0x3d, + 0x8f, 0xaa, 0xda, 0x8b, 0xa8, 0xaa, 0xfd, 0x15, 0x55, 0xb5, 0xef, 0xfe, 0xae, 0xe6, 0x9e, 0xea, + 0xfd, 0xb5, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x0e, 0xab, 0x82, 0x7c, 0x0f, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto similarity 
index 59% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.proto rename to vendor/k8s.io/api/authorization/v1/generated.proto index 7036b8eb3..d87b03dbf 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,14 +19,12 @@ limitations under the License. syntax = 'proto2'; -package k8s.io.kubernetes.pkg.apis.authorization.v1; +package k8s.io.api.authorization.v1; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; @@ -67,6 +65,17 @@ message NonResourceAttributes { optional string verb = 2; } +// NonResourceRule holds information that describes a rule for the non-resource +message NonResourceRule { + // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + repeated string verbs = 1; + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, + // final step in the path. "*" means all. + // +optional + repeated string nonResourceURLs = 2; +} + // ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface message ResourceAttributes { // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces @@ -101,6 +110,27 @@ message ResourceAttributes { optional string name = 7; } +// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, +// may contain duplicates, and possibly be incomplete. +message ResourceRule { + // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + repeated string verbs = 1; + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. "*" means all. + // +optional + repeated string apiGroups = 2; + + // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. + // +optional + repeated string resources = 3; + + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. + // +optional + repeated string resourceNames = 4; +} + // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a // spec.namespace means "in all namespaces". 
Self is a special case, because users should always be able // to check whether they can perform an action @@ -128,6 +158,29 @@ message SelfSubjectAccessReviewSpec { optional NonResourceAttributes nonResourceAttributes = 2; } +// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. +// The returned list of actions may be incomplete depending on the server's authorization mode, +// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, +// or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to +// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. +// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. +message SelfSubjectRulesReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated. + optional SelfSubjectRulesReviewSpec spec = 2; + + // Status is filled in by the server and indicates the set of actions a user can perform. + // +optional + optional SubjectRulesReviewStatus status = 3; +} + +message SelfSubjectRulesReviewSpec { + // Namespace to evaluate rules for. Required. + optional string namespace = 1; +} + // SubjectAccessReview checks whether or not a user or group can perform an action. message SubjectAccessReview { // +optional @@ -155,7 +208,7 @@ message SubjectAccessReviewSpec { // User is the user you're testing for. // If you specify "User" but not "Groups", then is it interpreted as "What if User were not a member of any groups // +optional - optional string verb = 3; + optional string user = 3; // Groups is the groups you're testing for. // +optional @@ -165,13 +218,24 @@ message SubjectAccessReviewSpec { // it needs a reflection here. // +optional map extra = 5; + + // UID information about the requesting user. + // +optional + optional string uid = 6; } // SubjectAccessReviewStatus message SubjectAccessReviewStatus { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. optional bool allowed = 1; + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + optional bool denied = 4; + // Reason is optional. It indicates why a request was allowed or denied. // +optional optional string reason = 2; @@ -183,3 +247,27 @@ message SubjectAccessReviewStatus { optional string evaluationError = 3; } +// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on +// the set of authorizers the server is configured with and any errors experienced during evaluation. +// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, +// even if that list is incomplete. +message SubjectRulesReviewStatus { + // ResourceRules is the list of actions the subject is allowed to perform on resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. 
+ repeated ResourceRule resourceRules = 1; + + // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + repeated NonResourceRule nonResourceRules = 2; + + // Incomplete is true when the rules returned by this call are incomplete. This is most commonly + // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. + optional bool incomplete = 3; + + // EvaluationError can appear in combination with Rules. It indicates an error occurred during + // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that + // ResourceRules and/or NonResourceRules may be incomplete. + // +optional + optional string evaluationError = 4; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/register.go b/vendor/k8s.io/api/authorization/v1/register.go similarity index 75% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1/register.go rename to vendor/k8s.io/api/authorization/v1/register.go index 909bc0a7d..59311981e 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/register.go +++ b/vendor/k8s.io/api/authorization/v1/register.go @@ -34,13 +34,17 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectRulesReview{}, &SelfSubjectAccessReview{}, &SubjectAccessReview{}, &LocalSubjectAccessReview{}, @@ -49,7 +53,3 @@ func addKnownTypes(scheme *runtime.Scheme) error { metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } - -func (obj *LocalSubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *SubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *SelfSubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go similarity index 56% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.go rename to vendor/k8s.io/api/authorization/v1/types.go index 38c314ffc..86b05c54e 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.go +++ b/vendor/k8s.io/api/authorization/v1/types.go @@ -22,9 +22,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// +genclient=true -// +nonNamespaced=true -// +noMethods=true +// +genclient +// +genclient:nonNamespaced +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SubjectAccessReview checks whether or not a user or group can perform an action. 
type SubjectAccessReview struct { @@ -40,9 +41,10 @@ type SubjectAccessReview struct { Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -// +genclient=true -// +nonNamespaced=true -// +noMethods=true +// +genclient +// +genclient:nonNamespaced +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a // spec.namespace means "in all namespaces". Self is a special case, because users should always be able @@ -60,8 +62,9 @@ type SelfSubjectAccessReview struct { Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -// +genclient=true -// +noMethods=true +// +genclient +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. // Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions @@ -131,7 +134,7 @@ type SubjectAccessReviewSpec struct { // User is the user you're testing for. // If you specify "User" but not "Groups", then is it interpreted as "What if User were not a member of any groups // +optional - User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=verb"` + User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Groups is the groups you're testing for. // +optional Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"` @@ -139,6 +142,9 @@ type SubjectAccessReviewSpec struct { // it needs a reflection here. // +optional Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"` + // UID information about the requesting user. + // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid"` } // ExtraValue masks the value so protobuf can generate @@ -163,8 +169,14 @@ type SelfSubjectAccessReviewSpec struct { // SubjectAccessReviewStatus type SubjectAccessReviewStatus struct { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + Denied bool `json:"denied,omitempty" protobuf:"varint,4,opt,name=denied"` // Reason is optional. It indicates why a request was allowed or denied. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` @@ -174,3 +186,83 @@ type SubjectAccessReviewStatus struct { // +optional EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,3,opt,name=evaluationError"` } + +// +genclient +// +genclient:nonNamespaced +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. +// The returned list of actions may be incomplete depending on the server's authorization mode, +// and any errors experienced during the evaluation. 
SelfSubjectRulesReview should be used by UIs to show/hide actions, +// or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to +// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. +// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. +type SelfSubjectRulesReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. + Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates the set of actions a user can perform. + // +optional + Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +type SelfSubjectRulesReviewSpec struct { + // Namespace to evaluate rules for. Required. + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` +} + +// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on +// the set of authorizers the server is configured with and any errors experienced during evaluation. +// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, +// even if that list is incomplete. +type SubjectRulesReviewStatus struct { + // ResourceRules is the list of actions the subject is allowed to perform on resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + ResourceRules []ResourceRule `json:"resourceRules" protobuf:"bytes,1,rep,name=resourceRules"` + // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + NonResourceRules []NonResourceRule `json:"nonResourceRules" protobuf:"bytes,2,rep,name=nonResourceRules"` + // Incomplete is true when the rules returned by this call are incomplete. This is most commonly + // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. + Incomplete bool `json:"incomplete" protobuf:"bytes,3,rep,name=incomplete"` + // EvaluationError can appear in combination with Rules. It indicates an error occurred during + // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that + // ResourceRules and/or NonResourceRules may be incomplete. + // +optional + EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"` +} + +// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, +// may contain duplicates, and possibly be incomplete. +type ResourceRule struct { + // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. "*" means all. 
+ // +optional + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` +} + +// NonResourceRule holds information that describes a rule for the non-resource +type NonResourceRule struct { + // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, + // final step in the path. "*" means all. + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,2,rep,name=nonResourceURLs"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go similarity index 58% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index 33c0035b4..8445f7116 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ package v1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_LocalSubjectAccessReview = map[string]string{ "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.", @@ -47,6 +47,16 @@ func (NonResourceAttributes) SwaggerDoc() map[string]string { return map_NonResourceAttributes } +var map_NonResourceRule = map[string]string{ + "": "NonResourceRule holds information that describes a rule for the non-resource", + "verbs": "Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. \"*\" means all.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. 
\"*\" means all.", +} + +func (NonResourceRule) SwaggerDoc() map[string]string { + return map_NonResourceRule +} + var map_ResourceAttributes = map[string]string{ "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", @@ -62,6 +72,18 @@ func (ResourceAttributes) SwaggerDoc() map[string]string { return map_ResourceAttributes } +var map_ResourceRule = map[string]string{ + "": "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "verbs": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"*\" means all.", + "resources": "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all.", +} + +func (ResourceRule) SwaggerDoc() map[string]string { + return map_ResourceRule +} + var map_SelfSubjectAccessReview = map[string]string{ "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", "spec": "Spec holds information about the request being evaluated. user and groups must be empty", @@ -82,6 +104,24 @@ func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string { return map_SelfSubjectAccessReviewSpec } +var map_SelfSubjectRulesReview = map[string]string{ + "": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.", + "spec": "Spec holds information about the request being evaluated.", + "status": "Status is filled in by the server and indicates the set of actions a user can perform.", +} + +func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { + return map_SelfSubjectRulesReview +} + +var map_SelfSubjectRulesReviewSpec = map[string]string{ + "namespace": "Namespace to evaluate rules for. 
Required.", +} + +func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string { + return map_SelfSubjectRulesReviewSpec +} + var map_SubjectAccessReview = map[string]string{ "": "SubjectAccessReview checks whether or not a user or group can perform an action.", "spec": "Spec holds information about the request being evaluated", @@ -99,6 +139,7 @@ var map_SubjectAccessReviewSpec = map[string]string{ "user": "User is the user you're testing for. If you specify \"User\" but not \"Groups\", then is it interpreted as \"What if User were not a member of any groups", "groups": "Groups is the groups you're testing for.", "extra": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", + "uid": "UID information about the requesting user.", } func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { @@ -107,7 +148,8 @@ func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { var map_SubjectAccessReviewStatus = map[string]string{ "": "SubjectAccessReviewStatus", - "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "denied": "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", "reason": "Reason is optional. It indicates why a request was allowed or denied.", "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", } @@ -116,4 +158,16 @@ func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string { return map_SubjectAccessReviewStatus } +var map_SubjectRulesReviewStatus = map[string]string{ + "": "SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on the set of authorizers the server is configured with and any errors experienced during evaluation. Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, even if that list is incomplete.", + "resourceRules": "ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "nonResourceRules": "NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "incomplete": "Incomplete is true when the rules returned by this call are incomplete. This is most commonly encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.", + "evaluationError": "EvaluationError can appear in combination with Rules. 
It indicates an error occurred during rule evaluation, such as an authorizer that doesn't support rule evaluation, and that ResourceRules and/or NonResourceRules may be incomplete.", +} + +func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string { + return map_SubjectRulesReviewStatus +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..999933d74 --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go @@ -0,0 +1,398 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ExtraValue) DeepCopyInto(out *ExtraValue) { + { + in := &in + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. +func (in ExtraValue) DeepCopy() ExtraValue { + if in == nil { + return nil + } + out := new(ExtraValue) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSubjectAccessReview. +func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview { + if in == nil { + return nil + } + out := new(LocalSubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonResourceAttributes) DeepCopyInto(out *NonResourceAttributes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceAttributes. +func (in *NonResourceAttributes) DeepCopy() *NonResourceAttributes { + if in == nil { + return nil + } + out := new(NonResourceAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NonResourceRule) DeepCopyInto(out *NonResourceRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceRule. +func (in *NonResourceRule) DeepCopy() *NonResourceRule { + if in == nil { + return nil + } + out := new(NonResourceRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAttributes. +func (in *ResourceAttributes) DeepCopy() *ResourceAttributes { + if in == nil { + return nil + } + out := new(ResourceAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRule) DeepCopyInto(out *ResourceRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRule. +func (in *ResourceRule) DeepCopy() *ResourceRule { + if in == nil { + return nil + } + out := new(ResourceRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectAccessReview) DeepCopyInto(out *SelfSubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReview. +func (in *SelfSubjectAccessReview) DeepCopy() *SelfSubjectAccessReview { + if in == nil { + return nil + } + out := new(SelfSubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReviewSpec) { + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + if *in == nil { + *out = nil + } else { + *out = new(ResourceAttributes) + **out = **in + } + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + if *in == nil { + *out = nil + } else { + *out = new(NonResourceAttributes) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReviewSpec. +func (in *SelfSubjectAccessReviewSpec) DeepCopy() *SelfSubjectAccessReviewSpec { + if in == nil { + return nil + } + out := new(SelfSubjectAccessReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectRulesReview) DeepCopyInto(out *SelfSubjectRulesReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReview. +func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview { + if in == nil { + return nil + } + out := new(SelfSubjectRulesReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectRulesReviewSpec) DeepCopyInto(out *SelfSubjectRulesReviewSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReviewSpec. +func (in *SelfSubjectRulesReviewSpec) DeepCopy() *SelfSubjectRulesReviewSpec { + if in == nil { + return nil + } + out := new(SelfSubjectRulesReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReview) DeepCopyInto(out *SubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReview. +func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview { + if in == nil { + return nil + } + out := new(SubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) { + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + if *in == nil { + *out = nil + } else { + *out = new(ResourceAttributes) + **out = **in + } + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + if *in == nil { + *out = nil + } else { + *out = new(NonResourceAttributes) + **out = **in + } + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = make([]string, len(val)) + copy((*out)[key], val) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewSpec. +func (in *SubjectAccessReviewSpec) DeepCopy() *SubjectAccessReviewSpec { + if in == nil { + return nil + } + out := new(SubjectAccessReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReviewStatus) DeepCopyInto(out *SubjectAccessReviewStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewStatus. +func (in *SubjectAccessReviewStatus) DeepCopy() *SubjectAccessReviewStatus { + if in == nil { + return nil + } + out := new(SubjectAccessReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectRulesReviewStatus) DeepCopyInto(out *SubjectRulesReviewStatus) { + *out = *in + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]ResourceRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NonResourceRules != nil { + in, out := &in.NonResourceRules, &out.NonResourceRules + *out = make([]NonResourceRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewStatus. +func (in *SubjectRulesReviewStatus) DeepCopy() *SubjectRulesReviewStatus { + if in == nil { + return nil + } + out := new(SubjectRulesReviewStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/doc.go b/vendor/k8s.io/api/authorization/v1beta1/doc.go similarity index 83% rename from vendor/k8s.io/client-go/pkg/apis/authorization/doc.go rename to vendor/k8s.io/api/authorization/v1beta1/doc.go index 91344f674..ea4f802e2 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/doc.go +++ b/vendor/k8s.io/api/authorization/v1beta1/doc.go @@ -14,5 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + // +groupName=authorization.k8s.io -package authorization +package v1beta1 // import "k8s.io/api/authorization/v1beta1" diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go new file mode 100644 index 000000000..1f8abde4f --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -0,0 +1,3529 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto + + It has these top-level messages: + ExtraValue + LocalSubjectAccessReview + NonResourceAttributes + NonResourceRule + ResourceAttributes + ResourceRule + SelfSubjectAccessReview + SelfSubjectAccessReviewSpec + SelfSubjectRulesReview + SelfSubjectRulesReviewSpec + SubjectAccessReview + SubjectAccessReviewSpec + SubjectAccessReviewStatus + SubjectRulesReviewStatus +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } +func (*LocalSubjectAccessReview) ProtoMessage() {} +func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } +func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} +func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{7} +} + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } +func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} +func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{9} +} + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } +func (*SubjectAccessReviewSpec) ProtoMessage() {} +func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{11} +} + +func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } +func (*SubjectAccessReviewStatus) ProtoMessage() {} +func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{12} +} + +func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } +func (*SubjectRulesReviewStatus) ProtoMessage() {} +func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{13} +} + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.ExtraValue") + proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.LocalSubjectAccessReview") + 
proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.api.authorization.v1beta1.NonResourceAttributes") + proto.RegisterType((*NonResourceRule)(nil), "k8s.io.api.authorization.v1beta1.NonResourceRule") + proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.api.authorization.v1beta1.ResourceAttributes") + proto.RegisterType((*ResourceRule)(nil), "k8s.io.api.authorization.v1beta1.ResourceRule") + proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectAccessReview") + proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectAccessReviewSpec") + proto.RegisterType((*SelfSubjectRulesReview)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectRulesReview") + proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectRulesReviewSpec") + proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReview") + proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec") + proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewStatus") + proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectRulesReviewStatus") +} +func (m ExtraValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i += copy(dAtA[i:], m.Verb) + return i, nil +} + +func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourceRule) 
MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i += copy(dAtA[i:], m.Verb) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i += copy(dAtA[i:], m.Resource) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i += copy(dAtA[i:], m.Subresource) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} + +func (m *ResourceRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x12 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil +} + +func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) + n7, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.NonResourceAttributes != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) + n8, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n11, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + return i, nil +} + +func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + return i, nil +} + +func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n13, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n14, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + return i, nil +} + +func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) + n15, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.NonResourceAttributes != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) + n16, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for _, k := range keysForExtra { + dAtA[i] = 0x2a + i++ + v := m.Extra[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n17, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + } + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + return i, nil +} + +func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i += copy(dAtA[i:], m.EvaluationError) + dAtA[i] = 0x20 + i++ + if m.Denied { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ResourceRules) > 0 { + for _, msg := range m.ResourceRules { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.NonResourceRules) > 0 { + for _, msg := range m.NonResourceRules { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n 
+ } + } + dAtA[i] = 0x18 + i++ + if m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i += copy(dAtA[i:], m.EvaluationError) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LocalSubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourceAttributes) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourceRule) Size() (n int) { + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceAttributes) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceRule) Size() (n int) { + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SelfSubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectAccessReviewSpec) Size() (n int) { + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if 
m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SelfSubjectRulesReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectAccessReviewSpec) Size() (n int) { + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectAccessReviewStatus) Size() (n int) { + var l int + _ = l + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *SubjectRulesReviewStatus) Size() (n int) { + var l int + _ = l + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceRules) > 0 { + for _, e := range m.NonResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LocalSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourceAttributes{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourceRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourceRule{`, + 
`Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceAttributes{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectRulesReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectRulesReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectRulesReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectRulesReviewSpec{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + 
`Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&SubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReviewStatus{`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `Denied:` + fmt.Sprintf("%v", this.Denied) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectRulesReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectRulesReviewStatus{`, + `ResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRules), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + `,`, + `NonResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NonResourceRules), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + `,`, + `Incomplete:` + fmt.Sprintf("%v", this.Incomplete) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var 
wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourceRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourceRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourceRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReview: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err 
:= m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Denied", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Denied = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, ResourceRule{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceRules = append(m.NonResourceRules, NonResourceRule{}) + if err := m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Incomplete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } 
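+ // The loop just above is the base-128 varint decode this generated file repeats for every field:
+ // each byte contributes its low seven bits, least-significant group first, and a byte with the
+ // high bit cleared ends the sequence. Bool fields such as Incomplete use wire type 0 (varint),
+ // so the accumulated value is folded to a bool on the next line.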
+ m.Incomplete = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 1154 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4d, 0x6f, 0x1b, 0xc5, + 0x1b, 0xf7, 0xfa, 0x25, 0xb1, 0xc7, 0xcd, 0x3f, 0xe9, 0x44, 0x69, 0xb6, 0xf9, 0x0b, 0xdb, 0x32, + 0x12, 0x0a, 0xa2, 0xdd, 0x25, 0xa1, 0x90, 0x12, 0xe8, 0x21, 0x56, 0x22, 0x14, 0xa9, 0x2d, 0xd5, + 0x44, 0xc9, 0x81, 0x4a, 0xc0, 0x78, 0x3d, 0xb1, 0x17, 0xdb, 0xbb, 
0xcb, 0xcc, 0xac, 0x43, 0x10, + 0x87, 0x1e, 0x39, 0x72, 0xe4, 0xc8, 0x89, 0xef, 0xc0, 0x05, 0x09, 0x4e, 0x39, 0xf6, 0x18, 0x24, + 0x64, 0x91, 0xe5, 0x43, 0x70, 0x45, 0x33, 0x3b, 0xf6, 0xae, 0xe3, 0x75, 0x1c, 0xe7, 0x40, 0x2f, + 0xbd, 0xed, 0x3c, 0xbf, 0xe7, 0x6d, 0x9e, 0x97, 0xd9, 0x1f, 0xd8, 0x6f, 0x3f, 0x64, 0x86, 0xed, + 0x9a, 0x6d, 0xbf, 0x4e, 0xa8, 0x43, 0x38, 0x61, 0x66, 0x8f, 0x38, 0x0d, 0x97, 0x9a, 0x0a, 0xc0, + 0x9e, 0x6d, 0x62, 0x9f, 0xb7, 0x5c, 0x6a, 0x7f, 0x8b, 0xb9, 0xed, 0x3a, 0x66, 0x6f, 0xa3, 0x4e, + 0x38, 0xde, 0x30, 0x9b, 0xc4, 0x21, 0x14, 0x73, 0xd2, 0x30, 0x3c, 0xea, 0x72, 0x17, 0x56, 0x42, + 0x0b, 0x03, 0x7b, 0xb6, 0x31, 0x62, 0x61, 0x28, 0x8b, 0xb5, 0xfb, 0x4d, 0x9b, 0xb7, 0xfc, 0xba, + 0x61, 0xb9, 0x5d, 0xb3, 0xe9, 0x36, 0x5d, 0x53, 0x1a, 0xd6, 0xfd, 0x63, 0x79, 0x92, 0x07, 0xf9, + 0x15, 0x3a, 0x5c, 0x7b, 0x10, 0xa5, 0xd0, 0xc5, 0x56, 0xcb, 0x76, 0x08, 0x3d, 0x35, 0xbd, 0x76, + 0x53, 0x08, 0x98, 0xd9, 0x25, 0x1c, 0x9b, 0xbd, 0xb1, 0x34, 0xd6, 0xcc, 0x49, 0x56, 0xd4, 0x77, + 0xb8, 0xdd, 0x25, 0x63, 0x06, 0x1f, 0x4c, 0x33, 0x60, 0x56, 0x8b, 0x74, 0xf1, 0x98, 0xdd, 0x7b, + 0x93, 0xec, 0x7c, 0x6e, 0x77, 0x4c, 0xdb, 0xe1, 0x8c, 0xd3, 0xcb, 0x46, 0xd5, 0x2d, 0x00, 0xf6, + 0xbe, 0xe1, 0x14, 0x1f, 0xe1, 0x8e, 0x4f, 0x60, 0x19, 0xe4, 0x6c, 0x4e, 0xba, 0x4c, 0xd7, 0x2a, + 0x99, 0xf5, 0x42, 0xad, 0x10, 0xf4, 0xcb, 0xb9, 0x7d, 0x21, 0x40, 0xa1, 0x7c, 0x3b, 0xff, 0xe3, + 0x4f, 0xe5, 0xd4, 0x8b, 0x3f, 0x2b, 0xa9, 0xea, 0xaf, 0x69, 0xa0, 0x3f, 0x76, 0x2d, 0xdc, 0x39, + 0xf0, 0xeb, 0x5f, 0x11, 0x8b, 0xef, 0x58, 0x16, 0x61, 0x0c, 0x91, 0x9e, 0x4d, 0x4e, 0xe0, 0x97, + 0x20, 0x2f, 0xca, 0xd1, 0xc0, 0x1c, 0xeb, 0x5a, 0x45, 0x5b, 0x2f, 0x6e, 0xbe, 0x6b, 0x44, 0xdd, + 0x18, 0x66, 0x67, 0x78, 0xed, 0xa6, 0x10, 0x30, 0x43, 0x68, 0x1b, 0xbd, 0x0d, 0xe3, 0x53, 0xe9, + 0xeb, 0x09, 0xe1, 0xb8, 0x06, 0xcf, 0xfa, 0xe5, 0x54, 0xd0, 0x2f, 0x83, 0x48, 0x86, 0x86, 0x5e, + 0xe1, 0x73, 0x90, 0x65, 0x1e, 0xb1, 0xf4, 0xb4, 0xf4, 0xfe, 0xa1, 0x31, 0xad, 0xd7, 0x46, 0x42, + 0x9a, 0x07, 0x1e, 0xb1, 0x6a, 0xb7, 0x54, 0x98, 0xac, 0x38, 0x21, 0xe9, 0x14, 0x5a, 0x60, 0x8e, + 0x71, 0xcc, 0x7d, 0xa6, 0x67, 0xa4, 0xfb, 0x8f, 0x6e, 0xe6, 0x5e, 0xba, 0xa8, 0xfd, 0x4f, 0x05, + 0x98, 0x0b, 0xcf, 0x48, 0xb9, 0xae, 0x3e, 0x07, 0x2b, 0x4f, 0x5d, 0x07, 0x11, 0xe6, 0xfa, 0xd4, + 0x22, 0x3b, 0x9c, 0x53, 0xbb, 0xee, 0x73, 0xc2, 0x60, 0x05, 0x64, 0x3d, 0xcc, 0x5b, 0xb2, 0x70, + 0x85, 0x28, 0xbf, 0x67, 0x98, 0xb7, 0x90, 0x44, 0x84, 0x46, 0x8f, 0xd0, 0xba, 0xbc, 0x7c, 0x4c, + 0xe3, 0x88, 0xd0, 0x3a, 0x92, 0x48, 0xf5, 0x6b, 0xb0, 0x18, 0x73, 0x8e, 0xfc, 0x8e, 0xec, 0xad, + 0x80, 0x46, 0x7a, 0x2b, 0x2c, 0x18, 0x0a, 0xe5, 0xf0, 0x11, 0x58, 0x74, 0x22, 0x9b, 0x43, 0xf4, + 0x98, 0xe9, 0x69, 0xa9, 0xba, 0x1c, 0xf4, 0xcb, 0x71, 0x77, 0x02, 0x42, 0x97, 0x75, 0xc5, 0x40, + 0xc0, 0x84, 0xdb, 0x98, 0xa0, 0xe0, 0xe0, 0x2e, 0x61, 0x1e, 0xb6, 0x88, 0xba, 0xd2, 0x6d, 0x95, + 0x70, 0xe1, 0xe9, 0x00, 0x40, 0x91, 0xce, 0xf4, 0xcb, 0xc1, 0x37, 0x41, 0xae, 0x49, 0x5d, 0xdf, + 0x93, 0xdd, 0x29, 0xd4, 0x16, 0x94, 0x4a, 0xee, 0x13, 0x21, 0x44, 0x21, 0x06, 0xdf, 0x06, 0xf3, + 0x3d, 0x42, 0x99, 0xed, 0x3a, 0x7a, 0x56, 0xaa, 0x2d, 0x2a, 0xb5, 0xf9, 0xa3, 0x50, 0x8c, 0x06, + 0x38, 0xbc, 0x07, 0xf2, 0x54, 0x25, 0xae, 0xe7, 0xa4, 0xee, 0x92, 0xd2, 0xcd, 0x0f, 0x2b, 0x38, + 0xd4, 0x80, 0xef, 0x83, 0x22, 0xf3, 0xeb, 0x43, 0x83, 0x39, 0x69, 0xb0, 0xac, 0x0c, 0x8a, 0x07, + 0x11, 0x84, 0xe2, 0x7a, 0xe2, 0x5a, 0xe2, 0x8e, 0xfa, 0xfc, 0xe8, 0xb5, 0x44, 0x09, 0x90, 0x44, + 0xaa, 0xbf, 0x6b, 0xe0, 0xd6, 0x6c, 0x1d, 0x7b, 0x07, 0x14, 0xb0, 0x67, 0xcb, 0x6b, 0x0f, 
0x7a, + 0xb5, 0x20, 0xea, 0xba, 0xf3, 0x6c, 0x3f, 0x14, 0xa2, 0x08, 0x17, 0xca, 0x83, 0x64, 0xc4, 0x5c, + 0x0f, 0x95, 0x07, 0x21, 0x19, 0x8a, 0x70, 0xb8, 0x05, 0x16, 0x06, 0x07, 0xd9, 0x24, 0x3d, 0x2b, + 0x0d, 0x6e, 0x07, 0xfd, 0xf2, 0x02, 0x8a, 0x03, 0x68, 0x54, 0xaf, 0xfa, 0x5b, 0x1a, 0xac, 0x1e, + 0x90, 0xce, 0xf1, 0xab, 0x79, 0x15, 0xbe, 0x18, 0x79, 0x15, 0x1e, 0x5d, 0x63, 0x6d, 0x93, 0x53, + 0x7d, 0xb5, 0x2f, 0xc3, 0xcf, 0x69, 0xf0, 0xff, 0x2b, 0x12, 0x83, 0xdf, 0x01, 0x48, 0xc7, 0x16, + 0x4d, 0x55, 0xf4, 0xc1, 0xf4, 0x84, 0xc6, 0x97, 0xb4, 0x76, 0x27, 0xe8, 0x97, 0x13, 0x96, 0x17, + 0x25, 0xc4, 0x81, 0xdf, 0x6b, 0x60, 0xc5, 0x49, 0x7a, 0xb8, 0x54, 0xd5, 0xb7, 0xa6, 0x67, 0x90, + 0xf8, 0xee, 0xd5, 0xee, 0x06, 0xfd, 0x72, 0xf2, 0x93, 0x88, 0x92, 0x03, 0x8a, 0x27, 0xe7, 0x4e, + 0xac, 0x50, 0x62, 0x69, 0xfe, 0xbb, 0x59, 0xfb, 0x7c, 0x64, 0xd6, 0x3e, 0x9e, 0x69, 0xd6, 0x62, + 0x99, 0x4e, 0x1c, 0xb5, 0xfa, 0xa5, 0x51, 0xdb, 0xbe, 0xf6, 0xa8, 0xc5, 0xbd, 0x5f, 0x3d, 0x69, + 0x4f, 0xc0, 0xda, 0xe4, 0xac, 0x66, 0x7e, 0xba, 0xab, 0xbf, 0xa4, 0xc1, 0xf2, 0x6b, 0x3a, 0x70, + 0xb3, 0xa5, 0x3f, 0xcf, 0x82, 0xd5, 0xd7, 0x0b, 0x7f, 0xf5, 0xc2, 0x8b, 0x9f, 0xa8, 0xcf, 0x08, + 0x55, 0x3f, 0xfe, 0x61, 0xaf, 0x0e, 0x19, 0xa1, 0x48, 0x22, 0xb0, 0x32, 0xe0, 0x06, 0xe1, 0x0f, + 0x0b, 0x88, 0x4a, 0xab, 0x7f, 0xa1, 0x22, 0x06, 0x36, 0xc8, 0x11, 0xc1, 0x78, 0xf5, 0x5c, 0x25, + 0xb3, 0x5e, 0xdc, 0xdc, 0xbd, 0xf1, 0xac, 0x18, 0x92, 0x38, 0xef, 0x39, 0x9c, 0x9e, 0x46, 0x1c, + 0x44, 0xca, 0x50, 0x18, 0x01, 0xbe, 0x01, 0x32, 0xbe, 0xdd, 0x50, 0x14, 0xa1, 0xa8, 0x54, 0x32, + 0x87, 0xfb, 0xbb, 0x48, 0xc8, 0xd7, 0x8e, 0x15, 0xf7, 0x96, 0x2e, 0xe0, 0x12, 0xc8, 0xb4, 0xc9, + 0x69, 0xb8, 0x67, 0x48, 0x7c, 0xc2, 0x1a, 0xc8, 0xf5, 0x04, 0x2d, 0x57, 0x75, 0xbe, 0x37, 0x3d, + 0xd3, 0x88, 0xca, 0xa3, 0xd0, 0x74, 0x3b, 0xfd, 0x50, 0xab, 0xfe, 0xa1, 0x81, 0xbb, 0x13, 0x07, + 0x52, 0x10, 0x25, 0xdc, 0xe9, 0xb8, 0x27, 0xa4, 0x21, 0x63, 0xe7, 0x23, 0xa2, 0xb4, 0x13, 0x8a, + 0xd1, 0x00, 0x87, 0x6f, 0x81, 0x39, 0x4a, 0x30, 0x73, 0x1d, 0x45, 0xce, 0x86, 0xb3, 0x8c, 0xa4, + 0x14, 0x29, 0x14, 0xee, 0x80, 0x45, 0x22, 0xc2, 0xcb, 0xe4, 0xf6, 0x28, 0x75, 0x07, 0x1d, 0x5b, + 0x55, 0x06, 0x8b, 0x7b, 0xa3, 0x30, 0xba, 0xac, 0x2f, 0x42, 0x35, 0x88, 0x63, 0x93, 0x86, 0x64, + 0x6f, 0xf9, 0x28, 0xd4, 0xae, 0x94, 0x22, 0x85, 0x56, 0xff, 0x49, 0x03, 0x7d, 0xd2, 0xb3, 0x07, + 0xdb, 0x11, 0x8b, 0x91, 0xa0, 0x24, 0x52, 0xc5, 0x4d, 0xe3, 0xfa, 0x2b, 0x23, 0xcc, 0x6a, 0x2b, + 0x2a, 0xf6, 0x42, 0x5c, 0x1a, 0x63, 0x3e, 0xf2, 0x08, 0x4f, 0xc0, 0x92, 0x33, 0x4a, 0xb9, 0x43, + 0x4e, 0x56, 0xdc, 0xdc, 0x98, 0x69, 0x41, 0x64, 0x48, 0x5d, 0x85, 0x5c, 0xba, 0x04, 0x30, 0x34, + 0x16, 0x04, 0x6e, 0x02, 0x60, 0x3b, 0x96, 0xdb, 0xf5, 0x3a, 0x84, 0x13, 0x59, 0xe8, 0x7c, 0xf4, + 0x5a, 0xee, 0x0f, 0x11, 0x14, 0xd3, 0x4a, 0xea, 0x50, 0x76, 0xb6, 0x0e, 0xd5, 0xee, 0x9f, 0x5d, + 0x94, 0x52, 0x2f, 0x2f, 0x4a, 0xa9, 0xf3, 0x8b, 0x52, 0xea, 0x45, 0x50, 0xd2, 0xce, 0x82, 0x92, + 0xf6, 0x32, 0x28, 0x69, 0xe7, 0x41, 0x49, 0xfb, 0x2b, 0x28, 0x69, 0x3f, 0xfc, 0x5d, 0x4a, 0x7d, + 0x36, 0xaf, 0x6e, 0xf8, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcc, 0xb3, 0x5e, 0x05, 0xd9, 0x0f, + 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto similarity index 59% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.proto rename to vendor/k8s.io/api/authorization/v1beta1/generated.proto index 6b77db462..98656fbac 100644 --- 
a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,14 +19,12 @@ limitations under the License. syntax = 'proto2'; -package k8s.io.kubernetes.pkg.apis.authorization.v1beta1; +package k8s.io.api.authorization.v1beta1; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; @@ -67,6 +65,17 @@ message NonResourceAttributes { optional string verb = 2; } +// NonResourceRule holds information that describes a rule for the non-resource +message NonResourceRule { + // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + repeated string verbs = 1; + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, + // final step in the path. "*" means all. + // +optional + repeated string nonResourceURLs = 2; +} + // ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface message ResourceAttributes { // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces @@ -101,6 +110,27 @@ message ResourceAttributes { optional string name = 7; } +// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, +// may contain duplicates, and possibly be incomplete. +message ResourceRule { + // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + repeated string verbs = 1; + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. "*" means all. + // +optional + repeated string apiGroups = 2; + + // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. + // +optional + repeated string resources = 3; + + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. + // +optional + repeated string resourceNames = 4; +} + // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a // spec.namespace means "in all namespaces". Self is a special case, because users should always be able // to check whether they can perform an action @@ -128,6 +158,29 @@ message SelfSubjectAccessReviewSpec { optional NonResourceAttributes nonResourceAttributes = 2; } +// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. 
+// The returned list of actions may be incomplete depending on the server's authorization mode, +// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, +// or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to +// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. +// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. +message SelfSubjectRulesReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated. + optional SelfSubjectRulesReviewSpec spec = 2; + + // Status is filled in by the server and indicates the set of actions a user can perform. + // +optional + optional SubjectRulesReviewStatus status = 3; +} + +message SelfSubjectRulesReviewSpec { + // Namespace to evaluate rules for. Required. + optional string namespace = 1; +} + // SubjectAccessReview checks whether or not a user or group can perform an action. message SubjectAccessReview { // +optional @@ -155,7 +208,7 @@ message SubjectAccessReviewSpec { // User is the user you're testing for. // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups // +optional - optional string verb = 3; + optional string user = 3; // Groups is the groups you're testing for. // +optional @@ -165,13 +218,24 @@ message SubjectAccessReviewSpec { // it needs a reflection here. // +optional map extra = 5; + + // UID information about the requesting user. + // +optional + optional string uid = 6; } // SubjectAccessReviewStatus message SubjectAccessReviewStatus { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. optional bool allowed = 1; + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + optional bool denied = 4; + // Reason is optional. It indicates why a request was allowed or denied. // +optional optional string reason = 2; @@ -183,3 +247,27 @@ message SubjectAccessReviewStatus { optional string evaluationError = 3; } +// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on +// the set of authorizers the server is configured with and any errors experienced during evaluation. +// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, +// even if that list is incomplete. +message SubjectRulesReviewStatus { + // ResourceRules is the list of actions the subject is allowed to perform on resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + repeated ResourceRule resourceRules = 1; + + // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + repeated NonResourceRule nonResourceRules = 2; + + // Incomplete is true when the rules returned by this call are incomplete. 
This is most commonly + // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. + optional bool incomplete = 3; + + // EvaluationError can appear in combination with Rules. It indicates an error occurred during + // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that + // ResourceRules and/or NonResourceRules may be incomplete. + // +optional + optional string evaluationError = 4; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/register.go b/vendor/k8s.io/api/authorization/v1beta1/register.go similarity index 75% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/register.go rename to vendor/k8s.io/api/authorization/v1beta1/register.go index 66549ed83..84255dd6c 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/register.go +++ b/vendor/k8s.io/api/authorization/v1beta1/register.go @@ -34,13 +34,17 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectRulesReview{}, &SelfSubjectAccessReview{}, &SubjectAccessReview{}, &LocalSubjectAccessReview{}, @@ -49,7 +53,3 @@ func addKnownTypes(scheme *runtime.Scheme) error { metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } - -func (obj *LocalSubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *SubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *SelfSubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go similarity index 56% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.go rename to vendor/k8s.io/api/authorization/v1beta1/types.go index 8a1727423..618ff8c0f 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types.go @@ -22,9 +22,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// +genclient=true -// +nonNamespaced=true -// +noMethods=true +// +genclient +// +genclient:nonNamespaced +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SubjectAccessReview checks whether or not a user or group can perform an action. type SubjectAccessReview struct { @@ -40,9 +41,10 @@ type SubjectAccessReview struct { Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -// +genclient=true -// +nonNamespaced=true -// +noMethods=true +// +genclient +// +genclient:nonNamespaced +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a // spec.namespace means "in all namespaces". 
Self is a special case, because users should always be able @@ -60,8 +62,9 @@ type SelfSubjectAccessReview struct { Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -// +genclient=true -// +noMethods=true +// +genclient +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. // Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions @@ -131,7 +134,7 @@ type SubjectAccessReviewSpec struct { // User is the user you're testing for. // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups // +optional - User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=verb"` + User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Groups is the groups you're testing for. // +optional Groups []string `json:"group,omitempty" protobuf:"bytes,4,rep,name=group"` @@ -139,6 +142,9 @@ type SubjectAccessReviewSpec struct { // it needs a reflection here. // +optional Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"` + // UID information about the requesting user. + // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid"` } // ExtraValue masks the value so protobuf can generate @@ -163,8 +169,14 @@ type SelfSubjectAccessReviewSpec struct { // SubjectAccessReviewStatus type SubjectAccessReviewStatus struct { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + Denied bool `json:"denied,omitempty" protobuf:"varint,4,opt,name=denied"` // Reason is optional. It indicates why a request was allowed or denied. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` @@ -174,3 +186,83 @@ type SubjectAccessReviewStatus struct { // +optional EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,3,opt,name=evaluationError"` } + +// +genclient +// +genclient:nonNamespaced +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. +// The returned list of actions may be incomplete depending on the server's authorization mode, +// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, +// or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to +// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. +// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. 
+type SelfSubjectRulesReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. + Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates the set of actions a user can perform. + // +optional + Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +type SelfSubjectRulesReviewSpec struct { + // Namespace to evaluate rules for. Required. + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` +} + +// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on +// the set of authorizers the server is configured with and any errors experienced during evaluation. +// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, +// even if that list is incomplete. +type SubjectRulesReviewStatus struct { + // ResourceRules is the list of actions the subject is allowed to perform on resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + ResourceRules []ResourceRule `json:"resourceRules" protobuf:"bytes,1,rep,name=resourceRules"` + // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + NonResourceRules []NonResourceRule `json:"nonResourceRules" protobuf:"bytes,2,rep,name=nonResourceRules"` + // Incomplete is true when the rules returned by this call are incomplete. This is most commonly + // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. + Incomplete bool `json:"incomplete" protobuf:"bytes,3,rep,name=incomplete"` + // EvaluationError can appear in combination with Rules. It indicates an error occurred during + // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that + // ResourceRules and/or NonResourceRules may be incomplete. + // +optional + EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"` +} + +// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, +// may contain duplicates, and possibly be incomplete. +type ResourceRule struct { + // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. "*" means all. + // +optional + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. 
+ // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` +} + +// NonResourceRule holds information that describes a rule for the non-resource +type NonResourceRule struct { + // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, + // final step in the path. "*" means all. + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,2,rep,name=nonResourceURLs"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go similarity index 58% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go index 8e521ba16..3ae6e7206 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ package v1beta1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_LocalSubjectAccessReview = map[string]string{ "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.", @@ -47,6 +47,16 @@ func (NonResourceAttributes) SwaggerDoc() map[string]string { return map_NonResourceAttributes } +var map_NonResourceRule = map[string]string{ + "": "NonResourceRule holds information that describes a rule for the non-resource", + "verbs": "Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. \"*\" means all.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. \"*\" means all.", +} + +func (NonResourceRule) SwaggerDoc() map[string]string { + return map_NonResourceRule +} + var map_ResourceAttributes = map[string]string{ "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", "namespace": "Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview",
@@ -62,6 +72,18 @@ func (ResourceAttributes) SwaggerDoc() map[string]string {
 	return map_ResourceAttributes
 }
 
+var map_ResourceRule = map[string]string{
+	"": "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.",
+	"verbs": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.",
+	"apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"*\" means all.",
+	"resources": "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.",
+	"resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all.",
+}
+
+func (ResourceRule) SwaggerDoc() map[string]string {
+	return map_ResourceRule
+}
+
 var map_SelfSubjectAccessReview = map[string]string{
 	"": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action",
 	"spec": "Spec holds information about the request being evaluated. user and groups must be empty",
@@ -82,6 +104,24 @@ func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string {
 	return map_SelfSubjectAccessReviewSpec
 }
 
+var map_SelfSubjectRulesReview = map[string]string{
+	"": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.",
+	"spec": "Spec holds information about the request being evaluated.",
+	"status": "Status is filled in by the server and indicates the set of actions a user can perform.",
+}
+
+func (SelfSubjectRulesReview) SwaggerDoc() map[string]string {
+	return map_SelfSubjectRulesReview
+}
+
+var map_SelfSubjectRulesReviewSpec = map[string]string{
+	"namespace": "Namespace to evaluate rules for. Required.",
+}
+
+func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string {
+	return map_SelfSubjectRulesReviewSpec
+}
+
 var map_SubjectAccessReview = map[string]string{
 	"": "SubjectAccessReview checks whether or not a user or group can perform an action.",
 	"spec": "Spec holds information about the request being evaluated",
@@ -99,6 +139,7 @@ var map_SubjectAccessReviewSpec = map[string]string{
 	"user": "User is the user you're testing for.
If you specify \"User\" but not \"Group\", then is it interpreted as \"What if User were not a member of any groups", "group": "Groups is the groups you're testing for.", "extra": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", + "uid": "UID information about the requesting user.", } func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { @@ -107,7 +148,8 @@ func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { var map_SubjectAccessReviewStatus = map[string]string{ "": "SubjectAccessReviewStatus", - "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "denied": "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", "reason": "Reason is optional. It indicates why a request was allowed or denied.", "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", } @@ -116,4 +158,16 @@ func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string { return map_SubjectAccessReviewStatus } +var map_SubjectRulesReviewStatus = map[string]string{ + "": "SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on the set of authorizers the server is configured with and any errors experienced during evaluation. Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, even if that list is incomplete.", + "resourceRules": "ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "nonResourceRules": "NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "incomplete": "Incomplete is true when the rules returned by this call are incomplete. This is most commonly encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.", + "evaluationError": "EvaluationError can appear in combination with Rules. It indicates an error occurred during rule evaluation, such as an authorizer that doesn't support rule evaluation, and that ResourceRules and/or NonResourceRules may be incomplete.", +} + +func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string { + return map_SubjectRulesReviewStatus +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..eb14973cd --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,398 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ExtraValue) DeepCopyInto(out *ExtraValue) { + { + in := &in + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. +func (in ExtraValue) DeepCopy() ExtraValue { + if in == nil { + return nil + } + out := new(ExtraValue) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSubjectAccessReview. +func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview { + if in == nil { + return nil + } + out := new(LocalSubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonResourceAttributes) DeepCopyInto(out *NonResourceAttributes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceAttributes. +func (in *NonResourceAttributes) DeepCopy() *NonResourceAttributes { + if in == nil { + return nil + } + out := new(NonResourceAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonResourceRule) DeepCopyInto(out *NonResourceRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceRule. +func (in *NonResourceRule) DeepCopy() *NonResourceRule { + if in == nil { + return nil + } + out := new(NonResourceRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAttributes. 
+func (in *ResourceAttributes) DeepCopy() *ResourceAttributes { + if in == nil { + return nil + } + out := new(ResourceAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRule) DeepCopyInto(out *ResourceRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRule. +func (in *ResourceRule) DeepCopy() *ResourceRule { + if in == nil { + return nil + } + out := new(ResourceRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectAccessReview) DeepCopyInto(out *SelfSubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReview. +func (in *SelfSubjectAccessReview) DeepCopy() *SelfSubjectAccessReview { + if in == nil { + return nil + } + out := new(SelfSubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReviewSpec) { + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + if *in == nil { + *out = nil + } else { + *out = new(ResourceAttributes) + **out = **in + } + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + if *in == nil { + *out = nil + } else { + *out = new(NonResourceAttributes) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReviewSpec. +func (in *SelfSubjectAccessReviewSpec) DeepCopy() *SelfSubjectAccessReviewSpec { + if in == nil { + return nil + } + out := new(SelfSubjectAccessReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectRulesReview) DeepCopyInto(out *SelfSubjectRulesReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReview. 
+func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview { + if in == nil { + return nil + } + out := new(SelfSubjectRulesReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectRulesReviewSpec) DeepCopyInto(out *SelfSubjectRulesReviewSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReviewSpec. +func (in *SelfSubjectRulesReviewSpec) DeepCopy() *SelfSubjectRulesReviewSpec { + if in == nil { + return nil + } + out := new(SelfSubjectRulesReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReview) DeepCopyInto(out *SubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReview. +func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview { + if in == nil { + return nil + } + out := new(SubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) { + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + if *in == nil { + *out = nil + } else { + *out = new(ResourceAttributes) + **out = **in + } + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + if *in == nil { + *out = nil + } else { + *out = new(NonResourceAttributes) + **out = **in + } + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = make([]string, len(val)) + copy((*out)[key], val) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewSpec. +func (in *SubjectAccessReviewSpec) DeepCopy() *SubjectAccessReviewSpec { + if in == nil { + return nil + } + out := new(SubjectAccessReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReviewStatus) DeepCopyInto(out *SubjectAccessReviewStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewStatus. 
+func (in *SubjectAccessReviewStatus) DeepCopy() *SubjectAccessReviewStatus { + if in == nil { + return nil + } + out := new(SubjectAccessReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectRulesReviewStatus) DeepCopyInto(out *SubjectRulesReviewStatus) { + *out = *in + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]ResourceRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NonResourceRules != nil { + in, out := &in.NonResourceRules, &out.NonResourceRules + *out = make([]NonResourceRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewStatus. +func (in *SubjectRulesReviewStatus) DeepCopy() *SubjectRulesReviewStatus { + if in == nil { + return nil + } + out := new(SubjectRulesReviewStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go similarity index 84% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/doc.go rename to vendor/k8s.io/api/autoscaling/v1/doc.go index 342f20126..9c3be845f 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v1/doc.go @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +groupName=authentication.k8s.io -package v1beta1 +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +package v1 // import "k8s.io/api/autoscaling/v1" diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go similarity index 57% rename from vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.pb.go rename to vendor/k8s.io/api/autoscaling/v1/generated.pb.go index 96b8335f8..84c40a9f4 100644 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,18 +15,21 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto // DO NOT EDIT! /* Package v1 is a generated protocol buffer package. 
It is generated from these files: - k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto + k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto It has these top-level messages: CrossVersionObjectReference + ExternalMetricSource + ExternalMetricStatus HorizontalPodAutoscaler + HorizontalPodAutoscalerCondition HorizontalPodAutoscalerList HorizontalPodAutoscalerSpec HorizontalPodAutoscalerStatus @@ -51,7 +54,7 @@ import math "math" import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" import strings "strings" import reflect "reflect" @@ -65,7 +68,9 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} @@ -73,191 +78,352 @@ func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } +func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} +func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{4} +} func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} + return fileDescriptorGenerated, []int{5} } func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} + return fileDescriptorGenerated, []int{6} } func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptorGenerated, []int{7} } func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{5} } +func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func init() { - proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.CrossVersionObjectReference") - proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerList") - 
proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerSpec") - proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerStatus") - proto.RegisterType((*MetricSpec)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.MetricSpec") - proto.RegisterType((*MetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.MetricStatus") - proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ObjectMetricSource") - proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ObjectMetricStatus") - proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.PodsMetricSource") - proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.PodsMetricStatus") - proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ResourceMetricSource") - proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ResourceMetricStatus") - proto.RegisterType((*Scale)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ScaleStatus") + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") + proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricSource") + proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerCondition") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v1.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricStatus") + proto.RegisterType((*Scale)(nil), "k8s.io.api.autoscaling.v1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.autoscaling.v1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.autoscaling.v1.ScaleStatus") } -func (m *CrossVersionObjectReference) Marshal() (data []byte, err error) { +func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err 
:= m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CrossVersionObjectReference) MarshalTo(data []byte) (int, error) { +func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) return i, nil } -func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) { +func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) { +func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + if m.MetricSelector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) + n1, err := m.MetricSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err + if m.TargetValue != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) + n2, err := m.TargetValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 } - i += n2 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err + if m.TargetAverageValue != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) + n3, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 } - i += n3 return i, nil } -func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) { +func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) { +func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + if m.MetricSelector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) + n4, err := m.MetricSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) + n5, err := m.CurrentValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n5 + if m.CurrentAverageValue != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) + n6, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n8, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n9, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + return i, nil +} + +func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n10, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -267,436 +433,244 @@ func (m *HorizontalPodAutoscalerList) MarshalTo(data 
[]byte) (int, error) { return i, nil } -func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err error) { +func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *HorizontalPodAutoscalerSpec) MarshalTo(data []byte) (int, error) { +func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ScaleTargetRef.Size())) - n5, err := m.ScaleTargetRef.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) + n12, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n12 if m.MinReplicas != nil { - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(*m.MinReplicas)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) } - data[i] = 0x18 + dAtA[i] = 0x18 i++ - i = encodeVarintGenerated(data, i, uint64(m.MaxReplicas)) + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) if m.TargetCPUUtilizationPercentage != nil { - data[i] = 0x20 + dAtA[i] = 0x20 i++ - i = encodeVarintGenerated(data, i, uint64(*m.TargetCPUUtilizationPercentage)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetCPUUtilizationPercentage)) } return i, nil } -func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) { +func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) { +func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.ObservedGeneration != nil { - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) } if m.LastScaleTime != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.LastScaleTime.Size())) - n6, err := m.LastScaleTime.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) + n13, err := m.LastScaleTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n13 } - data[i] = 0x18 + dAtA[i] = 0x18 i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentReplicas)) - data[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + dAtA[i] = 0x20 i++ - i = encodeVarintGenerated(data, i, uint64(m.DesiredReplicas)) + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) if m.CurrentCPUUtilizationPercentage != nil { - data[i] = 0x28 + dAtA[i] = 0x28 i++ - i = encodeVarintGenerated(data, i, uint64(*m.CurrentCPUUtilizationPercentage)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentCPUUtilizationPercentage)) } return i, nil } -func (m *MetricSpec) Marshal() (data []byte, err error) { +func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], 
nil + return dAtA[:n], nil } -func (m *MetricSpec) MarshalTo(data []byte) (int, error) { +func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) if m.Object != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) - n7, err := m.Object.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) + n14, err := m.Object.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n14 } if m.Pods != nil { - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) - n8, err := m.Pods.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) + n15, err := m.Pods.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n15 } if m.Resource != nil { - data[i] = 0x22 + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) - n9, err := m.Resource.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) + n16, err := m.Resource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n16 + } + if m.External != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) + n17, err := m.External.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 } return i, nil } -func (m *MetricStatus) Marshal() (data []byte, err error) { +func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *MetricStatus) MarshalTo(data []byte) (int, error) { +func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) if m.Object != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) - n10, err := m.Object.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) + n18, err := m.Object.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n18 } if m.Pods != nil { - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) - n11, err := m.Pods.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n11 - } - if m.Resource != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) - n12, err := m.Resource.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n12 - } - return i, nil -} - -func (m *ObjectMetricSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ObjectMetricSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) - n13, err := m.Target.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += 
n13 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) - i += copy(data[i:], m.MetricName) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.TargetValue.Size())) - n14, err := m.TargetValue.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n14 - return i, nil -} - -func (m *ObjectMetricStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ObjectMetricStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) - n15, err := m.Target.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n15 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) - i += copy(data[i:], m.MetricName) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentValue.Size())) - n16, err := m.CurrentValue.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n16 - return i, nil -} - -func (m *PodsMetricSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodsMetricSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) - i += copy(data[i:], m.MetricName) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) - n17, err := m.TargetAverageValue.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n17 - return i, nil -} - -func (m *PodsMetricStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodsMetricStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) - i += copy(data[i:], m.MetricName) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) - n18, err := m.CurrentAverageValue.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n18 - return i, nil -} - -func (m *ResourceMetricSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceMetricSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - if m.TargetAverageUtilization != nil { - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.TargetAverageUtilization)) - } - if m.TargetAverageValue != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) - n19, err := m.TargetAverageValue.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) + n19, err := m.Pods.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n19 } - return i, nil -} - -func (m *ResourceMetricStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := 
m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceMetricStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - if m.CurrentAverageUtilization != nil { - data[i] = 0x10 + if m.Resource != nil { + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(*m.CurrentAverageUtilization)) + i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) + n20, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) - n20, err := m.CurrentAverageValue.MarshalTo(data[i:]) - if err != nil { - return 0, err + if m.External != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) + n21, err := m.External.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 } - i += n20 return i, nil } -func (m *Scale) Marshal() (data []byte, err error) { +func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Scale) MarshalTo(data []byte) (int, error) { +func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n21, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n21 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n22, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) + n22, err := m.Target.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n22 - data[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n23, err := m.Status.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) + n23, err := m.TargetValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -704,77 +678,289 @@ func (m *Scale) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *ScaleSpec) Marshal() (data []byte, err error) { +func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { +func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) + n24, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) + n25, err := m.CurrentValue.MarshalTo(dAtA[i:]) + if err != 
nil { + return 0, err + } + i += n25 return i, nil } -func (m *ScaleStatus) Marshal() (data []byte, err error) { +func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { +func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Selector))) - i += copy(data[i:], m.Selector) + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) + n26, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 return i, nil } -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) + n27, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + return i, nil +} + +func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.TargetAverageUtilization != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) + n28, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + return i, nil +} + +func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.CurrentAverageUtilization != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.CurrentAverageValue.Size())) + n29, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + return i, nil +} + +func (m *Scale) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n30, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n31, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n32, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + return i, nil +} + +func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + return i, nil +} + +func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) + i += copy(dAtA[i:], m.Selector) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintGenerated(data []byte, offset int, v uint64) int { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } func (m *CrossVersionObjectReference) Size() (n int) { @@ -789,6 +975,44 @@ func (m *CrossVersionObjectReference) Size() (n int) { return n } +func (m *ExternalMetricSource) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + if m.MetricSelector != nil { + l = m.MetricSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TargetValue != nil { + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TargetAverageValue != nil { + l = 
m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ExternalMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + if m.MetricSelector != nil { + l = m.MetricSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageValue != nil { + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *HorizontalPodAutoscaler) Size() (n int) { var l int _ = l @@ -801,6 +1025,22 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { return n } +func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *HorizontalPodAutoscalerList) Size() (n int) { var l int _ = l @@ -865,6 +1105,10 @@ func (m *MetricSpec) Size() (n int) { l = m.Resource.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.External != nil { + l = m.External.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -885,6 +1129,10 @@ func (m *MetricStatus) Size() (n int) { l = m.Resource.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.External != nil { + l = m.External.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1013,6 +1261,32 @@ func (this *CrossVersionObjectReference) String() string { }, "") return s } +func (this *ExternalMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalMetricSource{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalMetricStatus{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} func (this *HorizontalPodAutoscaler) String() string { if this == nil { return "nil" @@ -1025,6 +1299,20 @@ func (this *HorizontalPodAutoscaler) String() string { }, "") return s } +func (this *HorizontalPodAutoscalerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) 
+ `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" @@ -1072,6 +1360,7 @@ func (this *MetricSpec) String() string { `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -1085,6 +1374,7 @@ func (this *MetricStatus) String() string { `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -1200,8 +1490,8 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { - l := len(data) +func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1213,7 +1503,7 @@ func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1241,7 +1531,7 @@ func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1256,7 +1546,7 @@ func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(data[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -1270,7 +1560,7 @@ func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1285,7 +1575,7 @@ func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -1299,7 +1589,7 @@ func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1314,11 +1604,11 @@ func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(data[iNdEx:postIndex]) + m.APIVersion = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1337,8 +1627,8 @@ func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { } return nil } -func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { - l := len(data) +func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1350,7 +1640,360 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricSelector == nil { + m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetValue == nil { + m.TargetValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 
{ + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricSelector == nil { + m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CurrentAverageValue == nil { + m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1378,7 +2021,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1392,7 +2035,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1408,7 +2051,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1422,7 +2065,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1438,7 +2081,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1452,13 +2095,13 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1477,8 +2120,8 @@ func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { } return nil } -func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { - l := len(data) +func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1490,7 +2133,203 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := 
data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = HorizontalPodAutoscalerConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1518,7 +2357,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1532,7 +2371,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1548,7 +2387,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1563,13 +2402,13 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, HorizontalPodAutoscaler{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1588,8 +2427,8 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { } return nil } -func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1601,7 +2440,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1629,7 +2468,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1643,7 +2482,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ScaleTargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ScaleTargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1659,7 +2498,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -1679,7 +2518,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { if 
iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.MaxReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -1698,7 +2537,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -1708,7 +2547,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { m.TargetCPUUtilizationPercentage = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1727,8 +2566,8 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { } return nil } -func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1740,7 +2579,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1768,7 +2607,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -1788,7 +2627,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1805,7 +2644,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { if m.LastScaleTime == nil { m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } - if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1821,7 +2660,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.CurrentReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -1840,7 +2679,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.DesiredReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -1859,7 +2698,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -1869,7 +2708,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { m.CurrentCPUUtilizationPercentage = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1888,8 +2727,8 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { } return nil } -func (m *MetricSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *MetricSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1901,7 +2740,7 @@ func (m *MetricSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ 
-1929,7 +2768,7 @@ func (m *MetricSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1944,7 +2783,7 @@ func (m *MetricSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = MetricSourceType(data[iNdEx:postIndex]) + m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -1958,7 +2797,7 @@ func (m *MetricSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1975,7 +2814,7 @@ func (m *MetricSpec) Unmarshal(data []byte) error { if m.Object == nil { m.Object = &ObjectMetricSource{} } - if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1991,7 +2830,7 @@ func (m *MetricSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2008,7 +2847,7 @@ func (m *MetricSpec) Unmarshal(data []byte) error { if m.Pods == nil { m.Pods = &PodsMetricSource{} } - if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2024,7 +2863,7 @@ func (m *MetricSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2041,13 +2880,46 @@ func (m *MetricSpec) Unmarshal(data []byte) error { if m.Resource == nil { m.Resource = &ResourceMetricSource{} } - if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.External == nil { + m.External = &ExternalMetricSource{} + } + if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2066,8 +2938,8 @@ func (m *MetricSpec) Unmarshal(data []byte) error { } return nil } -func (m *MetricStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *MetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2079,7 +2951,7 @@ func (m *MetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2107,7 +2979,7 @@ func (m *MetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << 
shift if b < 0x80 { @@ -2122,7 +2994,7 @@ func (m *MetricStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = MetricSourceType(data[iNdEx:postIndex]) + m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2136,7 +3008,7 @@ func (m *MetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2153,7 +3025,7 @@ func (m *MetricStatus) Unmarshal(data []byte) error { if m.Object == nil { m.Object = &ObjectMetricStatus{} } - if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2169,7 +3041,7 @@ func (m *MetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2186,7 +3058,7 @@ func (m *MetricStatus) Unmarshal(data []byte) error { if m.Pods == nil { m.Pods = &PodsMetricStatus{} } - if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2202,7 +3074,7 @@ func (m *MetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2219,13 +3091,46 @@ func (m *MetricStatus) Unmarshal(data []byte) error { if m.Resource == nil { m.Resource = &ResourceMetricStatus{} } - if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.External == nil { + m.External = &ExternalMetricStatus{} + } + if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2244,8 +3149,8 @@ func (m *MetricStatus) Unmarshal(data []byte) error { } return nil } -func (m *ObjectMetricSource) Unmarshal(data []byte) error { - l := len(data) +func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2257,7 +3162,7 @@ func (m *ObjectMetricSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2285,7 +3190,7 @@ func (m *ObjectMetricSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2299,7 +3204,7 @@ func (m *ObjectMetricSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2315,7 +3220,7 @@ func (m *ObjectMetricSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2330,7 +3235,7 @@ func (m *ObjectMetricSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MetricName = string(data[iNdEx:postIndex]) + m.MetricName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -2344,7 +3249,7 @@ func (m *ObjectMetricSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2358,13 +3263,13 @@ func (m *ObjectMetricSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2383,8 +3288,8 @@ func (m *ObjectMetricSource) Unmarshal(data []byte) error { } return nil } -func (m *ObjectMetricStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2396,7 +3301,7 @@ func (m *ObjectMetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2424,7 +3329,7 @@ func (m *ObjectMetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2438,7 +3343,7 @@ func (m *ObjectMetricStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2454,7 +3359,7 @@ func (m *ObjectMetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2469,7 +3374,7 @@ func (m *ObjectMetricStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MetricName = string(data[iNdEx:postIndex]) + m.MetricName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -2483,7 +3388,7 @@ func (m *ObjectMetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2497,13 +3402,13 @@ func (m *ObjectMetricStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2522,8 
+3427,8 @@ func (m *ObjectMetricStatus) Unmarshal(data []byte) error { } return nil } -func (m *PodsMetricSource) Unmarshal(data []byte) error { - l := len(data) +func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2535,7 +3440,7 @@ func (m *PodsMetricSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2563,7 +3468,7 @@ func (m *PodsMetricSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2578,7 +3483,7 @@ func (m *PodsMetricSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MetricName = string(data[iNdEx:postIndex]) + m.MetricName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2592,7 +3497,7 @@ func (m *PodsMetricSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2606,13 +3511,13 @@ func (m *PodsMetricSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2631,8 +3536,8 @@ func (m *PodsMetricSource) Unmarshal(data []byte) error { } return nil } -func (m *PodsMetricStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2644,7 +3549,7 @@ func (m *PodsMetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2672,7 +3577,7 @@ func (m *PodsMetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2687,7 +3592,7 @@ func (m *PodsMetricStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MetricName = string(data[iNdEx:postIndex]) + m.MetricName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2701,7 +3606,7 @@ func (m *PodsMetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2715,13 +3620,13 @@ func (m *PodsMetricStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2740,8 +3645,8 @@ func (m *PodsMetricStatus) Unmarshal(data []byte) error { } return nil } -func (m *ResourceMetricSource) Unmarshal(data []byte) error { - l := len(data) 
+func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2753,7 +3658,7 @@ func (m *ResourceMetricSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2781,7 +3686,7 @@ func (m *ResourceMetricSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2796,7 +3701,7 @@ func (m *ResourceMetricSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { @@ -2810,7 +3715,7 @@ func (m *ResourceMetricSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -2830,7 +3735,7 @@ func (m *ResourceMetricSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2847,13 +3752,13 @@ func (m *ResourceMetricSource) Unmarshal(data []byte) error { if m.TargetAverageValue == nil { m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } - if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2872,8 +3777,8 @@ func (m *ResourceMetricSource) Unmarshal(data []byte) error { } return nil } -func (m *ResourceMetricStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2885,7 +3790,7 @@ func (m *ResourceMetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2913,7 +3818,7 @@ func (m *ResourceMetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2928,7 +3833,7 @@ func (m *ResourceMetricStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { @@ -2942,7 +3847,7 @@ func (m *ResourceMetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -2962,7 +3867,7 @@ func (m *ResourceMetricStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2976,13 +3881,13 @@ func (m *ResourceMetricStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3001,8 +3906,8 @@ func (m *ResourceMetricStatus) Unmarshal(data []byte) error { } return nil } -func (m *Scale) Unmarshal(data []byte) error { - l := len(data) +func (m *Scale) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3014,7 +3919,7 @@ func (m *Scale) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3042,7 +3947,7 @@ func (m *Scale) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3056,7 +3961,7 @@ func (m *Scale) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3072,7 +3977,7 @@ func (m *Scale) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3086,7 +3991,7 @@ func (m *Scale) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3102,7 +4007,7 @@ func (m *Scale) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3116,13 +4021,13 @@ func (m *Scale) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3141,8 +4046,8 @@ func (m *Scale) Unmarshal(data []byte) error { } return nil } -func (m *ScaleSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *ScaleSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3154,7 +4059,7 @@ func (m *ScaleSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3182,7 +4087,7 @@ func (m *ScaleSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -3191,7 +4096,7 @@ func (m *ScaleSpec) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3210,8 +4115,8 @@ func (m *ScaleSpec) Unmarshal(data []byte) error { } return nil } -func (m *ScaleStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *ScaleStatus) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3223,7 +4128,7 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3251,7 +4156,7 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -3270,7 +4175,7 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3285,11 +4190,11 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Selector = string(data[iNdEx:postIndex]) + m.Selector = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3308,8 +4213,8 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { } return nil } -func skipGenerated(data []byte) (n int, err error) { - l := len(data) +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -3320,7 +4225,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3338,7 +4243,7 @@ func skipGenerated(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -3355,7 +4260,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3378,7 +4283,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3389,7 +4294,7 @@ func skipGenerated(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipGenerated(data[start:]) + next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } @@ -3413,86 +4318,103 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -var fileDescriptorGenerated = []byte{ - // 1279 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x57, 0xcd, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xda, 0x4e, 0x94, 0xce, 0xa6, 0x1f, 0x4c, 0xaa, 0xd4, 0x4d, 0xa8, 0x37, 0x5a, 0x38, - 0xb4, 0xa8, 0xec, 0x12, 0x13, 0x2a, 0x22, 0x84, 0x50, 0x6c, 0x54, 0x5a, 0x51, 0xb7, 0x61, 0xe2, - 0x46, 0x7c, 0x09, 0x31, 0x59, 0x4f, 0x9d, 0x69, 0xbc, 0x1f, 0x9a, 0x1d, 0x5b, 0x4d, 0x24, 0x24, - 0x4e, 0x9c, 0xb9, 0x70, 0x46, 0xf0, 0x4f, 0x70, 0x2e, 0x12, 0x52, 0x8e, 0xbd, 0xc1, 0xc9, 0x22, - 0x0b, 0x37, 0xc4, 0x3f, 0x50, 0x71, 0x40, 0x3b, 0x3b, 0x5e, 0xef, 0xda, 0x5e, 0x27, 0x4e, 0xd3, - 0x22, 0x6e, 0xbb, 0x33, 0xef, 0xfd, 0x7e, 0xef, 0xfd, 0xe6, 0xcd, 0x9b, 0x19, 0xb0, 0xb6, 0xfb, - 0xb6, 0x6f, 0x50, 0xd7, 0xdc, 0x6d, 0x6f, 0x13, 0xe6, 0x10, 0x4e, 0x7c, 0xd3, 0xdb, 0x6d, 0x9a, - 0xd8, 0xa3, 0xbe, 0x89, 0xdb, 0xdc, 0xf5, 0x2d, 0xdc, 0xa2, 0x4e, 0xd3, 0xec, 0xac, 0x98, 0x4d, - 0xe2, 0x10, 
0x86, 0x39, 0x69, 0x18, 0x1e, 0x73, 0xb9, 0x0b, 0xaf, 0x45, 0xae, 0x46, 0xdf, 0xd5, - 0xf0, 0x76, 0x9b, 0x46, 0xe8, 0x6a, 0x24, 0x5c, 0x8d, 0xce, 0xca, 0xe2, 0xeb, 0x4d, 0xca, 0x77, - 0xda, 0xdb, 0x86, 0xe5, 0xda, 0x66, 0xd3, 0x6d, 0xba, 0xa6, 0x40, 0xd8, 0x6e, 0x3f, 0x10, 0x7f, - 0xe2, 0x47, 0x7c, 0x45, 0xc8, 0x8b, 0xab, 0x32, 0x28, 0xec, 0x51, 0x1b, 0x5b, 0x3b, 0xd4, 0x21, - 0x6c, 0xaf, 0x17, 0x96, 0xc9, 0x88, 0xef, 0xb6, 0x99, 0x45, 0x06, 0xe3, 0x19, 0xeb, 0xe5, 0x9b, - 0x36, 0xe1, 0x78, 0x44, 0x16, 0x8b, 0x66, 0x96, 0x17, 0x6b, 0x3b, 0x9c, 0xda, 0xc3, 0x34, 0x37, - 0x8e, 0x72, 0xf0, 0xad, 0x1d, 0x62, 0xe3, 0x21, 0xbf, 0x37, 0xb3, 0xfc, 0xda, 0x9c, 0xb6, 0x4c, - 0xea, 0x70, 0x9f, 0xb3, 0x71, 0x39, 0xf9, 0x84, 0x75, 0x08, 0xeb, 0x27, 0x44, 0x1e, 0x61, 0xdb, - 0x6b, 0x91, 0x51, 0x39, 0x5d, 0xcf, 0x5c, 0xd4, 0x11, 0xd6, 0xfa, 0x77, 0x0a, 0x58, 0xaa, 0x32, - 0xd7, 0xf7, 0xb7, 0x08, 0xf3, 0xa9, 0xeb, 0xdc, 0xdb, 0x7e, 0x48, 0x2c, 0x8e, 0xc8, 0x03, 0xc2, - 0x88, 0x63, 0x11, 0xb8, 0x0c, 0x0a, 0xbb, 0xd4, 0x69, 0x14, 0x95, 0x65, 0xe5, 0xea, 0x99, 0xca, - 0xdc, 0x41, 0x57, 0x9b, 0x0a, 0xba, 0x5a, 0xe1, 0x43, 0xea, 0x34, 0x90, 0x98, 0x09, 0x2d, 0x1c, - 0x6c, 0x93, 0x62, 0x2e, 0x6d, 0x71, 0x17, 0xdb, 0x04, 0x89, 0x19, 0x58, 0x06, 0x00, 0x7b, 0x54, - 0x12, 0x14, 0xf3, 0xc2, 0x0e, 0x4a, 0x3b, 0xb0, 0xbe, 0x71, 0x5b, 0xce, 0xa0, 0x84, 0x95, 0xfe, - 0x6b, 0x0e, 0x5c, 0xba, 0xe5, 0x32, 0xba, 0xef, 0x3a, 0x1c, 0xb7, 0x36, 0xdc, 0xc6, 0xba, 0x2c, - 0x2a, 0xc2, 0xe0, 0x97, 0x60, 0x36, 0x5c, 0xd0, 0x06, 0xe6, 0x58, 0xc4, 0xa5, 0x96, 0xdf, 0x30, - 0x64, 0x39, 0x26, 0xf5, 0xed, 0x17, 0x64, 0x68, 0x6d, 0x74, 0x56, 0x8c, 0x28, 0xb9, 0x1a, 0xe1, - 0xb8, 0xcf, 0xdf, 0x1f, 0x43, 0x31, 0x2a, 0xdc, 0x01, 0x05, 0xdf, 0x23, 0x96, 0xc8, 0x49, 0x2d, - 0xdf, 0x34, 0x8e, 0x5d, 0xec, 0x46, 0x46, 0xcc, 0x9b, 0x1e, 0xb1, 0xfa, 0xda, 0x84, 0x7f, 0x48, - 0x30, 0x40, 0x0f, 0xcc, 0xf8, 0x1c, 0xf3, 0xb6, 0x2f, 0x74, 0x51, 0xcb, 0xb7, 0x4e, 0x81, 0x4b, - 0xe0, 0x55, 0xce, 0x49, 0xb6, 0x99, 0xe8, 0x1f, 0x49, 0x1e, 0xfd, 0x4f, 0x05, 0x2c, 0x65, 0x78, - 0xde, 0xa1, 0x3e, 0x87, 0x9f, 0x0f, 0xa9, 0x6b, 0x1c, 0x4f, 0xdd, 0xd0, 0x5b, 0x68, 0x7b, 0x41, - 0x32, 0xcf, 0xf6, 0x46, 0x12, 0xca, 0x36, 0xc1, 0x34, 0xe5, 0xc4, 0xf6, 0x8b, 0xb9, 0xe5, 0xfc, - 0x55, 0xb5, 0x5c, 0x79, 0xf6, 0x74, 0x2b, 0x67, 0x25, 0xdd, 0xf4, 0xed, 0x10, 0x18, 0x45, 0xf8, - 0xfa, 0x3f, 0xb9, 0xcc, 0x34, 0x43, 0xf9, 0xe1, 0x37, 0x0a, 0x38, 0x27, 0x7e, 0xeb, 0x98, 0x35, - 0x49, 0x58, 0xf1, 0x32, 0xdb, 0x49, 0x56, 0x7b, 0xcc, 0xce, 0xa9, 0x2c, 0xc8, 0xb0, 0xce, 0x6d, - 0xa6, 0x58, 0xd0, 0x00, 0x2b, 0x5c, 0x01, 0xaa, 0x4d, 0x1d, 0x44, 0xbc, 0x16, 0xb5, 0xb0, 0x2f, - 0x4a, 0x6e, 0xba, 0x72, 0x3e, 0xe8, 0x6a, 0x6a, 0xad, 0x3f, 0x8c, 0x92, 0x36, 0xf0, 0x2d, 0xa0, - 0xda, 0xf8, 0x51, 0xec, 0x92, 0x17, 0x2e, 0xf3, 0x92, 0x4f, 0xad, 0xf5, 0xa7, 0x50, 0xd2, 0x0e, - 0x3e, 0x04, 0x25, 0x2e, 0x68, 0xab, 0x1b, 0xf7, 0xef, 0x73, 0xda, 0xa2, 0xfb, 0x98, 0x53, 0xd7, - 0xd9, 0x20, 0xcc, 0x22, 0x0e, 0xc7, 0x4d, 0x52, 0x2c, 0x08, 0x24, 0x3d, 0xe8, 0x6a, 0xa5, 0xfa, - 0x58, 0x4b, 0x74, 0x04, 0x92, 0xfe, 0x38, 0x0f, 0xae, 0x8c, 0xad, 0x4f, 0x78, 0x13, 0x40, 0x77, - 0x5b, 0xf4, 0xb5, 0xc6, 0x07, 0x51, 0x53, 0x0a, 0xbb, 0x43, 0xb8, 0x06, 0xf9, 0xca, 0x42, 0xd0, - 0xd5, 0xe0, 0xbd, 0xa1, 0x59, 0x34, 0xc2, 0x03, 0x5a, 0xe0, 0x6c, 0x0b, 0xfb, 0x3c, 0x52, 0x99, - 0xca, 0x46, 0xa4, 0x96, 0x5f, 0x3b, 0x5e, 0xd1, 0x86, 0x1e, 0x95, 0x97, 0x82, 0xae, 0x76, 0xf6, - 0x4e, 0x12, 0x04, 0xa5, 0x31, 0xe1, 0x3a, 0x38, 0x6f, 0xb5, 0x19, 0x23, 0x0e, 0x1f, 0x50, 0xfd, - 0x92, 0x54, 0xfd, 0x7c, 0x35, 0x3d, 
0x8d, 0x06, 0xed, 0x43, 0x88, 0x06, 0xf1, 0x29, 0x23, 0x8d, - 0x18, 0xa2, 0x90, 0x86, 0x78, 0x3f, 0x3d, 0x8d, 0x06, 0xed, 0xa1, 0x0d, 0x34, 0x89, 0x9a, 0xb9, - 0x82, 0xd3, 0x02, 0xf2, 0x95, 0xa0, 0xab, 0x69, 0xd5, 0xf1, 0xa6, 0xe8, 0x28, 0x2c, 0xfd, 0xaf, - 0x1c, 0x00, 0x35, 0xc2, 0x19, 0xb5, 0xc4, 0x8e, 0x59, 0x05, 0x05, 0xbe, 0xe7, 0x11, 0x79, 0x14, - 0x2c, 0xf7, 0x9a, 0x59, 0x7d, 0xcf, 0x23, 0x4f, 0xbb, 0xda, 0x05, 0x69, 0x29, 0x8e, 0xe7, 0x70, - 0x0c, 0x09, 0x6b, 0x88, 0xc1, 0x8c, 0x2b, 0x76, 0x86, 0x5c, 0x97, 0x77, 0x27, 0xd8, 0x5e, 0x71, - 0x6f, 0x8e, 0x81, 0x2b, 0x20, 0xec, 0x68, 0x72, 0xab, 0x49, 0x60, 0xf8, 0x09, 0x28, 0x78, 0x6e, - 0xa3, 0xd7, 0x41, 0xdf, 0x99, 0x80, 0x60, 0xc3, 0x6d, 0xf8, 0x29, 0xf8, 0xd9, 0x30, 0xa3, 0x70, - 0x14, 0x09, 0x48, 0x48, 0xc1, 0x6c, 0xef, 0xca, 0x21, 0x56, 0x4b, 0x2d, 0xbf, 0x37, 0x01, 0x3c, - 0x92, 0xae, 0x29, 0x8a, 0xb9, 0xb0, 0x33, 0xf6, 0x66, 0x50, 0x0c, 0xaf, 0xff, 0x9d, 0x03, 0x73, - 0xd2, 0x30, 0xda, 0x20, 0xff, 0xb1, 0xde, 0xd1, 0x29, 0xf2, 0xdc, 0xf4, 0x8e, 0xe0, 0x9f, 0xab, - 0xde, 0x11, 0x45, 0x96, 0xde, 0xdf, 0xe7, 0x00, 0x1c, 0x2e, 0x30, 0xe8, 0x80, 0x99, 0xa8, 0xb5, - 0x9d, 0xf2, 0x71, 0x10, 0x1f, 0xc7, 0xb2, 0xf3, 0x4b, 0x96, 0xf0, 0x72, 0x64, 0x0b, 0xfe, 0xbb, - 0xfd, 0x4b, 0x54, 0x7c, 0x39, 0xa9, 0xc5, 0x33, 0x28, 0x61, 0x05, 0x09, 0x50, 0x23, 0xef, 0x2d, - 0xdc, 0x6a, 0x13, 0xb9, 0x0e, 0x63, 0x4f, 0x69, 0xa3, 0x97, 0xb6, 0xf1, 0x51, 0x1b, 0x3b, 0x9c, - 0xf2, 0xbd, 0xfe, 0x79, 0x51, 0xef, 0x43, 0xa1, 0x24, 0xae, 0xfe, 0xe3, 0xa0, 0x42, 0x51, 0x5d, - 0xfe, 0x1f, 0x14, 0xda, 0x01, 0x73, 0xb2, 0xbb, 0x3d, 0x8b, 0x44, 0x17, 0x25, 0xcb, 0x5c, 0x35, - 0x81, 0x85, 0x52, 0xc8, 0xfa, 0xcf, 0x0a, 0xb8, 0x30, 0xd8, 0x46, 0x06, 0x42, 0x56, 0x8e, 0x15, - 0xf2, 0x3e, 0x80, 0x51, 0xc2, 0xeb, 0x1d, 0xc2, 0x70, 0x93, 0x44, 0x81, 0xe7, 0x4e, 0x14, 0xf8, - 0xa2, 0xe4, 0x82, 0xf5, 0x21, 0x44, 0x34, 0x82, 0x45, 0xff, 0x25, 0x9d, 0x44, 0xb4, 0xce, 0x27, - 0x49, 0xe2, 0x2b, 0x30, 0x2f, 0xd5, 0x39, 0x85, 0x2c, 0x96, 0x24, 0xd9, 0x7c, 0x75, 0x18, 0x12, - 0x8d, 0xe2, 0xd1, 0x7f, 0xca, 0x81, 0x8b, 0xa3, 0x9a, 0x2e, 0xac, 0xc9, 0x47, 0x4a, 0x94, 0xc5, - 0x5a, 0xf2, 0x91, 0xf2, 0xb4, 0xab, 0x5d, 0x1b, 0xf7, 0x64, 0x8a, 0xbb, 0x4a, 0xe2, 0x45, 0xf3, - 0x31, 0x28, 0xa6, 0x54, 0x4c, 0x9c, 0x9f, 0xf2, 0x02, 0xf7, 0x72, 0xd0, 0xd5, 0x8a, 0xf5, 0x0c, - 0x1b, 0x94, 0xe9, 0x0d, 0x3b, 0x23, 0xab, 0xe0, 0x64, 0xe5, 0xbb, 0x30, 0x41, 0x05, 0x3c, 0x1e, - 0x56, 0x2e, 0xaa, 0x82, 0x53, 0x56, 0xee, 0x33, 0x70, 0x39, 0xbd, 0x70, 0xc3, 0xd2, 0x5d, 0x09, - 0xba, 0xda, 0xe5, 0x6a, 0x96, 0x11, 0xca, 0xf6, 0xcf, 0xaa, 0xbe, 0xfc, 0x0b, 0xaa, 0xbe, 0x1f, - 0x72, 0x60, 0x5a, 0x5c, 0x19, 0x5f, 0xc0, 0x0b, 0x75, 0x2b, 0xf5, 0x42, 0x5d, 0x9d, 0xa0, 0x05, - 0x8b, 0x08, 0x33, 0xdf, 0xa3, 0x5f, 0x0c, 0xbc, 0x47, 0x6f, 0x4c, 0x8c, 0x3c, 0xfe, 0xf5, 0xb9, - 0x06, 0xce, 0xc4, 0x01, 0xc0, 0xeb, 0xe1, 0x69, 0x2f, 0xef, 0xc2, 0x8a, 0x58, 0xfb, 0xf8, 0xe9, - 0x18, 0x5f, 0x82, 0x63, 0x0b, 0x9d, 0x02, 0x35, 0xc1, 0x30, 0x99, 0x73, 0x68, 0xed, 0x93, 0x16, - 0xb1, 0xb8, 0xcb, 0xe4, 0x11, 0x12, 0x5b, 0x6f, 0xca, 0x71, 0x14, 0x5b, 0x54, 0x5e, 0x3d, 0x38, - 0x2c, 0x4d, 0x3d, 0x39, 0x2c, 0x4d, 0xfd, 0x76, 0x58, 0x9a, 0xfa, 0x3a, 0x28, 0x29, 0x07, 0x41, - 0x49, 0x79, 0x12, 0x94, 0x94, 0xdf, 0x83, 0x92, 0xf2, 0xed, 0x1f, 0xa5, 0xa9, 0x4f, 0x73, 0x9d, - 0x95, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x61, 0xfc, 0xd4, 0x31, 0x3f, 0x13, 0x00, 0x00, +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = 
[]byte{ + // 1485 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xcb, 0x6f, 0x14, 0x47, + 0x13, 0xf7, 0x3e, 0x6c, 0xec, 0x5e, 0x63, 0xf3, 0x35, 0x08, 0x8c, 0xf9, 0xd8, 0xb1, 0xe6, 0x43, + 0x88, 0x2f, 0x09, 0x33, 0xb1, 0x21, 0x88, 0x1c, 0xbd, 0x9b, 0x10, 0x50, 0xbc, 0x60, 0xda, 0x86, + 0x90, 0x87, 0x22, 0xda, 0xb3, 0xcd, 0xba, 0xf1, 0xce, 0xcc, 0xaa, 0xa7, 0x77, 0x85, 0x91, 0x22, + 0x25, 0x87, 0x9c, 0x13, 0x45, 0x4a, 0x94, 0x63, 0xfe, 0x81, 0x9c, 0x39, 0x27, 0x52, 0x24, 0x8e, + 0x1c, 0x72, 0xe0, 0x34, 0x0a, 0x93, 0x63, 0xfe, 0x03, 0x4e, 0x51, 0x3f, 0x76, 0x76, 0x66, 0x77, + 0x67, 0xfd, 0xc0, 0x58, 0xc9, 0x6d, 0x7a, 0xaa, 0xea, 0x57, 0xdd, 0x55, 0xd5, 0xf5, 0x68, 0x50, + 0xd9, 0xba, 0x1a, 0x58, 0xd4, 0xb7, 0xb7, 0xda, 0x1b, 0x84, 0x79, 0x84, 0x93, 0xc0, 0xee, 0x10, + 0xaf, 0xee, 0x33, 0x5b, 0x13, 0x70, 0x8b, 0xda, 0xb8, 0xcd, 0xfd, 0xc0, 0xc1, 0x4d, 0xea, 0x35, + 0xec, 0xce, 0xa2, 0xdd, 0x20, 0x1e, 0x61, 0x98, 0x93, 0xba, 0xd5, 0x62, 0x3e, 0xf7, 0xe1, 0x69, + 0xc5, 0x6a, 0xe1, 0x16, 0xb5, 0x12, 0xac, 0x56, 0x67, 0x71, 0xfe, 0x62, 0x83, 0xf2, 0xcd, 0xf6, + 0x86, 0xe5, 0xf8, 0xae, 0xdd, 0xf0, 0x1b, 0xbe, 0x2d, 0x25, 0x36, 0xda, 0x0f, 0xe4, 0x4a, 0x2e, + 0xe4, 0x97, 0x42, 0x9a, 0x37, 0x13, 0x4a, 0x1d, 0x9f, 0x91, 0x21, 0xda, 0xe6, 0x2f, 0xf7, 0x78, + 0x5c, 0xec, 0x6c, 0x52, 0x8f, 0xb0, 0x6d, 0xbb, 0xb5, 0xd5, 0x90, 0x42, 0x8c, 0x04, 0x7e, 0x9b, + 0x39, 0x64, 0x4f, 0x52, 0x81, 0xed, 0x12, 0x8e, 0x87, 0xe9, 0xb2, 0xb3, 0xa4, 0x58, 0xdb, 0xe3, + 0xd4, 0x1d, 0x54, 0x73, 0x65, 0x27, 0x81, 0xc0, 0xd9, 0x24, 0x2e, 0x1e, 0x90, 0xbb, 0x94, 0x25, + 0xd7, 0xe6, 0xb4, 0x69, 0x53, 0x8f, 0x07, 0x9c, 0xf5, 0x0b, 0x99, 0xdf, 0xe7, 0xc0, 0x99, 0x2a, + 0xf3, 0x83, 0xe0, 0x2e, 0x61, 0x01, 0xf5, 0xbd, 0x5b, 0x1b, 0x0f, 0x89, 0xc3, 0x11, 0x79, 0x40, + 0x18, 0xf1, 0x1c, 0x02, 0x17, 0x40, 0x71, 0x8b, 0x7a, 0xf5, 0xb9, 0xdc, 0x42, 0xee, 0xc2, 0x54, + 0x65, 0xfa, 0x69, 0x68, 0x8c, 0x45, 0xa1, 0x51, 0xfc, 0x90, 0x7a, 0x75, 0x24, 0x29, 0x82, 0xc3, + 0xc3, 0x2e, 0x99, 0xcb, 0xa7, 0x39, 0x6e, 0x62, 0x97, 0x20, 0x49, 0x81, 0x4b, 0x00, 0xe0, 0x16, + 0xd5, 0x0a, 0xe6, 0x0a, 0x92, 0x0f, 0x6a, 0x3e, 0xb0, 0xbc, 0x7a, 0x43, 0x53, 0x50, 0x82, 0xcb, + 0xfc, 0xa1, 0x00, 0x4e, 0xbc, 0xff, 0x88, 0x13, 0xe6, 0xe1, 0x66, 0x8d, 0x70, 0x46, 0x9d, 0x35, + 0xe9, 0x14, 0x01, 0xe6, 0xca, 0xb5, 0x50, 0xa0, 0xb7, 0x15, 0x83, 0xd5, 0x62, 0x0a, 0x4a, 0x70, + 0x41, 0x1f, 0xcc, 0xa8, 0xd5, 0x1a, 0x69, 0x12, 0x87, 0xfb, 0x4c, 0x6e, 0xb6, 0xb4, 0x74, 0xc9, + 0xea, 0x45, 0x5d, 0x6c, 0x32, 0xab, 0xb5, 0xd5, 0x10, 0x3f, 0x02, 0x4b, 0x78, 0xd4, 0xea, 0x2c, + 0x5a, 0x2b, 0x78, 0x83, 0x34, 0xbb, 0xa2, 0x15, 0x18, 0x85, 0xc6, 0x4c, 0x2d, 0x05, 0x87, 0xfa, + 0xe0, 0x21, 0x06, 0x25, 0x8e, 0x59, 0x83, 0xf0, 0xbb, 0xb8, 0xd9, 0x26, 0xf2, 0xc8, 0xa5, 0x25, + 0x6b, 0x94, 0x36, 0xab, 0x1b, 0x75, 0xd6, 0xed, 0x36, 0xf6, 0x38, 0xe5, 0xdb, 0x95, 0xd9, 0x28, + 0x34, 0x4a, 0xeb, 0x3d, 0x18, 0x94, 0xc4, 0x84, 0x1d, 0x00, 0xd5, 0x72, 0xb9, 0x43, 0x18, 0x6e, + 0x10, 0xa5, 0xa9, 0xb8, 0x2f, 0x4d, 0x27, 0xa3, 0xd0, 0x80, 0xeb, 0x03, 0x68, 0x68, 0x88, 0x06, + 0xf3, 0xa7, 0x41, 0xc7, 0x70, 0xcc, 0xdb, 0xc1, 0xbf, 0xc3, 0x31, 0x9b, 0x60, 0xda, 0x69, 0x33, + 0x46, 0xbc, 0x57, 0xf2, 0xcc, 0x09, 0x7d, 0xac, 0xe9, 0x6a, 0x02, 0x0b, 0xa5, 0x90, 0xe1, 0x36, + 0x38, 0xae, 0xd7, 0x07, 0xe0, 0xa0, 0x53, 0x51, 0x68, 0x1c, 0xaf, 0x0e, 0xc2, 0xa1, 0x61, 0x3a, + 0xcc, 0x27, 0x79, 0x70, 0xea, 0xba, 0xcf, 0xe8, 0x63, 0xdf, 0xe3, 0xb8, 0xb9, 0xea, 0xd7, 0x97, + 0x75, 0x42, 0x25, 0x0c, 0xde, 0x07, 0x93, 0xc2, 0x7a, 0x75, 0xcc, 
0xb1, 0xf4, 0x51, 0x69, 0xe9, + 0xed, 0xdd, 0xd9, 0x5a, 0x25, 0x86, 0x1a, 0xe1, 0xb8, 0xe7, 0xd5, 0xde, 0x3f, 0x14, 0xa3, 0xc2, + 0x7b, 0xa0, 0x18, 0xb4, 0x88, 0xa3, 0x3d, 0x79, 0xc5, 0xca, 0x4c, 0xec, 0x56, 0xc6, 0x1e, 0xd7, + 0x5a, 0xc4, 0xe9, 0xe5, 0x11, 0xb1, 0x42, 0x12, 0x11, 0xde, 0x07, 0x13, 0x81, 0x8c, 0x35, 0xed, + 0xb6, 0xab, 0xfb, 0xc0, 0x96, 0xf2, 0x95, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, 0x9a, 0x5f, + 0x17, 0xc0, 0x42, 0x86, 0x64, 0xd5, 0xf7, 0xea, 0x94, 0x53, 0xdf, 0x83, 0xd7, 0x41, 0x91, 0x6f, + 0xb7, 0xba, 0x21, 0x7e, 0xb9, 0xbb, 0xd1, 0xf5, 0xed, 0x16, 0x79, 0x19, 0x1a, 0xe7, 0x76, 0x92, + 0x17, 0x7c, 0x48, 0x22, 0xc0, 0x95, 0xf8, 0x40, 0xf9, 0x14, 0x96, 0xde, 0xd6, 0xcb, 0xd0, 0x18, + 0x52, 0xcc, 0xac, 0x18, 0x29, 0xbd, 0x79, 0x91, 0x11, 0x9a, 0x38, 0xe0, 0xeb, 0x0c, 0x7b, 0x81, + 0xd2, 0x44, 0xdd, 0x6e, 0x84, 0xbf, 0xb1, 0x3b, 0x27, 0x0b, 0x89, 0xca, 0xbc, 0xde, 0x05, 0x5c, + 0x19, 0x40, 0x43, 0x43, 0x34, 0xc0, 0xf3, 0x60, 0x82, 0x11, 0x1c, 0xf8, 0x9e, 0x0c, 0xee, 0xa9, + 0x9e, 0x71, 0x91, 0xfc, 0x8b, 0x34, 0x15, 0xfe, 0x1f, 0x1c, 0x71, 0x49, 0x10, 0xe0, 0x06, 0x99, + 0x1b, 0x97, 0x8c, 0xb3, 0x9a, 0xf1, 0x48, 0x4d, 0xfd, 0x46, 0x5d, 0xba, 0xf9, 0x7b, 0x0e, 0x9c, + 0xc9, 0xb0, 0xe3, 0x0a, 0x0d, 0x38, 0xfc, 0x6c, 0x20, 0x8a, 0xad, 0x5d, 0x66, 0x0c, 0x1a, 0xa8, + 0x18, 0x3e, 0xa6, 0x75, 0x4f, 0x76, 0xff, 0x24, 0x22, 0xf8, 0x23, 0x30, 0x4e, 0x39, 0x71, 0x85, + 0x57, 0x0a, 0x17, 0x4a, 0x4b, 0x4b, 0x7b, 0x0f, 0xb3, 0xca, 0x51, 0x0d, 0x3f, 0x7e, 0x43, 0x00, + 0x21, 0x85, 0x67, 0xfe, 0x95, 0xcf, 0x3c, 0x96, 0x08, 0x73, 0xd8, 0x01, 0x33, 0x72, 0xa5, 0x52, + 0x31, 0x22, 0x0f, 0xf4, 0xe1, 0x46, 0x5d, 0xa2, 0x11, 0xc5, 0xbb, 0x72, 0x52, 0xef, 0x62, 0x66, + 0x2d, 0x85, 0x8a, 0xfa, 0xb4, 0xc0, 0x45, 0x50, 0x72, 0xa9, 0x87, 0x48, 0xab, 0x49, 0x1d, 0xac, + 0x82, 0x71, 0x5c, 0x95, 0x9f, 0x5a, 0xef, 0x37, 0x4a, 0xf2, 0xc0, 0x77, 0x40, 0xc9, 0xc5, 0x8f, + 0x62, 0x91, 0x82, 0x14, 0x39, 0xae, 0xf5, 0x95, 0x6a, 0x3d, 0x12, 0x4a, 0xf2, 0xc1, 0x87, 0xa0, + 0xac, 0x6a, 0x4a, 0x75, 0xf5, 0xce, 0x1d, 0x4e, 0x9b, 0xf4, 0x31, 0x16, 0x71, 0xb4, 0x4a, 0x98, + 0x43, 0x3c, 0x2e, 0x42, 0xa3, 0x28, 0x91, 0xcc, 0x28, 0x34, 0xca, 0xeb, 0x23, 0x39, 0xd1, 0x0e, + 0x48, 0xe6, 0x2f, 0x05, 0x70, 0x76, 0x64, 0x1a, 0x80, 0xd7, 0x00, 0xf4, 0x37, 0x02, 0xc2, 0x3a, + 0xa4, 0xfe, 0x81, 0xea, 0x8b, 0x44, 0x83, 0x22, 0x6c, 0x5e, 0x50, 0x35, 0xf1, 0xd6, 0x00, 0x15, + 0x0d, 0x91, 0x80, 0x0e, 0x38, 0x2a, 0xee, 0x85, 0xb2, 0x32, 0xd5, 0xbd, 0xd0, 0xde, 0x2e, 0xdd, + 0x7f, 0xa2, 0xd0, 0x38, 0xba, 0x92, 0x04, 0x41, 0x69, 0x4c, 0xb8, 0x0c, 0x66, 0x75, 0xb2, 0xef, + 0xb3, 0xfa, 0x29, 0x6d, 0xf5, 0xd9, 0x6a, 0x9a, 0x8c, 0xfa, 0xf9, 0x05, 0x44, 0x9d, 0x04, 0x94, + 0x91, 0x7a, 0x0c, 0x51, 0x4c, 0x43, 0xbc, 0x97, 0x26, 0xa3, 0x7e, 0x7e, 0xe8, 0x02, 0x43, 0xa3, + 0x66, 0x7a, 0x70, 0x5c, 0x42, 0xfe, 0x2f, 0x0a, 0x0d, 0xa3, 0x3a, 0x9a, 0x15, 0xed, 0x84, 0x25, + 0xda, 0x40, 0xdd, 0x3b, 0xc8, 0x0b, 0x72, 0x39, 0x95, 0x7a, 0x17, 0xfa, 0x52, 0xef, 0xb1, 0x64, + 0xa3, 0x98, 0x48, 0xb3, 0xb7, 0xc1, 0x84, 0x2f, 0x6f, 0x86, 0xf6, 0xcb, 0xc5, 0x11, 0xd7, 0x29, + 0x2e, 0x69, 0x31, 0x50, 0x05, 0x88, 0x5c, 0xa6, 0xaf, 0x96, 0x06, 0x82, 0x37, 0x40, 0xb1, 0xe5, + 0xd7, 0xbb, 0x85, 0xe8, 0xcd, 0x11, 0x80, 0xab, 0x7e, 0x3d, 0x48, 0xc1, 0x4d, 0x8a, 0x1d, 0x8b, + 0xbf, 0x48, 0x42, 0xc0, 0x8f, 0xc1, 0x64, 0xb7, 0xe0, 0xeb, 0xee, 0xc0, 0x1e, 0x01, 0x87, 0x34, + 0x6b, 0x0a, 0x72, 0x5a, 0x24, 0xb2, 0x2e, 0x05, 0xc5, 0x70, 0x02, 0x9a, 0xe8, 0x56, 0x4d, 0x7a, + 0x65, 0x34, 0xf4, 0xb0, 0x76, 0x5b, 0x41, 0x77, 0x29, 0x28, 0x86, 0x33, 0x7f, 0x2c, 0x80, 
0xe9, + 0x54, 0xfb, 0x77, 0xc8, 0xae, 0x51, 0x75, 0xfc, 0xc0, 0x5c, 0xa3, 0xe0, 0x0e, 0xd4, 0x35, 0x0a, + 0xf2, 0xb5, 0xb8, 0x26, 0x01, 0x3d, 0xc4, 0x35, 0xdf, 0xe4, 0x01, 0x1c, 0x0c, 0x63, 0xf8, 0x39, + 0x98, 0x50, 0x09, 0xf3, 0x15, 0x8b, 0x4a, 0x5c, 0xde, 0x75, 0xfd, 0xd0, 0xa8, 0x7d, 0xfd, 0x7f, + 0x7e, 0x57, 0xfd, 0x3f, 0x39, 0x88, 0x39, 0x29, 0xae, 0x3a, 0x59, 0xb3, 0x92, 0xf9, 0x5d, 0xbf, + 0x45, 0x54, 0xc8, 0xfe, 0x13, 0x2d, 0x72, 0x68, 0x03, 0x8a, 0xf9, 0x6b, 0x0e, 0x1c, 0xeb, 0x4f, + 0x4e, 0xfb, 0x1a, 0xe2, 0x1e, 0x0f, 0x9d, 0x44, 0xf3, 0xfb, 0xda, 0x78, 0xdc, 0x7b, 0xee, 0x72, + 0x1a, 0xfd, 0x2d, 0x7d, 0x88, 0xfd, 0x4f, 0xa2, 0x5f, 0x0c, 0x1f, 0xd7, 0xf6, 0x77, 0x8a, 0x33, + 0x5a, 0xd9, 0xee, 0x47, 0xb6, 0x9f, 0xf3, 0xe0, 0xc4, 0xb0, 0xd4, 0x0e, 0xab, 0xfa, 0x75, 0x45, + 0x9d, 0xc2, 0x4e, 0xbe, 0xae, 0xbc, 0x0c, 0x0d, 0x63, 0xc8, 0x78, 0xd0, 0x85, 0x49, 0x3c, 0xc0, + 0xdc, 0x03, 0x73, 0x29, 0xdb, 0x25, 0x6a, 0xad, 0x6e, 0xf6, 0xfe, 0x1b, 0x85, 0xc6, 0xdc, 0x7a, + 0x06, 0x0f, 0xca, 0x94, 0xce, 0x78, 0x85, 0x28, 0xbc, 0xf6, 0x57, 0x88, 0x27, 0x83, 0xf6, 0x52, + 0xbe, 0x3f, 0x10, 0x7b, 0x7d, 0x0a, 0x4e, 0xa7, 0x9d, 0x34, 0x68, 0xb0, 0xb3, 0x51, 0x68, 0x9c, + 0xae, 0x66, 0x31, 0xa1, 0x6c, 0xf9, 0xac, 0x48, 0x2b, 0x1c, 0x52, 0xa4, 0x7d, 0x95, 0x07, 0xe3, + 0xb2, 0xa9, 0x3c, 0x84, 0xa7, 0x80, 0x6b, 0xa9, 0xa7, 0x80, 0x73, 0x23, 0xd2, 0xab, 0xdc, 0x51, + 0xe6, 0xe0, 0x7f, 0xb3, 0x6f, 0xf0, 0x3f, 0xbf, 0x23, 0xd2, 0xe8, 0x31, 0xff, 0x5d, 0x30, 0x15, + 0x2b, 0x84, 0x6f, 0x89, 0x22, 0xaf, 0xbb, 0xe1, 0x9c, 0xf4, 0x6d, 0x3c, 0x1b, 0xc6, 0x6d, 0x70, + 0xcc, 0x61, 0x52, 0x50, 0x4a, 0x68, 0xd8, 0x9b, 0xb0, 0xe0, 0x0e, 0x92, 0x0f, 0x5d, 0x53, 0x3d, + 0xee, 0xf8, 0xc5, 0x2a, 0xe6, 0xa8, 0x5c, 0x78, 0xfa, 0xa2, 0x3c, 0xf6, 0xec, 0x45, 0x79, 0xec, + 0xf9, 0x8b, 0xf2, 0xd8, 0x97, 0x51, 0x39, 0xf7, 0x34, 0x2a, 0xe7, 0x9e, 0x45, 0xe5, 0xdc, 0xf3, + 0xa8, 0x9c, 0xfb, 0x23, 0x2a, 0xe7, 0xbe, 0xfd, 0xb3, 0x3c, 0xf6, 0x49, 0xbe, 0xb3, 0xf8, 0x77, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x61, 0x55, 0xfd, 0xeb, 0x81, 0x17, 0x00, 0x00, } diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto similarity index 71% rename from vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.proto rename to vendor/k8s.io/api/autoscaling/v1/generated.proto index 4953624af..2ff7f9e28 100644 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,22 +19,21 @@ limitations under the License. syntax = 'proto2'; -package k8s.io.kubernetes.pkg.apis.autoscaling.v1; +package k8s.io.api.autoscaling.v1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
message CrossVersionObjectReference { - // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -45,13 +44,56 @@ message CrossVersionObjectReference { optional string apiVersion = 3; } +// ExternalMetricSource indicates how to scale on a metric not associated with +// any Kubernetes object (for example length of queue in cloud +// messaging service, or QPS from loadbalancer running outside of cluster). +message ExternalMetricSource { + // metricName is the name of the metric in question. + optional string metricName = 1; + + // metricSelector is used to identify a specific time series + // within a given metric. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + + // targetValue is the target value of the metric (as a quantity). + // Mutually exclusive with TargetAverageValue. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + + // targetAverageValue is the target per-pod value of global metric (as a quantity). + // Mutually exclusive with TargetValue. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; +} + +// ExternalMetricStatus indicates the current value of a global metric +// not associated with any Kubernetes object. +message ExternalMetricStatus { + // metricName is the name of a metric used for autoscaling in + // metric system. + optional string metricName = 1; + + // metricSelector is used to identify a specific time series + // within a given metric. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + + // currentValue is the current value of the metric (as a quantity) + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + + // currentAverageValue is the current value of metric averaged over autoscaled pods. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; +} + // configuration of a horizontal pod autoscaler. message HorizontalPodAutoscaler { - // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -60,6 +102,30 @@ message HorizontalPodAutoscaler { optional HorizontalPodAutoscalerStatus status = 3; } +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point. 
+message HorizontalPodAutoscalerCondition { + // type describes the current condition + optional string type = 1; + + // status is the status of the condition (True, False, Unknown) + optional string status = 2; + + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // reason is the reason for the condition's last transition. + // +optional + optional string reason = 4; + + // message is a human-readable explanation containing details about + // the transition + // +optional + optional string message = 5; +} + // list of horizontal pod autoscaler objects. message HorizontalPodAutoscalerList { // Standard list metadata. @@ -115,7 +181,8 @@ message HorizontalPodAutoscalerStatus { // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). message MetricSpec { - // type is the type of metric source. It should match one of the fields below. + // type is the type of metric source. It should be one of "Object", + // "Pods" or "Resource", each mapping to a matching field in the object. optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -136,11 +203,20 @@ message MetricSpec { // to normal per-pod metrics using the "pods" source. // +optional optional ResourceMetricSource resource = 4; + + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + optional ExternalMetricSource external = 5; } // MetricStatus describes the last-read state of a single metric. message MetricStatus { - // type is the type of metric source. It will match one of the fields below. + // type is the type of metric source. It will be one of "Object", + // "Pods" or "Resource", each corresponds to a matching field in the object. optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -161,6 +237,14 @@ message MetricStatus { // to normal per-pod metrics using the "pods" source. // +optional optional ResourceMetricStatus resource = 4; + + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + optional ExternalMetricStatus external = 5; } // ObjectMetricSource indicates how to scale on a metric describing a @@ -230,7 +314,7 @@ message ResourceMetricSource { // +optional optional int32 targetAverageUtilization = 2; - // targetAverageValue is the the target value of the average of the + // targetAverageValue is the target value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. 
// +optional @@ -254,7 +338,7 @@ message ResourceMetricStatus { // +optional optional int32 currentAverageUtilization = 2; - // currentAverageValue is the the current value of the average of the + // currentAverageValue is the current value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. @@ -263,15 +347,15 @@ message ResourceMetricStatus { // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/register.go b/vendor/k8s.io/api/autoscaling/v1/register.go similarity index 80% rename from vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/register.go rename to vendor/k8s.io/api/autoscaling/v1/register.go index aaa225261..8dfe361ed 100644 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/register.go +++ b/vendor/k8s.io/api/autoscaling/v1/register.go @@ -34,11 +34,14 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &HorizontalPodAutoscaler{}, diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go similarity index 68% rename from vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.go rename to vendor/k8s.io/api/autoscaling/v1/types.go index 47fd31e27..344af774f 100644 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -17,14 +17,14 @@ limitations under the License. package v1 import ( + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api/v1" ) // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
type CrossVersionObjectReference struct { - // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` @@ -72,16 +72,17 @@ type HorizontalPodAutoscalerStatus struct { CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -90,6 +91,8 @@ type HorizontalPodAutoscaler struct { Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { metav1.TypeMeta `json:",inline"` @@ -101,18 +104,20 @@ type HorizontalPodAutoscalerList struct { Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // Scale represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -156,12 +161,19 @@ var ( // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics (the "pods" source). 
ResourceMetricSourceType MetricSourceType = "Resource" + // ExternalMetricSourceType is a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + ExternalMetricSourceType MetricSourceType = "External" ) // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). type MetricSpec struct { - // type is the type of metric source. It should match one of the fields below. + // type is the type of metric source. It should be one of "Object", + // "Pods" or "Resource", each mapping to a matching field in the object. Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -180,6 +192,13 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` } // ObjectMetricSource indicates how to scale on a metric describing a @@ -221,16 +240,37 @@ type ResourceMetricSource struct { // the requested value of the resource for the pods. // +optional TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` - // targetAverageValue is the the target value of the average of the + // targetAverageValue is the target value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` } +// ExternalMetricSource indicates how to scale on a metric not associated with +// any Kubernetes object (for example length of queue in cloud +// messaging service, or QPS from loadbalancer running outside of cluster). +type ExternalMetricSource struct { + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series + // within a given metric. + // +optional + MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` + // targetValue is the target value of the metric (as a quantity). + // Mutually exclusive with TargetAverageValue. + // +optional + TargetValue *resource.Quantity `json:"targetValue,omitempty" protobuf:"bytes,3,opt,name=targetValue"` + // targetAverageValue is the target per-pod value of global metric (as a quantity). + // Mutually exclusive with TargetValue. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,4,opt,name=targetAverageValue"` +} + // MetricStatus describes the last-read state of a single metric. 
type MetricStatus struct { - // type is the type of metric source. It will match one of the fields below. + // type is the type of metric source. It will be one of "Object", + // "Pods" or "Resource", each corresponds to a matching field in the object. Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -249,6 +289,49 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + External *ExternalMetricStatus `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` +} + +// HorizontalPodAutoscalerConditionType are the valid conditions of +// a HorizontalPodAutoscaler. +type HorizontalPodAutoscalerConditionType string + +var ( + // ScalingActive indicates that the HPA controller is able to scale if necessary: + // it's correctly configured, can fetch the desired metrics, and isn't disabled. + ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" + // AbleToScale indicates a lack of transient issues which prevent scaling from occurring, + // such as being in a backoff window, or being unable to access/update the target scale. + AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" + // ScalingLimited indicates that the calculated scale based on metrics would be above or + // below the range for the HPA, and has thus been capped. + ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited" +) + +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point. +type HorizontalPodAutoscalerCondition struct { + // type describes the current condition + Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about + // the transition + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // ObjectMetricStatus indicates the current value of a metric describing a @@ -288,9 +371,26 @@ type ResourceMetricStatus struct { // specification. // +optional CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` - // currentAverageValue is the the current value of the average of the + // currentAverageValue is the current value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. 
// It will always be set, regardless of the corresponding metric specification. CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` } + +// ExternalMetricStatus indicates the current value of a global metric +// not associated with any Kubernetes object. +type ExternalMetricStatus struct { + // metricName is the name of a metric used for autoscaling in + // metric system. + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series + // within a given metric. + // +optional + MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` + // currentValue is the current value of the metric (as a quantity) + CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` + // currentAverageValue is the current value of metric averaged over autoscaled pods. + // +optional + CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty" protobuf:"bytes,4,opt,name=currentAverageValue"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go similarity index 70% rename from vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index 01d205f87..e84909269 100644 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,10 +26,10 @@ package v1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -38,10 +38,34 @@ func (CrossVersionObjectReference) SwaggerDoc() map[string]string { return map_CrossVersionObjectReference } +var map_ExternalMetricSource = map[string]string{ + "": "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "metricName": "metricName is the name of the metric in question.", + "metricSelector": "metricSelector is used to identify a specific time series within a given metric.", + "targetValue": "targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.", + "targetAverageValue": "targetAverageValue is the target per-pod value of global metric (as a quantity). 
Mutually exclusive with TargetValue.", +} + +func (ExternalMetricSource) SwaggerDoc() map[string]string { + return map_ExternalMetricSource +} + +var map_ExternalMetricStatus = map[string]string{ + "": "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.", + "metricName": "metricName is the name of a metric used for autoscaling in metric system.", + "metricSelector": "metricSelector is used to identify a specific time series within a given metric.", + "currentValue": "currentValue is the current value of the metric (as a quantity)", + "currentAverageValue": "currentAverageValue is the current value of metric averaged over autoscaled pods.", +} + +func (ExternalMetricStatus) SwaggerDoc() map[string]string { + return map_ExternalMetricStatus +} + var map_HorizontalPodAutoscaler = map[string]string{ "": "configuration of a horizontal pod autoscaler.", - "metadata": "Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", "status": "current information about the autoscaler.", } @@ -49,6 +73,19 @@ func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { return map_HorizontalPodAutoscaler } +var map_HorizontalPodAutoscalerCondition = map[string]string{ + "": "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.", + "type": "type describes the current condition", + "status": "status is the status of the condition (True, False, Unknown)", + "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another", + "reason": "reason is the reason for the condition's last transition.", + "message": "message is a human-readable explanation containing details about the transition", +} + +func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerCondition +} + var map_HorizontalPodAutoscalerList = map[string]string{ "": "list of horizontal pod autoscaler objects.", "metadata": "Standard list metadata.", @@ -86,10 +123,11 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { var map_MetricSpec = map[string]string{ "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should match one of the fields below.", + "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricSpec) SwaggerDoc() map[string]string { @@ -98,10 +136,11 @@ func (MetricSpec) SwaggerDoc() map[string]string { var map_MetricStatus = map[string]string{ "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will match one of the fields below.", + "type": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } func (MetricStatus) SwaggerDoc() map[string]string { @@ -154,7 +193,7 @@ var map_ResourceMetricSource = map[string]string{ "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
Only one \"target\" type should be set.", "name": "name is the name of the resource in question.", "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", - "targetAverageValue": "targetAverageValue is the the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", + "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", } func (ResourceMetricSource) SwaggerDoc() map[string]string { @@ -165,7 +204,7 @@ var map_ResourceMetricStatus = map[string]string{ "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "name": "name is the name of the resource in question.", "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", - "currentAverageValue": "currentAverageValue is the the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", } func (ResourceMetricStatus) SwaggerDoc() map[string]string { @@ -174,9 +213,9 @@ func (ResourceMetricStatus) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. 
Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..ee9ac01de --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -0,0 +1,569 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference. +func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference { + if in == nil { + return nil + } + out := new(CrossVersionObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) { + *out = *in + if in.MetricSelector != nil { + in, out := &in.MetricSelector, &out.MetricSelector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.TargetValue != nil { + in, out := &in.TargetValue, &out.TargetValue + if *in == nil { + *out = nil + } else { + x := (*in).DeepCopy() + *out = &x + } + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + if *in == nil { + *out = nil + } else { + x := (*in).DeepCopy() + *out = &x + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource. +func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource { + if in == nil { + return nil + } + out := new(ExternalMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) { + *out = *in + if in.MetricSelector != nil { + in, out := &in.MetricSelector, &out.MetricSelector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + out.CurrentValue = in.CurrentValue.DeepCopy() + if in.CurrentAverageValue != nil { + in, out := &in.CurrentAverageValue, &out.CurrentAverageValue + if *in == nil { + *out = nil + } else { + x := (*in).DeepCopy() + *out = &x + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus. 
+func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus { + if in == nil { + return nil + } + out := new(ExternalMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler. +func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition. +func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList. +func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) { + *out = *in + out.ScaleTargetRef = in.ScaleTargetRef + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.TargetCPUUtilizationPercentage != nil { + in, out := &in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec. 
+func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) { + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() + } + } + if in.CurrentCPUUtilizationPercentage != nil { + in, out := &in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus. +func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + if *in == nil { + *out = nil + } else { + *out = new(ObjectMetricSource) + (*in).DeepCopyInto(*out) + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + if *in == nil { + *out = nil + } else { + *out = new(PodsMetricSource) + (*in).DeepCopyInto(*out) + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + if *in == nil { + *out = nil + } else { + *out = new(ResourceMetricSource) + (*in).DeepCopyInto(*out) + } + } + if in.External != nil { + in, out := &in.External, &out.External + if *in == nil { + *out = nil + } else { + *out = new(ExternalMetricSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec. +func (in *MetricSpec) DeepCopy() *MetricSpec { + if in == nil { + return nil + } + out := new(MetricSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + if *in == nil { + *out = nil + } else { + *out = new(ObjectMetricStatus) + (*in).DeepCopyInto(*out) + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + if *in == nil { + *out = nil + } else { + *out = new(PodsMetricStatus) + (*in).DeepCopyInto(*out) + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + if *in == nil { + *out = nil + } else { + *out = new(ResourceMetricStatus) + (*in).DeepCopyInto(*out) + } + } + if in.External != nil { + in, out := &in.External, &out.External + if *in == nil { + *out = nil + } else { + *out = new(ExternalMetricStatus) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus. 
+func (in *MetricStatus) DeepCopy() *MetricStatus { + if in == nil { + return nil + } + out := new(MetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) { + *out = *in + out.Target = in.Target + out.TargetValue = in.TargetValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource. +func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource { + if in == nil { + return nil + } + out := new(ObjectMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) { + *out = *in + out.Target = in.Target + out.CurrentValue = in.CurrentValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus. +func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus { + if in == nil { + return nil + } + out := new(ObjectMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) { + *out = *in + out.TargetAverageValue = in.TargetAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource. +func (in *PodsMetricSource) DeepCopy() *PodsMetricSource { + if in == nil { + return nil + } + out := new(PodsMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) { + *out = *in + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus. +func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus { + if in == nil { + return nil + } + out := new(PodsMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) { + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + if *in == nil { + *out = nil + } else { + x := (*in).DeepCopy() + *out = &x + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource. +func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource { + if in == nil { + return nil + } + out := new(ResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) { + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus. +func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus { + if in == nil { + return nil + } + out := new(ResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scale) DeepCopyInto(out *Scale) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. +func (in *Scale) DeepCopy() *Scale { + if in == nil { + return nil + } + out := new(Scale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scale) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. +func (in *ScaleSpec) DeepCopy() *ScaleSpec { + if in == nil { + return nil + } + out := new(ScaleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. +func (in *ScaleStatus) DeepCopy() *ScaleStatus { + if in == nil { + return nil + } + out := new(ScaleStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go new file mode 100644 index 000000000..da9789e5c --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +package v2beta1 // import "k8s.io/api/autoscaling/v2beta1" diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go new file mode 100644 index 000000000..467345d32 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -0,0 +1,6732 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +// DO NOT EDIT! + +/* + Package v2beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto + + It has these top-level messages: + ContainerResourcePolicy + CrossVersionObjectReference + ExternalMetricSource + ExternalMetricStatus + HorizontalPodAutoscaler + HorizontalPodAutoscalerCondition + HorizontalPodAutoscalerList + HorizontalPodAutoscalerSpec + HorizontalPodAutoscalerStatus + MetricSpec + MetricStatus + ObjectMetricSource + ObjectMetricStatus + PodResourcePolicy + PodUpdatePolicy + PodsMetricSource + PodsMetricStatus + RecommendedContainerResources + RecommendedPodResources + ResourceMetricSource + ResourceMetricStatus + VerticalPodAutoscaler + VerticalPodAutoscalerCondition + VerticalPodAutoscalerList + VerticalPodAutoscalerSpec + VerticalPodAutoscalerStatus +*/ +package v2beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
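Each message registered in this generated file carries the gogo/protobuf plumbing (Reset, String, ProtoMessage, plus the Size/Marshal helpers that follow), so callers can round-trip these types through the proto package imported above. A sketch under that assumption, not part of the generated file; the helper name is hypothetical:

// Sketch only: serialize with the generated Marshal, decode with gogo's proto.Unmarshal,
// which accepts any proto.Message (these generated types implement that interface).
func roundTrip(in *HorizontalPodAutoscaler) (*HorizontalPodAutoscaler, error) {
	data, err := in.Marshal() // uses the generated Size/MarshalTo below
	if err != nil {
		return nil, err
	}
	out := &HorizontalPodAutoscaler{}
	if err := proto.Unmarshal(data, out); err != nil {
		return nil, err
	}
	return out, nil
}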
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *ContainerResourcePolicy) Reset() { *m = ContainerResourcePolicy{} } +func (*ContainerResourcePolicy) ProtoMessage() {} +func (*ContainerResourcePolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } +func (*CrossVersionObjectReference) ProtoMessage() {} +func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } +func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} +func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{5} +} + +func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } +func (*HorizontalPodAutoscalerList) ProtoMessage() {} +func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{6} +} + +func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } +func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} +func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{7} +} + +func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } +func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} +func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{8} +} + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *PodResourcePolicy) Reset() { *m = PodResourcePolicy{} } +func (*PodResourcePolicy) ProtoMessage() {} +func (*PodResourcePolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *PodUpdatePolicy) Reset() { *m = PodUpdatePolicy{} } +func (*PodUpdatePolicy) ProtoMessage() {} +func (*PodUpdatePolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{14} } + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *RecommendedContainerResources) Reset() { *m = RecommendedContainerResources{} } +func (*RecommendedContainerResources) ProtoMessage() {} +func (*RecommendedContainerResources) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{17} +} + +func (m *RecommendedPodResources) Reset() { *m = RecommendedPodResources{} } +func (*RecommendedPodResources) ProtoMessage() {} +func (*RecommendedPodResources) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{18} +} + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } + +func (m *VerticalPodAutoscaler) Reset() { *m = VerticalPodAutoscaler{} } +func (*VerticalPodAutoscaler) ProtoMessage() {} +func (*VerticalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } + +func (m *VerticalPodAutoscalerCondition) Reset() { *m = VerticalPodAutoscalerCondition{} } +func (*VerticalPodAutoscalerCondition) ProtoMessage() {} +func (*VerticalPodAutoscalerCondition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{22} +} + +func (m *VerticalPodAutoscalerList) Reset() { *m = VerticalPodAutoscalerList{} } +func (*VerticalPodAutoscalerList) ProtoMessage() {} +func (*VerticalPodAutoscalerList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{23} +} + +func (m *VerticalPodAutoscalerSpec) Reset() { *m = VerticalPodAutoscalerSpec{} } +func (*VerticalPodAutoscalerSpec) ProtoMessage() {} +func (*VerticalPodAutoscalerSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{24} +} + +func (m *VerticalPodAutoscalerStatus) Reset() { *m = VerticalPodAutoscalerStatus{} } +func (*VerticalPodAutoscalerStatus) ProtoMessage() {} +func (*VerticalPodAutoscalerStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{25} +} + +func init() { + proto.RegisterType((*ContainerResourcePolicy)(nil), "k8s.io.api.autoscaling.v2beta1.ContainerResourcePolicy") + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta1.CrossVersionObjectReference") + proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ExternalMetricSource") + proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ExternalMetricStatus") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerCondition") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), 
"k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v2beta1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ObjectMetricStatus") + proto.RegisterType((*PodResourcePolicy)(nil), "k8s.io.api.autoscaling.v2beta1.PodResourcePolicy") + proto.RegisterType((*PodUpdatePolicy)(nil), "k8s.io.api.autoscaling.v2beta1.PodUpdatePolicy") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.PodsMetricStatus") + proto.RegisterType((*RecommendedContainerResources)(nil), "k8s.io.api.autoscaling.v2beta1.RecommendedContainerResources") + proto.RegisterType((*RecommendedPodResources)(nil), "k8s.io.api.autoscaling.v2beta1.RecommendedPodResources") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricStatus") + proto.RegisterType((*VerticalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v2beta1.VerticalPodAutoscaler") + proto.RegisterType((*VerticalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v2beta1.VerticalPodAutoscalerCondition") + proto.RegisterType((*VerticalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v2beta1.VerticalPodAutoscalerList") + proto.RegisterType((*VerticalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v2beta1.VerticalPodAutoscalerSpec") + proto.RegisterType((*VerticalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v2beta1.VerticalPodAutoscalerStatus") +} +func (m *ContainerResourcePolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResourcePolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i += copy(dAtA[i:], m.ContainerName) + if m.Mode != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Mode))) + i += copy(dAtA[i:], *m.Mode) + } + if len(m.MinAllowed) > 0 { + keysForMinAllowed := make([]string, 0, len(m.MinAllowed)) + for k := range m.MinAllowed { + keysForMinAllowed = append(keysForMinAllowed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMinAllowed) + for _, k := range keysForMinAllowed { + dAtA[i] = 0x1a + i++ + v := m.MinAllowed[k8s_io_api_core_v1.ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n1, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + } + if len(m.MaxAllowed) > 0 { + keysForMaxAllowed := make([]string, 0, len(m.MaxAllowed)) + for 
k := range m.MaxAllowed { + keysForMaxAllowed = append(keysForMaxAllowed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxAllowed) + for _, k := range keysForMaxAllowed { + dAtA[i] = 0x22 + i++ + v := m.MaxAllowed[k8s_io_api_core_v1.ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n2, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + } + return i, nil +} + +func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + return i, nil +} + +func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + if m.MetricSelector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) + n3, err := m.MetricSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.TargetValue != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) + n4, err := m.TargetValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.TargetAverageValue != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) + n5, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + if m.MetricSelector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) + n6, err := m.MetricSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) + n7, err := m.CurrentValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + if m.CurrentAverageValue != nil { + dAtA[i] = 
0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) + n8, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n11, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + return i, nil +} + +func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n12, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n13, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) + n14, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + if m.MinReplicas != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + } + dAtA[i] = 0x18 + 
i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + if len(m.Metrics) > 0 { + for _, msg := range m.Metrics { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ObservedGeneration != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) + n15, err := m.LastScaleTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + if len(m.CurrentMetrics) > 0 { + for _, msg := range m.CurrentMetrics { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MetricSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.Object != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) + n16, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.Pods != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) + n17, err := m.Pods.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.Resource != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) + n18, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.External != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) + n19, err := m.External.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + return i, nil +} + +func (m *MetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.Object != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) + n20, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + 
i += n20 + } + if m.Pods != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) + n21, err := m.Pods.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.Resource != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) + n22, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if m.External != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) + n23, err := m.External.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + return i, nil +} + +func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) + n24, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) + n25, err := m.TargetValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + return i, nil +} + +func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) + n26, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) + n27, err := m.CurrentValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + return i, nil +} + +func (m *PodResourcePolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodResourcePolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerPolicies) > 0 { + for _, msg := range m.ContainerPolicies { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodUpdatePolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodUpdatePolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.UpdateMode != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UpdateMode))) + i += copy(dAtA[i:], *m.UpdateMode) + } + return i, nil +} + +func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) + n28, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + return i, nil +} + +func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) + n29, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + return i, nil +} + +func (m *RecommendedContainerResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RecommendedContainerResources) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i += copy(dAtA[i:], m.ContainerName) + if len(m.Target) > 0 { + keysForTarget := make([]string, 0, len(m.Target)) + for k := range m.Target { + keysForTarget = append(keysForTarget, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTarget) + for _, k := range keysForTarget { + dAtA[i] = 0x12 + i++ + v := m.Target[k8s_io_api_core_v1.ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n30, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + } + if len(m.LowerBound) > 0 { + keysForLowerBound := make([]string, 0, len(m.LowerBound)) + for k := range m.LowerBound { + keysForLowerBound = append(keysForLowerBound, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLowerBound) + for _, k := range keysForLowerBound { + dAtA[i] = 0x1a + i++ + v := m.LowerBound[k8s_io_api_core_v1.ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n31, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + } + if len(m.UpperBound) > 0 { + keysForUpperBound := make([]string, 0, len(m.UpperBound)) + for k := range m.UpperBound { + keysForUpperBound = append(keysForUpperBound, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForUpperBound) + for _, k := range keysForUpperBound { + dAtA[i] = 0x22 + i++ + v := m.UpperBound[k8s_io_api_core_v1.ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n32, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + } + return i, nil +} + +func (m *RecommendedPodResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RecommendedPodResources) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerRecommendations) > 0 { + for _, msg := range m.ContainerRecommendations { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.TargetAverageUtilization != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) + n33, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + } + return i, nil +} + +func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.CurrentAverageUtilization != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) + n34, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + return i, nil +} + +func (m *VerticalPodAutoscaler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VerticalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n35, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + 
n36, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n37, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + return i, nil +} + +func (m *VerticalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VerticalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n38, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *VerticalPodAutoscalerList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VerticalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n39, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *VerticalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VerticalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Selector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n40, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + } + if m.UpdatePolicy != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatePolicy.Size())) + n41, err := m.UpdatePolicy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + if m.ResourcePolicy != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourcePolicy.Size())) + n42, err := m.ResourcePolicy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + } + return i, nil +} + +func (m *VerticalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VerticalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Recommendation != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Recommendation.Size())) + n43, 
err := m.Recommendation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ContainerResourcePolicy) Size() (n int) { + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Mode != nil { + l = len(*m.Mode) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MinAllowed) > 0 { + for k, v := range m.MinAllowed { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MaxAllowed) > 0 { + for k, v := range m.MaxAllowed { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *CrossVersionObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExternalMetricSource) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + if m.MetricSelector != nil { + l = m.MetricSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TargetValue != nil { + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ExternalMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + if m.MetricSelector != nil { + l = m.MetricSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageValue != nil { + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HorizontalPodAutoscaler) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + var l int + _ = l + l = m.ScaleTargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.MinReplicas != nil { + n += 1 + sovGenerated(uint64(*m.MinReplicas)) + } + n += 1 + sovGenerated(uint64(m.MaxReplicas)) + if len(m.Metrics) > 0 { + for _, e := range m.Metrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + l = m.LastScaleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) + if len(m.CurrentMetrics) > 0 { + for _, e := range m.CurrentMetrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MetricSpec) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.External != nil { + l = m.External.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.External != nil { + l = m.External.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ObjectMetricSource) Size() (n int) { + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectMetricStatus) Size() (n int) { + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodResourcePolicy) Size() (n int) { + var l int + _ = l + if len(m.ContainerPolicies) > 0 { + for _, e := range m.ContainerPolicies { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodUpdatePolicy) Size() (n int) { + var l int + _ = l + if m.UpdateMode != nil { + l = len(*m.UpdateMode) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodsMetricSource) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l 
= m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RecommendedContainerResources) Size() (n int) { + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Target) > 0 { + for k, v := range m.Target { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.LowerBound) > 0 { + for k, v := range m.LowerBound { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.UpperBound) > 0 { + for k, v := range m.UpperBound { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *RecommendedPodResources) Size() (n int) { + var l int + _ = l + if len(m.ContainerRecommendations) > 0 { + for _, e := range m.ContainerRecommendations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceMetricSource) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) + } + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VerticalPodAutoscaler) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VerticalPodAutoscalerCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VerticalPodAutoscalerList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VerticalPodAutoscalerSpec) Size() (n int) { + var l int + _ = l + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.UpdatePolicy != nil { + l = m.UpdatePolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourcePolicy != nil { + l = m.ResourcePolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VerticalPodAutoscalerStatus) Size() (n int) { + var l int + 
_ = l + if m.Recommendation != nil { + l = m.Recommendation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ContainerResourcePolicy) String() string { + if this == nil { + return "nil" + } + keysForMinAllowed := make([]string, 0, len(this.MinAllowed)) + for k := range this.MinAllowed { + keysForMinAllowed = append(keysForMinAllowed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMinAllowed) + mapStringForMinAllowed := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForMinAllowed { + mapStringForMinAllowed += fmt.Sprintf("%v: %v,", k, this.MinAllowed[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForMinAllowed += "}" + keysForMaxAllowed := make([]string, 0, len(this.MaxAllowed)) + for k := range this.MaxAllowed { + keysForMaxAllowed = append(keysForMaxAllowed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxAllowed) + mapStringForMaxAllowed := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForMaxAllowed { + mapStringForMaxAllowed += fmt.Sprintf("%v: %v,", k, this.MaxAllowed[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForMaxAllowed += "}" + s := strings.Join([]string{`&ContainerResourcePolicy{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `MinAllowed:` + mapStringForMinAllowed + `,`, + `MaxAllowed:` + mapStringForMaxAllowed + `,`, + `}`, + }, "") + return s +} +func (this *CrossVersionObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CrossVersionObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalMetricSource{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalMetricStatus{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `}`, + }, "") + 
return s +} +func (this *HorizontalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscaler{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, + `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, + `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, + `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, + `CurrentMetrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + 
strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricStatus{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricSource{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricStatus{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodResourcePolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodResourcePolicy{`, + `ContainerPolicies:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ContainerPolicies), "ContainerResourcePolicy", "ContainerResourcePolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodUpdatePolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodUpdatePolicy{`, + `UpdateMode:` + valueToStringGenerated(this.UpdateMode) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricSource{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricStatus{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*RecommendedContainerResources) String() string { + if this == nil { + return "nil" + } + keysForTarget := make([]string, 0, len(this.Target)) + for k := range this.Target { + keysForTarget = append(keysForTarget, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTarget) + mapStringForTarget := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForTarget { + mapStringForTarget += fmt.Sprintf("%v: %v,", k, this.Target[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForTarget += "}" + keysForLowerBound := make([]string, 0, len(this.LowerBound)) + for k := range this.LowerBound { + keysForLowerBound = append(keysForLowerBound, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLowerBound) + mapStringForLowerBound := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForLowerBound { + mapStringForLowerBound += fmt.Sprintf("%v: %v,", k, this.LowerBound[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForLowerBound += "}" + keysForUpperBound := make([]string, 0, len(this.UpperBound)) + for k := range this.UpperBound { + keysForUpperBound = append(keysForUpperBound, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpperBound) + mapStringForUpperBound := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForUpperBound { + mapStringForUpperBound += fmt.Sprintf("%v: %v,", k, this.UpperBound[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForUpperBound += "}" + s := strings.Join([]string{`&RecommendedContainerResources{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `Target:` + mapStringForTarget + `,`, + `LowerBound:` + mapStringForLowerBound + `,`, + `UpperBound:` + mapStringForUpperBound + `,`, + `}`, + }, "") + return s +} +func (this *RecommendedPodResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RecommendedPodResources{`, + `ContainerRecommendations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ContainerRecommendations), "RecommendedContainerResources", "RecommendedContainerResources", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VerticalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VerticalPodAutoscaler{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VerticalPodAutoscalerSpec", "VerticalPodAutoscalerSpec", 1), `&`, ``, 
1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VerticalPodAutoscalerStatus", "VerticalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VerticalPodAutoscalerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VerticalPodAutoscalerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *VerticalPodAutoscalerList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VerticalPodAutoscalerList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VerticalPodAutoscaler", "VerticalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VerticalPodAutoscalerSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VerticalPodAutoscalerSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `UpdatePolicy:` + strings.Replace(fmt.Sprintf("%v", this.UpdatePolicy), "PodUpdatePolicy", "PodUpdatePolicy", 1) + `,`, + `ResourcePolicy:` + strings.Replace(fmt.Sprintf("%v", this.ResourcePolicy), "PodResourcePolicy", "PodResourcePolicy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VerticalPodAutoscalerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VerticalPodAutoscalerStatus{`, + `Recommendation:` + strings.Replace(fmt.Sprintf("%v", this.Recommendation), "RecommendedPodResources", "RecommendedPodResources", 1) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "VerticalPodAutoscalerCondition", "VerticalPodAutoscalerCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ContainerResourcePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResourcePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResourcePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ContainerScalingMode(dAtA[iNdEx:postIndex]) + m.Mode = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MinAllowed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.MinAllowed == nil { + m.MinAllowed = make(k8s_io_api_core_v1.ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.MinAllowed[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue 
k8s_io_apimachinery_pkg_api_resource.Quantity + m.MinAllowed[k8s_io_api_core_v1.ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxAllowed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.MaxAllowed == nil { + m.MaxAllowed = make(k8s_io_api_core_v1.ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.MaxAllowed[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.MaxAllowed[k8s_io_api_core_v1.ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum 
:= int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricSelector == nil { + m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetValue == nil { + m.TargetValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricStatus: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricSelector == nil { + m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CurrentAverageValue == nil { + m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen 
|= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = HorizontalPodAutoscalerConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HorizontalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ScaleTargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + } + var v int32 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MinReplicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + } + m.MaxReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, MetricSpec{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScaleTime == nil { + m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 
0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentMetrics = append(m.CurrentMetrics, MetricStatus{}) + if err := m.CurrentMetrics[len(m.CurrentMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, HorizontalPodAutoscalerCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) 
& 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricSource{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricSource{} + } + if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricSource{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.External == nil { + m.External = &ExternalMetricSource{} + } + if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricStatus{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricStatus{} + } + if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricStatus{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.External == nil { + m.External = &ExternalMetricStatus{} + } + if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
+func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodResourcePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodResourcePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodResourcePolicy: illegal tag 
%d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPolicies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerPolicies = append(m.ContainerPolicies, ContainerResourcePolicy{}) + if err := m.ContainerPolicies[len(m.ContainerPolicies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodUpdatePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodUpdatePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodUpdatePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := UpdateMode(dAtA[iNdEx:postIndex]) + m.UpdateMode = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RecommendedContainerResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RecommendedContainerResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RecommendedContainerResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Target == nil { + m.Target = make(k8s_io_api_core_v1.ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Target[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Target[k8s_io_api_core_v1.ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LowerBound", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.LowerBound == nil { + m.LowerBound = make(k8s_io_api_core_v1.ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.LowerBound[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.LowerBound[k8s_io_api_core_v1.ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field UpperBound", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.UpperBound == nil { + m.UpperBound = make(k8s_io_api_core_v1.ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.UpperBound[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.UpperBound[k8s_io_api_core_v1.ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RecommendedPodResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RecommendedPodResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: RecommendedPodResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerRecommendations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerRecommendations = append(m.ContainerRecommendations, RecommendedContainerResources{}) + if err := m.ContainerRecommendations[len(m.ContainerRecommendations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.TargetAverageValue == nil { + m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VerticalPodAutoscaler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VerticalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VerticalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VerticalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VerticalPodAutoscalerCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VerticalPodAutoscalerCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = VerticalPodAutoscalerConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VerticalPodAutoscalerList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VerticalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VerticalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VerticalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VerticalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VerticalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VerticalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := 
m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdatePolicy == nil { + m.UpdatePolicy = &PodUpdatePolicy{} + } + if err := m.UpdatePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourcePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourcePolicy == nil { + m.ResourcePolicy = &PodResourcePolicy{} + } + if err := m.ResourcePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VerticalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VerticalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VerticalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Recommendation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Recommendation == nil { + m.Recommendation = &RecommendedPodResources{} + } + if err := m.Recommendation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, VerticalPodAutoscalerCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 2044 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0xdd, 0x6f, 0x1c, 0x57, + 0x15, 0xf7, 0xec, 0xae, 0x1d, 0xf7, 0xd8, 0xb1, 0x9d, 0x9b, 0xb4, 0xde, 0x38, 0x64, 0xd7, 0x1a, + 0x21, 0x54, 0x2a, 0x3a, 0xdb, 0xb8, 0x29, 0x2d, 0x84, 0x22, 0xbc, 0xdb, 0x36, 0x8d, 0xf0, 0x36, + 0xe6, 0xda, 0x8e, 0xaa, 0x7e, 0x20, 0xc6, 0x33, 0x37, 0x9b, 0xc1, 0x3b, 0x73, 0x57, 0x33, 0x77, + 0xdd, 0x38, 0x08, 0xa9, 0x20, 0xc1, 0x33, 0x0f, 0x50, 0x1e, 0x78, 0xe1, 0x19, 0xc1, 0x2b, 0x79, + 0xe0, 0xa9, 0x48, 0x88, 0x3c, 0xe6, 0x05, 0xa9, 0x08, 0x69, 0x21, 0xcb, 0x7f, 0x00, 0x6f, 0x79, + 0x42, 0xf7, 
0x63, 0x3e, 0x77, 0xc7, 0xbb, 0x59, 0xaf, 0x03, 0x48, 0x7d, 0xdb, 0xb9, 0xf7, 0x9c, + 0xdf, 0xf9, 0xbc, 0xe7, 0xde, 0x73, 0x16, 0xae, 0x1f, 0xbc, 0x16, 0x18, 0x0e, 0xad, 0x1d, 0x74, + 0xf7, 0x89, 0xef, 0x11, 0x46, 0x82, 0xda, 0x21, 0xf1, 0x6c, 0xea, 0xd7, 0xd4, 0x86, 0xd9, 0x71, + 0x6a, 0x66, 0x97, 0xd1, 0xc0, 0x32, 0xdb, 0x8e, 0xd7, 0xaa, 0x1d, 0x6e, 0xec, 0x13, 0x66, 0x5e, + 0xa9, 0xb5, 0x88, 0x47, 0x7c, 0x93, 0x11, 0xdb, 0xe8, 0xf8, 0x94, 0x51, 0x54, 0x91, 0xf4, 0x86, + 0xd9, 0x71, 0x8c, 0x04, 0xbd, 0xa1, 0xe8, 0xd7, 0x5e, 0x6c, 0x39, 0xec, 0x4e, 0x77, 0xdf, 0xb0, + 0xa8, 0x5b, 0x6b, 0xd1, 0x16, 0xad, 0x09, 0xb6, 0xfd, 0xee, 0x6d, 0xf1, 0x25, 0x3e, 0xc4, 0x2f, + 0x09, 0xb7, 0xa6, 0x27, 0xc4, 0x5b, 0xd4, 0x27, 0xb5, 0xc3, 0x01, 0x91, 0x6b, 0x57, 0x63, 0x1a, + 0xd7, 0xb4, 0xee, 0x38, 0x1e, 0xf1, 0x8f, 0x6a, 0x9d, 0x83, 0x96, 0x60, 0xf2, 0x49, 0x40, 0xbb, + 0xbe, 0x45, 0x9e, 0x88, 0x2b, 0xa8, 0xb9, 0x84, 0x99, 0xc3, 0x64, 0xd5, 0xf2, 0xb8, 0xfc, 0xae, + 0xc7, 0x1c, 0x77, 0x50, 0xcc, 0x57, 0x47, 0x31, 0x04, 0xd6, 0x1d, 0xe2, 0x9a, 0x03, 0x7c, 0x2f, + 0xe7, 0xf1, 0x75, 0x99, 0xd3, 0xae, 0x39, 0x1e, 0x0b, 0x98, 0x9f, 0x65, 0xd2, 0xff, 0x35, 0x0b, + 0xab, 0x0d, 0xea, 0x31, 0x93, 0x93, 0x63, 0x65, 0xf9, 0x36, 0x6d, 0x3b, 0xd6, 0x11, 0xba, 0x06, + 0x67, 0xad, 0x70, 0xeb, 0x1d, 0xd3, 0x25, 0x65, 0x6d, 0x5d, 0x7b, 0xfe, 0x99, 0xfa, 0xb3, 0x0f, + 0x7a, 0xd5, 0x99, 0x7e, 0xaf, 0x7a, 0xb6, 0x91, 0xdc, 0xc4, 0x69, 0x5a, 0x74, 0x15, 0x4a, 0x2e, + 0xb5, 0x49, 0xb9, 0x20, 0x78, 0xd6, 0xfb, 0xbd, 0x6a, 0xa9, 0x49, 0x6d, 0xf2, 0xb8, 0x57, 0xbd, + 0x10, 0xf1, 0xed, 0xc8, 0x48, 0xf3, 0x75, 0x2c, 0xa8, 0xd1, 0xa7, 0x1a, 0x80, 0xeb, 0x78, 0x9b, + 0xed, 0x36, 0xfd, 0x88, 0xd8, 0xe5, 0xe2, 0x7a, 0xf1, 0xf9, 0x85, 0x8d, 0xeb, 0xc6, 0xf1, 0x19, + 0x62, 0xe4, 0x18, 0x60, 0x34, 0x23, 0xa4, 0x37, 0x3d, 0xe6, 0x1f, 0xd5, 0xdf, 0x55, 0x9a, 0x43, + 0xbc, 0xf1, 0xb8, 0x57, 0xad, 0x0e, 0x26, 0x8c, 0x11, 0x02, 0x6d, 0x39, 0x01, 0xfb, 0xf1, 0xdf, + 0x8f, 0x25, 0x11, 0xc6, 0x27, 0x94, 0x96, 0x36, 0x98, 0x77, 0x43, 0x1b, 0x4a, 0x27, 0xb4, 0x21, + 0x42, 0xca, 0xda, 0x10, 0x6d, 0x4c, 0xcd, 0x86, 0x08, 0x71, 0xcd, 0x85, 0xe5, 0x8c, 0xf3, 0xd0, + 0x0a, 0x14, 0x0f, 0xc8, 0x91, 0xcc, 0x01, 0xcc, 0x7f, 0xa2, 0x37, 0x60, 0xf6, 0xd0, 0x6c, 0x77, + 0x65, 0x8c, 0x17, 0x36, 0x8c, 0x84, 0x89, 0x51, 0x02, 0x1a, 0x9d, 0x83, 0x96, 0xb0, 0x39, 0x3c, + 0x55, 0xc6, 0x77, 0xba, 0xa6, 0xc7, 0x1c, 0x76, 0x84, 0x25, 0xf3, 0xd7, 0x0b, 0xaf, 0x69, 0x42, + 0x5c, 0xda, 0xce, 0xd3, 0x14, 0xa7, 0xff, 0x42, 0x83, 0x4b, 0x0d, 0x9f, 0x06, 0xc1, 0x2d, 0xe2, + 0x07, 0x0e, 0xf5, 0x6e, 0xee, 0x7f, 0x9f, 0x58, 0x0c, 0x93, 0xdb, 0xc4, 0x27, 0x9e, 0x45, 0xd0, + 0x3a, 0x94, 0x0e, 0x1c, 0xcf, 0x56, 0xf9, 0xbe, 0xa8, 0x3c, 0x5e, 0xfa, 0xb6, 0xe3, 0xd9, 0x58, + 0xec, 0x70, 0x0a, 0x8f, 0x9f, 0x88, 0x42, 0x9a, 0x42, 0xf8, 0x51, 0xec, 0xa0, 0x0d, 0x00, 0xb3, + 0xe3, 0x28, 0x01, 0xe5, 0xa2, 0xa0, 0x43, 0x61, 0xec, 0x36, 0xb7, 0x6f, 0xa8, 0x1d, 0x9c, 0xa0, + 0xd2, 0x3f, 0x29, 0xc2, 0x85, 0x37, 0xef, 0x32, 0xe2, 0x7b, 0x66, 0xbb, 0x49, 0x98, 0xef, 0x58, + 0x3b, 0xc2, 0x08, 0x0e, 0xe6, 0x8a, 0xef, 0xc4, 0x31, 0x8c, 0xc0, 0x9a, 0xd1, 0x0e, 0x4e, 0x50, + 0x21, 0x0a, 0x4b, 0xf2, 0x6b, 0x87, 0xb4, 0x89, 0xc5, 0xa8, 0xaf, 0xfc, 0xf6, 0xf2, 0x71, 0x7e, + 0x0b, 0x0c, 0x5e, 0xc6, 0x8c, 0xc3, 0x2b, 0xc6, 0x96, 0xb9, 0x4f, 0xda, 0x21, 0x6b, 0x1d, 0xf5, + 0x7b, 0xd5, 0xa5, 0x66, 0x0a, 0x0e, 0x67, 0xe0, 0x91, 0x09, 0x0b, 0xcc, 0xf4, 0x5b, 0x84, 0xdd, + 0x12, 0x51, 0x2a, 0x4e, 0x12, 0xa5, 0xfa, 0x72, 0xbf, 0x57, 0x5d, 0xd8, 0x8d, 0x61, 0x70, 0x12, + 0x13, 0x1d, 0x02, 0x92, 0x9f, 0x9b, 
0x87, 0xc4, 0x37, 0x5b, 0x44, 0x4a, 0x2a, 0x4d, 0x24, 0xe9, + 0xb9, 0x7e, 0xaf, 0x8a, 0x76, 0x07, 0xd0, 0xf0, 0x10, 0x09, 0xfa, 0xaf, 0x07, 0x03, 0xc3, 0x4c, + 0xd6, 0x0d, 0xfe, 0x3f, 0x02, 0x73, 0x07, 0x16, 0xad, 0xae, 0xef, 0x13, 0xef, 0x44, 0x91, 0xb9, + 0xa0, 0xcc, 0x5a, 0x6c, 0x24, 0xb0, 0x70, 0x0a, 0x19, 0x1d, 0xc1, 0x79, 0xf5, 0x3d, 0x85, 0x00, + 0xad, 0xf6, 0x7b, 0xd5, 0xf3, 0x8d, 0x41, 0x38, 0x3c, 0x4c, 0x86, 0xfe, 0x69, 0x01, 0x56, 0xdf, + 0xa6, 0xbe, 0x73, 0x8f, 0xd7, 0xd1, 0xf6, 0x36, 0xb5, 0x37, 0x55, 0x91, 0x25, 0x3e, 0xfa, 0x1e, + 0xcc, 0x73, 0xef, 0xd9, 0x26, 0x33, 0x45, 0x8c, 0x16, 0x36, 0x5e, 0x1a, 0xcf, 0xd7, 0xb2, 0x30, + 0x34, 0x09, 0x33, 0xe3, 0xa8, 0xc6, 0x6b, 0x38, 0x42, 0x45, 0x1f, 0x42, 0x29, 0xe8, 0x10, 0x4b, + 0x45, 0xf2, 0xda, 0xa8, 0x62, 0x9f, 0xa3, 0xe8, 0x4e, 0x87, 0x58, 0x71, 0x31, 0xe1, 0x5f, 0x58, + 0xc0, 0x22, 0x02, 0x73, 0x81, 0x48, 0x38, 0x15, 0xbb, 0xd7, 0x27, 0x15, 0x20, 0x40, 0xea, 0x4b, + 0x4a, 0xc4, 0x9c, 0xfc, 0xc6, 0x0a, 0x5c, 0xff, 0x49, 0x11, 0xd6, 0x73, 0x38, 0x1b, 0xd4, 0xb3, + 0x1d, 0xe6, 0x50, 0x0f, 0xbd, 0x0d, 0x25, 0x76, 0xd4, 0x09, 0x93, 0xfd, 0x6a, 0xa8, 0xed, 0xee, + 0x51, 0x87, 0x5f, 0xee, 0x5f, 0x1c, 0xc5, 0xcf, 0xe9, 0xb0, 0x40, 0x40, 0x5b, 0x91, 0x55, 0x85, + 0x14, 0x96, 0x52, 0xeb, 0x71, 0xaf, 0x3a, 0xe4, 0x2d, 0x67, 0x44, 0x48, 0x69, 0xe5, 0x79, 0x6d, + 0x68, 0x9b, 0x01, 0xdb, 0xf5, 0x4d, 0x2f, 0x90, 0x92, 0x1c, 0x37, 0xcc, 0xf5, 0x17, 0xc6, 0x0b, + 0x37, 0xe7, 0xa8, 0xaf, 0x29, 0x2d, 0xd0, 0xd6, 0x00, 0x1a, 0x1e, 0x22, 0x01, 0x7d, 0x09, 0xe6, + 0x7c, 0x62, 0x06, 0xd4, 0x13, 0x69, 0xfe, 0x4c, 0xec, 0x5c, 0x2c, 0x56, 0xb1, 0xda, 0x45, 0x5f, + 0x86, 0x33, 0x2e, 0x09, 0x02, 0xb3, 0x45, 0xca, 0xb3, 0x82, 0x70, 0x59, 0x11, 0x9e, 0x69, 0xca, + 0x65, 0x1c, 0xee, 0xeb, 0x7f, 0xd5, 0xe0, 0x52, 0x8e, 0x1f, 0xf9, 0x65, 0x8e, 0x3e, 0x18, 0xc8, + 0x67, 0x63, 0xcc, 0xda, 0xe1, 0x04, 0x32, 0x9b, 0x57, 0x94, 0xec, 0xf9, 0x70, 0x25, 0x91, 0xcb, + 0x1f, 0xc0, 0xac, 0xc3, 0x88, 0xcb, 0xa3, 0xc2, 0x5f, 0x2e, 0xaf, 0x4e, 0x98, 0x6b, 0xf5, 0xb3, + 0x4a, 0xc6, 0xec, 0x0d, 0x8e, 0x86, 0x25, 0xa8, 0xfe, 0xb7, 0x42, 0xae, 0x6d, 0x3c, 0xe1, 0xd1, + 0x0f, 0x60, 0x49, 0x7c, 0xc9, 0xca, 0x8c, 0xc9, 0x6d, 0x65, 0xe1, 0xc8, 0x33, 0x75, 0xcc, 0x85, + 0x5e, 0x7f, 0x4e, 0xa9, 0xb2, 0xb4, 0x93, 0x82, 0xc6, 0x19, 0x51, 0xe8, 0x0a, 0x2c, 0xb8, 0x8e, + 0x87, 0x49, 0xa7, 0xed, 0x58, 0xa6, 0x4c, 0xcb, 0x59, 0x79, 0x25, 0x35, 0xe3, 0x65, 0x9c, 0xa4, + 0x41, 0xaf, 0xc0, 0x82, 0x6b, 0xde, 0x8d, 0x58, 0x8a, 0x82, 0xe5, 0xbc, 0x92, 0xb7, 0xd0, 0x8c, + 0xb7, 0x70, 0x92, 0x0e, 0xed, 0xf1, 0x6c, 0xe0, 0x55, 0x3a, 0x50, 0x0f, 0xc4, 0x17, 0x46, 0xd9, + 0xa7, 0x8a, 0x3c, 0x2f, 0x11, 0x89, 0xcc, 0x11, 0x10, 0x38, 0xc4, 0xd2, 0x7f, 0x5f, 0x82, 0xcb, + 0xc7, 0x9e, 0x7d, 0xf4, 0x16, 0x20, 0xba, 0x1f, 0x10, 0xff, 0x90, 0xd8, 0xd7, 0x65, 0x2f, 0xc0, + 0xdf, 0x27, 0xdc, 0xc7, 0x45, 0x79, 0x25, 0xde, 0x1c, 0xd8, 0xc5, 0x43, 0x38, 0x90, 0x05, 0x67, + 0xf9, 0x61, 0x90, 0x0e, 0x75, 0xdc, 0xf0, 0x55, 0xf6, 0x24, 0x27, 0xed, 0x1c, 0x6f, 0x22, 0xb6, + 0x92, 0x20, 0x38, 0x8d, 0x89, 0x36, 0x61, 0x59, 0xd5, 0xfa, 0x8c, 0x83, 0x57, 0x95, 0x07, 0x96, + 0x1b, 0xe9, 0x6d, 0x9c, 0xa5, 0xe7, 0x10, 0x36, 0x09, 0x1c, 0x9f, 0xd8, 0x11, 0x44, 0x29, 0x0d, + 0xf1, 0x46, 0x7a, 0x1b, 0x67, 0xe9, 0x51, 0x1b, 0x96, 0x14, 0xaa, 0xf2, 0x77, 0x79, 0x56, 0x84, + 0xec, 0x2b, 0x63, 0x86, 0x4c, 0x16, 0xdd, 0x28, 0x07, 0x1b, 0x29, 0x2c, 0x9c, 0xc1, 0x46, 0x0c, + 0xc0, 0x0a, 0x4b, 0x5c, 0x50, 0x9e, 0x13, 0x92, 0xbe, 0x35, 0xe1, 0x19, 0x8c, 0x6a, 0x65, 0x7c, + 0x7d, 0x45, 0x4b, 0x01, 0x4e, 0xc8, 0xd1, 0x7f, 0x53, 0x04, 
0x88, 0x33, 0x8c, 0x77, 0x6f, 0x89, + 0x22, 0xbf, 0x9e, 0x29, 0xf2, 0x2b, 0xc9, 0xc7, 0x69, 0xa2, 0xa0, 0xdf, 0x82, 0x39, 0x2a, 0x4e, + 0x9e, 0x4a, 0x86, 0x8d, 0x51, 0x6a, 0x47, 0x77, 0x69, 0x84, 0x56, 0x07, 0x5e, 0x3a, 0xd5, 0xf9, + 0x55, 0x68, 0xe8, 0x1d, 0x28, 0x75, 0xa8, 0x1d, 0x5e, 0x7e, 0x2f, 0x8d, 0x42, 0xdd, 0xa6, 0x76, + 0x90, 0xc2, 0x9c, 0xe7, 0xba, 0xf3, 0x55, 0x2c, 0x70, 0xd0, 0x77, 0x61, 0x3e, 0x7c, 0x6e, 0xa8, + 0xb7, 0xc9, 0xd5, 0x51, 0x98, 0x61, 0xa7, 0x94, 0xc2, 0x5d, 0xe4, 0x15, 0x34, 0xdc, 0xc1, 0x11, + 0x26, 0xc7, 0x27, 0xea, 0xb5, 0x28, 0x6a, 0xfd, 0x18, 0xf8, 0xc3, 0x9e, 0xfd, 0x12, 0x3f, 0xdc, + 0xc1, 0x11, 0xa6, 0xfe, 0xdb, 0x22, 0x2c, 0xa6, 0x9e, 0xa1, 0xff, 0x8d, 0x70, 0xc9, 0xac, 0x9e, + 0x6e, 0xb8, 0x24, 0xe6, 0xf4, 0xc3, 0x25, 0x71, 0x4f, 0x2f, 0x5c, 0x09, 0xfc, 0x21, 0xe1, 0xfa, + 0x79, 0x01, 0xd0, 0x60, 0xa6, 0x23, 0x0b, 0xe6, 0x64, 0xab, 0x31, 0x8d, 0x1b, 0x2e, 0x7a, 0x75, + 0xa8, 0xcb, 0x4c, 0x41, 0x67, 0x1a, 0x94, 0xc2, 0x58, 0x0d, 0x0a, 0x99, 0x46, 0x23, 0x17, 0x5d, + 0x81, 0x79, 0xcd, 0x9c, 0xfe, 0xcb, 0xac, 0x5b, 0x64, 0x2e, 0xff, 0xcf, 0xba, 0xe5, 0xa9, 0xb5, + 0x51, 0xfa, 0x27, 0x1a, 0x9c, 0xdb, 0xa6, 0x76, 0x66, 0x1c, 0xf7, 0xb1, 0x06, 0xe7, 0xa2, 0x19, + 0x9b, 0x58, 0x73, 0x48, 0x50, 0xd6, 0xc6, 0x7b, 0xa4, 0xe5, 0x8c, 0x97, 0xea, 0x17, 0x95, 0x3a, + 0xe7, 0x1a, 0x59, 0x64, 0x3c, 0x28, 0x4c, 0xbf, 0x09, 0xcb, 0xdb, 0xd4, 0xde, 0xeb, 0xd8, 0x26, + 0x0b, 0xb5, 0xfa, 0x06, 0x40, 0x57, 0x7c, 0x37, 0xa9, 0x1d, 0x16, 0xa0, 0x2f, 0x70, 0x2f, 0xee, + 0x45, 0xab, 0x8f, 0x53, 0x5f, 0x38, 0x41, 0xaf, 0xff, 0x51, 0x83, 0x95, 0x6c, 0xb9, 0x9e, 0xa8, + 0xa9, 0xbe, 0x37, 0x74, 0x32, 0x30, 0xd1, 0xa4, 0x28, 0xee, 0x00, 0xc6, 0x9c, 0x0e, 0xfc, 0x29, + 0x6d, 0xc4, 0xe4, 0x93, 0x81, 0x1f, 0x0e, 0x6f, 0x9f, 0x27, 0xb3, 0xe2, 0x92, 0x12, 0x36, 0x7e, + 0x0b, 0xfd, 0xef, 0x33, 0x70, 0x19, 0x13, 0x8b, 0xba, 0x2e, 0xf1, 0x6c, 0x62, 0x0f, 0xa4, 0x4c, + 0x70, 0xb2, 0x89, 0xf0, 0x7d, 0x2d, 0x3a, 0xd9, 0xb2, 0xb3, 0xb8, 0x31, 0xba, 0x8a, 0x1f, 0xa3, + 0x8c, 0x21, 0xa3, 0x23, 0xa7, 0xa2, 0xbb, 0xe9, 0x73, 0x3e, 0xa5, 0x89, 0x68, 0x58, 0x2d, 0xfe, + 0xac, 0x01, 0xb4, 0xe9, 0x47, 0xc4, 0xaf, 0xd3, 0xae, 0x17, 0x4e, 0xa5, 0x9b, 0x27, 0xd3, 0x7e, + 0x2b, 0xc2, 0xcb, 0xcc, 0x75, 0xe3, 0x8d, 0x69, 0xcd, 0x75, 0x63, 0xd5, 0x85, 0x25, 0xdd, 0x4e, + 0x27, 0xb4, 0xa4, 0x34, 0x0d, 0x4b, 0xf6, 0x22, 0xbc, 0x8c, 0x25, 0xf1, 0xc6, 0xb4, 0x2c, 0x89, + 0x55, 0x5f, 0x73, 0x60, 0x21, 0x91, 0x00, 0xa7, 0x3d, 0x9d, 0xce, 0x44, 0xeb, 0xb4, 0xc5, 0x65, + 0x5c, 0x7a, 0xaa, 0xc3, 0xf0, 0xfb, 0x1a, 0xac, 0x26, 0x02, 0x9c, 0xb8, 0x77, 0x02, 0xf4, 0x2b, + 0x0d, 0xca, 0x56, 0x1c, 0x71, 0x45, 0x64, 0xca, 0xd6, 0x44, 0xde, 0x3c, 0xaf, 0x9f, 0x28, 0x79, + 0xa2, 0xb7, 0x6a, 0xb9, 0x91, 0x23, 0x06, 0xe7, 0x2a, 0xa0, 0xff, 0xae, 0x00, 0x17, 0x86, 0xbd, + 0xcb, 0x51, 0x43, 0x4d, 0xe7, 0x65, 0x75, 0xaa, 0x25, 0xa7, 0xf3, 0x23, 0x32, 0x31, 0x31, 0xc0, + 0x7f, 0x17, 0xca, 0xa9, 0x5a, 0xbf, 0xc7, 0x9c, 0xb6, 0x73, 0x4f, 0xb6, 0xcb, 0x72, 0x30, 0xc0, + 0xaf, 0xb9, 0xf2, 0x6e, 0x0e, 0x0d, 0xce, 0xe5, 0xce, 0x99, 0x62, 0x17, 0x4f, 0x7d, 0x8a, 0x7d, + 0x7f, 0xd0, 0x5f, 0xf2, 0xae, 0x9a, 0x8a, 0xbf, 0xde, 0x87, 0x8b, 0xe9, 0x4b, 0x65, 0xd0, 0x61, + 0x97, 0xfb, 0xbd, 0xea, 0xc5, 0x46, 0x1e, 0x11, 0xce, 0xe7, 0xcf, 0xbb, 0x19, 0x8b, 0x4f, 0xe9, + 0x66, 0xfc, 0x43, 0x01, 0x9e, 0xbd, 0x45, 0x7c, 0xe6, 0x58, 0x4f, 0x7f, 0xb4, 0xfc, 0x7e, 0x6a, + 0xb4, 0xfc, 0xb5, 0x51, 0xc7, 0x6d, 0xa8, 0x9a, 0xb9, 0x83, 0x65, 0x2b, 0x33, 0x58, 0xbe, 0x36, + 0x19, 0xfc, 0xf1, 0x63, 0xe5, 0x1f, 0x15, 0xa1, 0x32, 0x94, 0x2f, 0x1e, 0x2a, 0xbf, 
0x95, 0x6a, + 0x60, 0x37, 0x32, 0x0d, 0xac, 0x7e, 0x3c, 0xf7, 0xe7, 0x23, 0xe5, 0xd1, 0x23, 0xe5, 0xbf, 0x68, + 0x70, 0x71, 0xa8, 0x17, 0x9f, 0xc2, 0x40, 0xf9, 0xbd, 0xf4, 0x40, 0xf9, 0x95, 0x89, 0x72, 0x2c, + 0x67, 0x9c, 0xfc, 0xb0, 0x90, 0x63, 0x97, 0x18, 0x63, 0x7d, 0x08, 0xf3, 0x41, 0xf8, 0x27, 0x9b, + 0x36, 0xf9, 0x9f, 0x6c, 0xa2, 0xb1, 0x8f, 0xfe, 0x5e, 0x8b, 0x20, 0x11, 0x81, 0xc5, 0x6e, 0xa2, + 0x17, 0x52, 0x47, 0xb4, 0x36, 0xc6, 0xc0, 0x23, 0xd9, 0x42, 0xd5, 0x57, 0x78, 0x3b, 0x98, 0x5c, + 0xc1, 0x29, 0x58, 0xe4, 0xc2, 0x92, 0x9f, 0xea, 0xda, 0x54, 0x0a, 0x5e, 0x19, 0x43, 0x50, 0xa6, + 0xdd, 0x13, 0x7f, 0x17, 0xa6, 0xd7, 0x70, 0x06, 0x5c, 0xff, 0x69, 0x01, 0x2e, 0x1d, 0x73, 0xcc, + 0x51, 0xc0, 0xd5, 0x49, 0xde, 0xc4, 0xca, 0xb5, 0xaf, 0x3e, 0xc1, 0x4b, 0x20, 0xf9, 0xca, 0x08, + 0x95, 0x4a, 0x42, 0xe2, 0x8c, 0x08, 0xe4, 0xa7, 0xa6, 0xa2, 0x32, 0x91, 0xbe, 0x39, 0x51, 0x22, + 0x8d, 0x3d, 0x13, 0xad, 0xbf, 0xf8, 0xe0, 0x51, 0x65, 0xe6, 0xe1, 0xa3, 0xca, 0xcc, 0x67, 0x8f, + 0x2a, 0x33, 0x1f, 0xf7, 0x2b, 0xda, 0x83, 0x7e, 0x45, 0x7b, 0xd8, 0xaf, 0x68, 0x9f, 0xf5, 0x2b, + 0xda, 0x3f, 0xfa, 0x15, 0xed, 0x67, 0xff, 0xac, 0xcc, 0xbc, 0x77, 0x46, 0x49, 0xfb, 0x4f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x58, 0x06, 0x79, 0x35, 0x11, 0x25, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto similarity index 50% rename from vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.proto rename to vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index 27b99c224..5e5c491a4 100644 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,23 +19,44 @@ limitations under the License. syntax = 'proto2'; -package k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1; +package k8s.io.api.autoscaling.v2beta1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v2alpha1"; +option go_package = "v2beta1"; + +// ContainerResourcePolicy controls how autoscaler computes the recommended +// resources for a specific container. +message ContainerResourcePolicy { + // Name of the container or DefaultContainerResourcePolicy, in which + // case the policy is used by the containers that don't have their own + // policy specified. + optional string containerName = 1; + + // Whether autoscaler is enabled for the container. The default is "Auto". + // +optional + optional string mode = 2; + + // Specifies the minimal amount of resources that will be recommended + // for the container. The default is no minimum. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> minAllowed = 3; + + // Specifies the maximum amount of resources that will be recommended + // for the container.
The default is no maximum. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> maxAllowed = 4; +} // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -46,17 +67,61 @@ message CrossVersionObjectReference { optional string apiVersion = 3; } +// ExternalMetricSource indicates how to scale on a metric not associated with +// any Kubernetes object (for example length of queue in cloud +// messaging service, or QPS from loadbalancer running outside of cluster). +// Exactly one "target" type should be set. +message ExternalMetricSource { + // metricName is the name of the metric in question. + optional string metricName = 1; + + // metricSelector is used to identify a specific time series + // within a given metric. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + + // targetValue is the target value of the metric (as a quantity). + // Mutually exclusive with TargetAverageValue. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + + // targetAverageValue is the target per-pod value of global metric (as a quantity). + // Mutually exclusive with TargetValue. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; +} + +// ExternalMetricStatus indicates the current value of a global metric +// not associated with any Kubernetes object. +message ExternalMetricStatus { + // metricName is the name of a metric used for autoscaling in + // metric system. + optional string metricName = 1; + + // metricSelector is used to identify a specific time series + // within a given metric. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + + // currentValue is the current value of the metric (as a quantity) + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + + // currentAverageValue is the current value of metric averaged over autoscaled pods. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; +} + // HorizontalPodAutoscaler is the configuration for a horizontal pod // autoscaler, which automatically manages the replica count of any resource // implementing the scale subresource based on the metrics specified. message HorizontalPodAutoscaler { // metadata is the standard object metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -65,6 +130,30 @@ message HorizontalPodAutoscaler { optional HorizontalPodAutoscalerStatus status = 3; } +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point.
+message HorizontalPodAutoscalerCondition { + // type describes the current condition + optional string type = 1; + + // status is the status of the condition (True, False, Unknown) + optional string status = 2; + + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // reason is the reason for the condition's last transition. + // +optional + optional string reason = 4; + + // message is a human-readable explanation containing details about + // the transition + // +optional + optional string message = 5; +} + // HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects. message HorizontalPodAutoscalerList { // metadata is the standard list metadata. @@ -122,12 +211,17 @@ message HorizontalPodAutoscalerStatus { // currentMetrics is the last read state of the metrics used by this autoscaler. repeated MetricStatus currentMetrics = 5; + + // conditions is the set of conditions required for this autoscaler to scale its target, + // and indicates whether or not those conditions are met. + repeated HorizontalPodAutoscalerCondition conditions = 6; } // MetricSpec specifies how to scale based on a single metric // (only `type` and one other matching field should be set at once). message MetricSpec { - // type is the type of metric source. It should match one of the fields below. + // type is the type of metric source. It should be one of "Object", + // "Pods" or "Resource", each mapping to a matching field in the object. optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -148,11 +242,20 @@ message MetricSpec { // to normal per-pod metrics using the "pods" source. // +optional optional ResourceMetricSource resource = 4; + + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + optional ExternalMetricSource external = 5; } // MetricStatus describes the last-read state of a single metric. message MetricStatus { - // type is the type of metric source. It will match one of the fields below. + // type is the type of metric source. It will be one of "Object", + // "Pods" or "Resource", each corresponds to a matching field in the object. optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -173,6 +276,14 @@ message MetricStatus { // to normal per-pod metrics using the "pods" source. // +optional optional ResourceMetricStatus resource = 4; + + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + optional ExternalMetricStatus external = 5; } // ObjectMetricSource indicates how to scale on a metric describing a @@ -201,6 +312,26 @@ message ObjectMetricStatus { optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; } +// PodResourcePolicy controls how autoscaler computes the recommended resources +// for containers belonging to the pod. 
There can be at most one entry for every +// named container and optionally a single wildcard entry with `containerName` = '*', +// which handles all containers that don't have individual policies. +message PodResourcePolicy { + // Per-container resource policies. + // +optional + // +patchMergeKey=containerName + // +patchStrategy=merge + repeated ContainerResourcePolicy containerPolicies = 1; +} + +// PodUpdatePolicy describes the rules on how changes are applied to the pods. +message PodUpdatePolicy { + // Controls when autoscaler applies changes to the pod resources. + // The default is 'Auto'. + // +optional + optional string updateMode = 1; +} + // PodsMetricSource indicates how to scale on a metric describing each pod in // the current scale target (for example, transactions-processed-per-second). // The values will be averaged together before being compared to the target @@ -225,6 +356,39 @@ message PodsMetricStatus { optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; } +// RecommendedContainerResources is the recommendation of resources computed by +// autoscaler for a specific container. Respects the container resource policy +// if present in the spec. In particular the recommendation is not produced for +// containers with `ContainerScalingMode` set to 'Off'. +message RecommendedContainerResources { + // Name of the container. + optional string containerName = 1; + + // Recommended amount of resources. + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> target = 2; + + // Minimum recommended amount of resources. + // This amount is not guaranteed to be sufficient for the application to operate in a stable way, however + // running with less resources is likely to have significant impact on performance/availability. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> lowerBound = 3; + + // Maximum recommended amount of resources. + // Any resources allocated beyond this value are likely wasted. This value may be larger than the maximum + // amount of application is actually capable of consuming. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> upperBound = 4; +} + +// RecommendedPodResources is the recommendation of resources computed by +// autoscaler. It contains a recommendation for each container in the pod +// (except for those with `ContainerScalingMode` set to 'Off'). +message RecommendedPodResources { + // Resources recommended by the autoscaler for each container. + // +optional + repeated RecommendedContainerResources containerRecommendations = 1; +} + // ResourceMetricSource indicates how to scale on a resource metric known to // Kubernetes, as specified in requests and limits, describing each pod in the // current scale target (e.g. CPU or memory). The values will be averaged @@ -242,7 +406,7 @@ message ResourceMetricSource { // +optional optional int32 targetAverageUtilization = 2; - // targetAverageValue is the the target value of the average of the + // targetAverageValue is the target value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional @@ -266,10 +430,96 @@ message ResourceMetricStatus { // +optional optional int32 currentAverageUtilization = 2; - // currentAverageValue is the the current value of the average of the + // currentAverageValue is the current value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type.
// It will always be set, regardless of the corresponding metric specification. optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; } +// VerticalPodAutoscaler is the configuration for a vertical pod +// autoscaler, which automatically manages pod resources based on historical and +// real time resource utilization. +message VerticalPodAutoscaler { + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the behavior of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + optional VerticalPodAutoscalerSpec spec = 2; + + // Current information about the autoscaler. + // +optional + optional VerticalPodAutoscalerStatus status = 3; +} + +// VerticalPodAutoscalerCondition describes the state of +// a VerticalPodAutoscaler at a certain point. +message VerticalPodAutoscalerCondition { + // type describes the current condition + optional string type = 1; + + // status is the status of the condition (True, False, Unknown) + optional string status = 2; + + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // reason is the reason for the condition's last transition. + // +optional + optional string reason = 4; + + // message is a human-readable explanation containing details about + // the transition + // +optional + optional string message = 5; +} + +// VerticalPodAutoscalerList is a list of VerticalPodAutoscaler objects. +message VerticalPodAutoscalerList { + // metadata is the standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of vertical pod autoscaler objects. + repeated VerticalPodAutoscaler items = 2; +} + +// VerticalPodAutoscalerSpec is the specification of the behavior of the autoscaler. +message VerticalPodAutoscalerSpec { + // A label query that determines the set of pods controlled by the Autoscaler. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + + // Describes the rules on how changes are applied to the pods. + // If not specified, all fields in the `PodUpdatePolicy` are set to their + // default values. + // +optional + optional PodUpdatePolicy updatePolicy = 2; + + // Controls how the autoscaler computes recommended resources. + // The resource policy may be used to set constraints on the recommendations + // for individual containers. If not specified, the autoscaler computes recommended + // resources for all containers in the pod, without additional constraints. + // +optional + optional PodResourcePolicy resourcePolicy = 3; +} + +// VerticalPodAutoscalerStatus describes the runtime state of the autoscaler. +message VerticalPodAutoscalerStatus { + // The most recently computed amount of resources recommended by the + // autoscaler for the controlled pods. + // +optional + optional RecommendedPodResources recommendation = 1; + + // Conditions is the set of conditions required for this autoscaler to scale its target, + // and indicates whether or not those conditions are met. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated VerticalPodAutoscalerCondition conditions = 2; +} + diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/register.go b/vendor/k8s.io/api/autoscaling/v2beta1/register.go new file mode 100644 index 000000000..12d697f01 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go new file mode 100644 index 000000000..450bbc5cf --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -0,0 +1,583 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2beta1 + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ** Horizontal Pod Autoscaler types start here ** + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. 
+type CrossVersionObjectReference struct { + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +type HorizontalPodAutoscalerSpec struct { + // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics + // should be collected, as well as to actually change the replica count. + ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` + // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. + // +optional + MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less that minReplicas. + MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications for which to use to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. + // +optional + Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` +} + +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +var ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" + // ExternalMetricSourceType is a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + ExternalMetricSourceType MetricSourceType = "External" +) + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). 
+type MetricSpec struct { + // type is the type of metric source. It should be one of "Object", + // "Pods" or "Resource", each mapping to a matching field in the object. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricSource struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // targetValue is the target value of the metric (as a quantity). + TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +type PodsMetricSource struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // targetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // name is the name of the resource in question. 
+ Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` +} + +// ExternalMetricSource indicates how to scale on a metric not associated with +// any Kubernetes object (for example length of queue in cloud +// messaging service, or QPS from loadbalancer running outside of cluster). +// Exactly one "target" type should be set. +type ExternalMetricSource struct { + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series + // within a given metric. + // +optional + MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` + // targetValue is the target value of the metric (as a quantity). + // Mutually exclusive with TargetAverageValue. + // +optional + TargetValue *resource.Quantity `json:"targetValue,omitempty" protobuf:"bytes,3,opt,name=targetValue"` + // targetAverageValue is the target per-pod value of global metric (as a quantity). + // Mutually exclusive with TargetValue. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,4,opt,name=targetAverageValue"` +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +type HorizontalPodAutoscalerStatus struct { + // observedGeneration is the most recent generation observed by this autoscaler. + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` + + // currentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` + + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` + + // currentMetrics is the last read state of the metrics used by this autoscaler. + CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"` + + // conditions is the set of conditions required for this autoscaler to scale its target, + // and indicates whether or not those conditions are met. 
+ Conditions []HorizontalPodAutoscalerCondition `json:"conditions" protobuf:"bytes,6,rep,name=conditions"` +} + +// HorizontalPodAutoscalerConditionType are the valid conditions of +// a HorizontalPodAutoscaler. +type HorizontalPodAutoscalerConditionType string + +var ( + // ScalingActive indicates that the HPA controller is able to scale if necessary: + // it's correctly configured, can fetch the desired metrics, and isn't disabled. + ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" + // AbleToScale indicates a lack of transient issues which prevent scaling from occurring, + // such as being in a backoff window, or being unable to access/update the target scale. + AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" + // ScalingLimited indicates that the calculated scale based on metrics would be above or + // below the range for the HPA, and has thus been capped. + ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited" +) + +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point. +type HorizontalPodAutoscalerCondition struct { + // type describes the current condition + Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about + // the transition + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // type is the type of metric source. It will be one of "Object", + // "Pods" or "Resource", each corresponds to a matching field in the object. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // external refers to a global metric that is not associated + // with any Kubernetes object. 
It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + External *ExternalMetricStatus `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricStatus struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // currentValue is the current value of the metric (as a quantity). + CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +type PodsMetricStatus struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // currentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` +} + +// ExternalMetricStatus indicates the current value of a global metric +// not associated with any Kubernetes object. +type ExternalMetricStatus struct { + // metricName is the name of a metric used for autoscaling in + // metric system. + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series + // within a given metric. 
+ // +optional + MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` + // currentValue is the current value of the metric (as a quantity) + CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` + // currentAverageValue is the current value of metric averaged over autoscaled pods. + // +optional + CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty" protobuf:"bytes,4,opt,name=currentAverageValue"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +type HorizontalPodAutoscaler struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification for the behaviour of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // +optional + Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // status is the current information about the autoscaler. + // +optional + Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects. +type HorizontalPodAutoscalerList struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of horizontal pod autoscaler objects. + Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ** Vertical Pod Autoscaler types start here ** + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VerticalPodAutoscalerList is a list of VerticalPodAutoscaler objects. +type VerticalPodAutoscalerList struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard list metadata. + // +optional + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of vertical pod autoscaler objects. + Items []VerticalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VerticalPodAutoscaler is the configuration for a vertical pod +// autoscaler, which automatically manages pod resources based on historical and +// real time resource utilization. +type VerticalPodAutoscaler struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the behavior of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + Spec VerticalPodAutoscalerSpec `json:"spec" protobuf:"bytes,2,name=spec"` + + // Current information about the autoscaler. 
+ // +optional + Status VerticalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// VerticalPodAutoscalerSpec is the specification of the behavior of the autoscaler. +type VerticalPodAutoscalerSpec struct { + // A label query that determines the set of pods controlled by the Autoscaler. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,name=selector"` + + // Describes the rules on how changes are applied to the pods. + // If not specified, all fields in the `PodUpdatePolicy` are set to their + // default values. + // +optional + UpdatePolicy *PodUpdatePolicy `json:"updatePolicy,omitempty" protobuf:"bytes,2,opt,name=updatePolicy"` + + // Controls how the autoscaler computes recommended resources. + // The resource policy may be used to set constraints on the recommendations + // for individual containers. If not specified, the autoscaler computes recommended + // resources for all containers in the pod, without additional constraints. + // +optional + ResourcePolicy *PodResourcePolicy `json:"resourcePolicy,omitempty" protobuf:"bytes,3,opt,name=resourcePolicy"` +} + +// PodUpdatePolicy describes the rules on how changes are applied to the pods. +type PodUpdatePolicy struct { + // Controls when autoscaler applies changes to the pod resources. + // The default is 'Auto'. + // +optional + UpdateMode *UpdateMode `json:"updateMode,omitempty" protobuf:"bytes,1,opt,name=updateMode"` +} + +// UpdateMode controls when autoscaler applies changes to the pod resoures. +type UpdateMode string + +const ( + // UpdateModeOff means that autoscaler never changes Pod resources. + // The recommender still sets the recommended resources in the + // VerticalPodAutoscaler object. This can be used for a "dry run". + UpdateModeOff UpdateMode = "Off" + // UpdateModeInitial means that autoscaler only assigns resources on pod + // creation and does not change them during the lifetime of the pod. + UpdateModeInitial UpdateMode = "Initial" + // UpdateModeRecreate means that autoscaler assigns resources on pod + // creation and additionally can update them during the lifetime of the + // pod by deleting and recreating the pod. + UpdateModeRecreate UpdateMode = "Recreate" + // UpdateModeAuto means that autoscaler assigns resources on pod creation + // and additionally can update them during the lifetime of the pod, + // using any available update method. Currently this is equivalent to + // Recreate, which is the only available update method. + UpdateModeAuto UpdateMode = "Auto" +) + +// PodResourcePolicy controls how autoscaler computes the recommended resources +// for containers belonging to the pod. There can be at most one entry for every +// named container and optionally a single wildcard entry with `containerName` = '*', +// which handles all containers that don't have individual policies. +type PodResourcePolicy struct { + // Per-container resource policies. + // +optional + // +patchMergeKey=containerName + // +patchStrategy=merge + ContainerPolicies []ContainerResourcePolicy `json:"containerPolicies,omitempty" patchStrategy:"merge" patchMergeKey:"containerName" protobuf:"bytes,1,rep,name=containerPolicies"` +} + +// ContainerResourcePolicy controls how autoscaler computes the recommended +// resources for a specific container. 
+type ContainerResourcePolicy struct { + // Name of the container or DefaultContainerResourcePolicy, in which + // case the policy is used by the containers that don't have their own + // policy specified. + ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` + // Whether autoscaler is enabled for the container. The default is "Auto". + // +optional + Mode *ContainerScalingMode `json:"mode,omitempty" protobuf:"bytes,2,opt,name=mode"` + // Specifies the minimal amount of resources that will be recommended + // for the container. The default is no minimum. + // +optional + MinAllowed v1.ResourceList `json:"minAllowed,omitempty" protobuf:"bytes,3,rep,name=minAllowed,casttype=ResourceList,castkey=ResourceName"` + // Specifies the maximum amount of resources that will be recommended + // for the container. The default is no maximum. + // +optional + MaxAllowed v1.ResourceList `json:"maxAllowed,omitempty" protobuf:"bytes,4,rep,name=maxAllowed,casttype=ResourceList,castkey=ResourceName"` +} + +const ( + // DefaultContainerResourcePolicy can be passed as + // ContainerResourcePolicy.ContainerName to specify the default policy. + DefaultContainerResourcePolicy = "*" +) + +// ContainerScalingMode controls whether autoscaler is enabled for a specific +// container. +type ContainerScalingMode string + +const ( + // ContainerScalingModeAuto means autoscaling is enabled for a container. + ContainerScalingModeAuto ContainerScalingMode = "Auto" + // ContainerScalingModeOff means autoscaling is disabled for a container. + ContainerScalingModeOff ContainerScalingMode = "Off" +) + +// VerticalPodAutoscalerStatus describes the runtime state of the autoscaler. +type VerticalPodAutoscalerStatus struct { + // The most recently computed amount of resources recommended by the + // autoscaler for the controlled pods. + // +optional + Recommendation *RecommendedPodResources `json:"recommendation,omitempty" protobuf:"bytes,1,opt,name=recommendation"` + + // Conditions is the set of conditions required for this autoscaler to scale its target, + // and indicates whether or not those conditions are met. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []VerticalPodAutoscalerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` +} + +// RecommendedPodResources is the recommendation of resources computed by +// autoscaler. It contains a recommendation for each container in the pod +// (except for those with `ContainerScalingMode` set to 'Off'). +type RecommendedPodResources struct { + // Resources recommended by the autoscaler for each container. + // +optional + ContainerRecommendations []RecommendedContainerResources `json:"containerRecommendations,omitempty" protobuf:"bytes,1,rep,name=containerRecommendations"` +} + +// RecommendedContainerResources is the recommendation of resources computed by +// autoscaler for a specific container. Respects the container resource policy +// if present in the spec. In particular the recommendation is not produced for +// containers with `ContainerScalingMode` set to 'Off'. +type RecommendedContainerResources struct { + // Name of the container. + ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` + // Recommended amount of resources. + Target v1.ResourceList `json:"target" protobuf:"bytes,2,rep,name=target,casttype=ResourceList,castkey=ResourceName"` + // Minimum recommended amount of resources. 
+ // This amount is not guaranteed to be sufficient for the application to operate in a stable way, however + // running with less resources is likely to have significant impact on performance/availability. + // +optional + LowerBound v1.ResourceList `json:"lowerBound,omitempty" protobuf:"bytes,3,rep,name=lowerBound,casttype=ResourceList,castkey=ResourceName"` + // Maximum recommended amount of resources. + // Any resources allocated beyond this value are likely wasted. This value may be larger than the maximum + // amount of application is actually capable of consuming. + // +optional + UpperBound v1.ResourceList `json:"upperBound,omitempty" protobuf:"bytes,4,rep,name=upperBound,casttype=ResourceList,castkey=ResourceName"` +} + +// VerticalPodAutoscalerConditionType are the valid conditions of +// a VerticalPodAutoscaler. +type VerticalPodAutoscalerConditionType string + +var ( + // RecommendationProvided indicates whether the VPA recommender was able to calculate a recommendation. + RecommendationProvided VerticalPodAutoscalerConditionType = "RecommendationProvided" +) + +// VerticalPodAutoscalerCondition describes the state of +// a VerticalPodAutoscaler at a certain point. +type VerticalPodAutoscalerCondition struct { + // type describes the current condition + Type VerticalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about + // the transition + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..fe67bb1b4 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -0,0 +1,321 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ContainerResourcePolicy = map[string]string{ + "": "ContainerResourcePolicy controls how autoscaler computes the recommended resources for a specific container.", + "containerName": "Name of the container or DefaultContainerResourcePolicy, in which case the policy is used by the containers that don't have their own policy specified.", + "mode": "Whether autoscaler is enabled for the container. The default is \"Auto\".", + "minAllowed": "Specifies the minimal amount of resources that will be recommended for the container. The default is no minimum.", + "maxAllowed": "Specifies the maximum amount of resources that will be recommended for the container. The default is no maximum.", +} + +func (ContainerResourcePolicy) SwaggerDoc() map[string]string { + return map_ContainerResourcePolicy +} + +var map_CrossVersionObjectReference = map[string]string{ + "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", + "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "apiVersion": "API version of the referent", +} + +func (CrossVersionObjectReference) SwaggerDoc() map[string]string { + return map_CrossVersionObjectReference +} + +var map_ExternalMetricSource = map[string]string{ + "": "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). Exactly one \"target\" type should be set.", + "metricName": "metricName is the name of the metric in question.", + "metricSelector": "metricSelector is used to identify a specific time series within a given metric.", + "targetValue": "targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.", + "targetAverageValue": "targetAverageValue is the target per-pod value of global metric (as a quantity). Mutually exclusive with TargetValue.", +} + +func (ExternalMetricSource) SwaggerDoc() map[string]string { + return map_ExternalMetricSource +} + +var map_ExternalMetricStatus = map[string]string{ + "": "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.", + "metricName": "metricName is the name of a metric used for autoscaling in metric system.", + "metricSelector": "metricSelector is used to identify a specific time series within a given metric.", + "currentValue": "currentValue is the current value of the metric (as a quantity)", + "currentAverageValue": "currentAverageValue is the current value of metric averaged over autoscaled pods.", +} + +func (ExternalMetricStatus) SwaggerDoc() map[string]string { + return map_ExternalMetricStatus +} + +var map_HorizontalPodAutoscaler = map[string]string{ + "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", + "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "status": "status is the current information about the autoscaler.", +} + +func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscaler +} + +var map_HorizontalPodAutoscalerCondition = map[string]string{ + "": "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.", + "type": "type describes the current condition", + "status": "status is the status of the condition (True, False, Unknown)", + "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another", + "reason": "reason is the reason for the condition's last transition.", + "message": "message is a human-readable explanation containing details about the transition", +} + +func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerCondition +} + +var map_HorizontalPodAutoscalerList = map[string]string{ + "": "HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects.", + "metadata": "metadata is the standard list metadata.", + "items": "items is the list of horizontal pod autoscaler objects.", +} + +func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerList +} + +var map_HorizontalPodAutoscalerSpec = map[string]string{ + "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", + "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", + "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", + "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. 
See the individual metric source types for more information about how each type of metric must respond.", +} + +func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerSpec +} + +var map_HorizontalPodAutoscalerStatus = map[string]string{ + "": "HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.", + "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.", + "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.", + "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.", + "currentMetrics": "currentMetrics is the last read state of the metrics used by this autoscaler.", + "conditions": "conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.", +} + +func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerStatus +} + +var map_MetricSpec = map[string]string{ + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", +} + +func (MetricSpec) SwaggerDoc() map[string]string { + return map_MetricSpec +} + +var map_MetricStatus = map[string]string{ + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. 
CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", +} + +func (MetricStatus) SwaggerDoc() map[string]string { + return map_MetricStatus +} + +var map_ObjectMetricSource = map[string]string{ + "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "targetValue": "targetValue is the target value of the metric (as a quantity).", +} + +func (ObjectMetricSource) SwaggerDoc() map[string]string { + return map_ObjectMetricSource +} + +var map_ObjectMetricStatus = map[string]string{ + "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "currentValue": "currentValue is the current value of the metric (as a quantity).", +} + +func (ObjectMetricStatus) SwaggerDoc() map[string]string { + return map_ObjectMetricStatus +} + +var map_PodResourcePolicy = map[string]string{ + "": "PodResourcePolicy controls how autoscaler computes the recommended resources for containers belonging to the pod. There can be at most one entry for every named container and optionally a single wildcard entry with `containerName` = '*', which handles all containers that don't have individual policies.", + "containerPolicies": "Per-container resource policies.", +} + +func (PodResourcePolicy) SwaggerDoc() map[string]string { + return map_PodResourcePolicy +} + +var map_PodUpdatePolicy = map[string]string{ + "": "PodUpdatePolicy describes the rules on how changes are applied to the pods.", + "updateMode": "Controls when autoscaler applies changes to the pod resources. The default is 'Auto'.", +} + +func (PodUpdatePolicy) SwaggerDoc() map[string]string { + return map_PodUpdatePolicy +} + +var map_PodsMetricSource = map[string]string{ + "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). 
The values will be averaged together before being compared to the target value.", + "metricName": "metricName is the name of the metric in question", + "targetAverageValue": "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)", +} + +func (PodsMetricSource) SwaggerDoc() map[string]string { + return map_PodsMetricSource +} + +var map_PodsMetricStatus = map[string]string{ + "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", + "metricName": "metricName is the name of the metric in question", + "currentAverageValue": "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)", +} + +func (PodsMetricStatus) SwaggerDoc() map[string]string { + return map_PodsMetricStatus +} + +var map_RecommendedContainerResources = map[string]string{ + "": "RecommendedContainerResources is the recommendation of resources computed by autoscaler for a specific container. Respects the container resource policy if present in the spec. In particular the recommendation is not produced for containers with `ContainerScalingMode` set to 'Off'.", + "containerName": "Name of the container.", + "target": "Recommended amount of resources.", + "lowerBound": "Minimum recommended amount of resources. This amount is not guaranteed to be sufficient for the application to operate in a stable way, however running with less resources is likely to have significant impact on performance/availability.", + "upperBound": "Maximum recommended amount of resources. Any resources allocated beyond this value are likely wasted. This value may be larger than the maximum amount of application is actually capable of consuming.", +} + +func (RecommendedContainerResources) SwaggerDoc() map[string]string { + return map_RecommendedContainerResources +} + +var map_RecommendedPodResources = map[string]string{ + "": "RecommendedPodResources is the recommendation of resources computed by autoscaler. It contains a recommendation for each container in the pod (except for those with `ContainerScalingMode` set to 'Off').", + "containerRecommendations": "Resources recommended by the autoscaler for each container.", +} + +func (RecommendedPodResources) SwaggerDoc() map[string]string { + return map_RecommendedPodResources +} + +var map_ResourceMetricSource = map[string]string{ + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", +} + +func (ResourceMetricSource) SwaggerDoc() map[string]string { + return map_ResourceMetricSource +} + +var map_ResourceMetricStatus = map[string]string{ + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", + "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", +} + +func (ResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ResourceMetricStatus +} + +var map_VerticalPodAutoscaler = map[string]string{ + "": "VerticalPodAutoscaler is the configuration for a vertical pod autoscaler, which automatically manages pod resources based on historical and real time resource utilization.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the behavior of the autoscaler. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "status": "Current information about the autoscaler.", +} + +func (VerticalPodAutoscaler) SwaggerDoc() map[string]string { + return map_VerticalPodAutoscaler +} + +var map_VerticalPodAutoscalerCondition = map[string]string{ + "": "VerticalPodAutoscalerCondition describes the state of a VerticalPodAutoscaler at a certain point.", + "type": "type describes the current condition", + "status": "status is the status of the condition (True, False, Unknown)", + "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another", + "reason": "reason is the reason for the condition's last transition.", + "message": "message is a human-readable explanation containing details about the transition", +} + +func (VerticalPodAutoscalerCondition) SwaggerDoc() map[string]string { + return map_VerticalPodAutoscalerCondition +} + +var map_VerticalPodAutoscalerList = map[string]string{ + "": "VerticalPodAutoscalerList is a list of VerticalPodAutoscaler objects.", + "metadata": "metadata is the standard list metadata.", + "items": "items is the list of vertical pod autoscaler objects.", +} + +func (VerticalPodAutoscalerList) SwaggerDoc() map[string]string { + return map_VerticalPodAutoscalerList +} + +var map_VerticalPodAutoscalerSpec = map[string]string{ + "": "VerticalPodAutoscalerSpec is the specification of the behavior of the autoscaler.", + "selector": "A label query that determines the set of pods controlled by the Autoscaler. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "updatePolicy": "Describes the rules on how changes are applied to the pods. If not specified, all fields in the `PodUpdatePolicy` are set to their default values.", + "resourcePolicy": "Controls how the autoscaler computes recommended resources. The resource policy may be used to set constraints on the recommendations for individual containers. If not specified, the autoscaler computes recommended resources for all containers in the pod, without additional constraints.", +} + +func (VerticalPodAutoscalerSpec) SwaggerDoc() map[string]string { + return map_VerticalPodAutoscalerSpec +} + +var map_VerticalPodAutoscalerStatus = map[string]string{ + "": "VerticalPodAutoscalerStatus describes the runtime state of the autoscaler.", + "recommendation": "The most recently computed amount of resources recommended by the autoscaler for the controlled pods.", + "conditions": "Conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.", +} + +func (VerticalPodAutoscalerStatus) SwaggerDoc() map[string]string { + return map_VerticalPodAutoscalerStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..56fa1cec5 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -0,0 +1,813 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v2beta1 + +import ( + v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourcePolicy) DeepCopyInto(out *ContainerResourcePolicy) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + if *in == nil { + *out = nil + } else { + *out = new(ContainerScalingMode) + **out = **in + } + } + if in.MinAllowed != nil { + in, out := &in.MinAllowed, &out.MinAllowed + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxAllowed != nil { + in, out := &in.MaxAllowed, &out.MaxAllowed + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourcePolicy. +func (in *ContainerResourcePolicy) DeepCopy() *ContainerResourcePolicy { + if in == nil { + return nil + } + out := new(ContainerResourcePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference. +func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference { + if in == nil { + return nil + } + out := new(CrossVersionObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) { + *out = *in + if in.MetricSelector != nil { + in, out := &in.MetricSelector, &out.MetricSelector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.TargetValue != nil { + in, out := &in.TargetValue, &out.TargetValue + if *in == nil { + *out = nil + } else { + x := (*in).DeepCopy() + *out = &x + } + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + if *in == nil { + *out = nil + } else { + x := (*in).DeepCopy() + *out = &x + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource. +func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource { + if in == nil { + return nil + } + out := new(ExternalMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) { + *out = *in + if in.MetricSelector != nil { + in, out := &in.MetricSelector, &out.MetricSelector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + out.CurrentValue = in.CurrentValue.DeepCopy() + if in.CurrentAverageValue != nil { + in, out := &in.CurrentAverageValue, &out.CurrentAverageValue + if *in == nil { + *out = nil + } else { + x := (*in).DeepCopy() + *out = &x + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus. +func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus { + if in == nil { + return nil + } + out := new(ExternalMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler. +func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition. +func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList. +func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) { + *out = *in + out.ScaleTargetRef = in.ScaleTargetRef + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec. +func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) { + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() + } + } + if in.CurrentMetrics != nil { + in, out := &in.CurrentMetrics, &out.CurrentMetrics + *out = make([]MetricStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]HorizontalPodAutoscalerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus. +func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + if *in == nil { + *out = nil + } else { + *out = new(ObjectMetricSource) + (*in).DeepCopyInto(*out) + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + if *in == nil { + *out = nil + } else { + *out = new(PodsMetricSource) + (*in).DeepCopyInto(*out) + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + if *in == nil { + *out = nil + } else { + *out = new(ResourceMetricSource) + (*in).DeepCopyInto(*out) + } + } + if in.External != nil { + in, out := &in.External, &out.External + if *in == nil { + *out = nil + } else { + *out = new(ExternalMetricSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec. +func (in *MetricSpec) DeepCopy() *MetricSpec { + if in == nil { + return nil + } + out := new(MetricSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + if *in == nil { + *out = nil + } else { + *out = new(ObjectMetricStatus) + (*in).DeepCopyInto(*out) + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + if *in == nil { + *out = nil + } else { + *out = new(PodsMetricStatus) + (*in).DeepCopyInto(*out) + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + if *in == nil { + *out = nil + } else { + *out = new(ResourceMetricStatus) + (*in).DeepCopyInto(*out) + } + } + if in.External != nil { + in, out := &in.External, &out.External + if *in == nil { + *out = nil + } else { + *out = new(ExternalMetricStatus) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus. +func (in *MetricStatus) DeepCopy() *MetricStatus { + if in == nil { + return nil + } + out := new(MetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) { + *out = *in + out.Target = in.Target + out.TargetValue = in.TargetValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource. +func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource { + if in == nil { + return nil + } + out := new(ObjectMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) { + *out = *in + out.Target = in.Target + out.CurrentValue = in.CurrentValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus. +func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus { + if in == nil { + return nil + } + out := new(ObjectMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodResourcePolicy) DeepCopyInto(out *PodResourcePolicy) { + *out = *in + if in.ContainerPolicies != nil { + in, out := &in.ContainerPolicies, &out.ContainerPolicies + *out = make([]ContainerResourcePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourcePolicy. +func (in *PodResourcePolicy) DeepCopy() *PodResourcePolicy { + if in == nil { + return nil + } + out := new(PodResourcePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodUpdatePolicy) DeepCopyInto(out *PodUpdatePolicy) { + *out = *in + if in.UpdateMode != nil { + in, out := &in.UpdateMode, &out.UpdateMode + if *in == nil { + *out = nil + } else { + *out = new(UpdateMode) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodUpdatePolicy. 
+func (in *PodUpdatePolicy) DeepCopy() *PodUpdatePolicy { + if in == nil { + return nil + } + out := new(PodUpdatePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) { + *out = *in + out.TargetAverageValue = in.TargetAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource. +func (in *PodsMetricSource) DeepCopy() *PodsMetricSource { + if in == nil { + return nil + } + out := new(PodsMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) { + *out = *in + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus. +func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus { + if in == nil { + return nil + } + out := new(PodsMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecommendedContainerResources) DeepCopyInto(out *RecommendedContainerResources) { + *out = *in + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.LowerBound != nil { + in, out := &in.LowerBound, &out.LowerBound + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.UpperBound != nil { + in, out := &in.UpperBound, &out.UpperBound + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendedContainerResources. +func (in *RecommendedContainerResources) DeepCopy() *RecommendedContainerResources { + if in == nil { + return nil + } + out := new(RecommendedContainerResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecommendedPodResources) DeepCopyInto(out *RecommendedPodResources) { + *out = *in + if in.ContainerRecommendations != nil { + in, out := &in.ContainerRecommendations, &out.ContainerRecommendations + *out = make([]RecommendedContainerResources, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendedPodResources. +func (in *RecommendedPodResources) DeepCopy() *RecommendedPodResources { + if in == nil { + return nil + } + out := new(RecommendedPodResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) { + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + if *in == nil { + *out = nil + } else { + x := (*in).DeepCopy() + *out = &x + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource. +func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource { + if in == nil { + return nil + } + out := new(ResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) { + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus. +func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus { + if in == nil { + return nil + } + out := new(ResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscaler) DeepCopyInto(out *VerticalPodAutoscaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscaler. +func (in *VerticalPodAutoscaler) DeepCopy() *VerticalPodAutoscaler { + if in == nil { + return nil + } + out := new(VerticalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VerticalPodAutoscaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerCondition) DeepCopyInto(out *VerticalPodAutoscalerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerCondition. +func (in *VerticalPodAutoscalerCondition) DeepCopy() *VerticalPodAutoscalerCondition { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VerticalPodAutoscalerList) DeepCopyInto(out *VerticalPodAutoscalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VerticalPodAutoscaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerList. +func (in *VerticalPodAutoscalerList) DeepCopy() *VerticalPodAutoscalerList { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VerticalPodAutoscalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerSpec) DeepCopyInto(out *VerticalPodAutoscalerSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.UpdatePolicy != nil { + in, out := &in.UpdatePolicy, &out.UpdatePolicy + if *in == nil { + *out = nil + } else { + *out = new(PodUpdatePolicy) + (*in).DeepCopyInto(*out) + } + } + if in.ResourcePolicy != nil { + in, out := &in.ResourcePolicy, &out.ResourcePolicy + if *in == nil { + *out = nil + } else { + *out = new(PodResourcePolicy) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerSpec. +func (in *VerticalPodAutoscalerSpec) DeepCopy() *VerticalPodAutoscalerSpec { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerStatus) DeepCopyInto(out *VerticalPodAutoscalerStatus) { + *out = *in + if in.Recommendation != nil { + in, out := &in.Recommendation, &out.Recommendation + if *in == nil { + *out = nil + } else { + *out = new(RecommendedPodResources) + (*in).DeepCopyInto(*out) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]VerticalPodAutoscalerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerStatus. +func (in *VerticalPodAutoscalerStatus) DeepCopy() *VerticalPodAutoscalerStatus { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/doc.go b/vendor/k8s.io/api/batch/v1/doc.go similarity index 85% rename from vendor/k8s.io/client-go/pkg/apis/apps/doc.go rename to vendor/k8s.io/api/batch/v1/doc.go index d27cee51c..04491807f 100644 --- a/vendor/k8s.io/client-go/pkg/apis/apps/doc.go +++ b/vendor/k8s.io/api/batch/v1/doc.go @@ -14,4 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package apps +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +package v1 // import "k8s.io/api/batch/v1" diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go similarity index 67% rename from vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.pb.go rename to vendor/k8s.io/api/batch/v1/generated.pb.go index 0ebc07be7..176567acf 100644 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,14 +15,14 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto // DO NOT EDIT! /* Package v1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto + k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto It has these top-level messages: Job @@ -37,10 +37,9 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" - import strings "strings" import reflect "reflect" @@ -53,7 +52,9 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *Job) Reset() { *m = Job{} } func (*Job) ProtoMessage() {} @@ -76,47 +77,47 @@ func (*JobStatus) ProtoMessage() {} func (*JobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func init() { - proto.RegisterType((*Job)(nil), "k8s.io.client-go.pkg.apis.batch.v1.Job") - proto.RegisterType((*JobCondition)(nil), "k8s.io.client-go.pkg.apis.batch.v1.JobCondition") - proto.RegisterType((*JobList)(nil), "k8s.io.client-go.pkg.apis.batch.v1.JobList") - proto.RegisterType((*JobSpec)(nil), "k8s.io.client-go.pkg.apis.batch.v1.JobSpec") - proto.RegisterType((*JobStatus)(nil), "k8s.io.client-go.pkg.apis.batch.v1.JobStatus") + proto.RegisterType((*Job)(nil), "k8s.io.api.batch.v1.Job") + proto.RegisterType((*JobCondition)(nil), "k8s.io.api.batch.v1.JobCondition") + proto.RegisterType((*JobList)(nil), "k8s.io.api.batch.v1.JobList") + proto.RegisterType((*JobSpec)(nil), "k8s.io.api.batch.v1.JobSpec") + proto.RegisterType((*JobStatus)(nil), "k8s.io.api.batch.v1.JobStatus") } -func (m *Job) Marshal() (data []byte, err error) { +func (m *Job) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Job) MarshalTo(data []byte) (int, error) { +func (m *Job) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n2 - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -124,85 +125,85 @@ func (m *Job) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *JobCondition) Marshal() (data []byte, err error) { +func (m *JobCondition) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *JobCondition) MarshalTo(data []byte) (int, error) { +func (m *JobCondition) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) - n4, err := 
m.LastProbeTime.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) + n4, err := m.LastProbeTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n4 - data[i] = 0x22 + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n5, err := m.LastTransitionTime.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n5, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n5 - data[i] = 0x2a + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x32 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) return i, nil } -func (m *JobList) Marshal() (data []byte, err error) { +func (m *JobList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *JobList) MarshalTo(data []byte) (int, error) { +func (m *JobList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -212,88 +213,93 @@ func (m *JobList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *JobSpec) Marshal() (data []byte, err error) { +func (m *JobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *JobSpec) MarshalTo(data []byte) (int, error) { +func (m *JobSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Parallelism != nil { - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(*m.Parallelism)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) } if m.Completions != nil { - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(*m.Completions)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.Completions)) } if m.ActiveDeadlineSeconds != nil { - data[i] = 0x18 + dAtA[i] = 0x18 i++ - i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) } if m.Selector != nil { - data[i] = 0x22 + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n7, err := m.Selector.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n7, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n7 } if 
m.ManualSelector != nil { - data[i] = 0x28 + dAtA[i] = 0x28 i++ if *m.ManualSelector { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ } - data[i] = 0x32 + dAtA[i] = 0x32 i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n8 + if m.BackoffLimit != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimit)) + } return i, nil } -func (m *JobStatus) Marshal() (data []byte, err error) { +func (m *JobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *JobStatus) MarshalTo(data []byte) (int, error) { +func (m *JobStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Conditions) > 0 { for _, msg := range m.Conditions { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -301,62 +307,62 @@ func (m *JobStatus) MarshalTo(data []byte) (int, error) { } } if m.StartTime != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) - n9, err := m.StartTime.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) + n9, err := m.StartTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n9 } if m.CompletionTime != nil { - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size())) - n10, err := m.CompletionTime.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.CompletionTime.Size())) + n10, err := m.CompletionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n10 } - data[i] = 0x20 + dAtA[i] = 0x20 i++ - i = encodeVarintGenerated(data, i, uint64(m.Active)) - data[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.Active)) + dAtA[i] = 0x28 i++ - i = encodeVarintGenerated(data, i, uint64(m.Succeeded)) - data[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.Succeeded)) + dAtA[i] = 0x30 i++ - i = encodeVarintGenerated(data, i, uint64(m.Failed)) + i = encodeVarintGenerated(dAtA, i, uint64(m.Failed)) return i, nil } -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Generated(dAtA []byte, offset int, v 
uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintGenerated(data []byte, offset int, v uint64) int { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } func (m *Job) Size() (n int) { @@ -424,6 +430,9 @@ func (m *JobSpec) Size() (n int) { } l = m.Template.Size() n += 1 + l + sovGenerated(uint64(l)) + if m.BackoffLimit != nil { + n += 1 + sovGenerated(uint64(*m.BackoffLimit)) + } return n } @@ -511,7 +520,8 @@ func (this *JobSpec) String() string { `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `ManualSelector:` + valueToStringGenerated(this.ManualSelector) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `BackoffLimit:` + valueToStringGenerated(this.BackoffLimit) + `,`, `}`, }, "") return s @@ -539,8 +549,8 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *Job) Unmarshal(data []byte) error { - l := len(data) +func (m *Job) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -552,7 +562,7 @@ func (m *Job) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -580,7 +590,7 @@ func (m *Job) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -594,7 +604,7 @@ func (m *Job) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -610,7 +620,7 @@ func (m *Job) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -624,7 +634,7 @@ func (m *Job) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -640,7 +650,7 @@ func (m *Job) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -654,13 +664,13 @@ func (m *Job) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := 
skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -679,8 +689,8 @@ func (m *Job) Unmarshal(data []byte) error { } return nil } -func (m *JobCondition) Unmarshal(data []byte) error { - l := len(data) +func (m *JobCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -692,7 +702,7 @@ func (m *JobCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -720,7 +730,7 @@ func (m *JobCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -735,7 +745,7 @@ func (m *JobCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = JobConditionType(data[iNdEx:postIndex]) + m.Type = JobConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -749,7 +759,7 @@ func (m *JobCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -764,7 +774,7 @@ func (m *JobCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -778,7 +788,7 @@ func (m *JobCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -792,7 +802,7 @@ func (m *JobCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -808,7 +818,7 @@ func (m *JobCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -822,7 +832,7 @@ func (m *JobCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -838,7 +848,7 @@ func (m *JobCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -853,7 +863,7 @@ func (m *JobCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { @@ -867,7 +877,7 @@ func (m *JobCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -882,11 +892,11 @@ func (m *JobCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = 
preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -905,8 +915,8 @@ func (m *JobCondition) Unmarshal(data []byte) error { } return nil } -func (m *JobList) Unmarshal(data []byte) error { - l := len(data) +func (m *JobList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -918,7 +928,7 @@ func (m *JobList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -946,7 +956,7 @@ func (m *JobList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -960,7 +970,7 @@ func (m *JobList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -976,7 +986,7 @@ func (m *JobList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -991,13 +1001,13 @@ func (m *JobList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, Job{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1016,8 +1026,8 @@ func (m *JobList) Unmarshal(data []byte) error { } return nil } -func (m *JobSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *JobSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1029,7 +1039,7 @@ func (m *JobSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1057,7 +1067,7 @@ func (m *JobSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -1077,7 +1087,7 @@ func (m *JobSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -1097,7 +1107,7 @@ func (m *JobSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -1117,7 +1127,7 @@ func (m *JobSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1134,7 +1144,7 @@ func (m *JobSpec) Unmarshal(data []byte) error { if m.Selector == nil { m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1150,7 +1160,7 @@ func (m *JobSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b 
:= data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1171,7 +1181,7 @@ func (m *JobSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1185,13 +1195,33 @@ func (m *JobSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BackoffLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.BackoffLimit = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1210,8 +1240,8 @@ func (m *JobSpec) Unmarshal(data []byte) error { } return nil } -func (m *JobStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *JobStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1223,7 +1253,7 @@ func (m *JobStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1251,7 +1281,7 @@ func (m *JobStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1266,7 +1296,7 @@ func (m *JobStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Conditions = append(m.Conditions, JobCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1282,7 +1312,7 @@ func (m *JobStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1299,7 +1329,7 @@ func (m *JobStatus) Unmarshal(data []byte) error { if m.StartTime == nil { m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } - if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1315,7 +1345,7 @@ func (m *JobStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1332,7 +1362,7 @@ func (m *JobStatus) Unmarshal(data []byte) error { if m.CompletionTime == nil { m.CompletionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } - if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.CompletionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1348,7 +1378,7 @@ func (m *JobStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Active |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -1367,7 +1397,7 @@ func (m 
*JobStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Succeeded |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -1386,7 +1416,7 @@ func (m *JobStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Failed |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -1395,7 +1425,7 @@ func (m *JobStatus) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1414,8 +1444,8 @@ func (m *JobStatus) Unmarshal(data []byte) error { } return nil } -func skipGenerated(data []byte) (n int, err error) { - l := len(data) +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -1426,7 +1456,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1444,7 +1474,7 @@ func skipGenerated(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -1461,7 +1491,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1484,7 +1514,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1495,7 +1525,7 @@ func skipGenerated(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipGenerated(data[start:]) + next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } @@ -1519,62 +1549,67 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -var fileDescriptorGenerated = []byte{ - // 885 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x54, 0xdd, 0x6e, 0xe3, 0x44, - 0x14, 0xce, 0x4f, 0xd3, 0x26, 0x93, 0xb6, 0xbb, 0x8c, 0x54, 0x29, 0xf4, 0x22, 0x59, 0x05, 0x84, - 0x0a, 0xda, 0xb5, 0x49, 0xbb, 0x42, 0x88, 0x0b, 0x24, 0x5c, 0x84, 0x44, 0xd5, 0xb2, 0xd5, 0xa4, - 0x02, 0x89, 0x1f, 0x89, 0xb1, 0x7d, 0x9a, 0x0e, 0xb5, 0x3d, 0x96, 0x67, 0x12, 0xd1, 0x3b, 0xde, - 0x00, 0x1e, 0x06, 0x21, 0x1e, 0xa1, 0x97, 0xbd, 0xe4, 0x2a, 0xa2, 0xe6, 0x2d, 0xf6, 0x0a, 0xcd, - 0x78, 0xfc, 0x93, 0x4d, 0x0a, 0xd9, 0xbd, 0xb3, 0xcf, 0x7c, 0xdf, 0x37, 0x67, 0xce, 0xf9, 0xce, - 0x41, 0x47, 0xd7, 0x1f, 0x0b, 0x8b, 0x71, 0xfb, 0x7a, 0xea, 0x42, 0x12, 0x81, 0x04, 0x61, 0xc7, - 0xd7, 0x13, 0x9b, 0xc6, 0x4c, 0xd8, 0x2e, 0x95, 0xde, 0x95, 0x3d, 0x1b, 0xd9, 0x13, 0x88, 0x20, - 0xa1, 0x12, 0x7c, 0x2b, 0x4e, 0xb8, 0xe4, 0xf8, 0x9d, 0x8c, 0x64, 0x95, 0x24, 0x2b, 0xbe, 0x9e, - 0x58, 0x8a, 0x64, 0x69, 0x92, 0x35, 0x1b, 0xed, 0x3f, 0x9b, 0x30, 0x79, 0x35, 0x75, 0x2d, 0x8f, - 0x87, 0xf6, 0x84, 0x4f, 0xb8, 0xad, 0xb9, 0xee, 0xf4, 0x52, 0xff, 0xe9, 0x1f, 0xfd, 0x95, 0x69, - 0xee, 0x3f, 0x37, 0x89, 0xd0, 0x98, 0x85, 0xd4, 0xbb, 0x62, 0x11, 0x24, 0x37, 0x65, 0x2a, 0x21, - 0x48, 0xba, 0x22, 0x93, 0x7d, 0xfb, 0x21, 0x56, 0x32, 0x8d, 0x24, 0x0b, 0x61, 0x89, 0xf0, 0xd1, - 0xff, 0x11, 0x84, 0x77, 0x05, 0x21, 0x5d, 0xe2, 0x1d, 0x3d, 0xc4, 0x9b, 0x4a, 0x16, 0xd8, 0x2c, - 0x92, 0x42, 0x26, 0x4b, 0xa4, 
0xca, 0x9b, 0x04, 0x24, 0x33, 0x48, 0xca, 0x07, 0xc1, 0xcf, 0x34, - 0x8c, 0x03, 0x58, 0xf5, 0xa6, 0xa7, 0x0f, 0xb6, 0x64, 0x05, 0x7a, 0xf8, 0x6b, 0x03, 0x35, 0x4f, - 0xb8, 0x8b, 0x7f, 0x44, 0x6d, 0x55, 0x24, 0x9f, 0x4a, 0xda, 0xab, 0x3f, 0xa9, 0x1f, 0x74, 0x0f, - 0x3f, 0xb4, 0x4c, 0x9b, 0xaa, 0x39, 0x97, 0x8d, 0x52, 0x68, 0x6b, 0x36, 0xb2, 0x5e, 0xb8, 0x3f, - 0x81, 0x27, 0xcf, 0x40, 0x52, 0x07, 0xdf, 0xce, 0x07, 0xb5, 0x74, 0x3e, 0x40, 0x65, 0x8c, 0x14, - 0xaa, 0xf8, 0x2b, 0xb4, 0x21, 0x62, 0xf0, 0x7a, 0x0d, 0xad, 0xfe, 0xd4, 0x5a, 0xc3, 0x04, 0xd6, - 0x09, 0x77, 0xc7, 0x31, 0x78, 0xce, 0xb6, 0x51, 0xde, 0x50, 0x7f, 0x44, 0xeb, 0xe0, 0xaf, 0xd1, - 0xa6, 0x90, 0x54, 0x4e, 0x45, 0xaf, 0xa9, 0x15, 0xad, 0xb5, 0x15, 0x35, 0xcb, 0xd9, 0x35, 0x9a, - 0x9b, 0xd9, 0x3f, 0x31, 0x6a, 0xc3, 0xbb, 0x26, 0xda, 0x3e, 0xe1, 0xee, 0x31, 0x8f, 0x7c, 0x26, - 0x19, 0x8f, 0xf0, 0x73, 0xb4, 0x21, 0x6f, 0x62, 0xd0, 0x65, 0xe9, 0x38, 0x4f, 0xf2, 0x54, 0x2e, - 0x6e, 0x62, 0x78, 0x39, 0x1f, 0x3c, 0xae, 0x62, 0x55, 0x8c, 0x68, 0x74, 0x25, 0xbd, 0x86, 0xe6, - 0x7d, 0xba, 0x78, 0xdd, 0xcb, 0xf9, 0xe0, 0x3f, 0x1b, 0x65, 0x15, 0x9a, 0x8b, 0xe9, 0xe1, 0x09, - 0xda, 0x09, 0xa8, 0x90, 0xe7, 0x09, 0x77, 0xe1, 0x82, 0x85, 0x60, 0x5e, 0xff, 0xc1, 0x7a, 0xdd, - 0x52, 0x0c, 0x67, 0xcf, 0xa4, 0xb2, 0x73, 0x5a, 0x15, 0x22, 0x8b, 0xba, 0x78, 0x86, 0xb0, 0x0a, - 0x5c, 0x24, 0x34, 0x12, 0xd9, 0xe3, 0xd4, 0x6d, 0x1b, 0xaf, 0x7d, 0xdb, 0xbe, 0xb9, 0x0d, 0x9f, - 0x2e, 0xa9, 0x91, 0x15, 0x37, 0xe0, 0xf7, 0xd0, 0x66, 0x02, 0x54, 0xf0, 0xa8, 0xd7, 0xd2, 0x85, - 0x2b, 0xfa, 0x44, 0x74, 0x94, 0x98, 0x53, 0xfc, 0x3e, 0xda, 0x0a, 0x41, 0x08, 0x3a, 0x81, 0xde, - 0xa6, 0x06, 0x3e, 0x32, 0xc0, 0xad, 0xb3, 0x2c, 0x4c, 0xf2, 0xf3, 0xe1, 0x1f, 0x75, 0xb4, 0x75, - 0xc2, 0xdd, 0x53, 0x26, 0x24, 0xfe, 0x7e, 0xc9, 0xe8, 0xd6, 0x7a, 0x8f, 0x51, 0x6c, 0x6d, 0xf3, - 0xc7, 0xe6, 0x9e, 0x76, 0x1e, 0xa9, 0x98, 0xfc, 0x0c, 0xb5, 0x98, 0x84, 0x50, 0x35, 0xbd, 0x79, - 0xd0, 0x3d, 0x3c, 0x58, 0xd7, 0x93, 0xce, 0x8e, 0x11, 0x6d, 0x7d, 0xa9, 0xe8, 0x24, 0x53, 0x19, - 0xfe, 0xd9, 0xd4, 0x89, 0x2b, 0xd7, 0xe3, 0x11, 0xea, 0xc6, 0x34, 0xa1, 0x41, 0x00, 0x01, 0x13, - 0xa1, 0xce, 0xbd, 0xe5, 0x3c, 0x4a, 0xe7, 0x83, 0xee, 0x79, 0x19, 0x26, 0x55, 0x8c, 0xa2, 0x78, - 0x5c, 0xed, 0x09, 0x55, 0xdc, 0xcc, 0x88, 0x86, 0x72, 0x5c, 0x86, 0x49, 0x15, 0x83, 0x5f, 0xa0, - 0x3d, 0xea, 0x49, 0x36, 0x83, 0xcf, 0x81, 0xfa, 0x01, 0x8b, 0x60, 0x0c, 0x1e, 0x8f, 0xfc, 0x6c, - 0xc8, 0x9a, 0xce, 0xdb, 0xe9, 0x7c, 0xb0, 0xf7, 0xd9, 0x2a, 0x00, 0x59, 0xcd, 0xc3, 0x3f, 0xa0, - 0xb6, 0x80, 0x00, 0x3c, 0xc9, 0x13, 0x63, 0x9e, 0xa3, 0x35, 0xeb, 0x4d, 0x5d, 0x08, 0xc6, 0x86, - 0xea, 0x6c, 0xab, 0x82, 0xe7, 0x7f, 0xa4, 0x90, 0xc4, 0x9f, 0xa0, 0xdd, 0x90, 0x46, 0x53, 0x5a, - 0x20, 0xb5, 0x6b, 0xda, 0x0e, 0x4e, 0xe7, 0x83, 0xdd, 0xb3, 0x85, 0x13, 0xf2, 0x0a, 0x12, 0x7f, - 0x87, 0xda, 0x12, 0xc2, 0x38, 0xa0, 0x32, 0xb3, 0x50, 0xf7, 0xf0, 0xd9, 0xc3, 0xfd, 0x52, 0x29, - 0x9d, 0x73, 0xff, 0xc2, 0x10, 0xf4, 0x5a, 0x2a, 0x9c, 0x90, 0x47, 0x49, 0x21, 0x38, 0xfc, 0xbd, - 0x89, 0x3a, 0xc5, 0xb2, 0xc1, 0x80, 0x90, 0x97, 0x0f, 0xb4, 0xe8, 0xd5, 0xb5, 0x39, 0x46, 0xeb, - 0x9a, 0xa3, 0x58, 0x05, 0xe5, 0x86, 0x2d, 0x42, 0x82, 0x54, 0x84, 0xf1, 0x37, 0xa8, 0x23, 0x24, - 0x4d, 0xa4, 0x1e, 0xd5, 0xc6, 0x6b, 0x8f, 0xea, 0x4e, 0x3a, 0x1f, 0x74, 0xc6, 0xb9, 0x00, 0x29, - 0xb5, 0xf0, 0x25, 0xda, 0x2d, 0x5d, 0xf2, 0x86, 0x6b, 0x47, 0xb7, 0xe4, 0x78, 0x41, 0x85, 0xbc, - 0xa2, 0xaa, 0x86, 0x3f, 0xb3, 0x91, 0xf6, 0x4a, 0xab, 0x1c, 0xfe, 0xcc, 0x73, 0xc4, 0x9c, 0x62, - 0x1b, 0x75, 0xc4, 0xd4, 0xf3, 0x00, 0x7c, 0xf0, 0x75, 
0xc7, 0x5b, 0xce, 0x5b, 0x06, 0xda, 0x19, - 0xe7, 0x07, 0xa4, 0xc4, 0x28, 0xe1, 0x4b, 0xca, 0x02, 0xf0, 0x75, 0xa7, 0x2b, 0xc2, 0x5f, 0xe8, - 0x28, 0x31, 0xa7, 0xce, 0xbb, 0xb7, 0xf7, 0xfd, 0xda, 0xdd, 0x7d, 0xbf, 0xf6, 0xd7, 0x7d, 0xbf, - 0xf6, 0x4b, 0xda, 0xaf, 0xdf, 0xa6, 0xfd, 0xfa, 0x5d, 0xda, 0xaf, 0xff, 0x9d, 0xf6, 0xeb, 0xbf, - 0xfd, 0xd3, 0xaf, 0x7d, 0xdb, 0x98, 0x8d, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xe7, 0x0a, - 0x8d, 0xf7, 0x08, 0x00, 0x00, +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 907 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x41, 0x6f, 0xe3, 0x44, + 0x18, 0x8d, 0x9b, 0xa6, 0x4d, 0x26, 0x69, 0x77, 0x19, 0x54, 0x29, 0x54, 0xc8, 0x59, 0x82, 0x84, + 0x0a, 0x12, 0x36, 0xe9, 0x56, 0x08, 0x21, 0x40, 0xc2, 0x45, 0x2b, 0x51, 0xa5, 0xda, 0x32, 0x29, + 0x42, 0x42, 0x20, 0x31, 0xb6, 0xbf, 0xa4, 0x43, 0x6c, 0x8f, 0xe5, 0x99, 0x44, 0xea, 0x8d, 0x9f, + 0xc0, 0x8f, 0x40, 0xfc, 0x14, 0xd4, 0xe3, 0x1e, 0xf7, 0x14, 0x51, 0xc3, 0x9d, 0xfb, 0x9e, 0xd0, + 0x8c, 0x1d, 0xdb, 0x69, 0x13, 0xd1, 0xe5, 0x66, 0xbf, 0x79, 0xef, 0x7d, 0x33, 0xf3, 0xbd, 0xf9, + 0xd0, 0x67, 0xd3, 0x4f, 0x84, 0xc5, 0xb8, 0x3d, 0x9d, 0xb9, 0x90, 0x44, 0x20, 0x41, 0xd8, 0x73, + 0x88, 0x7c, 0x9e, 0xd8, 0xf9, 0x02, 0x8d, 0x99, 0xed, 0x52, 0xe9, 0x5d, 0xd9, 0xf3, 0x81, 0x3d, + 0x81, 0x08, 0x12, 0x2a, 0xc1, 0xb7, 0xe2, 0x84, 0x4b, 0x8e, 0xdf, 0xcc, 0x48, 0x16, 0x8d, 0x99, + 0xa5, 0x49, 0xd6, 0x7c, 0x70, 0xf8, 0xe1, 0x84, 0xc9, 0xab, 0x99, 0x6b, 0x79, 0x3c, 0xb4, 0x27, + 0x7c, 0xc2, 0x6d, 0xcd, 0x75, 0x67, 0x63, 0xfd, 0xa7, 0x7f, 0xf4, 0x57, 0xe6, 0x71, 0xd8, 0xaf, + 0x14, 0xf2, 0x78, 0x02, 0x6b, 0xea, 0x1c, 0x9e, 0x94, 0x9c, 0x90, 0x7a, 0x57, 0x2c, 0x82, 0xe4, + 0xda, 0x8e, 0xa7, 0x13, 0x05, 0x08, 0x3b, 0x04, 0x49, 0xd7, 0xa9, 0xec, 0x4d, 0xaa, 0x64, 0x16, + 0x49, 0x16, 0xc2, 0x3d, 0xc1, 0xc7, 0xff, 0x25, 0x10, 0xde, 0x15, 0x84, 0xf4, 0x9e, 0xee, 0xe9, + 0x26, 0xdd, 0x4c, 0xb2, 0xc0, 0x66, 0x91, 0x14, 0x32, 0xb9, 0x2b, 0xea, 0xff, 0x63, 0xa0, 0xfa, + 0x19, 0x77, 0xf1, 0x4f, 0xa8, 0xa9, 0x0e, 0xe0, 0x53, 0x49, 0xbb, 0xc6, 0x13, 0xe3, 0xa8, 0x7d, + 0xfc, 0x91, 0x55, 0x5e, 0x6b, 0xe1, 0x67, 0xc5, 0xd3, 0x89, 0x02, 0x84, 0xa5, 0xd8, 0xd6, 0x7c, + 0x60, 0x3d, 0x77, 0x7f, 0x06, 0x4f, 0x9e, 0x83, 0xa4, 0x0e, 0xbe, 0x59, 0xf4, 0x6a, 0xe9, 0xa2, + 0x87, 0x4a, 0x8c, 0x14, 0xae, 0xf8, 0x0b, 0xb4, 0x2d, 0x62, 0xf0, 0xba, 0x5b, 0xda, 0xfd, 0x6d, + 0x6b, 0x4d, 0xd3, 0xac, 0x33, 0xee, 0x8e, 0x62, 0xf0, 0x9c, 0x4e, 0xee, 0xb4, 0xad, 0xfe, 0x88, + 0xd6, 0xe1, 0x67, 0x68, 0x47, 0x48, 0x2a, 0x67, 0xa2, 0x5b, 0xd7, 0x0e, 0xe6, 0x46, 0x07, 0xcd, + 0x72, 0xf6, 0x73, 0x8f, 0x9d, 0xec, 0x9f, 0xe4, 0xea, 0xfe, 0x1f, 0x75, 0xd4, 0x39, 0xe3, 0xee, + 0x29, 0x8f, 0x7c, 0x26, 0x19, 0x8f, 0xf0, 0x09, 0xda, 0x96, 0xd7, 0x31, 0xe8, 0x63, 0xb7, 0x9c, + 0x27, 0xcb, 0xd2, 0x97, 0xd7, 0x31, 0xbc, 0x5a, 0xf4, 0x1e, 0x57, 0xb9, 0x0a, 0x23, 0x9a, 0x8d, + 0x87, 0xc5, 0x76, 0xb6, 0xb4, 0xee, 0x64, 0xb5, 0xdc, 0xab, 0x45, 0x6f, 0x4d, 0xa4, 0xac, 0xc2, + 0x69, 0x75, 0x53, 0x78, 0x82, 0xf6, 0x02, 0x2a, 0xe4, 0x45, 0xc2, 0x5d, 0xb8, 0x64, 0x21, 0xe4, + 0x67, 0xfc, 0xe0, 0x61, 0x3d, 0x50, 0x0a, 0xe7, 0x20, 0xdf, 0xc0, 0xde, 0xb0, 0x6a, 0x44, 0x56, + 0x7d, 0xf1, 0x1c, 0x61, 0x05, 0x5c, 0x26, 0x34, 0x12, 0xd9, 0x91, 0x54, 0xb5, 0xed, 0xd7, 0xae, + 0x76, 0x98, 0x57, 0xc3, 0xc3, 0x7b, 0x6e, 0x64, 0x4d, 0x05, 0xfc, 0x1e, 0xda, 0x49, 0x80, 0x0a, + 0x1e, 0x75, 0x1b, 0xfa, 
0xba, 0x8a, 0xee, 0x10, 0x8d, 0x92, 0x7c, 0x15, 0xbf, 0x8f, 0x76, 0x43, + 0x10, 0x82, 0x4e, 0xa0, 0xbb, 0xa3, 0x89, 0x8f, 0x72, 0xe2, 0xee, 0x79, 0x06, 0x93, 0xe5, 0x7a, + 0xff, 0x77, 0x03, 0xed, 0x9e, 0x71, 0x77, 0xc8, 0x84, 0xc4, 0x3f, 0xdc, 0x8b, 0xaf, 0xf5, 0xb0, + 0xc3, 0x28, 0xb5, 0x0e, 0xef, 0xe3, 0xbc, 0x4e, 0x73, 0x89, 0x54, 0xa2, 0xfb, 0x39, 0x6a, 0x30, + 0x09, 0xa1, 0x6a, 0x75, 0xfd, 0xa8, 0x7d, 0xdc, 0xdd, 0x94, 0x3c, 0x67, 0x2f, 0x37, 0x69, 0x7c, + 0xad, 0xe8, 0x24, 0x53, 0xf5, 0xff, 0xae, 0xeb, 0x8d, 0xaa, 0x2c, 0xe3, 0x01, 0x6a, 0xc7, 0x34, + 0xa1, 0x41, 0x00, 0x01, 0x13, 0xa1, 0xde, 0x6b, 0xc3, 0x79, 0x94, 0x2e, 0x7a, 0xed, 0x8b, 0x12, + 0x26, 0x55, 0x8e, 0x92, 0x78, 0x3c, 0x8c, 0x03, 0x50, 0x97, 0x99, 0xc5, 0x2d, 0x97, 0x9c, 0x96, + 0x30, 0xa9, 0x72, 0xf0, 0x73, 0x74, 0x40, 0x3d, 0xc9, 0xe6, 0xf0, 0x15, 0x50, 0x3f, 0x60, 0x11, + 0x8c, 0xc0, 0xe3, 0x91, 0x9f, 0x3d, 0x9d, 0xba, 0xf3, 0x56, 0xba, 0xe8, 0x1d, 0x7c, 0xb9, 0x8e, + 0x40, 0xd6, 0xeb, 0xf0, 0x8f, 0xa8, 0x29, 0x20, 0x00, 0x4f, 0xf2, 0x24, 0x0f, 0xcb, 0xd3, 0x07, + 0xde, 0x2f, 0x75, 0x21, 0x18, 0xe5, 0x52, 0xa7, 0xa3, 0x2e, 0x78, 0xf9, 0x47, 0x0a, 0x4b, 0xfc, + 0x29, 0xda, 0x0f, 0x69, 0x34, 0xa3, 0x05, 0x53, 0xa7, 0xa4, 0xe9, 0xe0, 0x74, 0xd1, 0xdb, 0x3f, + 0x5f, 0x59, 0x21, 0x77, 0x98, 0xf8, 0x1b, 0xd4, 0x94, 0x10, 0xc6, 0x01, 0x95, 0x59, 0x64, 0xda, + 0xc7, 0xef, 0x56, 0xfb, 0xa3, 0x5e, 0x9e, 0xda, 0xc8, 0x05, 0xf7, 0x2f, 0x73, 0x9a, 0x1e, 0x31, + 0x45, 0xbf, 0x97, 0x28, 0x29, 0x6c, 0xf0, 0x09, 0xea, 0xb8, 0xd4, 0x9b, 0xf2, 0xf1, 0x78, 0xc8, + 0x42, 0x26, 0xbb, 0xbb, 0xfa, 0xca, 0x1f, 0xa7, 0x8b, 0x5e, 0xc7, 0xa9, 0xe0, 0x64, 0x85, 0xd5, + 0xff, 0xad, 0x8e, 0x5a, 0xc5, 0xf8, 0xc1, 0xdf, 0x22, 0xe4, 0x2d, 0x1f, 0xbb, 0xe8, 0x1a, 0x3a, + 0x38, 0xef, 0x6c, 0x0a, 0x4e, 0x31, 0x16, 0xca, 0x19, 0x5a, 0x40, 0x82, 0x54, 0x8c, 0xf0, 0x77, + 0xa8, 0x25, 0x24, 0x4d, 0xa4, 0x7e, 0xb6, 0x5b, 0xaf, 0xfd, 0x6c, 0xf7, 0xd2, 0x45, 0xaf, 0x35, + 0x5a, 0x1a, 0x90, 0xd2, 0x0b, 0x8f, 0xd1, 0x7e, 0x99, 0xa0, 0xff, 0x39, 0x82, 0x74, 0xbb, 0x4e, + 0x57, 0x5c, 0xc8, 0x1d, 0x57, 0x35, 0x08, 0xb2, 0x88, 0xe9, 0x1c, 0x35, 0xca, 0x41, 0x90, 0xe5, + 0x91, 0xe4, 0xab, 0xd8, 0x46, 0x2d, 0x31, 0xf3, 0x3c, 0x00, 0x1f, 0x7c, 0x9d, 0x86, 0x86, 0xf3, + 0x46, 0x4e, 0x6d, 0x8d, 0x96, 0x0b, 0xa4, 0xe4, 0x28, 0xe3, 0x31, 0x65, 0x01, 0xf8, 0x3a, 0x05, + 0x15, 0xe3, 0x67, 0x1a, 0x25, 0xf9, 0xaa, 0x73, 0x74, 0x73, 0x6b, 0xd6, 0x5e, 0xdc, 0x9a, 0xb5, + 0x97, 0xb7, 0x66, 0xed, 0x97, 0xd4, 0x34, 0x6e, 0x52, 0xd3, 0x78, 0x91, 0x9a, 0xc6, 0xcb, 0xd4, + 0x34, 0xfe, 0x4c, 0x4d, 0xe3, 0xd7, 0xbf, 0xcc, 0xda, 0xf7, 0x5b, 0xf3, 0xc1, 0xbf, 0x01, 0x00, + 0x00, 0xff, 0xff, 0xce, 0x80, 0xf2, 0xbe, 0x96, 0x08, 0x00, 0x00, } diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto similarity index 65% rename from vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.proto rename to vendor/k8s.io/api/batch/v1/generated.proto index 283da1c8d..c4797a16b 100644 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,14 +19,13 @@ limitations under the License. 
syntax = 'proto2'; -package k8s.io.kubernetes.pkg.apis.batch.v1; +package k8s.io.api.batch.v1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; @@ -34,17 +33,17 @@ option go_package = "v1"; // Job represents the configuration of a single job. message Job { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // Specification of the desired behavior of a job. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional JobSpec spec = 2; - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // Current status of a job. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional JobStatus status = 3; } @@ -76,46 +75,51 @@ message JobCondition { // JobList is a collection of jobs. message JobList { - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of Job. + // items is the list of Jobs. repeated Job items = 2; } // JobSpec describes how the job execution will look like. message JobSpec { - // Parallelism specifies the maximum desired number of pods the job should + // Specifies the maximum desired number of pods the job should // run at any given time. The actual number of pods running in steady state will // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), // i.e. when the work left to do is less than max parallelism. - // More info: http://kubernetes.io/docs/user-guide/jobs + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional optional int32 parallelism = 1; - // Completions specifies the desired number of successfully finished pods the + // Specifies the desired number of successfully finished pods the // job should be run with. Setting to nil means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. 
- // More info: http://kubernetes.io/docs/user-guide/jobs + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional optional int32 completions = 2; - // Optional duration in seconds relative to the startTime that the job may be active + // Specifies the duration in seconds relative to the startTime that the job may be active // before the system tries to terminate it; value must be positive integer // +optional optional int64 activeDeadlineSeconds = 3; - // Selector is a label query over pods that should match the pod count. + // Specifies the number of retries before marking this job failed. + // Defaults to 6 + // +optional + optional int32 backoffLimit = 7; + + // A label query over pods that should match the pod count. // Normally, the system sets this field for you. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; - // ManualSelector controls generation of pod labels and pod selectors. + // manualSelector controls generation of pod labels and pod selectors. // Leave `manualSelector` unset unless you are certain what you are doing. // When false or unset, the system pick labels unique to this job // and appends those labels to the pod template. When true, @@ -124,44 +128,45 @@ message JobSpec { // and other jobs to not function correctly. However, You may see // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` // API. - // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector // +optional optional bool manualSelector = 5; - // Template is the object that describes the pod that will be created when - // executing a job. - // More info: http://kubernetes.io/docs/user-guide/jobs - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; + // Describes the pod that will be created when executing a job. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + optional k8s.io.api.core.v1.PodTemplateSpec template = 6; } // JobStatus represents the current state of a Job. message JobStatus { - // Conditions represent the latest available observations of an object's current state. - // More info: http://kubernetes.io/docs/user-guide/jobs + // The latest available observations of an object's current state. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional + // +patchMergeKey=type + // +patchStrategy=merge repeated JobCondition conditions = 1; - // StartTime represents time when the job was acknowledged by the Job Manager. + // Represents time when the job was acknowledged by the job controller. // It is not guaranteed to be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; - // CompletionTime represents time when the job was completed. It is not guaranteed to + // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. 
// +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; - // Active is the number of actively running pods. + // The number of actively running pods. // +optional optional int32 active = 4; - // Succeeded is the number of pods which reached Phase Succeeded. + // The number of pods which reached phase Succeeded. // +optional optional int32 succeeded = 5; - // Failed is the number of pods which reached Phase Failed. + // The number of pods which reached phase Failed. // +optional optional int32 failed = 6; } diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/register.go b/vendor/k8s.io/api/batch/v1/register.go similarity index 79% rename from vendor/k8s.io/client-go/pkg/apis/batch/v1/register.go rename to vendor/k8s.io/api/batch/v1/register.go index 4ba570d1b..32fa51f0e 100644 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v1/register.go +++ b/vendor/k8s.io/api/batch/v1/register.go @@ -34,11 +34,14 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Job{}, diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go similarity index 67% rename from vendor/k8s.io/client-go/pkg/apis/batch/v1/types.go rename to vendor/k8s.io/api/batch/v1/types.go index 734e6204d..84abb1a90 100644 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -17,75 +17,88 @@ limitations under the License. package v1 import ( + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api/v1" ) -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Job represents the configuration of a single job. type Job struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // Specification of the desired behavior of a job. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // Current status of a job. 
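// ----------------------------------------------------------------------------
// [Editor's aside, not part of the vendored patch] The register.go hunk above
// trims the SchemeBuilder wiring down to a plain SchemeBuilder/AddToScheme pair
// in k8s.io/api/batch/v1. A minimal, hypothetical sketch of how a consumer might
// register these types on a fresh scheme (newBatchScheme is an illustrative
// name, not something this patch adds):

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// newBatchScheme returns a Scheme that knows the batch/v1 kinds (Job, JobList)
// added by addKnownTypes.
func newBatchScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := batchv1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}
// ----------------------------------------------------------------------------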
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // JobList is a collection of jobs. type JobList struct { metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of Job. + // items is the list of Jobs. Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` } // JobSpec describes how the job execution will look like. type JobSpec struct { - // Parallelism specifies the maximum desired number of pods the job should + // Specifies the maximum desired number of pods the job should // run at any given time. The actual number of pods running in steady state will // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), // i.e. when the work left to do is less than max parallelism. - // More info: http://kubernetes.io/docs/user-guide/jobs + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` - // Completions specifies the desired number of successfully finished pods the + // Specifies the desired number of successfully finished pods the // job should be run with. Setting to nil means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. - // More info: http://kubernetes.io/docs/user-guide/jobs + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` - // Optional duration in seconds relative to the startTime that the job may be active + // Specifies the duration in seconds relative to the startTime that the job may be active // before the system tries to terminate it; value must be positive integer // +optional ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` - // Selector is a label query over pods that should match the pod count. + // Specifies the number of retries before marking this job failed. + // Defaults to 6 + // +optional + BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"` + + // TODO enabled it when https://github.com/kubernetes/kubernetes/issues/28486 has been fixed + // Optional number of failed pods to retain. + // +optional + // FailedPodsLimit *int32 `json:"failedPodsLimit,omitempty" protobuf:"varint,9,opt,name=failedPodsLimit"` + + // A label query over pods that should match the pod count. // Normally, the system sets this field for you. 
- // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` - // ManualSelector controls generation of pod labels and pod selectors. + // manualSelector controls generation of pod labels and pod selectors. // Leave `manualSelector` unset unless you are certain what you are doing. // When false or unset, the system pick labels unique to this job // and appends those labels to the pod template. When true, @@ -94,45 +107,45 @@ type JobSpec struct { // and other jobs to not function correctly. However, You may see // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` // API. - // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector // +optional ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` - // Template is the object that describes the pod that will be created when - // executing a job. - // More info: http://kubernetes.io/docs/user-guide/jobs + // Describes the pod that will be created when executing a job. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` } // JobStatus represents the current state of a Job. type JobStatus struct { - - // Conditions represent the latest available observations of an object's current state. - // More info: http://kubernetes.io/docs/user-guide/jobs + // The latest available observations of an object's current state. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional + // +patchMergeKey=type + // +patchStrategy=merge Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` - // StartTime represents time when the job was acknowledged by the Job Manager. + // Represents time when the job was acknowledged by the job controller. // It is not guaranteed to be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. // +optional StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` - // CompletionTime represents time when the job was completed. It is not guaranteed to + // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. // +optional CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` - // Active is the number of actively running pods. + // The number of actively running pods. // +optional Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` - // Succeeded is the number of pods which reached Phase Succeeded. + // The number of pods which reached phase Succeeded. // +optional Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` - // Failed is the number of pods which reached Phase Failed. + // The number of pods which reached phase Failed. 
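// ----------------------------------------------------------------------------
// [Editor's aside, not part of the vendored patch] The JobSpec fields documented
// above -- including the newly vendored backoffLimit -- are ordinary pointer and
// struct fields in k8s.io/api/batch/v1. A hypothetical sketch of filling them in
// (the job name, image and counts below are made up for illustration):

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func int32Ptr(i int32) *int32 { return &i }

// exampleJob builds a Job that runs up to 2 pods in parallel until 4 succeed,
// retrying failed pods at most 6 times (the documented default for backoffLimit).
func exampleJob() *batchv1.Job {
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "example-job"},
		Spec: batchv1.JobSpec{
			Parallelism:  int32Ptr(2),
			Completions:  int32Ptr(4),
			BackoffLimit: int32Ptr(6),
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{
						{Name: "worker", Image: "busybox", Command: []string{"true"}},
					},
				},
			},
		},
	}
}
// ----------------------------------------------------------------------------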
// +optional Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` } @@ -152,7 +165,7 @@ type JobCondition struct { // Type of job condition, Complete or Failed. Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` // Last time the condition was checked. // +optional LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..2bb794a5f --- /dev/null +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Job = map[string]string{ + "": "Job represents the configuration of a single job.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (Job) SwaggerDoc() map[string]string { + return map_Job +} + +var map_JobCondition = map[string]string{ + "": "JobCondition describes current state of a job.", + "type": "Type of job condition, Complete or Failed.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastProbeTime": "Last time the condition was checked.", + "lastTransitionTime": "Last time the condition transit from one status to another.", + "reason": "(brief) reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (JobCondition) SwaggerDoc() map[string]string { + return map_JobCondition +} + +var map_JobList = map[string]string{ + "": "JobList is a collection of jobs.", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "items is the list of Jobs.", +} + +func (JobList) SwaggerDoc() map[string]string { + return map_JobList +} + +var map_JobSpec = map[string]string{ + "": "JobSpec describes how the job execution will look like.", + "parallelism": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", + "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", + "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", + "template": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", +} + +func (JobSpec) SwaggerDoc() map[string]string { + return map_JobSpec +} + +var map_JobStatus = map[string]string{ + "": "JobStatus represents the current state of a Job.", + "conditions": "The latest available observations of an object's current state. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "startTime": "Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. 
It is represented in RFC3339 form and is in UTC.", + "active": "The number of actively running pods.", + "succeeded": "The number of pods which reached phase Succeeded.", + "failed": "The number of pods which reached phase Failed.", +} + +func (JobStatus) SwaggerDoc() map[string]string { + return map_JobStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..3e5250f37 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -0,0 +1,215 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Job) DeepCopyInto(out *Job) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Job. +func (in *Job) DeepCopy() *Job { + if in == nil { + return nil + } + out := new(Job) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Job) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobCondition) DeepCopyInto(out *JobCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobCondition. +func (in *JobCondition) DeepCopy() *JobCondition { + if in == nil { + return nil + } + out := new(JobCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobList) DeepCopyInto(out *JobList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobList. +func (in *JobList) DeepCopy() *JobList { + if in == nil { + return nil + } + out := new(JobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
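// ----------------------------------------------------------------------------
// [Editor's aside, not part of the vendored patch] The deepcopy helpers added in
// this file exist so callers can safely mutate copies of objects that may be
// shared, for example objects read from an informer cache. A small hypothetical
// usage sketch (withBackoffLimit is an illustrative name, not from the patch):

package example

import batchv1 "k8s.io/api/batch/v1"

// withBackoffLimit returns an independent copy of a possibly shared Job with
// spec.backoffLimit overridden; the original Job is left untouched.
func withBackoffLimit(shared *batchv1.Job, limit int32) *batchv1.Job {
	copied := shared.DeepCopy() // generated DeepCopy from zz_generated.deepcopy.go
	copied.Spec.BackoffLimit = &limit
	return copied
}
// ----------------------------------------------------------------------------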
+func (in *JobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobSpec) DeepCopyInto(out *JobSpec) { + *out = *in + if in.Parallelism != nil { + in, out := &in.Parallelism, &out.Parallelism + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Completions != nil { + in, out := &in.Completions, &out.Completions + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.BackoffLimit != nil { + in, out := &in.BackoffLimit, &out.BackoffLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.ManualSelector != nil { + in, out := &in.ManualSelector, &out.ManualSelector + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSpec. +func (in *JobSpec) DeepCopy() *JobSpec { + if in == nil { + return nil + } + out := new(JobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobStatus) DeepCopyInto(out *JobStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]JobCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() + } + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStatus. +func (in *JobStatus) DeepCopy() *JobStatus { + if in == nil { + return nil + } + out := new(JobStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/batch/v1beta1/doc.go b/vendor/k8s.io/api/batch/v1beta1/doc.go new file mode 100644 index 000000000..43020ed05 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +package v1beta1 // import "k8s.io/api/batch/v1beta1" diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go new file mode 100644 index 000000000..4edf11868 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -0,0 +1,1509 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto + + It has these top-level messages: + CronJob + CronJobList + CronJobSpec + CronJobStatus + JobTemplate + JobTemplateSpec +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_api_core_v1 "k8s.io/api/core/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *CronJob) Reset() { *m = CronJob{} } +func (*CronJob) ProtoMessage() {} +func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *CronJobList) Reset() { *m = CronJobList{} } +func (*CronJobList) ProtoMessage() {} +func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } +func (*CronJobSpec) ProtoMessage() {} +func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } +func (*CronJobStatus) ProtoMessage() {} +func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (*JobTemplate) ProtoMessage() {} +func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (*JobTemplateSpec) ProtoMessage() {} +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func init() { + proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v1beta1.CronJob") + proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v1beta1.CronJobList") + proto.RegisterType((*CronJobSpec)(nil), "k8s.io.api.batch.v1beta1.CronJobSpec") + proto.RegisterType((*CronJobStatus)(nil), "k8s.io.api.batch.v1beta1.CronJobStatus") + proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v1beta1.JobTemplate") + proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1beta1.JobTemplateSpec") +} +func (m *CronJob) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CronJob) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *CronJobList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CronJobList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CronJobSpec) 
MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) + i += copy(dAtA[i:], m.Schedule) + if m.StartingDeadlineSeconds != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) + i += copy(dAtA[i:], m.ConcurrencyPolicy) + if m.Suspend != nil { + dAtA[i] = 0x20 + i++ + if *m.Suspend { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size())) + n5, err := m.JobTemplate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.SuccessfulJobsHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + } + if m.FailedJobsHistoryLimit != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + } + return i, nil +} + +func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Active) > 0 { + for _, msg := range m.Active { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.LastScheduleTime != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size())) + n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *JobTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + 
dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *CronJob) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CronJobList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CronJobSpec) Size() (n int) { + var l int + _ = l + l = len(m.Schedule) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartingDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.StartingDeadlineSeconds)) + } + l = len(m.ConcurrencyPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.Suspend != nil { + n += 2 + } + l = m.JobTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.SuccessfulJobsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.SuccessfulJobsHistoryLimit)) + } + if m.FailedJobsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.FailedJobsHistoryLimit)) + } + return n +} + +func (m *CronJobStatus) Size() (n int) { + var l int + _ = l + if len(m.Active) > 0 { + for _, e := range m.Active { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.LastScheduleTime != nil { + l = m.LastScheduleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *JobTemplate) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobTemplateSpec) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CronJob) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJob{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CronJobList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJobList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*CronJobSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJobSpec{`, + `Schedule:` + fmt.Sprintf("%v", this.Schedule) + `,`, + `StartingDeadlineSeconds:` + valueToStringGenerated(this.StartingDeadlineSeconds) + `,`, + `ConcurrencyPolicy:` + fmt.Sprintf("%v", this.ConcurrencyPolicy) + `,`, + `Suspend:` + valueToStringGenerated(this.Suspend) + `,`, + `JobTemplate:` + strings.Replace(strings.Replace(this.JobTemplate.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, + `SuccessfulJobsHistoryLimit:` + valueToStringGenerated(this.SuccessfulJobsHistoryLimit) + `,`, + `FailedJobsHistoryLimit:` + valueToStringGenerated(this.FailedJobsHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *CronJobStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJobStatus{`, + `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_api_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CronJob) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CronJob{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schedule = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.StartingDeadlineSeconds = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConcurrencyPolicy = ConcurrencyPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Suspend = &b + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobTemplate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.JobTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulJobsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SuccessfulJobsHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailedJobsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FailedJobsHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Active = append(m.Active, k8s_io_api_core_v1.ObjectReference{}) + if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScheduleTime == nil { + m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 784 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 
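// (what follows is the gzip-compressed FileDescriptorProto for this package; the init() above hands it
// to proto.RegisterFile, and the generated Descriptor() methods return it so the original .proto
// definition can be recovered at runtime)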
0x00, 0x02, 0xff, 0xbc, 0x94, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xc7, 0xe3, 0x34, 0xbf, 0x76, 0xc2, 0x42, 0xd7, 0xa0, 0x5d, 0x2b, 0x20, 0x27, 0x64, 0xb5, + 0x22, 0x20, 0x76, 0x4c, 0x0b, 0x42, 0x9c, 0x90, 0xd6, 0x8b, 0x16, 0x28, 0x45, 0x8b, 0x9c, 0x22, + 0x24, 0x54, 0xa1, 0x8e, 0xc7, 0x2f, 0xc9, 0x34, 0xb6, 0xc7, 0xf2, 0x8c, 0x23, 0xe5, 0xc6, 0x85, + 0x3b, 0xff, 0x08, 0x27, 0xfe, 0x89, 0x88, 0x53, 0x8f, 0x3d, 0x45, 0xd4, 0xfc, 0x17, 0x9c, 0x90, + 0x27, 0xce, 0x8f, 0xe6, 0x47, 0x5b, 0x2e, 0xbd, 0x79, 0x9e, 0xbf, 0xdf, 0xcf, 0xbc, 0x79, 0xef, + 0xcd, 0xa0, 0x17, 0xc3, 0x2f, 0x04, 0x66, 0xdc, 0x1a, 0x26, 0x2e, 0xc4, 0x21, 0x48, 0x10, 0xd6, + 0x08, 0x42, 0x8f, 0xc7, 0x56, 0xfe, 0x83, 0x44, 0xcc, 0x72, 0x89, 0xa4, 0x03, 0x6b, 0x74, 0xe0, + 0x82, 0x24, 0x07, 0x56, 0x1f, 0x42, 0x88, 0x89, 0x04, 0x0f, 0x47, 0x31, 0x97, 0x5c, 0x37, 0x66, + 0x4a, 0x4c, 0x22, 0x86, 0x95, 0x12, 0xe7, 0xca, 0xc6, 0xf3, 0x3e, 0x93, 0x83, 0xc4, 0xc5, 0x94, + 0x07, 0x56, 0x9f, 0xf7, 0xb9, 0xa5, 0x0c, 0x6e, 0xd2, 0x53, 0x2b, 0xb5, 0x50, 0x5f, 0x33, 0x50, + 0xe3, 0xe9, 0x96, 0x2d, 0xd7, 0x77, 0x6b, 0xb4, 0x57, 0x44, 0x94, 0xc7, 0xb0, 0x4d, 0xf3, 0xd9, + 0x52, 0x13, 0x10, 0x3a, 0x60, 0x21, 0xc4, 0x63, 0x2b, 0x1a, 0xf6, 0xb3, 0x80, 0xb0, 0x02, 0x90, + 0x64, 0x9b, 0xcb, 0xda, 0xe5, 0x8a, 0x93, 0x50, 0xb2, 0x00, 0x36, 0x0c, 0x9f, 0xdf, 0x66, 0x10, + 0x74, 0x00, 0x01, 0xd9, 0xf0, 0x7d, 0xba, 0xcb, 0x97, 0x48, 0xe6, 0x5b, 0x2c, 0x94, 0x42, 0xc6, + 0xeb, 0xa6, 0xf6, 0x6f, 0x45, 0x54, 0x7d, 0x19, 0xf3, 0xf0, 0x88, 0xbb, 0xfa, 0x19, 0xaa, 0x65, + 0x87, 0xf0, 0x88, 0x24, 0x86, 0xd6, 0xd2, 0x3a, 0xf5, 0xc3, 0x4f, 0xf0, 0xb2, 0x09, 0x0b, 0x26, + 0x8e, 0x86, 0xfd, 0x2c, 0x20, 0x70, 0xa6, 0xc6, 0xa3, 0x03, 0xfc, 0xda, 0x3d, 0x07, 0x2a, 0xbf, + 0x07, 0x49, 0x6c, 0x7d, 0x32, 0x6d, 0x16, 0xd2, 0x69, 0x13, 0x2d, 0x63, 0xce, 0x82, 0xaa, 0x7f, + 0x8d, 0x4a, 0x22, 0x02, 0x6a, 0x14, 0x15, 0xfd, 0x19, 0xde, 0xd5, 0x62, 0x9c, 0xa7, 0xd4, 0x8d, + 0x80, 0xda, 0x6f, 0xe4, 0xc8, 0x52, 0xb6, 0x72, 0x14, 0x40, 0x7f, 0x8d, 0x2a, 0x42, 0x12, 0x99, + 0x08, 0x63, 0x4f, 0xa1, 0x3e, 0xb8, 0x1d, 0xa5, 0xe4, 0xf6, 0x9b, 0x39, 0xac, 0x32, 0x5b, 0x3b, + 0x39, 0xa6, 0xfd, 0xa7, 0x86, 0xea, 0xb9, 0xf2, 0x98, 0x09, 0xa9, 0x9f, 0x6e, 0xd4, 0x02, 0xdf, + 0xad, 0x16, 0x99, 0x5b, 0x55, 0x62, 0x3f, 0xdf, 0xa9, 0x36, 0x8f, 0xac, 0xd4, 0xe1, 0x15, 0x2a, + 0x33, 0x09, 0x81, 0x30, 0x8a, 0xad, 0xbd, 0x4e, 0xfd, 0xf0, 0xfd, 0x5b, 0xb3, 0xb7, 0x1f, 0xe6, + 0xb4, 0xf2, 0xb7, 0x99, 0xcf, 0x99, 0xd9, 0xdb, 0x7f, 0x94, 0x16, 0x59, 0x67, 0xc5, 0xd1, 0x3f, + 0x46, 0xb5, 0x6c, 0x38, 0xbc, 0xc4, 0x07, 0x95, 0xf5, 0x83, 0x65, 0x16, 0xdd, 0x3c, 0xee, 0x2c, + 0x14, 0xfa, 0x8f, 0xe8, 0x89, 0x90, 0x24, 0x96, 0x2c, 0xec, 0x7f, 0x05, 0xc4, 0xf3, 0x59, 0x08, + 0x5d, 0xa0, 0x3c, 0xf4, 0x84, 0x6a, 0xd0, 0x9e, 0xfd, 0x6e, 0x3a, 0x6d, 0x3e, 0xe9, 0x6e, 0x97, + 0x38, 0xbb, 0xbc, 0xfa, 0x29, 0x7a, 0x44, 0x79, 0x48, 0x93, 0x38, 0x86, 0x90, 0x8e, 0x7f, 0xe0, + 0x3e, 0xa3, 0x63, 0xd5, 0xa6, 0x07, 0x36, 0xce, 0xb3, 0x79, 0xf4, 0x72, 0x5d, 0xf0, 0xef, 0xb6, + 0xa0, 0xb3, 0x09, 0xd2, 0x9f, 0xa1, 0xaa, 0x48, 0x44, 0x04, 0xa1, 0x67, 0x94, 0x5a, 0x5a, 0xa7, + 0x66, 0xd7, 0xd3, 0x69, 0xb3, 0xda, 0x9d, 0x85, 0x9c, 0xf9, 0x3f, 0xfd, 0x0c, 0xd5, 0xcf, 0xb9, + 0x7b, 0x02, 0x41, 0xe4, 0x13, 0x09, 0x46, 0x59, 0xb5, 0xf0, 0xc3, 0xdd, 0x75, 0x3e, 0x5a, 0x8a, + 0xd5, 0xd0, 0xbd, 0x9d, 0x67, 0x5a, 0x5f, 0xf9, 0xe1, 0xac, 0x22, 0xf5, 0x5f, 0x50, 0x43, 0x24, + 0x94, 0x82, 0x10, 0xbd, 0xc4, 0x3f, 0xe2, 0xae, 0xf8, 0x86, 0x09, 0xc9, 0xe3, 0xf1, 0x31, 0x0b, + 0x98, 0x34, 0x2a, 0x2d, 0xad, 0x53, 0xb6, 0xcd, 0x74, 0xda, 0x6c, 
0x74, 0x77, 0xaa, 0x9c, 0x1b, + 0x08, 0xba, 0x83, 0x1e, 0xf7, 0x08, 0xf3, 0xc1, 0xdb, 0x60, 0x57, 0x15, 0xbb, 0x91, 0x4e, 0x9b, + 0x8f, 0x5f, 0x6d, 0x55, 0x38, 0x3b, 0x9c, 0xed, 0xbf, 0x34, 0xf4, 0xf0, 0xda, 0x7d, 0xd0, 0xbf, + 0x43, 0x15, 0x42, 0x25, 0x1b, 0x65, 0xf3, 0x92, 0x8d, 0xe2, 0xd3, 0xd5, 0x12, 0x65, 0x0f, 0xe1, + 0xf2, 0x7e, 0x3b, 0xd0, 0x83, 0xac, 0x13, 0xb0, 0xbc, 0x44, 0x2f, 0x94, 0xd5, 0xc9, 0x11, 0xba, + 0x8f, 0xf6, 0x7d, 0x22, 0xe4, 0x7c, 0xd4, 0x4e, 0x58, 0x00, 0xaa, 0x49, 0xf5, 0xc3, 0x8f, 0xee, + 0x76, 0x79, 0x32, 0x87, 0xfd, 0x4e, 0x3a, 0x6d, 0xee, 0x1f, 0xaf, 0x71, 0x9c, 0x0d, 0x72, 0x7b, + 0xa2, 0xa1, 0xd5, 0xee, 0xdc, 0xc3, 0xf3, 0xf5, 0x13, 0xaa, 0xc9, 0xf9, 0x44, 0x15, 0xff, 0xef, + 0x44, 0x2d, 0x6e, 0xe2, 0x62, 0x9c, 0x16, 0xb0, 0xec, 0xf5, 0x79, 0x6b, 0x4d, 0x7f, 0x0f, 0xc7, + 0xf9, 0xf2, 0xda, 0x6b, 0xfc, 0xde, 0xb6, 0xa3, 0xe0, 0x1b, 0x1e, 0x61, 0xfb, 0xf9, 0xe4, 0xca, + 0x2c, 0x5c, 0x5c, 0x99, 0x85, 0xcb, 0x2b, 0xb3, 0xf0, 0x6b, 0x6a, 0x6a, 0x93, 0xd4, 0xd4, 0x2e, + 0x52, 0x53, 0xbb, 0x4c, 0x4d, 0xed, 0xef, 0xd4, 0xd4, 0x7e, 0xff, 0xc7, 0x2c, 0xfc, 0x5c, 0xcd, + 0x0b, 0xf2, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc0, 0x78, 0xe4, 0x62, 0x14, 0x08, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto new file mode 100644 index 000000000..f19af65e6 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -0,0 +1,138 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.batch.v1beta1; + +import "k8s.io/api/batch/v1/generated.proto"; +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// CronJob represents the configuration of a single cron job. +message CronJob { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of a cron job, including the schedule. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional CronJobSpec spec = 2; + + // Current status of a cron job. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional CronJobStatus status = 3; +} + +// CronJobList is a collection of cron jobs. +message CronJobList { + // Standard list metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of CronJobs. + repeated CronJob items = 2; +} + +// CronJobSpec describes how the job execution will look like and when it will actually run. +message CronJobSpec { + // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + optional string schedule = 1; + + // Optional deadline in seconds for starting the job if it misses scheduled + // time for any reason. Missed jobs executions will be counted as failed ones. + // +optional + optional int64 startingDeadlineSeconds = 2; + + // Specifies how to treat concurrent executions of a Job. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one + // +optional + optional string concurrencyPolicy = 3; + + // This flag tells the controller to suspend subsequent executions, it does + // not apply to already started executions. Defaults to false. + // +optional + optional bool suspend = 4; + + // Specifies the job that will be created when executing a CronJob. + optional JobTemplateSpec jobTemplate = 5; + + // The number of successful finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 3. + // +optional + optional int32 successfulJobsHistoryLimit = 6; + + // The number of failed finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 1. + // +optional + optional int32 failedJobsHistoryLimit = 7; +} + +// CronJobStatus represents the current state of a cron job. +message CronJobStatus { + // A list of pointers to currently running jobs. + // +optional + repeated k8s.io.api.core.v1.ObjectReference active = 1; + + // Information when was the last time the job was successfully scheduled. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; +} + +// JobTemplate describes a template for creating copies of a predefined pod. +message JobTemplate { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Defines jobs that will be created from this template. + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional JobTemplateSpec template = 2; +} + +// JobTemplateSpec describes the data a Job should have when created from a template +message JobTemplateSpec { + // Standard object's metadata of the jobs created from this template. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the job. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional k8s.io.api.batch.v1.JobSpec spec = 2; +} + diff --git a/vendor/k8s.io/api/batch/v1beta1/register.go b/vendor/k8s.io/api/batch/v1beta1/register.go new file mode 100644 index 000000000..226de49f4 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "batch" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &JobTemplate{}, + &CronJob{}, + &CronJobList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go new file mode 100644 index 000000000..cb5c9bad2 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1beta1/types.go @@ -0,0 +1,158 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + batchv1 "k8s.io/api/batch/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// JobTemplate describes a template for creating copies of a predefined pod. +type JobTemplate struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Defines jobs that will be created from this template. + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` +} + +// JobTemplateSpec describes the data a Job should have when created from a template +type JobTemplateSpec struct { + // Standard object's metadata of the jobs created from this template. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the job. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CronJob represents the configuration of a single cron job. +type CronJob struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of a cron job, including the schedule. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Current status of a cron job. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CronJobList is a collection of cron jobs. +type CronJobList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of CronJobs. + Items []CronJob `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// CronJobSpec describes how the job execution will look like and when it will actually run. +type CronJobSpec struct { + + // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` + + // Optional deadline in seconds for starting the job if it misses scheduled + // time for any reason. Missed jobs executions will be counted as failed ones. + // +optional + StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` + + // Specifies how to treat concurrent executions of a Job. 
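// (that is, what should happen when a scheduled run comes due while the Job from a previous run is still active)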
+ // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one + // +optional + ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` + + // This flag tells the controller to suspend subsequent executions, it does + // not apply to already started executions. Defaults to false. + // +optional + Suspend *bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"` + + // Specifies the job that will be created when executing a CronJob. + JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"` + + // The number of successful finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 3. + // +optional + SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty" protobuf:"varint,6,opt,name=successfulJobsHistoryLimit"` + + // The number of failed finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 1. + // +optional + FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty" protobuf:"varint,7,opt,name=failedJobsHistoryLimit"` +} + +// ConcurrencyPolicy describes how the job will be handled. +// Only one of the following concurrent policies may be specified. +// If none of the following policies is specified, the default one +// is AllowConcurrent. +type ConcurrencyPolicy string + +const ( + // AllowConcurrent allows CronJobs to run concurrently. + AllowConcurrent ConcurrencyPolicy = "Allow" + + // ForbidConcurrent forbids concurrent runs, skipping next run if previous + // hasn't finished yet. + ForbidConcurrent ConcurrencyPolicy = "Forbid" + + // ReplaceConcurrent cancels currently running job and replaces it with a new one. + ReplaceConcurrent ConcurrencyPolicy = "Replace" +) + +// CronJobStatus represents the current state of a cron job. +type CronJobStatus struct { + // A list of pointers to currently running jobs. + // +optional + Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"` + + // Information when was the last time the job was successfully scheduled. + // +optional + LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"` +} diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..abbdfec01 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,96 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_CronJob = map[string]string{ + "": "CronJob represents the configuration of a single cron job.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (CronJob) SwaggerDoc() map[string]string { + return map_CronJob +} + +var map_CronJobList = map[string]string{ + "": "CronJobList is a collection of cron jobs.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "items is the list of CronJobs.", +} + +func (CronJobList) SwaggerDoc() map[string]string { + return map_CronJobList +} + +var map_CronJobSpec = map[string]string{ + "": "CronJobSpec describes how the job execution will look like and when it will actually run.", + "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", + "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", + "jobTemplate": "Specifies the job that will be created when executing a CronJob.", + "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.", + "failedJobsHistoryLimit": "The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. 
Defaults to 1.", +} + +func (CronJobSpec) SwaggerDoc() map[string]string { + return map_CronJobSpec +} + +var map_CronJobStatus = map[string]string{ + "": "CronJobStatus represents the current state of a cron job.", + "active": "A list of pointers to currently running jobs.", + "lastScheduleTime": "Information when was the last time the job was successfully scheduled.", +} + +func (CronJobStatus) SwaggerDoc() map[string]string { + return map_CronJobStatus +} + +var map_JobTemplate = map[string]string{ + "": "JobTemplate describes a template for creating copies of a predefined pod.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (JobTemplate) SwaggerDoc() map[string]string { + return map_JobTemplate +} + +var map_JobTemplateSpec = map[string]string{ + "": "JobTemplateSpec describes the data a Job should have when created from a template", + "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (JobTemplateSpec) SwaggerDoc() map[string]string { + return map_JobTemplateSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..a33f4ffcf --- /dev/null +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,214 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronJob) DeepCopyInto(out *CronJob) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJob. +func (in *CronJob) DeepCopy() *CronJob { + if in == nil { + return nil + } + out := new(CronJob) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CronJob) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CronJobList) DeepCopyInto(out *CronJobList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CronJob, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobList. +func (in *CronJobList) DeepCopy() *CronJobList { + if in == nil { + return nil + } + out := new(CronJobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CronJobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronJobSpec) DeepCopyInto(out *CronJobSpec) { + *out = *in + if in.StartingDeadlineSeconds != nil { + in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + in.JobTemplate.DeepCopyInto(&out.JobTemplate) + if in.SuccessfulJobsHistoryLimit != nil { + in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.FailedJobsHistoryLimit != nil { + in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobSpec. +func (in *CronJobSpec) DeepCopy() *CronJobSpec { + if in == nil { + return nil + } + out := new(CronJobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronJobStatus) DeepCopyInto(out *CronJobStatus) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = make([]v1.ObjectReference, len(*in)) + copy(*out, *in) + } + if in.LastScheduleTime != nil { + in, out := &in.LastScheduleTime, &out.LastScheduleTime + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobStatus. +func (in *CronJobStatus) DeepCopy() *CronJobStatus { + if in == nil { + return nil + } + out := new(CronJobStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobTemplate) DeepCopyInto(out *JobTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplate. +func (in *JobTemplate) DeepCopy() *JobTemplate { + if in == nil { + return nil + } + out := new(JobTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *JobTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplateSpec. +func (in *JobTemplateSpec) DeepCopy() *JobTemplateSpec { + if in == nil { + return nil + } + out := new(JobTemplateSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/batch/v2alpha1/doc.go b/vendor/k8s.io/api/batch/v2alpha1/doc.go new file mode 100644 index 000000000..f4ed01ad8 --- /dev/null +++ b/vendor/k8s.io/api/batch/v2alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +package v2alpha1 // import "k8s.io/api/batch/v2alpha1" diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go similarity index 69% rename from vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.pb.go rename to vendor/k8s.io/api/batch/v2alpha1/generated.pb.go index 81dab9908..3fcbf3425 100644 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,14 +15,14 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto // DO NOT EDIT! /* Package v2alpha1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto + k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto It has these top-level messages: CronJob @@ -38,10 +38,9 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" - import strings "strings" import reflect "reflect" @@ -54,7 +53,9 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CronJob) Reset() { *m = CronJob{} } func (*CronJob) ProtoMessage() {} @@ -81,48 +82,48 @@ func (*JobTemplateSpec) ProtoMessage() {} func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func init() { - proto.RegisterType((*CronJob)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.CronJob") - proto.RegisterType((*CronJobList)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.CronJobList") - proto.RegisterType((*CronJobSpec)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.CronJobSpec") - proto.RegisterType((*CronJobStatus)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.CronJobStatus") - proto.RegisterType((*JobTemplate)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.JobTemplate") - proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.JobTemplateSpec") + proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v2alpha1.CronJob") + proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v2alpha1.CronJobList") + proto.RegisterType((*CronJobSpec)(nil), "k8s.io.api.batch.v2alpha1.CronJobSpec") + proto.RegisterType((*CronJobStatus)(nil), "k8s.io.api.batch.v2alpha1.CronJobStatus") + proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v2alpha1.JobTemplate") + proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v2alpha1.JobTemplateSpec") } -func (m *CronJob) Marshal() (data []byte, err error) { +func (m *CronJob) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CronJob) MarshalTo(data []byte) (int, error) { +func (m *CronJob) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n2 - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -130,35 +131,35 @@ func (m *CronJob) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *CronJobList) Marshal() (data []byte, err error) { +func (m *CronJobList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CronJobList) MarshalTo(data []byte) (int, error) { +func (m *CronJobList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n4, 
err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n4 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -168,86 +169,86 @@ func (m *CronJobList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *CronJobSpec) Marshal() (data []byte, err error) { +func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CronJobSpec) MarshalTo(data []byte) (int, error) { +func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Schedule))) - i += copy(data[i:], m.Schedule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) + i += copy(dAtA[i:], m.Schedule) if m.StartingDeadlineSeconds != nil { - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(*m.StartingDeadlineSeconds)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) } - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ConcurrencyPolicy))) - i += copy(data[i:], m.ConcurrencyPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) + i += copy(dAtA[i:], m.ConcurrencyPolicy) if m.Suspend != nil { - data[i] = 0x20 + dAtA[i] = 0x20 i++ if *m.Suspend { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ } - data[i] = 0x2a + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(data, i, uint64(m.JobTemplate.Size())) - n5, err := m.JobTemplate.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size())) + n5, err := m.JobTemplate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n5 if m.SuccessfulJobsHistoryLimit != nil { - data[i] = 0x30 + dAtA[i] = 0x30 i++ - i = encodeVarintGenerated(data, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) } if m.FailedJobsHistoryLimit != nil { - data[i] = 0x38 + dAtA[i] = 0x38 i++ - i = encodeVarintGenerated(data, i, uint64(*m.FailedJobsHistoryLimit)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) } return i, nil } -func (m *CronJobStatus) Marshal() (data []byte, err error) { +func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CronJobStatus) MarshalTo(data []byte) (int, error) { +func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Active) > 0 { for _, msg := range m.Active { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -255,10 +256,10 @@ func (m *CronJobStatus) MarshalTo(data []byte) (int, error) { } } if m.LastScheduleTime != nil { - data[i] = 
0x22 + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(m.LastScheduleTime.Size())) - n6, err := m.LastScheduleTime.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size())) + n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -267,33 +268,33 @@ func (m *CronJobStatus) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *JobTemplate) Marshal() (data []byte, err error) { +func (m *JobTemplate) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *JobTemplate) MarshalTo(data []byte) (int, error) { +func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n7 - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -301,33 +302,33 @@ func (m *JobTemplate) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *JobTemplateSpec) Marshal() (data []byte, err error) { +func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *JobTemplateSpec) MarshalTo(data []byte) (int, error) { +func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n9 - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -335,31 +336,31 @@ func (m *JobTemplateSpec) MarshalTo(data []byte) (int, error) { return i, nil } -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - 
data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintGenerated(data []byte, offset int, v uint64) int { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } func (m *CronJob) Size() (n int) { @@ -505,7 +506,7 @@ func (this *CronJobStatus) String() string { return "nil" } s := strings.Join([]string{`&CronJobStatus{`, - `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, `}`, }, "") @@ -528,7 +529,7 @@ func (this *JobTemplateSpec) String() string { } s := strings.Join([]string{`&JobTemplateSpec{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_kubernetes_pkg_apis_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_api_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -541,8 +542,8 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *CronJob) Unmarshal(data []byte) error { - l := len(data) +func (m *CronJob) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -554,7 +555,7 @@ func (m *CronJob) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -582,7 +583,7 @@ func (m *CronJob) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -596,7 +597,7 @@ func (m *CronJob) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -612,7 +613,7 @@ func (m *CronJob) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -626,7 +627,7 @@ func (m *CronJob) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -642,7 +643,7 @@ func (m *CronJob) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 
0x7F) << shift if b < 0x80 { @@ -656,13 +657,13 @@ func (m *CronJob) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -681,8 +682,8 @@ func (m *CronJob) Unmarshal(data []byte) error { } return nil } -func (m *CronJobList) Unmarshal(data []byte) error { - l := len(data) +func (m *CronJobList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -694,7 +695,7 @@ func (m *CronJobList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -722,7 +723,7 @@ func (m *CronJobList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -736,7 +737,7 @@ func (m *CronJobList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -752,7 +753,7 @@ func (m *CronJobList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -767,13 +768,13 @@ func (m *CronJobList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, CronJob{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -792,8 +793,8 @@ func (m *CronJobList) Unmarshal(data []byte) error { } return nil } -func (m *CronJobSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *CronJobSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -805,7 +806,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -833,7 +834,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -848,7 +849,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Schedule = string(data[iNdEx:postIndex]) + m.Schedule = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { @@ -862,7 +863,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -882,7 +883,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -897,7 
+898,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ConcurrencyPolicy = ConcurrencyPolicy(data[iNdEx:postIndex]) + m.ConcurrencyPolicy = ConcurrencyPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 0 { @@ -911,7 +912,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -932,7 +933,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -946,7 +947,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.JobTemplate.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.JobTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -962,7 +963,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -982,7 +983,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -992,7 +993,7 @@ func (m *CronJobSpec) Unmarshal(data []byte) error { m.FailedJobsHistoryLimit = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1011,8 +1012,8 @@ func (m *CronJobSpec) Unmarshal(data []byte) error { } return nil } -func (m *CronJobStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *CronJobStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1024,7 +1025,7 @@ func (m *CronJobStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1052,7 +1053,7 @@ func (m *CronJobStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1066,8 +1067,8 @@ func (m *CronJobStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Active = append(m.Active, k8s_io_kubernetes_pkg_api_v1.ObjectReference{}) - if err := m.Active[len(m.Active)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + m.Active = append(m.Active, k8s_io_api_core_v1.ObjectReference{}) + if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1083,7 +1084,7 @@ func (m *CronJobStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1100,13 +1101,13 @@ func (m *CronJobStatus) Unmarshal(data []byte) error { if m.LastScheduleTime == nil { m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } - if err := m.LastScheduleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := 
skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1125,8 +1126,8 @@ func (m *CronJobStatus) Unmarshal(data []byte) error { } return nil } -func (m *JobTemplate) Unmarshal(data []byte) error { - l := len(data) +func (m *JobTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1138,7 +1139,7 @@ func (m *JobTemplate) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1166,7 +1167,7 @@ func (m *JobTemplate) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1180,7 +1181,7 @@ func (m *JobTemplate) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1196,7 +1197,7 @@ func (m *JobTemplate) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1210,13 +1211,13 @@ func (m *JobTemplate) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1235,8 +1236,8 @@ func (m *JobTemplate) Unmarshal(data []byte) error { } return nil } -func (m *JobTemplateSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1248,7 +1249,7 @@ func (m *JobTemplateSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1276,7 +1277,7 @@ func (m *JobTemplateSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1290,7 +1291,7 @@ func (m *JobTemplateSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1306,7 +1307,7 @@ func (m *JobTemplateSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1320,13 +1321,13 @@ func (m *JobTemplateSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1345,8 +1346,8 @@ func (m *JobTemplateSpec) Unmarshal(data []byte) error { } return nil } -func skipGenerated(data []byte) (n int, err error) { - l 
:= len(data) +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -1357,7 +1358,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1375,7 +1376,7 @@ func skipGenerated(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -1392,7 +1393,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1415,7 +1416,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1426,7 +1427,7 @@ func skipGenerated(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipGenerated(data[start:]) + next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } @@ -1450,56 +1451,60 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -var fileDescriptorGenerated = []byte{ - // 799 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x4f, 0xe3, 0x46, - 0x18, 0xc7, 0xe3, 0x90, 0x37, 0x26, 0xa5, 0x05, 0xb7, 0x82, 0x28, 0x95, 0x9c, 0x28, 0x52, 0xa5, - 0x14, 0x81, 0x5d, 0x42, 0x85, 0x68, 0x6f, 0x35, 0x55, 0xd5, 0x22, 0xfa, 0x22, 0x07, 0xd4, 0xaa, - 0x42, 0x15, 0x63, 0xe7, 0x49, 0x32, 0xc4, 0x6f, 0xf5, 0x8c, 0xa3, 0xe6, 0xd6, 0x8f, 0xd0, 0x6f, - 0xd1, 0x6f, 0xb1, 0x97, 0xdd, 0x03, 0x47, 0x0e, 0x7b, 0xd8, 0xbd, 0x44, 0x8b, 0xf7, 0x5b, 0xec, - 0x69, 0xe5, 0x89, 0x13, 0x07, 0x1c, 0x2f, 0x61, 0x57, 0xe2, 0xe6, 0x19, 0x3f, 0xff, 0xdf, 0x3c, - 0xcf, 0xf3, 0x7f, 0x66, 0xd0, 0x37, 0x83, 0x43, 0x2a, 0x13, 0x47, 0x19, 0xf8, 0x3a, 0x78, 0x36, - 0x30, 0xa0, 0x8a, 0x3b, 0xe8, 0x29, 0xd8, 0x25, 0x54, 0xd1, 0x31, 0x33, 0xfa, 0xca, 0xb0, 0x85, - 0x4d, 0xb7, 0x8f, 0xf7, 0x94, 0x1e, 0xd8, 0xe0, 0x61, 0x06, 0x1d, 0xd9, 0xf5, 0x1c, 0xe6, 0x88, - 0x5f, 0x4e, 0xa4, 0x72, 0x2c, 0x95, 0xdd, 0x41, 0x4f, 0x0e, 0xa5, 0x32, 0x97, 0xca, 0x53, 0x69, - 0x75, 0xb7, 0x47, 0x58, 0xdf, 0xd7, 0x65, 0xc3, 0xb1, 0x94, 0x9e, 0xd3, 0x73, 0x14, 0x4e, 0xd0, - 0xfd, 0x2e, 0x5f, 0xf1, 0x05, 0xff, 0x9a, 0x90, 0xab, 0x5f, 0x47, 0x49, 0x61, 0x97, 0x58, 0xd8, - 0xe8, 0x13, 0x1b, 0xbc, 0x51, 0x9c, 0x96, 0x05, 0x0c, 0x2b, 0xc3, 0x44, 0x3e, 0x55, 0x25, 0x4d, - 0xe5, 0xf9, 0x36, 0x23, 0x16, 0x24, 0x04, 0x07, 0xf7, 0x09, 0xa8, 0xd1, 0x07, 0x0b, 0x27, 0x74, - 0xfb, 0x69, 0x3a, 0x9f, 0x11, 0x53, 0x21, 0x36, 0xa3, 0xcc, 0x4b, 0x88, 0xe6, 0x6a, 0xa2, 0xe0, - 0x0d, 0xc1, 0x8b, 0x0b, 0x82, 0x7f, 0xb0, 0xe5, 0x9a, 0xb0, 0xa8, 0xa6, 0x9d, 0x54, 0x7b, 0x16, - 0x45, 0xef, 0xdf, 0x6f, 0x66, 0x42, 0xd4, 0xf8, 0x3f, 0x8b, 0x8a, 0x47, 0x9e, 0x63, 0x1f, 0x3b, - 0xba, 0x78, 0x81, 0x4a, 0x61, 0x77, 0x3b, 0x98, 0xe1, 0x8a, 0x50, 0x17, 0x9a, 0xe5, 0xd6, 0x57, - 0x72, 0xe4, 0xf2, 0x7c, 0xb1, 0xb1, 0xcf, 0x61, 0xb4, 0x3c, 0xdc, 0x93, 0x7f, 0xd5, 0x2f, 0xc1, - 0x60, 0x3f, 0x03, 0xc3, 0xaa, 0x78, 0x35, 0xae, 0x65, 0x82, 0x71, 0x0d, 0xc5, 0x7b, 0xda, 0x8c, - 0x2a, 0xfe, 0x81, 0x72, 0xd4, 0x05, 0xa3, 0x92, 0xe5, 0xf4, 0x03, 0x79, 0xe9, 0x19, 0x92, 0xa3, - 0x1c, 0xdb, 0x2e, 0x18, 0xea, 0x47, 0xd1, 0x19, 0xb9, 0x70, 0xa5, 0x71, 0xa2, 0x78, 0x81, 0x0a, - 0x94, 0x61, 0xe6, 
0xd3, 0xca, 0x0a, 0x67, 0x1f, 0xbe, 0x07, 0x9b, 0xeb, 0xd5, 0x8f, 0x23, 0x7a, - 0x61, 0xb2, 0xd6, 0x22, 0x6e, 0xe3, 0x99, 0x80, 0xca, 0x51, 0xe4, 0x09, 0xa1, 0x4c, 0x3c, 0x4f, - 0x74, 0x4b, 0x5e, 0xae, 0x5b, 0xa1, 0x9a, 0xf7, 0x6a, 0x3d, 0x3a, 0xa9, 0x34, 0xdd, 0x99, 0xeb, - 0xd4, 0xef, 0x28, 0x4f, 0x18, 0x58, 0xb4, 0x92, 0xad, 0xaf, 0x34, 0xcb, 0xad, 0xd6, 0xc3, 0xcb, - 0x51, 0xd7, 0x22, 0x7c, 0xfe, 0xa7, 0x10, 0xa4, 0x4d, 0x78, 0x8d, 0x27, 0xb9, 0x59, 0x19, 0x61, - 0xfb, 0xc4, 0x1d, 0x54, 0x0a, 0x07, 0xbd, 0xe3, 0x9b, 0xc0, 0xcb, 0x58, 0x8d, 0xd3, 0x6a, 0x47, - 0xfb, 0xda, 0x2c, 0x42, 0x3c, 0x43, 0x5b, 0x94, 0x61, 0x8f, 0x11, 0xbb, 0xf7, 0x3d, 0xe0, 0x8e, - 0x49, 0x6c, 0x68, 0x83, 0xe1, 0xd8, 0x1d, 0xca, 0x3d, 0x5d, 0x51, 0x3f, 0x0f, 0xc6, 0xb5, 0xad, - 0xf6, 0xe2, 0x10, 0x2d, 0x4d, 0x2b, 0x9e, 0xa3, 0x0d, 0xc3, 0xb1, 0x0d, 0xdf, 0xf3, 0xc0, 0x36, - 0x46, 0xbf, 0x39, 0x26, 0x31, 0x46, 0xdc, 0xc8, 0x55, 0x55, 0x8e, 0xb2, 0xd9, 0x38, 0xba, 0x1b, - 0xf0, 0x66, 0xd1, 0xa6, 0x96, 0x04, 0x89, 0x5f, 0xa0, 0x22, 0xf5, 0xa9, 0x0b, 0x76, 0xa7, 0x92, - 0xab, 0x0b, 0xcd, 0x92, 0x5a, 0x0e, 0xc6, 0xb5, 0x62, 0x7b, 0xb2, 0xa5, 0x4d, 0xff, 0x89, 0x7f, - 0xa3, 0xf2, 0xa5, 0xa3, 0x9f, 0x82, 0xe5, 0x9a, 0x98, 0x41, 0x25, 0xcf, 0x3d, 0xfd, 0xf6, 0x01, - 0x8d, 0x3f, 0x8e, 0xd5, 0x7c, 0x4e, 0x3f, 0x8d, 0x52, 0x2f, 0xcf, 0xfd, 0xd0, 0xe6, 0xcf, 0x10, - 0xff, 0x42, 0x55, 0xea, 0x1b, 0x06, 0x50, 0xda, 0xf5, 0xcd, 0x63, 0x47, 0xa7, 0x3f, 0x12, 0xca, - 0x1c, 0x6f, 0x74, 0x42, 0x2c, 0xc2, 0x2a, 0x85, 0xba, 0xd0, 0xcc, 0xab, 0x52, 0x30, 0xae, 0x55, - 0xdb, 0xa9, 0x51, 0xda, 0x3b, 0x08, 0xa2, 0x86, 0x36, 0xbb, 0x98, 0x98, 0xd0, 0x49, 0xb0, 0x8b, - 0x9c, 0x5d, 0x0d, 0xc6, 0xb5, 0xcd, 0x1f, 0x16, 0x46, 0x68, 0x29, 0xca, 0xc6, 0x73, 0x01, 0xad, - 0xdd, 0xba, 0x31, 0xe2, 0x19, 0x2a, 0x60, 0x83, 0x91, 0x61, 0x38, 0x40, 0xe1, 0xb0, 0xee, 0xa6, - 0xf7, 0x2c, 0x7e, 0x2d, 0x34, 0xe8, 0x42, 0x68, 0x12, 0xc4, 0x17, 0xee, 0x3b, 0x0e, 0xd1, 0x22, - 0x98, 0x68, 0xa2, 0x75, 0x13, 0x53, 0x36, 0x9d, 0xc2, 0x53, 0x62, 0x01, 0xf7, 0xaf, 0xdc, 0xda, - 0x5e, 0xee, 0xa2, 0x85, 0x0a, 0xf5, 0xb3, 0x60, 0x5c, 0x5b, 0x3f, 0xb9, 0xc3, 0xd1, 0x12, 0xe4, - 0xc6, 0x4b, 0x01, 0xcd, 0xfb, 0xf4, 0x08, 0x8f, 0x61, 0x1f, 0x95, 0xd8, 0x74, 0xd8, 0xb2, 0x1f, - 0x3c, 0x6c, 0xb3, 0x5b, 0x3b, 0x9b, 0xb4, 0x19, 0xbd, 0xf1, 0x54, 0x40, 0x9f, 0xdc, 0x89, 0x7f, - 0x84, 0xfa, 0x7e, 0xb9, 0xf5, 0xd8, 0xef, 0x2c, 0x51, 0x1b, 0xaf, 0x2a, 0xed, 0x89, 0x57, 0xb7, - 0xaf, 0x6e, 0xa4, 0xcc, 0xf5, 0x8d, 0x94, 0x79, 0x71, 0x23, 0x65, 0xfe, 0x0d, 0x24, 0xe1, 0x2a, - 0x90, 0x84, 0xeb, 0x40, 0x12, 0x5e, 0x05, 0x92, 0xf0, 0xdf, 0x6b, 0x29, 0xf3, 0x67, 0x69, 0xda, - 0x9d, 0xb7, 0x01, 0x00, 0x00, 0xff, 0xff, 0x32, 0x5e, 0xac, 0x56, 0xd9, 0x08, 0x00, 0x00, +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 787 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xc7, 0xe3, 0x34, 0xbf, 0x3a, 0xa1, 0xd0, 0x1a, 0xd4, 0x86, 0x80, 0x9c, 0xc8, 0x15, 0x28, + 0x42, 0x30, 0xa6, 0x05, 0x21, 0x4e, 0x48, 0xb8, 0x08, 0x4a, 0x29, 0xa2, 0x72, 0x8a, 0x84, 0x50, + 0xb5, 0xda, 0xf1, 0x78, 0x92, 0x4c, 0x63, 0x7b, 0x2c, 0xcf, 0x38, 0x52, 0x6e, 0x7b, 0xdb, 0xeb, + 0xfe, 0x25, 0x7b, 0xd9, 0xfd, 0x23, 0xba, 0x7b, 0xea, 0xb1, 0xa7, 0x68, 0xeb, 0xfd, 0x2f, 0xf6, + 0xb4, 0xf2, 0xc4, 0xf9, 0xd1, 0x38, 0x69, 0xbb, 0x97, 0xde, 0x3c, 0xcf, 0xdf, 0xef, 0x67, 0xde, + 0xbc, 0xf7, 0x66, 0x80, 
0xd9, 0xff, 0x99, 0x43, 0xca, 0x8c, 0x7e, 0x64, 0x93, 0xd0, 0x27, 0x82, + 0x70, 0x63, 0x40, 0x7c, 0x87, 0x85, 0x46, 0xfa, 0x03, 0x05, 0xd4, 0xb0, 0x91, 0xc0, 0x3d, 0x63, + 0xb0, 0x8f, 0xdc, 0xa0, 0x87, 0xf6, 0x8c, 0x2e, 0xf1, 0x49, 0x88, 0x04, 0x71, 0x60, 0x10, 0x32, + 0xc1, 0xd4, 0xcf, 0xc7, 0x52, 0x88, 0x02, 0x0a, 0xa5, 0x14, 0x4e, 0xa4, 0xf5, 0xef, 0xba, 0x54, + 0xf4, 0x22, 0x1b, 0x62, 0xe6, 0x19, 0x5d, 0xd6, 0x65, 0x86, 0x74, 0xd8, 0x51, 0x47, 0xae, 0xe4, + 0x42, 0x7e, 0x8d, 0x49, 0xf5, 0xdd, 0xec, 0xa6, 0x99, 0xed, 0xea, 0xfa, 0x9c, 0x08, 0xb3, 0x90, + 0x2c, 0xd3, 0xfc, 0x38, 0xd3, 0x78, 0x08, 0xf7, 0xa8, 0x4f, 0xc2, 0xa1, 0x11, 0xf4, 0xbb, 0x49, + 0x80, 0x1b, 0x1e, 0x11, 0x68, 0x99, 0xcb, 0x58, 0xe5, 0x0a, 0x23, 0x5f, 0x50, 0x8f, 0x64, 0x0c, + 0x3f, 0xdd, 0x65, 0xe0, 0xb8, 0x47, 0x3c, 0x94, 0xf1, 0xfd, 0xb0, 0xca, 0x17, 0x09, 0xea, 0x1a, + 0xd4, 0x17, 0x5c, 0x84, 0x8b, 0x26, 0xfd, 0x69, 0x1e, 0x94, 0x0f, 0x42, 0xe6, 0x1f, 0x31, 0x5b, + 0x7d, 0x0c, 0x2a, 0xc9, 0x21, 0x1c, 0x24, 0x50, 0x4d, 0x69, 0x2a, 0xad, 0xea, 0xfe, 0xf7, 0x70, + 0xd6, 0x85, 0x29, 0x13, 0x06, 0xfd, 0x6e, 0x12, 0xe0, 0x30, 0x51, 0xc3, 0xc1, 0x1e, 0xfc, 0xc7, + 0x3e, 0x27, 0x58, 0xfc, 0x4d, 0x04, 0x32, 0xd5, 0x8b, 0x51, 0x23, 0x17, 0x8f, 0x1a, 0x60, 0x16, + 0xb3, 0xa6, 0x54, 0xf5, 0x10, 0x14, 0x78, 0x40, 0x70, 0x2d, 0x2f, 0xe9, 0x5f, 0xc3, 0x95, 0x3d, + 0x86, 0x69, 0x4e, 0xed, 0x80, 0x60, 0xf3, 0xa3, 0x94, 0x59, 0x48, 0x56, 0x96, 0x24, 0xa8, 0x27, + 0xa0, 0xc4, 0x05, 0x12, 0x11, 0xaf, 0xad, 0x49, 0x56, 0xeb, 0x1e, 0x2c, 0xa9, 0x37, 0x3f, 0x4e, + 0x69, 0xa5, 0xf1, 0xda, 0x4a, 0x39, 0xfa, 0x4b, 0x05, 0x54, 0x53, 0xe5, 0x31, 0xe5, 0x42, 0x3d, + 0xcb, 0x54, 0x03, 0xde, 0xaf, 0x1a, 0x89, 0x5b, 0xd6, 0x62, 0x33, 0xdd, 0xa9, 0x32, 0x89, 0xcc, + 0x55, 0xe2, 0x0f, 0x50, 0xa4, 0x82, 0x78, 0xbc, 0x96, 0x6f, 0xae, 0xb5, 0xaa, 0xfb, 0xfa, 0xdd, + 0xe9, 0x9b, 0x1b, 0x29, 0xae, 0xf8, 0x67, 0x62, 0xb4, 0xc6, 0x7e, 0xfd, 0x79, 0x61, 0x9a, 0x76, + 0x52, 0x1e, 0xf5, 0x5b, 0x50, 0x49, 0xe6, 0xc3, 0x89, 0x5c, 0x22, 0xd3, 0x5e, 0x9f, 0xa5, 0xd1, + 0x4e, 0xe3, 0xd6, 0x54, 0xa1, 0xfe, 0x0b, 0x76, 0xb8, 0x40, 0xa1, 0xa0, 0x7e, 0xf7, 0x37, 0x82, + 0x1c, 0x97, 0xfa, 0xa4, 0x4d, 0x30, 0xf3, 0x1d, 0x2e, 0x7b, 0xb4, 0x66, 0x7e, 0x11, 0x8f, 0x1a, + 0x3b, 0xed, 0xe5, 0x12, 0x6b, 0x95, 0x57, 0x3d, 0x03, 0x5b, 0x98, 0xf9, 0x38, 0x0a, 0x43, 0xe2, + 0xe3, 0xe1, 0x09, 0x73, 0x29, 0x1e, 0xca, 0x46, 0xad, 0x9b, 0x30, 0xcd, 0x66, 0xeb, 0x60, 0x51, + 0xf0, 0x6e, 0x59, 0xd0, 0xca, 0x82, 0xd4, 0xaf, 0x40, 0x99, 0x47, 0x3c, 0x20, 0xbe, 0x53, 0x2b, + 0x34, 0x95, 0x56, 0xc5, 0xac, 0xc6, 0xa3, 0x46, 0xb9, 0x3d, 0x0e, 0x59, 0x93, 0x7f, 0x2a, 0x02, + 0xd5, 0x73, 0x66, 0x9f, 0x12, 0x2f, 0x70, 0x91, 0x20, 0xb5, 0xa2, 0xec, 0xe1, 0x37, 0xb7, 0x14, + 0xfa, 0x68, 0xa6, 0x96, 0x73, 0xf7, 0x69, 0x9a, 0x6a, 0x75, 0xee, 0x87, 0x35, 0xcf, 0x54, 0x1f, + 0x81, 0x3a, 0x8f, 0x30, 0x26, 0x9c, 0x77, 0x22, 0xf7, 0x88, 0xd9, 0xfc, 0x90, 0x72, 0xc1, 0xc2, + 0xe1, 0x31, 0xf5, 0xa8, 0xa8, 0x95, 0x9a, 0x4a, 0xab, 0x68, 0x6a, 0xf1, 0xa8, 0x51, 0x6f, 0xaf, + 0x54, 0x59, 0xb7, 0x10, 0x54, 0x0b, 0x6c, 0x77, 0x10, 0x75, 0x89, 0x93, 0x61, 0x97, 0x25, 0xbb, + 0x1e, 0x8f, 0x1a, 0xdb, 0xbf, 0x2f, 0x55, 0x58, 0x2b, 0x9c, 0xfa, 0x6b, 0x05, 0x6c, 0xdc, 0xb8, + 0x11, 0xea, 0x5f, 0xa0, 0x84, 0xb0, 0xa0, 0x83, 0x64, 0x60, 0x92, 0x61, 0xdc, 0x9d, 0xaf, 0x51, + 0xf2, 0x18, 0xce, 0xee, 0xb8, 0x45, 0x3a, 0x24, 0x69, 0x05, 0x99, 0x5d, 0xa3, 0x5f, 0xa5, 0xd5, + 0x4a, 0x11, 0xaa, 0x0b, 0x36, 0x5d, 0xc4, 0xc5, 0x64, 0xd6, 0x4e, 0xa9, 0x47, 0x64, 0x97, 0x6e, + 0x96, 0xfe, 0x96, 0xeb, 0x93, 0x38, 0xcc, 0xcf, 
0xe2, 0x51, 0x63, 0xf3, 0x78, 0x81, 0x63, 0x65, + 0xc8, 0xfa, 0x2b, 0x05, 0xcc, 0x77, 0xe7, 0x01, 0x9e, 0xb0, 0xff, 0x40, 0x45, 0x4c, 0x46, 0x2a, + 0xff, 0xc1, 0x23, 0x35, 0xbd, 0x8b, 0xd3, 0x79, 0x9a, 0xd2, 0xf4, 0x17, 0x0a, 0xf8, 0x64, 0x41, + 0xff, 0x00, 0xe7, 0xf9, 0xe5, 0xc6, 0x93, 0xfc, 0xe5, 0x92, 0xb3, 0xc8, 0x53, 0xac, 0x7a, 0x88, + 0x4d, 0x78, 0x71, 0xad, 0xe5, 0x2e, 0xaf, 0xb5, 0xdc, 0xd5, 0xb5, 0x96, 0x7b, 0x12, 0x6b, 0xca, + 0x45, 0xac, 0x29, 0x97, 0xb1, 0xa6, 0x5c, 0xc5, 0x9a, 0xf2, 0x26, 0xd6, 0x94, 0x67, 0x6f, 0xb5, + 0xdc, 0xff, 0x95, 0x49, 0x45, 0xde, 0x07, 0x00, 0x00, 0xff, 0xff, 0x02, 0x60, 0xaa, 0x00, 0x1c, + 0x08, 0x00, 0x00, } diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.proto b/vendor/k8s.io/api/batch/v2alpha1/generated.proto similarity index 62% rename from vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.proto rename to vendor/k8s.io/api/batch/v2alpha1/generated.proto index 4f51d6161..1fe17ff21 100644 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,15 +19,14 @@ limitations under the License. syntax = 'proto2'; -package k8s.io.kubernetes.pkg.apis.batch.v2alpha1; +package k8s.io.api.batch.v2alpha1; +import "k8s.io/api/batch/v1/generated.proto"; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v2alpha1"; @@ -35,35 +34,35 @@ option go_package = "v2alpha1"; // CronJob represents the configuration of a single cron job. message CronJob { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is a structure defining the expected behavior of a job, including the schedule. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // Specification of the desired behavior of a cron job, including the schedule. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional CronJobSpec spec = 2; - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // Current status of a cron job. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional CronJobStatus status = 3; } // CronJobList is a collection of cron jobs. message CronJobList { - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard list metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CronJob. + // items is the list of CronJobs. repeated CronJob items = 2; } // CronJobSpec describes how the job execution will look like and when it will actually run. message CronJobSpec { - // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. optional string schedule = 1; // Optional deadline in seconds for starting the job if it misses scheduled @@ -71,17 +70,20 @@ message CronJobSpec { // +optional optional int64 startingDeadlineSeconds = 2; - // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + // Specifies how to treat concurrent executions of a Job. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional optional string concurrencyPolicy = 3; - // Suspend flag tells the controller to suspend subsequent executions, it does + // This flag tells the controller to suspend subsequent executions, it does // not apply to already started executions. Defaults to false. // +optional optional bool suspend = 4; - // JobTemplate is the object that describes the job that will be created when - // executing a CronJob. + // Specifies the job that will be created when executing a CronJob. optional JobTemplateSpec jobTemplate = 5; // The number of successful finished jobs to retain. @@ -97,11 +99,11 @@ message CronJobSpec { // CronJobStatus represents the current state of a cron job. message CronJobStatus { - // Active holds pointers to currently running jobs. + // A list of pointers to currently running jobs. // +optional - repeated k8s.io.kubernetes.pkg.api.v1.ObjectReference active = 1; + repeated k8s.io.api.core.v1.ObjectReference active = 1; - // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. + // Information when was the last time the job was successfully scheduled. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; } @@ -109,12 +111,12 @@ message CronJobStatus { // JobTemplate describes a template for creating copies of a predefined pod. message JobTemplate { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Template defines jobs that will be created from this template - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // Defines jobs that will be created from this template. + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional JobTemplateSpec template = 2; } @@ -122,13 +124,13 @@ message JobTemplate { // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional - optional k8s.io.kubernetes.pkg.apis.batch.v1.JobSpec spec = 2; + optional k8s.io.api.batch.v1.JobSpec spec = 2; } diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/register.go b/vendor/k8s.io/api/batch/v2alpha1/register.go similarity index 79% rename from vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/register.go rename to vendor/k8s.io/api/batch/v2alpha1/register.go index 5286ca4a0..ac7fa5087 100644 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/register.go +++ b/vendor/k8s.io/api/batch/v2alpha1/register.go @@ -34,19 +34,20 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &JobTemplate{}, &CronJob{}, &CronJobList{}, ) - scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJob"), &CronJob{}) - scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJobList"), &CronJobList{}) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.go b/vendor/k8s.io/api/batch/v2alpha1/types.go similarity index 70% rename from vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.go rename to vendor/k8s.io/api/batch/v2alpha1/types.go index 67f1c95e4..cccff94ff 100644 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types.go @@ -17,21 +17,23 @@ limitations under the License. package v2alpha1 import ( + batchv1 "k8s.io/api/batch/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api/v1" - batchv1 "k8s.io/client-go/pkg/apis/batch/v1" ) +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // JobTemplate describes a template for creating copies of a predefined pod. type JobTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Template defines jobs that will be created from this template - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // Defines jobs that will be created from this template. 
+ // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -39,53 +41,57 @@ type JobTemplate struct { // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // CronJob represents the configuration of a single cron job. type CronJob struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is a structure defining the expected behavior of a job, including the schedule. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // Specification of the desired behavior of a cron job, including the schedule. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // Current status of a cron job. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // CronJobList is a collection of cron jobs. type CronJobList struct { metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CronJob. + // items is the list of CronJobs. Items []CronJob `json:"items" protobuf:"bytes,2,rep,name=items"` } // CronJobSpec describes how the job execution will look like and when it will actually run. type CronJobSpec struct { - // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. 
Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` // Optional deadline in seconds for starting the job if it misses scheduled @@ -93,17 +99,20 @@ type CronJobSpec struct { // +optional StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` - // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + // Specifies how to treat concurrent executions of a Job. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` - // Suspend flag tells the controller to suspend subsequent executions, it does + // This flag tells the controller to suspend subsequent executions, it does // not apply to already started executions. Defaults to false. // +optional Suspend *bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"` - // JobTemplate is the object that describes the job that will be created when - // executing a CronJob. + // Specifies the job that will be created when executing a CronJob. JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"` // The number of successful finished jobs to retain. @@ -137,11 +146,11 @@ const ( // CronJobStatus represents the current state of a cron job. type CronJobStatus struct { - // Active holds pointers to currently running jobs. + // A list of pointers to currently running jobs. // +optional Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"` - // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. + // Information when was the last time the job was successfully scheduled. // +optional LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"` } diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go similarity index 59% rename from vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go index c0b53b8e0..f448a92cf 100644 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,12 +26,12 @@ package v2alpha1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ "": "CronJob represents the configuration of a single cron job.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec is a structure defining the expected behavior of a job, including the schedule. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (CronJob) SwaggerDoc() map[string]string { @@ -40,8 +40,8 @@ func (CronJob) SwaggerDoc() map[string]string { var map_CronJobList = map[string]string{ "": "CronJobList is a collection of cron jobs.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of CronJob.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "items is the list of CronJobs.", } func (CronJobList) SwaggerDoc() map[string]string { @@ -50,11 +50,11 @@ func (CronJobList) SwaggerDoc() map[string]string { var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", - "schedule": "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", + "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.", - "suspend": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", - "jobTemplate": "JobTemplate is the object that describes the job that will be created when executing a CronJob.", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", + "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.", "failedJobsHistoryLimit": "The number of failed finished jobs to retain. 
This is a pointer to distinguish between explicit zero and not specified.", } @@ -65,8 +65,8 @@ func (CronJobSpec) SwaggerDoc() map[string]string { var map_CronJobStatus = map[string]string{ "": "CronJobStatus represents the current state of a cron job.", - "active": "Active holds pointers to currently running jobs.", - "lastScheduleTime": "LastScheduleTime keeps information of when was the last time the job was successfully scheduled.", + "active": "A list of pointers to currently running jobs.", + "lastScheduleTime": "Information when was the last time the job was successfully scheduled.", } func (CronJobStatus) SwaggerDoc() map[string]string { @@ -75,8 +75,8 @@ func (CronJobStatus) SwaggerDoc() map[string]string { var map_JobTemplate = map[string]string{ "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "template": "Template defines jobs that will be created from this template http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (JobTemplate) SwaggerDoc() map[string]string { @@ -85,8 +85,8 @@ func (JobTemplate) SwaggerDoc() map[string]string { var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", - "metadata": "Standard object's metadata of the jobs created from this template. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (JobTemplateSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..bf0da8bf4 --- /dev/null +++ b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go @@ -0,0 +1,214 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CronJob) DeepCopyInto(out *CronJob) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJob. +func (in *CronJob) DeepCopy() *CronJob { + if in == nil { + return nil + } + out := new(CronJob) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CronJob) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronJobList) DeepCopyInto(out *CronJobList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CronJob, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobList. +func (in *CronJobList) DeepCopy() *CronJobList { + if in == nil { + return nil + } + out := new(CronJobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CronJobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronJobSpec) DeepCopyInto(out *CronJobSpec) { + *out = *in + if in.StartingDeadlineSeconds != nil { + in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + in.JobTemplate.DeepCopyInto(&out.JobTemplate) + if in.SuccessfulJobsHistoryLimit != nil { + in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.FailedJobsHistoryLimit != nil { + in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobSpec. +func (in *CronJobSpec) DeepCopy() *CronJobSpec { + if in == nil { + return nil + } + out := new(CronJobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronJobStatus) DeepCopyInto(out *CronJobStatus) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = make([]v1.ObjectReference, len(*in)) + copy(*out, *in) + } + if in.LastScheduleTime != nil { + in, out := &in.LastScheduleTime, &out.LastScheduleTime + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobStatus. 
+func (in *CronJobStatus) DeepCopy() *CronJobStatus { + if in == nil { + return nil + } + out := new(CronJobStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobTemplate) DeepCopyInto(out *JobTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplate. +func (in *JobTemplate) DeepCopy() *JobTemplate { + if in == nil { + return nil + } + out := new(JobTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *JobTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplateSpec. +func (in *JobTemplateSpec) DeepCopy() *JobTemplateSpec { + if in == nil { + return nil + } + out := new(JobTemplateSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/doc.go b/vendor/k8s.io/api/certificates/v1beta1/doc.go similarity index 84% rename from vendor/k8s.io/client-go/pkg/apis/certificates/doc.go rename to vendor/k8s.io/api/certificates/v1beta1/doc.go index a10177469..fb23aadb0 100644 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/doc.go +++ b/vendor/k8s.io/api/certificates/v1beta1/doc.go @@ -14,5 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + // +groupName=certificates.k8s.io -package certificates +package v1beta1 // import "k8s.io/api/certificates/v1beta1" diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go similarity index 68% rename from vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.pb.go rename to vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index 11833465d..5de4ade7e 100644 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,14 +15,14 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto // DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
It is generated from these files: - k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.proto + k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto It has these top-level messages: CertificateSigningRequest @@ -38,9 +38,10 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + import strings "strings" import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" import io "io" @@ -51,7 +52,9 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } func (*CertificateSigningRequest) ProtoMessage() {} @@ -88,48 +91,48 @@ func (*ExtraValue) ProtoMessage() {} func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func init() { - proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.CertificateSigningRequest") - proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.CertificateSigningRequestCondition") - proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.CertificateSigningRequestList") - proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.CertificateSigningRequestSpec") - proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.CertificateSigningRequestStatus") - proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.ExtraValue") + proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequest") + proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestCondition") + proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestList") + proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec") + proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestStatus") + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.ExtraValue") } -func (m *CertificateSigningRequest) Marshal() (data []byte, err error) { +func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CertificateSigningRequest) MarshalTo(data []byte) (int, error) { +func (m *CertificateSigningRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, 
err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n2 - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -137,37 +140,37 @@ func (m *CertificateSigningRequest) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *CertificateSigningRequestCondition) Marshal() (data []byte, err error) { +func (m *CertificateSigningRequestCondition) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CertificateSigningRequestCondition) MarshalTo(data []byte) (int, error) { +func (m *CertificateSigningRequestCondition) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(m.LastUpdateTime.Size())) - n4, err := m.LastUpdateTime.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n4, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -175,35 +178,35 @@ func (m *CertificateSigningRequestCondition) MarshalTo(data []byte) (int, error) return i, nil } -func (m *CertificateSigningRequestList) Marshal() (data []byte, err error) { +func (m *CertificateSigningRequestList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CertificateSigningRequestList) MarshalTo(data []byte) (int, error) { +func (m *CertificateSigningRequestList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n5 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err 
!= nil { return 0, err } @@ -213,81 +216,90 @@ func (m *CertificateSigningRequestList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *CertificateSigningRequestSpec) Marshal() (data []byte, err error) { +func (m *CertificateSigningRequestSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CertificateSigningRequestSpec) MarshalTo(data []byte) (int, error) { +func (m *CertificateSigningRequestSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Request != nil { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Request))) - i += copy(data[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i += copy(dAtA[i:], m.Request) } - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Username))) - i += copy(data[i:], m.Username) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i += copy(dAtA[i:], m.Username) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.UID))) - i += copy(data[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) if len(m.Groups) > 0 { for _, s := range m.Groups { - data[i] = 0x22 + dAtA[i] = 0x22 i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if len(m.Usages) > 0 { for _, s := range m.Usages { - data[i] = 0x2a + dAtA[i] = 0x2a i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { - data[i] = 0x32 + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for _, k := range keysForExtra { + dAtA[i] = 0x32 i++ - v := m.Extra[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa + v := m.Extra[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n6, err := (&v).MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n6, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -297,27 +309,27 @@ func (m *CertificateSigningRequestSpec) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *CertificateSigningRequestStatus) Marshal() (data []byte, err error) { +func (m *CertificateSigningRequestStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) 
+ dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CertificateSigningRequestStatus) MarshalTo(data []byte) (int, error) { +func (m *CertificateSigningRequestStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Conditions) > 0 { for _, msg := range m.Conditions { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -325,72 +337,72 @@ func (m *CertificateSigningRequestStatus) MarshalTo(data []byte) (int, error) { } } if m.Certificate != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Certificate))) - i += copy(data[i:], m.Certificate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) + i += copy(dAtA[i:], m.Certificate) } return i, nil } -func (m ExtraValue) Marshal() (data []byte, err error) { +func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m ExtraValue) MarshalTo(data []byte) (int, error) { +func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m) > 0 { for _, s := range m { - data[i] = 0xa + dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } return i, nil } -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintGenerated(data []byte, offset int, v uint64) int { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } func (m *CertificateSigningRequest) Size() (n int) { @@ -589,8 +601,8 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *CertificateSigningRequest) Unmarshal(data []byte) error { - l := len(data) +func (m 
*CertificateSigningRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -602,7 +614,7 @@ func (m *CertificateSigningRequest) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -630,7 +642,7 @@ func (m *CertificateSigningRequest) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -644,7 +656,7 @@ func (m *CertificateSigningRequest) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -660,7 +672,7 @@ func (m *CertificateSigningRequest) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -674,7 +686,7 @@ func (m *CertificateSigningRequest) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -690,7 +702,7 @@ func (m *CertificateSigningRequest) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -704,13 +716,13 @@ func (m *CertificateSigningRequest) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -729,8 +741,8 @@ func (m *CertificateSigningRequest) Unmarshal(data []byte) error { } return nil } -func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error { - l := len(data) +func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -742,7 +754,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -770,7 +782,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -785,7 +797,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = RequestConditionType(data[iNdEx:postIndex]) + m.Type = RequestConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -799,7 +811,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -814,7 +826,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error { if postIndex > l 
{ return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -828,7 +840,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -843,7 +855,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -857,7 +869,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -871,13 +883,13 @@ func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastUpdateTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -896,8 +908,8 @@ func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error { } return nil } -func (m *CertificateSigningRequestList) Unmarshal(data []byte) error { - l := len(data) +func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -909,7 +921,7 @@ func (m *CertificateSigningRequestList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -937,7 +949,7 @@ func (m *CertificateSigningRequestList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -951,7 +963,7 @@ func (m *CertificateSigningRequestList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -967,7 +979,7 @@ func (m *CertificateSigningRequestList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -982,13 +994,13 @@ func (m *CertificateSigningRequestList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, CertificateSigningRequest{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1007,8 +1019,8 @@ func (m *CertificateSigningRequestList) Unmarshal(data []byte) error { } return nil } -func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx 
< l { preIndex := iNdEx @@ -1020,7 +1032,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1048,7 +1060,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1062,7 +1074,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Request = append(m.Request[:0], data[iNdEx:postIndex]...) + m.Request = append(m.Request[:0], dAtA[iNdEx:postIndex]...) if m.Request == nil { m.Request = []byte{} } @@ -1079,7 +1091,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1094,7 +1106,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Username = string(data[iNdEx:postIndex]) + m.Username = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -1108,7 +1120,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1123,7 +1135,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.UID = string(data[iNdEx:postIndex]) + m.UID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -1137,7 +1149,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1152,7 +1164,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 5: if wireType != 2 { @@ -1166,7 +1178,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1181,7 +1193,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Usages = append(m.Usages, KeyUsage(data[iNdEx:postIndex])) + m.Usages = append(m.Usages, KeyUsage(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 6: if wireType != 2 { @@ -1195,7 +1207,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1217,7 +1229,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1232,7 +1244,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := 
dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1247,61 +1259,66 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - m.Extra[mapkey] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue + } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1320,8 +1337,8 @@ func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { } return nil } -func (m *CertificateSigningRequestStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1333,7 +1350,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1361,7 +1378,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1376,7 +1393,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Conditions = append(m.Conditions, CertificateSigningRequestCondition{}) - if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1392,7 +1409,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1406,14 +1423,14 @@ func (m *CertificateSigningRequestStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Certificate = append(m.Certificate[:0], data[iNdEx:postIndex]...) + m.Certificate = append(m.Certificate[:0], dAtA[iNdEx:postIndex]...) if m.Certificate == nil { m.Certificate = []byte{} } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1432,8 +1449,8 @@ func (m *CertificateSigningRequestStatus) Unmarshal(data []byte) error { } return nil } -func (m *ExtraValue) Unmarshal(data []byte) error { - l := len(data) +func (m *ExtraValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1445,7 +1462,7 @@ func (m *ExtraValue) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1473,7 +1490,7 @@ func (m *ExtraValue) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1488,11 +1505,11 @@ func (m *ExtraValue) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - *m = append(*m, string(data[iNdEx:postIndex])) + *m = append(*m, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1511,8 +1528,8 @@ func (m *ExtraValue) Unmarshal(data []byte) error { } return nil } -func skipGenerated(data []byte) (n int, err error) { - l := len(data) +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -1523,7 +1540,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1541,7 +1558,7 @@ func skipGenerated(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -1558,7 +1575,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1581,7 +1598,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1592,7 +1609,7 @@ func skipGenerated(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipGenerated(data[start:]) + next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } @@ -1616,59 +1633,61 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -var fileDescriptorGenerated = []byte{ - // 839 bytes of a 
gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x54, 0xcf, 0x8f, 0xdb, 0x44, - 0x14, 0x8e, 0xf3, 0x6b, 0x93, 0xc9, 0xb2, 0xad, 0x46, 0xa8, 0x32, 0x2b, 0xd5, 0x5e, 0x59, 0x80, - 0xb6, 0xa8, 0xd8, 0x64, 0x41, 0xb0, 0x2a, 0x07, 0x24, 0x97, 0x0a, 0x15, 0x5a, 0x7e, 0xcc, 0x36, - 0x48, 0x20, 0x0e, 0x4c, 0x9c, 0x57, 0xef, 0x34, 0xf1, 0x0f, 0x3c, 0xe3, 0x68, 0x73, 0x41, 0xbd, - 0x71, 0xe5, 0xc8, 0x05, 0x89, 0x3f, 0x67, 0x8f, 0x3d, 0x72, 0x40, 0x11, 0x1b, 0x4e, 0x5c, 0xf8, - 0x03, 0x7a, 0x42, 0x33, 0x9e, 0xc4, 0x66, 0xd3, 0xd0, 0x56, 0xca, 0xcd, 0xf3, 0xcd, 0xf7, 0xbe, - 0xf7, 0xde, 0xf7, 0x9e, 0x07, 0x7d, 0x34, 0x3e, 0xe6, 0x2e, 0x4b, 0xbc, 0x71, 0x3e, 0x84, 0x2c, - 0x06, 0x01, 0xdc, 0x4b, 0xc7, 0xa1, 0x47, 0x53, 0xc6, 0xbd, 0x00, 0x32, 0xc1, 0x1e, 0xb2, 0x80, - 0x4a, 0x74, 0xda, 0x1f, 0x82, 0xa0, 0x7d, 0x2f, 0x84, 0x18, 0x32, 0x2a, 0x60, 0xe4, 0xa6, 0x59, - 0x22, 0x12, 0xec, 0x15, 0x02, 0x6e, 0x29, 0xe0, 0xa6, 0xe3, 0xd0, 0x95, 0x02, 0x6e, 0x55, 0xc0, - 0xd5, 0x02, 0xfb, 0x6f, 0x87, 0x4c, 0x9c, 0xe6, 0x43, 0x37, 0x48, 0x22, 0x2f, 0x4c, 0xc2, 0xc4, - 0x53, 0x3a, 0xc3, 0xfc, 0xa1, 0x3a, 0xa9, 0x83, 0xfa, 0x2a, 0xf4, 0xf7, 0xdf, 0xd3, 0x05, 0xd2, - 0x94, 0x45, 0x34, 0x38, 0x65, 0x31, 0x64, 0xb3, 0xb2, 0xc4, 0x08, 0x04, 0xf5, 0xa6, 0x6b, 0x55, - 0xed, 0x7b, 0x9b, 0xa2, 0xb2, 0x3c, 0x16, 0x2c, 0x82, 0xb5, 0x80, 0xf7, 0x9f, 0x17, 0xc0, 0x83, - 0x53, 0x88, 0xe8, 0x5a, 0xdc, 0xbb, 0x9b, 0xe2, 0x72, 0xc1, 0x26, 0x1e, 0x8b, 0x05, 0x17, 0xd9, - 0x5a, 0x50, 0xa5, 0x27, 0x0e, 0xd9, 0x14, 0xb2, 0xb2, 0x21, 0x38, 0xa3, 0x51, 0x3a, 0x81, 0x67, - 0xf5, 0x74, 0x73, 0xe3, 0xa8, 0x9e, 0xc1, 0x76, 0xfe, 0xae, 0xa3, 0xd7, 0x6e, 0x97, 0xfe, 0x9f, - 0xb0, 0x30, 0x66, 0x71, 0x48, 0xe0, 0x87, 0x1c, 0xb8, 0xc0, 0xdf, 0xa3, 0x8e, 0xb4, 0x6e, 0x44, - 0x05, 0x35, 0x8d, 0x03, 0xe3, 0xb0, 0x77, 0xf4, 0x8e, 0xab, 0x07, 0x59, 0xed, 0xa4, 0x1c, 0xa5, - 0x64, 0xbb, 0xd3, 0xbe, 0xfb, 0xc5, 0xf0, 0x11, 0x04, 0xe2, 0x3e, 0x08, 0xea, 0xe3, 0xf3, 0xb9, - 0x5d, 0x5b, 0xcc, 0x6d, 0x54, 0x62, 0x64, 0xa5, 0x8a, 0x53, 0xd4, 0xe4, 0x29, 0x04, 0x66, 0x5d, - 0xa9, 0x7f, 0xee, 0xbe, 0xe4, 0x9a, 0xb8, 0x1b, 0x6b, 0x3f, 0x49, 0x21, 0xf0, 0x77, 0x75, 0xee, - 0xa6, 0x3c, 0x11, 0x95, 0x09, 0x9f, 0xa1, 0x36, 0x17, 0x54, 0xe4, 0xdc, 0x6c, 0xa8, 0x9c, 0x5f, - 0x6e, 0x31, 0xa7, 0xd2, 0xf5, 0xf7, 0x74, 0xd6, 0x76, 0x71, 0x26, 0x3a, 0x9f, 0xf3, 0x6b, 0x1d, - 0x39, 0x1b, 0x63, 0x6f, 0x27, 0xf1, 0x88, 0x09, 0x96, 0xc4, 0xf8, 0x18, 0x35, 0xc5, 0x2c, 0x05, - 0x65, 0x78, 0xd7, 0x7f, 0x7d, 0xd9, 0xc2, 0x83, 0x59, 0x0a, 0x4f, 0xe7, 0xf6, 0xab, 0x97, 0xf9, - 0x12, 0x27, 0x2a, 0x02, 0xbf, 0x89, 0xda, 0x19, 0x50, 0x9e, 0xc4, 0xca, 0xce, 0x6e, 0x59, 0x08, - 0x51, 0x28, 0xd1, 0xb7, 0xf8, 0x06, 0xda, 0x89, 0x80, 0x73, 0x1a, 0x82, 0xf2, 0xa0, 0xeb, 0x5f, - 0xd1, 0xc4, 0x9d, 0xfb, 0x05, 0x4c, 0x96, 0xf7, 0xf8, 0x11, 0xda, 0x9b, 0x50, 0x2e, 0x06, 0xe9, - 0x88, 0x0a, 0x78, 0xc0, 0x22, 0x30, 0x9b, 0xca, 0xb5, 0xb7, 0x5e, 0x6c, 0x0f, 0x64, 0x84, 0x7f, - 0x4d, 0xab, 0xef, 0xdd, 0xfb, 0x8f, 0x12, 0xb9, 0xa4, 0xec, 0xfc, 0x63, 0xa0, 0xeb, 0x1b, 0xfd, - 0xb9, 0xc7, 0xb8, 0xc0, 0xdf, 0xad, 0xed, 0xa3, 0xfb, 0x62, 0x75, 0xc8, 0x68, 0xb5, 0x8d, 0x57, - 0x75, 0x2d, 0x9d, 0x25, 0x52, 0xd9, 0xc5, 0x04, 0xb5, 0x98, 0x80, 0x88, 0x9b, 0xf5, 0x83, 0xc6, - 0x61, 0xef, 0xe8, 0xd3, 0xed, 0x2d, 0x86, 0xff, 0x8a, 0x4e, 0xdb, 0xba, 0x2b, 0x13, 0x90, 0x22, - 0x8f, 0xb3, 0x68, 0xfc, 0x4f, 0xc3, 0x72, 0x65, 0xf1, 0x1b, 0x68, 0x27, 0x2b, 0x8e, 0xaa, 0xdf, - 0x5d, 0xbf, 0x27, 0xa7, 0xa4, 0x19, 0x64, 0x79, 0x87, 0x6f, 0xa2, 0x4e, 0xce, 0x21, 0x8b, 
0x69, - 0x04, 0x7a, 0xf4, 0xab, 0x3e, 0x07, 0x1a, 0x27, 0x2b, 0x06, 0xbe, 0x8e, 0x1a, 0x39, 0x1b, 0xe9, - 0xd1, 0xf7, 0x34, 0xb1, 0x31, 0xb8, 0xfb, 0x31, 0x91, 0x38, 0x76, 0x50, 0x3b, 0xcc, 0x92, 0x3c, - 0xe5, 0x66, 0xf3, 0xa0, 0x71, 0xd8, 0xf5, 0x91, 0xdc, 0xa0, 0x4f, 0x14, 0x42, 0xf4, 0x0d, 0x3e, - 0x42, 0x9d, 0x31, 0xcc, 0x06, 0x6a, 0x85, 0x5a, 0x8a, 0x75, 0x4d, 0xb2, 0x14, 0xc0, 0x9f, 0xce, - 0xed, 0xce, 0x67, 0xfa, 0x96, 0xac, 0x78, 0xf8, 0x47, 0xd4, 0x82, 0x33, 0x91, 0x51, 0xb3, 0xad, - 0xec, 0xfd, 0x66, 0xbb, 0xff, 0xba, 0x7b, 0x47, 0x6a, 0xdf, 0x89, 0x45, 0x36, 0x2b, 0xdd, 0x56, - 0x18, 0x29, 0xd2, 0xee, 0xe7, 0x08, 0x95, 0x1c, 0x7c, 0x15, 0x35, 0xc6, 0x30, 0x2b, 0x7e, 0x32, - 0x22, 0x3f, 0xf1, 0x57, 0xa8, 0x35, 0xa5, 0x93, 0x1c, 0xf4, 0x5b, 0xf4, 0xe1, 0x4b, 0xd7, 0xa7, - 0xd4, 0xbf, 0x96, 0x12, 0xa4, 0x50, 0xba, 0x55, 0x3f, 0x36, 0x9c, 0xb9, 0x81, 0xec, 0xe7, 0xbc, - 0x18, 0xf8, 0x27, 0x03, 0xa1, 0x60, 0xf9, 0x43, 0x73, 0xd3, 0x50, 0x06, 0x9d, 0x6c, 0xcf, 0xa0, - 0xd5, 0x63, 0x51, 0xbe, 0xc6, 0x2b, 0x88, 0x93, 0x4a, 0x6a, 0xdc, 0x47, 0xbd, 0x8a, 0xb4, 0xb2, - 0x62, 0xd7, 0xbf, 0xb2, 0x98, 0xdb, 0xbd, 0x8a, 0x38, 0xa9, 0x72, 0x9c, 0x0f, 0xb4, 0xaf, 0xaa, - 0x73, 0x6c, 0x2f, 0x7f, 0x22, 0x43, 0xad, 0x45, 0xf7, 0xf2, 0xd2, 0xdf, 0xea, 0xfc, 0xf2, 0x9b, - 0x5d, 0x7b, 0xfc, 0xc7, 0x41, 0xcd, 0xbf, 0x71, 0x7e, 0x61, 0xd5, 0x9e, 0x5c, 0x58, 0xb5, 0xdf, - 0x2f, 0xac, 0xda, 0xe3, 0x85, 0x65, 0x9c, 0x2f, 0x2c, 0xe3, 0xc9, 0xc2, 0x32, 0xfe, 0x5c, 0x58, - 0xc6, 0xcf, 0x7f, 0x59, 0xb5, 0x6f, 0x77, 0x74, 0x67, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x07, - 0x0c, 0x3b, 0x3a, 0x80, 0x08, 0x00, 0x00, +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 816 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xf6, 0xfa, 0x2b, 0xf6, 0x38, 0xa4, 0xd5, 0x08, 0x55, 0x4b, 0xa4, 0xee, 0x46, 0x2b, 0x40, + 0xe1, 0xa3, 0xb3, 0xa4, 0x20, 0x88, 0x72, 0x40, 0xb0, 0xa1, 0x82, 0x88, 0x56, 0x48, 0xd3, 0x86, + 0x03, 0x42, 0xa2, 0xe3, 0xf5, 0xdb, 0xcd, 0xd4, 0xd9, 0x0f, 0x76, 0x66, 0x0d, 0xbe, 0xf5, 0x27, + 0x70, 0xe4, 0x82, 0xc4, 0x2f, 0xe1, 0x1c, 0x0e, 0x48, 0x3d, 0xf6, 0x80, 0x2c, 0x62, 0xfe, 0x45, + 0x4f, 0x68, 0x66, 0xc7, 0x5e, 0x63, 0xcb, 0x75, 0xd5, 0xdc, 0xf6, 0x7d, 0xde, 0xf7, 0x79, 0xde, + 0xcf, 0x59, 0xf4, 0xd5, 0xf0, 0x50, 0x10, 0x9e, 0xfa, 0xc3, 0xa2, 0x0f, 0x79, 0x02, 0x12, 0x84, + 0x3f, 0x82, 0x64, 0x90, 0xe6, 0xbe, 0x71, 0xb0, 0x8c, 0xfb, 0x21, 0xe4, 0x92, 0x3f, 0xe2, 0x21, + 0xd3, 0xee, 0x83, 0x3e, 0x48, 0x76, 0xe0, 0x47, 0x90, 0x40, 0xce, 0x24, 0x0c, 0x48, 0x96, 0xa7, + 0x32, 0xc5, 0x6e, 0x49, 0x20, 0x2c, 0xe3, 0x64, 0x91, 0x40, 0x0c, 0x61, 0xf7, 0x56, 0xc4, 0xe5, + 0x59, 0xd1, 0x27, 0x61, 0x1a, 0xfb, 0x51, 0x1a, 0xa5, 0xbe, 0xe6, 0xf5, 0x8b, 0x47, 0xda, 0xd2, + 0x86, 0xfe, 0x2a, 0xf5, 0x76, 0x3f, 0xaa, 0x0a, 0x88, 0x59, 0x78, 0xc6, 0x13, 0xc8, 0xc7, 0x7e, + 0x36, 0x8c, 0x14, 0x20, 0xfc, 0x18, 0x24, 0xf3, 0x47, 0x2b, 0x55, 0xec, 0xfa, 0xeb, 0x58, 0x79, + 0x91, 0x48, 0x1e, 0xc3, 0x0a, 0xe1, 0xe3, 0x4d, 0x04, 0x11, 0x9e, 0x41, 0xcc, 0x56, 0x78, 0x1f, + 0xae, 0xe3, 0x15, 0x92, 0x9f, 0xfb, 0x3c, 0x91, 0x42, 0xe6, 0xcb, 0x24, 0xef, 0xcf, 0x3a, 0x7a, + 0xe3, 0xb8, 0x9a, 0xcd, 0x7d, 0x1e, 0x25, 0x3c, 0x89, 0x28, 0xfc, 0x58, 0x80, 0x90, 0xf8, 0x21, + 0xea, 0xa8, 0xb6, 0x06, 0x4c, 0x32, 0xdb, 0xda, 0xb3, 0xf6, 0x7b, 0xb7, 0x3f, 0x20, 0xd5, 0x50, + 0xe7, 0x59, 0x48, 0x36, 0x8c, 0x14, 
0x20, 0x88, 0x8a, 0x26, 0xa3, 0x03, 0xf2, 0x4d, 0xff, 0x31, + 0x84, 0xf2, 0x1e, 0x48, 0x16, 0xe0, 0x8b, 0x89, 0x5b, 0x9b, 0x4e, 0x5c, 0x54, 0x61, 0x74, 0xae, + 0x8a, 0x1f, 0xa2, 0xa6, 0xc8, 0x20, 0xb4, 0xeb, 0x5a, 0xfd, 0x53, 0xb2, 0x61, 0x65, 0x64, 0x6d, + 0xad, 0xf7, 0x33, 0x08, 0x83, 0x6d, 0x93, 0xab, 0xa9, 0x2c, 0xaa, 0x95, 0xf1, 0x19, 0x6a, 0x0b, + 0xc9, 0x64, 0x21, 0xec, 0x86, 0xce, 0xf1, 0xd9, 0x15, 0x72, 0x68, 0x9d, 0x60, 0xc7, 0x64, 0x69, + 0x97, 0x36, 0x35, 0xfa, 0xde, 0x6f, 0x75, 0xe4, 0xad, 0xe5, 0x1e, 0xa7, 0xc9, 0x80, 0x4b, 0x9e, + 0x26, 0xf8, 0x10, 0x35, 0xe5, 0x38, 0x03, 0x3d, 0xd0, 0x6e, 0xf0, 0xe6, 0xac, 0xe4, 0x07, 0xe3, + 0x0c, 0x9e, 0x4f, 0xdc, 0xd7, 0x97, 0xe3, 0x15, 0x4e, 0x35, 0x03, 0xbf, 0x8d, 0xda, 0x39, 0x30, + 0x91, 0x26, 0x7a, 0x5c, 0xdd, 0xaa, 0x10, 0xaa, 0x51, 0x6a, 0xbc, 0xf8, 0x1d, 0xb4, 0x15, 0x83, + 0x10, 0x2c, 0x02, 0xdd, 0x73, 0x37, 0xb8, 0x66, 0x02, 0xb7, 0xee, 0x95, 0x30, 0x9d, 0xf9, 0xf1, + 0x63, 0xb4, 0x73, 0xce, 0x84, 0x3c, 0xcd, 0x06, 0x4c, 0xc2, 0x03, 0x1e, 0x83, 0xdd, 0xd4, 0x53, + 0x7a, 0xf7, 0xe5, 0xf6, 0xac, 0x18, 0xc1, 0x0d, 0xa3, 0xbe, 0x73, 0xf7, 0x7f, 0x4a, 0x74, 0x49, + 0xd9, 0x9b, 0x58, 0xe8, 0xe6, 0xda, 0xf9, 0xdc, 0xe5, 0x42, 0xe2, 0xef, 0x57, 0xee, 0x8d, 0xbc, + 0x5c, 0x1d, 0x8a, 0xad, 0xaf, 0xed, 0xba, 0xa9, 0xa5, 0x33, 0x43, 0x16, 0x6e, 0xed, 0x07, 0xd4, + 0xe2, 0x12, 0x62, 0x61, 0xd7, 0xf7, 0x1a, 0xfb, 0xbd, 0xdb, 0x47, 0xaf, 0x7e, 0x08, 0xc1, 0x6b, + 0x26, 0x4d, 0xeb, 0x44, 0x09, 0xd2, 0x52, 0xd7, 0xfb, 0xa3, 0xf1, 0x82, 0x06, 0xd5, 0x49, 0xe2, + 0xb7, 0xd0, 0x56, 0x5e, 0x9a, 0xba, 0xbf, 0xed, 0xa0, 0xa7, 0xb6, 0x62, 0x22, 0xe8, 0xcc, 0x87, + 0xdf, 0x47, 0x9d, 0x42, 0x40, 0x9e, 0xb0, 0x18, 0xcc, 0xaa, 0xe7, 0x7d, 0x9d, 0x1a, 0x9c, 0xce, + 0x23, 0xf0, 0x4d, 0xd4, 0x28, 0xf8, 0xc0, 0xac, 0xba, 0x67, 0x02, 0x1b, 0xa7, 0x27, 0x5f, 0x50, + 0x85, 0x63, 0x0f, 0xb5, 0xa3, 0x3c, 0x2d, 0x32, 0x61, 0x37, 0xf7, 0x1a, 0xfb, 0xdd, 0x00, 0xa9, + 0x8b, 0xf9, 0x52, 0x23, 0xd4, 0x78, 0x30, 0x41, 0xed, 0x42, 0xdd, 0x83, 0xb0, 0x5b, 0x3a, 0xe6, + 0x86, 0x8a, 0x39, 0xd5, 0xc8, 0xf3, 0x89, 0xdb, 0xf9, 0x1a, 0xc6, 0xda, 0xa0, 0x26, 0x0a, 0x27, + 0xa8, 0x05, 0x3f, 0xcb, 0x9c, 0xd9, 0x6d, 0x3d, 0xca, 0x93, 0xab, 0xbd, 0x5b, 0x72, 0x47, 0x69, + 0xdd, 0x49, 0x64, 0x3e, 0xae, 0x26, 0xab, 0x31, 0x5a, 0xa6, 0xd9, 0x05, 0x84, 0xaa, 0x18, 0x7c, + 0x1d, 0x35, 0x86, 0x30, 0x2e, 0x1f, 0x10, 0x55, 0x9f, 0xf8, 0x73, 0xd4, 0x1a, 0xb1, 0xf3, 0x02, + 0xcc, 0x7f, 0xe4, 0xbd, 0x8d, 0xf5, 0x68, 0xb5, 0x6f, 0x15, 0x85, 0x96, 0xcc, 0xa3, 0xfa, 0xa1, + 0xe5, 0xfd, 0x65, 0x21, 0x77, 0xc3, 0xeb, 0xc7, 0x3f, 0x21, 0x14, 0xce, 0xde, 0xa6, 0xb0, 0x2d, + 0xdd, 0xff, 0xf1, 0xab, 0xf7, 0x3f, 0x7f, 0xe7, 0xd5, 0x8f, 0x72, 0x0e, 0x09, 0xba, 0x90, 0x0a, + 0x1f, 0xa0, 0xde, 0x82, 0xb4, 0xee, 0x74, 0x3b, 0xb8, 0x36, 0x9d, 0xb8, 0xbd, 0x05, 0x71, 0xba, + 0x18, 0xe3, 0x7d, 0x62, 0xc6, 0xa6, 0x1b, 0xc5, 0xee, 0xec, 0xfe, 0x2d, 0xbd, 0xe3, 0xee, 0xf2, + 0xfd, 0x1e, 0x75, 0x7e, 0xfd, 0xdd, 0xad, 0x3d, 0xf9, 0x7b, 0xaf, 0x16, 0xdc, 0xba, 0xb8, 0x74, + 0x6a, 0x4f, 0x2f, 0x9d, 0xda, 0xb3, 0x4b, 0xa7, 0xf6, 0x64, 0xea, 0x58, 0x17, 0x53, 0xc7, 0x7a, + 0x3a, 0x75, 0xac, 0x67, 0x53, 0xc7, 0xfa, 0x67, 0xea, 0x58, 0xbf, 0xfc, 0xeb, 0xd4, 0xbe, 0xdb, + 0x32, 0xdd, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x73, 0x7d, 0xca, 0x2a, 0xb4, 0x07, 0x00, 0x00, } diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto similarity index 93% rename from vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.proto rename to 
vendor/k8s.io/api/certificates/v1beta1/generated.proto index 215dc39a4..1d79767ed 100644 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,14 +19,12 @@ limitations under the License. syntax = 'proto2'; -package k8s.io.kubernetes.pkg.apis.certificates.v1beta1; +package k8s.io.api.certificates.v1beta1; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; @@ -80,7 +78,7 @@ message CertificateSigningRequestSpec { // valid for. // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 - repeated string keyUsage = 5; + repeated string usages = 5; // Information about the requesting user. // See user.Info interface for details. diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/register.go b/vendor/k8s.io/api/certificates/v1beta1/register.go similarity index 80% rename from vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/register.go rename to vendor/k8s.io/api/certificates/v1beta1/register.go index 6574de971..b4f3af9b9 100644 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/register.go +++ b/vendor/k8s.io/api/certificates/v1beta1/register.go @@ -39,11 +39,14 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addConversionFuncs, addDefaultingFuncs) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &CertificateSigningRequest{}, @@ -54,6 +57,3 @@ func addKnownTypes(scheme *runtime.Scheme) error { metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } - -func (obj *CertificateSigningRequest) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *CertificateSigningRequestList) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go similarity index 95% rename from vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.go rename to vendor/k8s.io/api/certificates/v1beta1/types.go index a9149ba8d..bb9e82d30 100644 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types.go @@ -22,8 +22,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// +genclient=true -// +nonNamespaced=true +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Describes a certificate signing request type CertificateSigningRequest struct { @@ -51,7 +52,7 @@ type CertificateSigningRequestSpec struct { // valid for. // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 - Usages []KeyUsage `json:"usages,omitempty" protobuf:"bytes,5,opt,name=keyUsage"` + Usages []KeyUsage `json:"usages,omitempty" protobuf:"bytes,5,opt,name=usages"` // Information about the requesting user. // See user.Info interface for details. @@ -112,6 +113,8 @@ type CertificateSigningRequestCondition struct { LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,4,opt,name=lastUpdateTime"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + type CertificateSigningRequestList struct { metav1.TypeMeta `json:",inline"` // +optional @@ -128,7 +131,7 @@ type KeyUsage string const ( UsageSigning KeyUsage = "signing" UsageDigitalSignature KeyUsage = "digital signature" - UsageContentCommittment KeyUsage = "content committment" + UsageContentCommittment KeyUsage = "content commitment" UsageKeyEncipherment KeyUsage = "key encipherment" UsageKeyAgreement KeyUsage = "key agreement" UsageDataEncipherment KeyUsage = "data encipherment" diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go similarity index 97% rename from vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go index 4fd91df06..f6a7e16ac 100644 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ package v1beta1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_CertificateSigningRequest = map[string]string{ "": "Describes a certificate signing request", "spec": "The certificate request itself and any additional information.", diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..ffd24c30f --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,194 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequest) DeepCopyInto(out *CertificateSigningRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequest. +func (in *CertificateSigningRequest) DeepCopy() *CertificateSigningRequest { + if in == nil { + return nil + } + out := new(CertificateSigningRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateSigningRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequestCondition) DeepCopyInto(out *CertificateSigningRequestCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestCondition. +func (in *CertificateSigningRequestCondition) DeepCopy() *CertificateSigningRequestCondition { + if in == nil { + return nil + } + out := new(CertificateSigningRequestCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequestList) DeepCopyInto(out *CertificateSigningRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CertificateSigningRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestList. 
+func (in *CertificateSigningRequestList) DeepCopy() *CertificateSigningRequestList { + if in == nil { + return nil + } + out := new(CertificateSigningRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateSigningRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequestSpec) DeepCopyInto(out *CertificateSigningRequestSpec) { + *out = *in + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Usages != nil { + in, out := &in.Usages, &out.Usages + *out = make([]KeyUsage, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = make([]string, len(val)) + copy((*out)[key], val) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestSpec. +func (in *CertificateSigningRequestSpec) DeepCopy() *CertificateSigningRequestSpec { + if in == nil { + return nil + } + out := new(CertificateSigningRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequestStatus) DeepCopyInto(out *CertificateSigningRequestStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CertificateSigningRequestCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestStatus. +func (in *CertificateSigningRequestStatus) DeepCopy() *CertificateSigningRequestStatus { + if in == nil { + return nil + } + out := new(CertificateSigningRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ExtraValue) DeepCopyInto(out *ExtraValue) { + { + in := &in + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. +func (in ExtraValue) DeepCopy() ExtraValue { + if in == nil { + return nil + } + out := new(ExtraValue) + in.DeepCopyInto(out) + return *out +} diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go new file mode 100644 index 000000000..16a0cfced --- /dev/null +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -0,0 +1,81 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file should be consistent with pkg/api/annotation_key_constants.go. + +package v1 + +const ( + // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy + // webhook backend fails. + ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" + + // PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation + PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude" + + // MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods + MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" + + // TolerationsAnnotationKey represents the key of tolerations data (json serialized) + // in the Annotations of a Pod. + TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" + + // TaintsAnnotationKey represents the key of taints data (json serialized) + // in the Annotations of a Node. + TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" + + // SeccompPodAnnotationKey represents the key of a seccomp profile applied + // to all containers of a pod. + SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" + + // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied + // to one container of a pod. + SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" + + // SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime. + SeccompProfileRuntimeDefault string = "runtime/default" + + // DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker. + // This is now deprecated and should be replaced by SeccompProfileRuntimeDefault. + DeprecatedSeccompProfileDockerDefault string = "docker/default" + + // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) + // in the Annotations of a Node. + PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" + + // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache + // an object (e.g. secret, config map) before fetching it again from apiserver. + // This annotation can be attached to node. + ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + + // annotation key prefix used to identify non-convertible json paths. + NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" + + kubectlPrefix = "kubectl.kubernetes.io/" + + // LastAppliedConfigAnnotation is the annotation used to store the previous + // configuration of a resource for use in a three way diff by UpdateApplyAnnotation. + LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" + + // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers + // + // It should be a comma-separated list of CIDRs, e.g. 
`0.0.0.0/0` to + // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow + // access only from the CIDRs currently allocated to MIT & the USPS. + // + // Not all cloud providers support this annotation, though AWS & GCE do. + AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" +) diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/defaults.go b/vendor/k8s.io/api/core/v1/doc.go similarity index 78% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/defaults.go rename to vendor/k8s.io/api/core/v1/doc.go index cb49b06ac..96994c624 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/defaults.go +++ b/vendor/k8s.io/api/core/v1/doc.go @@ -14,12 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return scheme.AddDefaultingFuncs() -} +// Package v1 is the v1 version of the core API. +package v1 // import "k8s.io/api/core/v1" diff --git a/vendor/k8s.io/client-go/pkg/api/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go similarity index 61% rename from vendor/k8s.io/client-go/pkg/api/v1/generated.pb.go rename to vendor/k8s.io/api/core/v1/generated.pb.go index 20f632dc9..48b5a3c5b 100644 --- a/vendor/k8s.io/client-go/pkg/api/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,14 +15,14 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/api/v1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto // DO NOT EDIT! /* Package v1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/api/v1/generated.proto + k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto It has these top-level messages: AWSElasticBlockStoreVolumeSource @@ -30,11 +30,16 @@ limitations under the License. AttachedVolume AvoidPods AzureDiskVolumeSource + AzureFilePersistentVolumeSource AzureFileVolumeSource Binding + CSIPersistentVolumeSource Capabilities + CephFSPersistentVolumeSource CephFSVolumeSource + CinderPersistentVolumeSource CinderVolumeSource + ClientIPConfig ComponentCondition ComponentStatus ComponentStatusList @@ -42,6 +47,7 @@ limitations under the License. ConfigMapEnvSource ConfigMapKeySelector ConfigMapList + ConfigMapNodeConfigSource ConfigMapProjection ConfigMapVolumeSource Container @@ -53,7 +59,6 @@ limitations under the License. ContainerStateWaiting ContainerStatus DaemonEndpoint - DeleteOptions DownwardAPIProjection DownwardAPIVolumeFile DownwardAPIVolumeSource @@ -68,9 +73,11 @@ limitations under the License. EnvVarSource Event EventList + EventSeries EventSource ExecAction FCVolumeSource + FlexPersistentVolumeSource FlexVolumeSource FlockerVolumeSource GCEPersistentDiskVolumeSource @@ -79,7 +86,9 @@ limitations under the License. HTTPGetAction HTTPHeader Handler + HostAlias HostPathVolumeSource + ISCSIPersistentVolumeSource ISCSIVolumeSource KeyToPath Lifecycle @@ -88,10 +97,10 @@ limitations under the License. 
LimitRangeList LimitRangeSpec List - ListOptions LoadBalancerIngress LoadBalancerStatus LocalObjectReference + LocalVolumeSource NFSVolumeSource Namespace NamespaceList @@ -101,6 +110,8 @@ limitations under the License. NodeAddress NodeAffinity NodeCondition + NodeConfigSource + NodeConfigStatus NodeDaemonEndpoints NodeList NodeProxyOptions @@ -112,10 +123,10 @@ limitations under the License. NodeStatus NodeSystemInfo ObjectFieldSelector - ObjectMeta ObjectReference PersistentVolume PersistentVolumeClaim + PersistentVolumeClaimCondition PersistentVolumeClaimList PersistentVolumeClaimSpec PersistentVolumeClaimStatus @@ -131,11 +142,14 @@ limitations under the License. PodAntiAffinity PodAttachOptions PodCondition + PodDNSConfig + PodDNSConfigOption PodExecOptions PodList PodLogOptions PodPortForwardOptions PodProxyOptions + PodReadinessGate PodSecurityContext PodSignature PodSpec @@ -151,6 +165,7 @@ limitations under the License. Probe ProjectedVolumeSource QuobyteVolumeSource + RBDPersistentVolumeSource RBDVolumeSource RangeAllocation ReplicationController @@ -165,29 +180,41 @@ limitations under the License. ResourceQuotaStatus ResourceRequirements SELinuxOptions + ScaleIOPersistentVolumeSource ScaleIOVolumeSource + ScopeSelector + ScopedResourceSelectorRequirement Secret SecretEnvSource SecretKeySelector SecretList SecretProjection + SecretReference SecretVolumeSource SecurityContext SerializedReference Service ServiceAccount ServiceAccountList + ServiceAccountTokenProjection ServiceList ServicePort ServiceProxyOptions ServiceSpec ServiceStatus + SessionAffinityConfig + StorageOSPersistentVolumeSource + StorageOSVolumeSource Sysctl TCPSocketAction Taint Toleration + TopologySelectorLabelRequirement + TopologySelectorTerm Volume + VolumeDevice VolumeMount + VolumeNodeAffinity VolumeProjection VolumeSource VsphereVirtualDiskVolumeSource @@ -205,9 +232,10 @@ import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + import strings "strings" import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" import io "io" @@ -218,7 +246,9 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} @@ -242,932 +272,1099 @@ func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolum func (*AzureDiskVolumeSource) ProtoMessage() {} func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} } +func (*AzureFilePersistentVolumeSource) ProtoMessage() {} +func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{5} +} + func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } func (*AzureFileVolumeSource) ProtoMessage() {} -func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *Binding) Reset() { *m = Binding{} } func (*Binding) ProtoMessage() {} -func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } +func (*CSIPersistentVolumeSource) ProtoMessage() {} +func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{8} +} func (m *Capabilities) Reset() { *m = Capabilities{} } func (*Capabilities) ProtoMessage() {} -func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } +func (*CephFSPersistentVolumeSource) ProtoMessage() {} +func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{10} +} func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } func (*CephFSVolumeSource) ProtoMessage() {} -func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } +func (*CinderPersistentVolumeSource) ProtoMessage() {} +func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{12} +} func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } func (*CinderVolumeSource) ProtoMessage() {} -func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } +func (*ClientIPConfig) ProtoMessage() {} +func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } func (*ComponentCondition) ProtoMessage() {} -func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } 
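Aside (not part of the patch): the zz_generated.deepcopy.go hunk added earlier in this upgrade gives every certificates/v1beta1 type DeepCopy/DeepCopyInto helpers that return fully independent objects. A minimal sketch of how callers typically rely on that, assuming the vendored k8s.io/api/certificates/v1beta1 package resolves as in this patch (the variable names and the stand-alone main are illustrative only):

package main

import (
	"fmt"

	certsv1beta1 "k8s.io/api/certificates/v1beta1"
)

func main() {
	orig := &certsv1beta1.CertificateSigningRequest{
		Spec: certsv1beta1.CertificateSigningRequestSpec{
			Groups: []string{"system:authenticated"},
		},
	}

	// DeepCopy hands back an independent object; the Groups slice in the
	// copy has its own backing array, so mutating it leaves orig untouched.
	clone := orig.DeepCopy()
	clone.Spec.Groups[0] = "system:masters"

	fmt.Println(orig.Spec.Groups[0], clone.Spec.Groups[0]) // system:authenticated system:masters
}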
func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } func (*ComponentStatus) ProtoMessage() {} -func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } func (*ComponentStatusList) ProtoMessage() {} -func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *ConfigMap) Reset() { *m = ConfigMap{} } func (*ConfigMap) ProtoMessage() {} -func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } func (*ConfigMapEnvSource) ProtoMessage() {} -func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (*ConfigMapKeySelector) ProtoMessage() {} -func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (*ConfigMapList) ProtoMessage() {} -func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } + +func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } +func (*ConfigMapNodeConfigSource) ProtoMessage() {} +func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{22} +} func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } func (*ConfigMapProjection) ProtoMessage() {} -func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (*ConfigMapVolumeSource) ProtoMessage() {} -func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} -func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} -func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*ContainerPort) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{27} } func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} -func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} -func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{24} + return fileDescriptorGenerated, []int{30} } func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} -func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} -func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} -func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } - -func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } -func (*DeleteOptions) ProtoMessage() {} -func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} -func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} -func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{31} + return fileDescriptorGenerated, []int{36} } func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} -func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} -func (*EndpointAddress) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{33} } +func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} -func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} -func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} -func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} -func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (*EnvFromSource) ProtoMessage() {} -func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} -func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} -func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } + +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} -func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} -func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} -func (*FCVolumeSource) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{45} } +func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } + +func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } +func (*FlexPersistentVolumeSource) ProtoMessage() {} +func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{52} +} func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} -func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} -func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{48} + return fileDescriptorGenerated, []int{55} } func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} -func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } func (m *Handler) Reset() { *m = Handler{} } func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } + +func (m *HostAlias) Reset() { *m = HostAlias{} } +func (*HostAlias) ProtoMessage() {} +func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } + +func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } +func (*ISCSIPersistentVolumeSource) ProtoMessage() {} +func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + 
return fileDescriptorGenerated, []int{63} +} func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } +func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } +func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } +func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } +func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } - -func (m *ListOptions) Reset() { *m = ListOptions{} } -func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } +func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } + +func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } 
+func (*LocalVolumeSource) ProtoMessage() {} +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } +func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } +func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } +func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } +func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} -func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } +func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } + +func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } +func (*NodeConfigSource) ProtoMessage() {} +func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } + +func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } +func (*NodeConfigStatus) ProtoMessage() {} +func (*NodeConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} -func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} 
-func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } +func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } +func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } +func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{81} + return fileDescriptorGenerated, []int{92} } func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } - -func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } -func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } +func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } +func (*PersistentVolume) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } + +func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } +func (*PersistentVolumeClaimCondition) ProtoMessage() {} +func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{101} +} func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{91} + return fileDescriptorGenerated, []int{102} } func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{92} + return fileDescriptorGenerated, []int{103} } func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{93} + return fileDescriptorGenerated, []int{104} } func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{94} + return fileDescriptorGenerated, []int{105} } func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} -func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{106} } -func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } -func (*PersistentVolumeSource) ProtoMessage() {} -func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } +func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } +func (*PersistentVolumeSource) ProtoMessage() {} +func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{107} +} func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} -func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } -func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } -func (*PersistentVolumeStatus) ProtoMessage() {} -func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } +func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } +func (*PersistentVolumeStatus) ProtoMessage() {} +func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{109} +} func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = 
PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{99} + return fileDescriptorGenerated, []int{110} } func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} -func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } +func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} -func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{101} } +func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} -func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{102} } +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} -func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{103} } +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} -func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{104} } +func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} -func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{105} } +func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } + +func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } +func (*PodDNSConfig) ProtoMessage() {} +func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } + +func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } +func (*PodDNSConfigOption) ProtoMessage() {} +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} -func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{106} } +func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} -func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } +func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} -func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } +func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} -func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{109} } +func (*PodPortForwardOptions) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{122} } func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} -func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } +func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } + +func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } +func (*PodReadinessGate) ProtoMessage() {} +func (*PodReadinessGate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} -func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } +func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} -func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } +func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} -func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } +func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} -func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } +func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} -func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } +func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} -func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } +func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} -func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } +func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} -func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} -func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } 
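Aside (not part of the patch): circling back to the annotation_key_constants.go file added above, those exported keys are meant to be used verbatim when setting object annotations. A minimal sketch under the assumption that the vendored k8s.io/api/core/v1 package resolves as in this patch (the Service literal and CIDR values are illustrative only):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	svc := &corev1.Service{}
	svc.Annotations = map[string]string{
		// Comma-separated CIDRs, exactly as the constant's doc comment describes.
		corev1.AnnotationLoadBalancerSourceRangesKey: "10.0.0.0/8,192.168.0.0/16",
	}
	fmt.Println(svc.Annotations[corev1.AnnotationLoadBalancerSourceRangesKey])
}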
func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} -func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{122} + return fileDescriptorGenerated, []int{136} } func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} -func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } +func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} -func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} -func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } + +func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } +func (*RBDPersistentVolumeSource) ProtoMessage() {} +func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{140} +} func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} -func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} -func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } +func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} -func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } +func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{129} + return fileDescriptorGenerated, []int{144} } func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{130} + return fileDescriptorGenerated, []int{145} } func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, 
[]int{131} + return fileDescriptorGenerated, []int{146} } func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{132} + return fileDescriptorGenerated, []int{147} } func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} -func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} -func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } +func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} -func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} -func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} -func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} -func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} -func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } +func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } + +func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } +func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} +func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{155} +} func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} -func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } + +func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } +func (*ScopeSelector) ProtoMessage() {} +func (*ScopeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } + +func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } +func (*ScopedResourceSelectorRequirement) ProtoMessage() {} +func 
(*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{158} +} func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} -func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} -func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } +func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} -func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } +func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} -func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } +func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} -func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{145} } +func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } + +func (m *SecretReference) Reset() { *m = SecretReference{} } +func (*SecretReference) ProtoMessage() {} +func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} -func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{146} } +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} -func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} } +func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} -func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } +func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} -func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} -func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } +func (*ServiceAccountList) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } + +func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } +func (*ServiceAccountTokenProjection) ProtoMessage() {} +func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{171} +} func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{173} } func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } + +func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } +func (*SessionAffinityConfig) ProtoMessage() {} +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } + +func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } +func (*StorageOSPersistentVolumeSource) ProtoMessage() {} +func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{178} +} + +func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } +func (*StorageOSVolumeSource) ProtoMessage() {} +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } +func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{160} } +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } + +func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } +func (*TopologySelectorLabelRequirement) ProtoMessage() {} +func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{184} +} + +func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } +func (*TopologySelectorTerm) ProtoMessage() {} +func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{185} } func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{186} } + +func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } +func (*VolumeDevice) ProtoMessage() {} +func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{187} } func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{188} } + +func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } +func (*VolumeNodeAffinity) ProtoMessage() {} +func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{189} } func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} -func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } +func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{190} } func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } +func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{191} } func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{165} + return fileDescriptorGenerated, []int{192} } func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{166} + return fileDescriptorGenerated, []int{193} } func init() { - proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.AWSElasticBlockStoreVolumeSource") - proto.RegisterType((*Affinity)(nil), "k8s.io.client-go.pkg.api.v1.Affinity") - proto.RegisterType((*AttachedVolume)(nil), "k8s.io.client-go.pkg.api.v1.AttachedVolume") - proto.RegisterType((*AvoidPods)(nil), "k8s.io.client-go.pkg.api.v1.AvoidPods") - proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.AzureDiskVolumeSource") - proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.AzureFileVolumeSource") - proto.RegisterType((*Binding)(nil), "k8s.io.client-go.pkg.api.v1.Binding") - proto.RegisterType((*Capabilities)(nil), "k8s.io.client-go.pkg.api.v1.Capabilities") - 
proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.CephFSVolumeSource") - proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.CinderVolumeSource") - proto.RegisterType((*ComponentCondition)(nil), "k8s.io.client-go.pkg.api.v1.ComponentCondition") - proto.RegisterType((*ComponentStatus)(nil), "k8s.io.client-go.pkg.api.v1.ComponentStatus") - proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.client-go.pkg.api.v1.ComponentStatusList") - proto.RegisterType((*ConfigMap)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMap") - proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapEnvSource") - proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapKeySelector") - proto.RegisterType((*ConfigMapList)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapList") - proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapProjection") - proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapVolumeSource") - proto.RegisterType((*Container)(nil), "k8s.io.client-go.pkg.api.v1.Container") - proto.RegisterType((*ContainerImage)(nil), "k8s.io.client-go.pkg.api.v1.ContainerImage") - proto.RegisterType((*ContainerPort)(nil), "k8s.io.client-go.pkg.api.v1.ContainerPort") - proto.RegisterType((*ContainerState)(nil), "k8s.io.client-go.pkg.api.v1.ContainerState") - proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.client-go.pkg.api.v1.ContainerStateRunning") - proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.client-go.pkg.api.v1.ContainerStateTerminated") - proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.client-go.pkg.api.v1.ContainerStateWaiting") - proto.RegisterType((*ContainerStatus)(nil), "k8s.io.client-go.pkg.api.v1.ContainerStatus") - proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.client-go.pkg.api.v1.DaemonEndpoint") - proto.RegisterType((*DeleteOptions)(nil), "k8s.io.client-go.pkg.api.v1.DeleteOptions") - proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.client-go.pkg.api.v1.DownwardAPIProjection") - proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.client-go.pkg.api.v1.DownwardAPIVolumeFile") - proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.DownwardAPIVolumeSource") - proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.EmptyDirVolumeSource") - proto.RegisterType((*EndpointAddress)(nil), "k8s.io.client-go.pkg.api.v1.EndpointAddress") - proto.RegisterType((*EndpointPort)(nil), "k8s.io.client-go.pkg.api.v1.EndpointPort") - proto.RegisterType((*EndpointSubset)(nil), "k8s.io.client-go.pkg.api.v1.EndpointSubset") - proto.RegisterType((*Endpoints)(nil), "k8s.io.client-go.pkg.api.v1.Endpoints") - proto.RegisterType((*EndpointsList)(nil), "k8s.io.client-go.pkg.api.v1.EndpointsList") - proto.RegisterType((*EnvFromSource)(nil), "k8s.io.client-go.pkg.api.v1.EnvFromSource") - proto.RegisterType((*EnvVar)(nil), "k8s.io.client-go.pkg.api.v1.EnvVar") - proto.RegisterType((*EnvVarSource)(nil), "k8s.io.client-go.pkg.api.v1.EnvVarSource") - proto.RegisterType((*Event)(nil), "k8s.io.client-go.pkg.api.v1.Event") - proto.RegisterType((*EventList)(nil), "k8s.io.client-go.pkg.api.v1.EventList") - proto.RegisterType((*EventSource)(nil), "k8s.io.client-go.pkg.api.v1.EventSource") - proto.RegisterType((*ExecAction)(nil), "k8s.io.client-go.pkg.api.v1.ExecAction") - proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.FCVolumeSource") - 
proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.FlexVolumeSource") - proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.FlockerVolumeSource") - proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.GCEPersistentDiskVolumeSource") - proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.GitRepoVolumeSource") - proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.GlusterfsVolumeSource") - proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.client-go.pkg.api.v1.HTTPGetAction") - proto.RegisterType((*HTTPHeader)(nil), "k8s.io.client-go.pkg.api.v1.HTTPHeader") - proto.RegisterType((*Handler)(nil), "k8s.io.client-go.pkg.api.v1.Handler") - proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.HostPathVolumeSource") - proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ISCSIVolumeSource") - proto.RegisterType((*KeyToPath)(nil), "k8s.io.client-go.pkg.api.v1.KeyToPath") - proto.RegisterType((*Lifecycle)(nil), "k8s.io.client-go.pkg.api.v1.Lifecycle") - proto.RegisterType((*LimitRange)(nil), "k8s.io.client-go.pkg.api.v1.LimitRange") - proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.client-go.pkg.api.v1.LimitRangeItem") - proto.RegisterType((*LimitRangeList)(nil), "k8s.io.client-go.pkg.api.v1.LimitRangeList") - proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.client-go.pkg.api.v1.LimitRangeSpec") - proto.RegisterType((*List)(nil), "k8s.io.client-go.pkg.api.v1.List") - proto.RegisterType((*ListOptions)(nil), "k8s.io.client-go.pkg.api.v1.ListOptions") - proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.client-go.pkg.api.v1.LoadBalancerIngress") - proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.client-go.pkg.api.v1.LoadBalancerStatus") - proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.client-go.pkg.api.v1.LocalObjectReference") - proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.NFSVolumeSource") - proto.RegisterType((*Namespace)(nil), "k8s.io.client-go.pkg.api.v1.Namespace") - proto.RegisterType((*NamespaceList)(nil), "k8s.io.client-go.pkg.api.v1.NamespaceList") - proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.client-go.pkg.api.v1.NamespaceSpec") - proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.client-go.pkg.api.v1.NamespaceStatus") - proto.RegisterType((*Node)(nil), "k8s.io.client-go.pkg.api.v1.Node") - proto.RegisterType((*NodeAddress)(nil), "k8s.io.client-go.pkg.api.v1.NodeAddress") - proto.RegisterType((*NodeAffinity)(nil), "k8s.io.client-go.pkg.api.v1.NodeAffinity") - proto.RegisterType((*NodeCondition)(nil), "k8s.io.client-go.pkg.api.v1.NodeCondition") - proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.client-go.pkg.api.v1.NodeDaemonEndpoints") - proto.RegisterType((*NodeList)(nil), "k8s.io.client-go.pkg.api.v1.NodeList") - proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.client-go.pkg.api.v1.NodeProxyOptions") - proto.RegisterType((*NodeResources)(nil), "k8s.io.client-go.pkg.api.v1.NodeResources") - proto.RegisterType((*NodeSelector)(nil), "k8s.io.client-go.pkg.api.v1.NodeSelector") - proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.client-go.pkg.api.v1.NodeSelectorRequirement") - proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.client-go.pkg.api.v1.NodeSelectorTerm") - proto.RegisterType((*NodeSpec)(nil), "k8s.io.client-go.pkg.api.v1.NodeSpec") - proto.RegisterType((*NodeStatus)(nil), "k8s.io.client-go.pkg.api.v1.NodeStatus") - 
proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.client-go.pkg.api.v1.NodeSystemInfo") - proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.client-go.pkg.api.v1.ObjectFieldSelector") - proto.RegisterType((*ObjectMeta)(nil), "k8s.io.client-go.pkg.api.v1.ObjectMeta") - proto.RegisterType((*ObjectReference)(nil), "k8s.io.client-go.pkg.api.v1.ObjectReference") - proto.RegisterType((*PersistentVolume)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolume") - proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaim") - proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimList") - proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimSpec") - proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimStatus") - proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimVolumeSource") - proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeList") - proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeSource") - proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeSpec") - proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeStatus") - proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PhotonPersistentDiskVolumeSource") - proto.RegisterType((*Pod)(nil), "k8s.io.client-go.pkg.api.v1.Pod") - proto.RegisterType((*PodAffinity)(nil), "k8s.io.client-go.pkg.api.v1.PodAffinity") - proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.client-go.pkg.api.v1.PodAffinityTerm") - proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.client-go.pkg.api.v1.PodAntiAffinity") - proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodAttachOptions") - proto.RegisterType((*PodCondition)(nil), "k8s.io.client-go.pkg.api.v1.PodCondition") - proto.RegisterType((*PodExecOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodExecOptions") - proto.RegisterType((*PodList)(nil), "k8s.io.client-go.pkg.api.v1.PodList") - proto.RegisterType((*PodLogOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodLogOptions") - proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodPortForwardOptions") - proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodProxyOptions") - proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.client-go.pkg.api.v1.PodSecurityContext") - proto.RegisterType((*PodSignature)(nil), "k8s.io.client-go.pkg.api.v1.PodSignature") - proto.RegisterType((*PodSpec)(nil), "k8s.io.client-go.pkg.api.v1.PodSpec") - proto.RegisterType((*PodStatus)(nil), "k8s.io.client-go.pkg.api.v1.PodStatus") - proto.RegisterType((*PodStatusResult)(nil), "k8s.io.client-go.pkg.api.v1.PodStatusResult") - proto.RegisterType((*PodTemplate)(nil), "k8s.io.client-go.pkg.api.v1.PodTemplate") - proto.RegisterType((*PodTemplateList)(nil), "k8s.io.client-go.pkg.api.v1.PodTemplateList") - proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.client-go.pkg.api.v1.PodTemplateSpec") - proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PortworxVolumeSource") - proto.RegisterType((*Preconditions)(nil), "k8s.io.client-go.pkg.api.v1.Preconditions") - proto.RegisterType((*PreferAvoidPodsEntry)(nil), 
"k8s.io.client-go.pkg.api.v1.PreferAvoidPodsEntry") - proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.client-go.pkg.api.v1.PreferredSchedulingTerm") - proto.RegisterType((*Probe)(nil), "k8s.io.client-go.pkg.api.v1.Probe") - proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ProjectedVolumeSource") - proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.QuobyteVolumeSource") - proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.RBDVolumeSource") - proto.RegisterType((*RangeAllocation)(nil), "k8s.io.client-go.pkg.api.v1.RangeAllocation") - proto.RegisterType((*ReplicationController)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationController") - proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerCondition") - proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerList") - proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerSpec") - proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerStatus") - proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.client-go.pkg.api.v1.ResourceFieldSelector") - proto.RegisterType((*ResourceQuota)(nil), "k8s.io.client-go.pkg.api.v1.ResourceQuota") - proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.client-go.pkg.api.v1.ResourceQuotaList") - proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.client-go.pkg.api.v1.ResourceQuotaSpec") - proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.client-go.pkg.api.v1.ResourceQuotaStatus") - proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.client-go.pkg.api.v1.ResourceRequirements") - proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.client-go.pkg.api.v1.SELinuxOptions") - proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ScaleIOVolumeSource") - proto.RegisterType((*Secret)(nil), "k8s.io.client-go.pkg.api.v1.Secret") - proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.client-go.pkg.api.v1.SecretEnvSource") - proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.client-go.pkg.api.v1.SecretKeySelector") - proto.RegisterType((*SecretList)(nil), "k8s.io.client-go.pkg.api.v1.SecretList") - proto.RegisterType((*SecretProjection)(nil), "k8s.io.client-go.pkg.api.v1.SecretProjection") - proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.SecretVolumeSource") - proto.RegisterType((*SecurityContext)(nil), "k8s.io.client-go.pkg.api.v1.SecurityContext") - proto.RegisterType((*SerializedReference)(nil), "k8s.io.client-go.pkg.api.v1.SerializedReference") - proto.RegisterType((*Service)(nil), "k8s.io.client-go.pkg.api.v1.Service") - proto.RegisterType((*ServiceAccount)(nil), "k8s.io.client-go.pkg.api.v1.ServiceAccount") - proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.client-go.pkg.api.v1.ServiceAccountList") - proto.RegisterType((*ServiceList)(nil), "k8s.io.client-go.pkg.api.v1.ServiceList") - proto.RegisterType((*ServicePort)(nil), "k8s.io.client-go.pkg.api.v1.ServicePort") - proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.client-go.pkg.api.v1.ServiceProxyOptions") - proto.RegisterType((*ServiceSpec)(nil), "k8s.io.client-go.pkg.api.v1.ServiceSpec") - proto.RegisterType((*ServiceStatus)(nil), "k8s.io.client-go.pkg.api.v1.ServiceStatus") - proto.RegisterType((*Sysctl)(nil), "k8s.io.client-go.pkg.api.v1.Sysctl") - proto.RegisterType((*TCPSocketAction)(nil), 
"k8s.io.client-go.pkg.api.v1.TCPSocketAction") - proto.RegisterType((*Taint)(nil), "k8s.io.client-go.pkg.api.v1.Taint") - proto.RegisterType((*Toleration)(nil), "k8s.io.client-go.pkg.api.v1.Toleration") - proto.RegisterType((*Volume)(nil), "k8s.io.client-go.pkg.api.v1.Volume") - proto.RegisterType((*VolumeMount)(nil), "k8s.io.client-go.pkg.api.v1.VolumeMount") - proto.RegisterType((*VolumeProjection)(nil), "k8s.io.client-go.pkg.api.v1.VolumeProjection") - proto.RegisterType((*VolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.VolumeSource") - proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.VsphereVirtualDiskVolumeSource") - proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.client-go.pkg.api.v1.WeightedPodAffinityTerm") + proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource") + proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity") + proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume") + proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods") + proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource") + proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource") + proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") + proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") + proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") + proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") + proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") + proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") + proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource") + proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") + proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") + proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") + proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") + proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList") + proto.RegisterType((*ConfigMap)(nil), "k8s.io.api.core.v1.ConfigMap") + proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.api.core.v1.ConfigMapEnvSource") + proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.api.core.v1.ConfigMapKeySelector") + proto.RegisterType((*ConfigMapList)(nil), "k8s.io.api.core.v1.ConfigMapList") + proto.RegisterType((*ConfigMapNodeConfigSource)(nil), "k8s.io.api.core.v1.ConfigMapNodeConfigSource") + proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection") + proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource") + proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") + proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") + proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") + proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") + proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") + proto.RegisterType((*ContainerStateTerminated)(nil), 
"k8s.io.api.core.v1.ContainerStateTerminated") + proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") + proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") + proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") + proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") + proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") + proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeSource") + proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.api.core.v1.EmptyDirVolumeSource") + proto.RegisterType((*EndpointAddress)(nil), "k8s.io.api.core.v1.EndpointAddress") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.core.v1.EndpointPort") + proto.RegisterType((*EndpointSubset)(nil), "k8s.io.api.core.v1.EndpointSubset") + proto.RegisterType((*Endpoints)(nil), "k8s.io.api.core.v1.Endpoints") + proto.RegisterType((*EndpointsList)(nil), "k8s.io.api.core.v1.EndpointsList") + proto.RegisterType((*EnvFromSource)(nil), "k8s.io.api.core.v1.EnvFromSource") + proto.RegisterType((*EnvVar)(nil), "k8s.io.api.core.v1.EnvVar") + proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") + proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries") + proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource") + proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") + proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") + proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource") + proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource") + proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") + proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") + proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") + proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") + proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") + proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") + proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") + proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") + proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") + proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") + proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") + proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") + proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") + proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange") + proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem") + proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList") + proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec") + proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List") + proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress") + 
proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") + proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference") + proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource") + proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource") + proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace") + proto.RegisterType((*NamespaceList)(nil), "k8s.io.api.core.v1.NamespaceList") + proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.api.core.v1.NamespaceSpec") + proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.api.core.v1.NamespaceStatus") + proto.RegisterType((*Node)(nil), "k8s.io.api.core.v1.Node") + proto.RegisterType((*NodeAddress)(nil), "k8s.io.api.core.v1.NodeAddress") + proto.RegisterType((*NodeAffinity)(nil), "k8s.io.api.core.v1.NodeAffinity") + proto.RegisterType((*NodeCondition)(nil), "k8s.io.api.core.v1.NodeCondition") + proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource") + proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus") + proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints") + proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList") + proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions") + proto.RegisterType((*NodeResources)(nil), "k8s.io.api.core.v1.NodeResources") + proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector") + proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement") + proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm") + proto.RegisterType((*NodeSpec)(nil), "k8s.io.api.core.v1.NodeSpec") + proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus") + proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo") + proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference") + proto.RegisterType((*PersistentVolume)(nil), "k8s.io.api.core.v1.PersistentVolume") + proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.api.core.v1.PersistentVolumeClaim") + proto.RegisterType((*PersistentVolumeClaimCondition)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimCondition") + proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") + proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") + proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") + proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource") + proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.api.core.v1.PersistentVolumeList") + proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeSource") + proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec") + proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeStatus") + proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource") + proto.RegisterType((*Pod)(nil), "k8s.io.api.core.v1.Pod") + proto.RegisterType((*PodAffinity)(nil), "k8s.io.api.core.v1.PodAffinity") + proto.RegisterType((*PodAffinityTerm)(nil), 
"k8s.io.api.core.v1.PodAffinityTerm") + proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") + proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions") + proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") + proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") + proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") + proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") + proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") + proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") + proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") + proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") + proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate") + proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext") + proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature") + proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec") + proto.RegisterType((*PodStatus)(nil), "k8s.io.api.core.v1.PodStatus") + proto.RegisterType((*PodStatusResult)(nil), "k8s.io.api.core.v1.PodStatusResult") + proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate") + proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList") + proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec") + proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource") + proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions") + proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") + proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.api.core.v1.PreferredSchedulingTerm") + proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe") + proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource") + proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource") + proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource") + proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.api.core.v1.RBDVolumeSource") + proto.RegisterType((*RangeAllocation)(nil), "k8s.io.api.core.v1.RangeAllocation") + proto.RegisterType((*ReplicationController)(nil), "k8s.io.api.core.v1.ReplicationController") + proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.api.core.v1.ReplicationControllerCondition") + proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.api.core.v1.ReplicationControllerList") + proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec") + proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") + proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") + proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") + proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") + proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec") + proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus") + proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements") + 
proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") + proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") + proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource") + proto.RegisterType((*ScopeSelector)(nil), "k8s.io.api.core.v1.ScopeSelector") + proto.RegisterType((*ScopedResourceSelectorRequirement)(nil), "k8s.io.api.core.v1.ScopedResourceSelectorRequirement") + proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret") + proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource") + proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.api.core.v1.SecretKeySelector") + proto.RegisterType((*SecretList)(nil), "k8s.io.api.core.v1.SecretList") + proto.RegisterType((*SecretProjection)(nil), "k8s.io.api.core.v1.SecretProjection") + proto.RegisterType((*SecretReference)(nil), "k8s.io.api.core.v1.SecretReference") + proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.api.core.v1.SecretVolumeSource") + proto.RegisterType((*SecurityContext)(nil), "k8s.io.api.core.v1.SecurityContext") + proto.RegisterType((*SerializedReference)(nil), "k8s.io.api.core.v1.SerializedReference") + proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service") + proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount") + proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.api.core.v1.ServiceAccountList") + proto.RegisterType((*ServiceAccountTokenProjection)(nil), "k8s.io.api.core.v1.ServiceAccountTokenProjection") + proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList") + proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort") + proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions") + proto.RegisterType((*ServiceSpec)(nil), "k8s.io.api.core.v1.ServiceSpec") + proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") + proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") + proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") + proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") + proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") + proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.api.core.v1.TCPSocketAction") + proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint") + proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration") + proto.RegisterType((*TopologySelectorLabelRequirement)(nil), "k8s.io.api.core.v1.TopologySelectorLabelRequirement") + proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm") + proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") + proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") + proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") + proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity") + proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") + proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") + proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource") + proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm") } -func (m *AWSElasticBlockStoreVolumeSource) Marshal() (data []byte, err 
error) { +func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.VolumeID))) - i += copy(data[i:], m.VolumeID) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i += copy(dAtA[i:], m.VolumeID) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - data[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x18 i++ - i = encodeVarintGenerated(data, i, uint64(m.Partition)) - data[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) + dAtA[i] = 0x20 i++ if m.ReadOnly { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ return i, nil } -func (m *Affinity) Marshal() (data []byte, err error) { +func (m *Affinity) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Affinity) MarshalTo(data []byte) (int, error) { +func (m *Affinity) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.NodeAffinity != nil { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.NodeAffinity.Size())) - n1, err := m.NodeAffinity.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size())) + n1, err := m.NodeAffinity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 } if m.PodAffinity != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.PodAffinity.Size())) - n2, err := m.PodAffinity.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinity.Size())) + n2, err := m.PodAffinity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n2 } if m.PodAntiAffinity != nil { - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.PodAntiAffinity.Size())) - n3, err := m.PodAntiAffinity.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.PodAntiAffinity.Size())) + n3, err := m.PodAntiAffinity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1176,53 +1373,53 @@ func (m *Affinity) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *AttachedVolume) Marshal() (data []byte, err error) { +func (m *AttachedVolume) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *AttachedVolume) MarshalTo(data []byte) (int, error) { +func (m *AttachedVolume) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += 
copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.DevicePath))) - i += copy(data[i:], m.DevicePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i += copy(dAtA[i:], m.DevicePath) return i, nil } -func (m *AvoidPods) Marshal() (data []byte, err error) { +func (m *AvoidPods) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *AvoidPods) MarshalTo(data []byte) (int, error) { +func (m *AvoidPods) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.PreferAvoidPods) > 0 { for _, msg := range m.PreferAvoidPods { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1232,115 +1429,161 @@ func (m *AvoidPods) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *AzureDiskVolumeSource) Marshal() (data []byte, err error) { +func (m *AzureDiskVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *AzureDiskVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *AzureDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.DiskName))) - i += copy(data[i:], m.DiskName) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskName))) + i += copy(dAtA[i:], m.DiskName) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.DataDiskURI))) - i += copy(data[i:], m.DataDiskURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataDiskURI))) + i += copy(dAtA[i:], m.DataDiskURI) if m.CachingMode != nil { - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(*m.CachingMode))) - i += copy(data[i:], *m.CachingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CachingMode))) + i += copy(dAtA[i:], *m.CachingMode) } if m.FSType != nil { - data[i] = 0x22 + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(len(*m.FSType))) - i += copy(data[i:], *m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i += copy(dAtA[i:], *m.FSType) } if m.ReadOnly != nil { - data[i] = 0x28 + dAtA[i] = 0x28 i++ if *m.ReadOnly { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ } + if m.Kind != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) + i += copy(dAtA[i:], *m.Kind) + } return i, nil } -func (m *AzureFileVolumeSource) Marshal() (data []byte, err error) { +func (m *AzureFilePersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *AzureFileVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *AzureFilePersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 
0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) - i += copy(data[i:], m.SecretName) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i += copy(dAtA[i:], m.SecretName) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ShareName))) - i += copy(data[i:], m.ShareName) - data[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) + i += copy(dAtA[i:], m.ShareName) + dAtA[i] = 0x18 i++ if m.ReadOnly { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 + } + i++ + if m.SecretNamespace != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretNamespace))) + i += copy(dAtA[i:], *m.SecretNamespace) + } + return i, nil +} + +func (m *AzureFileVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AzureFileVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i += copy(dAtA[i:], m.SecretName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) + i += copy(dAtA[i:], m.ShareName) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } i++ return i, nil } -func (m *Binding) Marshal() (data []byte, err error) { +func (m *Binding) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Binding) MarshalTo(data []byte) (int, error) { +func (m *Binding) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n4 - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) - n5, err := m.Target.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) + n5, err := m.Target.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1348,495 +1591,796 @@ func (m *Binding) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *Capabilities) Marshal() (data []byte, err error) { +func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Capabilities) MarshalTo(data []byte) (int, error) { +func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeHandle))) + i += copy(dAtA[i:], m.VolumeHandle) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += 
copy(dAtA[i:], m.FSType) + if len(m.VolumeAttributes) > 0 { + keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) + for k := range m.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + for _, k := range keysForVolumeAttributes { + dAtA[i] = 0x2a + i++ + v := m.VolumeAttributes[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.ControllerPublishSecretRef != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ControllerPublishSecretRef.Size())) + n6, err := m.ControllerPublishSecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.NodeStageSecretRef != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NodeStageSecretRef.Size())) + n7, err := m.NodeStageSecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.NodePublishSecretRef != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NodePublishSecretRef.Size())) + n8, err := m.NodePublishSecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *Capabilities) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Capabilities) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Add) > 0 { for _, s := range m.Add { - data[i] = 0xa + dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if len(m.Drop) > 0 { for _, s := range m.Drop { - data[i] = 0x12 + dAtA[i] = 0x12 i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } return i, nil } -func (m *CephFSVolumeSource) Marshal() (data []byte, err error) { +func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CephFSVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Monitors) > 0 { for _, s := range m.Monitors { - data[i] = 0xa + dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], 
m.Path) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.User))) - i += copy(data[i:], m.User) - data[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SecretFile))) - i += copy(data[i:], m.SecretFile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) + i += copy(dAtA[i:], m.SecretFile) if m.SecretRef != nil { - data[i] = 0x2a + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n6, err := m.SecretRef.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n9, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n9 } - data[i] = 0x30 + dAtA[i] = 0x30 i++ if m.ReadOnly { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ return i, nil } -func (m *CinderVolumeSource) Marshal() (data []byte, err error) { +func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CinderVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.VolumeID))) - i += copy(data[i:], m.VolumeID) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - data[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) + i += copy(dAtA[i:], m.SecretFile) + if m.SecretRef != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n10, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + dAtA[i] = 0x30 i++ if m.ReadOnly { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ return i, nil } -func (m *ComponentCondition) Marshal() (data []byte, err error) { +func (m *CinderPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ComponentCondition) MarshalTo(data []byte) (int, error) { +func (m *CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i += copy(dAtA[i:], m.VolumeID) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i 
+= copy(dAtA[i:], m.FSType) + dAtA[i] = 0x18 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x22 + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Error))) - i += copy(data[i:], m.Error) - return i, nil -} - -func (m *ComponentStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ComponentStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n7 - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ComponentStatusList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ComponentStatusList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n8 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ConfigMap) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ConfigMap) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n9 - if len(m.Data) > 0 { - for k := range m.Data { - data[i] = 0x12 - i++ - v := m.Data[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - return i, nil -} - -func (m *ConfigMapEnvSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ConfigMapEnvSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) - n10, err := m.LocalObjectReference.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n10 - if m.Optional != nil { - data[i] = 0x10 + if m.SecretRef != nil { + dAtA[i] = 0x22 i++ - if *m.Optional { - data[i] = 1 - } else { - data[i] = 0 + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n11, err := 
m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i++ + i += n11 } return i, nil } -func (m *ConfigMapKeySelector) Marshal() (data []byte, err error) { +func (m *CinderVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ConfigMapKeySelector) MarshalTo(data []byte) (int, error) { +func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) - n11, err := m.LocalObjectReference.MarshalTo(data[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i += copy(dAtA[i:], m.VolumeID) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n11 - data[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - if m.Optional != nil { - data[i] = 0x18 + if m.SecretRef != nil { + dAtA[i] = 0x22 i++ - if *m.Optional { - data[i] = 1 - } else { - data[i] = 0 + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n12, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i++ + i += n12 } return i, nil } -func (m *ConfigMapList) Marshal() (data []byte, err error) { +func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ConfigMapList) MarshalTo(data []byte) (int, error) { +func (m *ClientIPConfig) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n12, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n12 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } + if m.TimeoutSeconds != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) } return i, nil } -func (m *ConfigMapProjection) Marshal() (data []byte, err error) { +func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ConfigMapProjection) MarshalTo(data []byte) (int, error) { +func (m *ComponentCondition) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) - n13, err := m.LocalObjectReference.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + return i, nil +} + +func (m *ComponentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n13 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - if m.Optional != nil { - data[i] = 0x20 - i++ - if *m.Optional { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } return i, nil } -func (m *ConfigMapVolumeSource) Marshal() (data []byte, err error) { +func (m *ComponentStatusList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ConfigMapVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) - n14, err := m.LocalObjectReference.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n14, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n14 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ConfigMap) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n15, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + if len(m.Data) > 0 { + keysForData := make([]string, 0, len(m.Data)) + for k := range m.Data { + keysForData = append(keysForData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + for _, k := range keysForData { + dAtA[i] = 0x12 + i++ + v := m.Data[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if 
len(m.BinaryData) > 0 { + keysForBinaryData := make([]string, 0, len(m.BinaryData)) + for k := range m.BinaryData { + keysForBinaryData = append(keysForBinaryData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) + for _, k := range keysForBinaryData { + dAtA[i] = 0x1a + i++ + v := m.BinaryData[string(k)] + byteSize := 0 + if v != nil { + byteSize = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + byteSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + } + return i, nil +} + +func (m *ConfigMapEnvSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n16, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + if m.Optional != nil { + dAtA[i] = 0x10 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ConfigMapKeySelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n17, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + if m.Optional != nil { + dAtA[i] = 0x18 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ConfigMapList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n18, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ConfigMapNodeConfigSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapNodeConfigSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 
0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletConfigKey))) + i += copy(dAtA[i:], m.KubeletConfigKey) + return i, nil +} + +func (m *ConfigMapProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n19, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Optional != nil { + dAtA[i] = 0x20 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ConfigMapVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n20, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1844,86 +2388,86 @@ func (m *ConfigMapVolumeSource) MarshalTo(data []byte) (int, error) { } } if m.DefaultMode != nil { - data[i] = 0x18 + dAtA[i] = 0x18 i++ - i = encodeVarintGenerated(data, i, uint64(*m.DefaultMode)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) } if m.Optional != nil { - data[i] = 0x20 + dAtA[i] = 0x20 i++ if *m.Optional { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ } return i, nil } -func (m *Container) Marshal() (data []byte, err error) { +func (m *Container) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Container) MarshalTo(data []byte) (int, error) { +func (m *Container) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Image))) - i += copy(data[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i += copy(dAtA[i:], m.Image) if len(m.Command) > 0 { for _, s := range m.Command { - data[i] = 0x1a + dAtA[i] = 0x1a i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = 
uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if len(m.Args) > 0 { for _, s := range m.Args { - data[i] = 0x22 + dAtA[i] = 0x22 i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } - data[i] = 0x2a + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.WorkingDir))) - i += copy(data[i:], m.WorkingDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) + i += copy(dAtA[i:], m.WorkingDir) if len(m.Ports) > 0 { for _, msg := range m.Ports { - data[i] = 0x32 + dAtA[i] = 0x32 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1932,30 +2476,30 @@ func (m *Container) MarshalTo(data []byte) (int, error) { } if len(m.Env) > 0 { for _, msg := range m.Env { - data[i] = 0x3a + dAtA[i] = 0x3a i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - data[i] = 0x42 + dAtA[i] = 0x42 i++ - i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) - n15, err := m.Resources.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) + n21, err := m.Resources.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n21 if len(m.VolumeMounts) > 0 { for _, msg := range m.VolumeMounts { - data[i] = 0x4a + dAtA[i] = 0x4a i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1963,482 +2507,447 @@ func (m *Container) MarshalTo(data []byte) (int, error) { } } if m.LivenessProbe != nil { - data[i] = 0x52 + dAtA[i] = 0x52 i++ - i = encodeVarintGenerated(data, i, uint64(m.LivenessProbe.Size())) - n16, err := m.LivenessProbe.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.LivenessProbe.Size())) + n22, err := m.LivenessProbe.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n22 } if m.ReadinessProbe != nil { - data[i] = 0x5a + dAtA[i] = 0x5a i++ - i = encodeVarintGenerated(data, i, uint64(m.ReadinessProbe.Size())) - n17, err := m.ReadinessProbe.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadinessProbe.Size())) + n23, err := m.ReadinessProbe.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n23 } if m.Lifecycle != nil { - data[i] = 0x62 + dAtA[i] = 0x62 i++ - i = encodeVarintGenerated(data, i, uint64(m.Lifecycle.Size())) - n18, err := m.Lifecycle.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Lifecycle.Size())) + n24, err := m.Lifecycle.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n24 } - data[i] = 0x6a + dAtA[i] = 0x6a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.TerminationMessagePath))) - i += copy(data[i:], m.TerminationMessagePath) - data[i] = 0x72 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) + i += copy(dAtA[i:], m.TerminationMessagePath) + dAtA[i] = 0x72 i++ - i = encodeVarintGenerated(data, i, 
uint64(len(m.ImagePullPolicy))) - i += copy(data[i:], m.ImagePullPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) + i += copy(dAtA[i:], m.ImagePullPolicy) if m.SecurityContext != nil { - data[i] = 0x7a + dAtA[i] = 0x7a i++ - i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size())) - n19, err := m.SecurityContext.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) + n25, err := m.SecurityContext.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n25 } - data[i] = 0x80 + dAtA[i] = 0x80 i++ - data[i] = 0x1 + dAtA[i] = 0x1 i++ if m.Stdin { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ - data[i] = 0x88 + dAtA[i] = 0x88 i++ - data[i] = 0x1 + dAtA[i] = 0x1 i++ if m.StdinOnce { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ - data[i] = 0x90 + dAtA[i] = 0x90 i++ - data[i] = 0x1 + dAtA[i] = 0x1 i++ if m.TTY { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ if len(m.EnvFrom) > 0 { for _, msg := range m.EnvFrom { - data[i] = 0x9a + dAtA[i] = 0x9a i++ - data[i] = 0x1 + dAtA[i] = 0x1 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - data[i] = 0xa2 + dAtA[i] = 0xa2 i++ - data[i] = 0x1 + dAtA[i] = 0x1 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.TerminationMessagePolicy))) - i += copy(data[i:], m.TerminationMessagePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) + i += copy(dAtA[i:], m.TerminationMessagePolicy) + if len(m.VolumeDevices) > 0 { + for _, msg := range m.VolumeDevices { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } -func (m *ContainerImage) Marshal() (data []byte, err error) { +func (m *ContainerImage) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ContainerImage) MarshalTo(data []byte) (int, error) { +func (m *ContainerImage) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Names) > 0 { for _, s := range m.Names { - data[i] = 0xa + dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(m.SizeBytes)) + i = encodeVarintGenerated(dAtA, i, uint64(m.SizeBytes)) return i, nil } -func (m *ContainerPort) Marshal() (data []byte, err error) { +func (m *ContainerPort) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ContainerPort) MarshalTo(data []byte) (int, error) { +func (m *ContainerPort) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, 
uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(m.HostPort)) - data[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPort)) + dAtA[i] = 0x18 i++ - i = encodeVarintGenerated(data, i, uint64(m.ContainerPort)) - data[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.ContainerPort)) + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Protocol))) - i += copy(data[i:], m.Protocol) - data[i] = 0x2a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i += copy(dAtA[i:], m.Protocol) + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.HostIP))) - i += copy(data[i:], m.HostIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i += copy(dAtA[i:], m.HostIP) return i, nil } -func (m *ContainerState) Marshal() (data []byte, err error) { +func (m *ContainerState) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ContainerState) MarshalTo(data []byte) (int, error) { +func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Waiting != nil { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.Waiting.Size())) - n20, err := m.Waiting.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Waiting.Size())) + n26, err := m.Waiting.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n26 } if m.Running != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Running.Size())) - n21, err := m.Running.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Running.Size())) + n27, err := m.Running.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n27 } if m.Terminated != nil { - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.Terminated.Size())) - n22, err := m.Terminated.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n22 - } - return i, nil -} - -func (m *ContainerStateRunning) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ContainerStateRunning) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.StartedAt.Size())) - n23, err := m.StartedAt.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n23 - return i, nil -} - -func (m *ContainerStateTerminated) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ContainerStateTerminated) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ExitCode)) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Signal)) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += 
copy(data[i:], m.Message) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(m.StartedAt.Size())) - n24, err := m.StartedAt.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n24 - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FinishedAt.Size())) - n25, err := m.FinishedAt.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n25 - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContainerID))) - i += copy(data[i:], m.ContainerID) - return i, nil -} - -func (m *ContainerStateWaiting) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ContainerStateWaiting) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - return i, nil -} - -func (m *ContainerStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ContainerStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.State.Size())) - n26, err := m.State.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n26 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTerminationState.Size())) - n27, err := m.LastTerminationState.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n27 - data[i] = 0x20 - i++ - if m.Ready { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.RestartCount)) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Image))) - i += copy(data[i:], m.Image) - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ImageID))) - i += copy(data[i:], m.ImageID) - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContainerID))) - i += copy(data[i:], m.ContainerID) - return i, nil -} - -func (m *DaemonEndpoint) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DaemonEndpoint) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Port)) - return i, nil -} - -func (m *DeleteOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DeleteOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.GracePeriodSeconds != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.GracePeriodSeconds)) - } - if m.Preconditions != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Preconditions.Size())) - n28, err := m.Preconditions.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Terminated.Size())) 
+ n28, err := m.Terminated.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n28 } - if m.OrphanDependents != nil { - data[i] = 0x18 - i++ - if *m.OrphanDependents { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - if m.PropagationPolicy != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(*m.PropagationPolicy))) - i += copy(data[i:], *m.PropagationPolicy) - } return i, nil } -func (m *DownwardAPIProjection) Marshal() (data []byte, err error) { +func (m *ContainerStateRunning) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *DownwardAPIProjection) MarshalTo(data []byte) (int, error) { +func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) + n29, err := m.StartedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + return i, nil +} + +func (m *ContainerStateTerminated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ExitCode)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Signal)) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) + n30, err := m.StartedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FinishedAt.Size())) + n31, err := m.FinishedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + return i, nil +} + +func (m *ContainerStateWaiting) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStateWaiting) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.State.Size())) + n32, err := m.State.MarshalTo(dAtA[i:]) + if err != nil { + return 0, 
err + } + i += n32 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTerminationState.Size())) + n33, err := m.LastTerminationState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + dAtA[i] = 0x20 + i++ + if m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RestartCount)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i += copy(dAtA[i:], m.Image) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) + i += copy(dAtA[i:], m.ImageID) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + return i, nil +} + +func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + return i, nil +} + +func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DownwardAPIProjection) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -2448,74 +2957,74 @@ func (m *DownwardAPIProjection) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *DownwardAPIVolumeFile) Marshal() (data []byte, err error) { +func (m *DownwardAPIVolumeFile) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *DownwardAPIVolumeFile) MarshalTo(data []byte) (int, error) { +func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) if m.FieldRef != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size())) - n29, err := m.FieldRef.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) + n34, err := m.FieldRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n34 } if m.ResourceFieldRef != nil { - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.ResourceFieldRef.Size())) - n30, err := m.ResourceFieldRef.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) + n35, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n35 } if m.Mode != nil { - data[i] = 0x20 + dAtA[i] = 0x20 i++ - i = encodeVarintGenerated(data, i, uint64(*m.Mode)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) } return i, nil } -func 
(m *DownwardAPIVolumeSource) Marshal() (data []byte, err error) { +func (m *DownwardAPIVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *DownwardAPIVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *DownwardAPIVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -2523,306 +3032,37 @@ func (m *DownwardAPIVolumeSource) MarshalTo(data []byte) (int, error) { } } if m.DefaultMode != nil { - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(*m.DefaultMode)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) } return i, nil } -func (m *EmptyDirVolumeSource) Marshal() (data []byte, err error) { +func (m *EmptyDirVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *EmptyDirVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Medium))) - i += copy(data[i:], m.Medium) - return i, nil -} - -func (m *EndpointAddress) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *EndpointAddress) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.IP))) - i += copy(data[i:], m.IP) - if m.TargetRef != nil { - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) + i += copy(dAtA[i:], m.Medium) + if m.SizeLimit != nil { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.TargetRef.Size())) - n31, err := m.TargetRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n31 - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Hostname))) - i += copy(data[i:], m.Hostname) - if m.NodeName != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(*m.NodeName))) - i += copy(data[i:], *m.NodeName) - } - return i, nil -} - -func (m *EndpointPort) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *EndpointPort) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Port)) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Protocol))) - i += copy(data[i:], m.Protocol) - return i, nil -} - -func (m *EndpointSubset) Marshal() (data 
[]byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *EndpointSubset) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Addresses) > 0 { - for _, msg := range m.Addresses { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.NotReadyAddresses) > 0 { - for _, msg := range m.NotReadyAddresses { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Endpoints) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Endpoints) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n32, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n32 - if len(m.Subsets) > 0 { - for _, msg := range m.Subsets { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *EndpointsList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *EndpointsList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n33, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n33 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *EnvFromSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *EnvFromSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Prefix))) - i += copy(data[i:], m.Prefix) - if m.ConfigMapRef != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ConfigMapRef.Size())) - n34, err := m.ConfigMapRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n34 - } - if m.SecretRef != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n35, err := m.SecretRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n35 - } - return i, nil -} - -func (m *EnvVar) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil 
-} - -func (m *EnvVar) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Value))) - i += copy(data[i:], m.Value) - if m.ValueFrom != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.ValueFrom.Size())) - n36, err := m.ValueFrom.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size())) + n36, err := m.SizeLimit.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -2831,166 +3071,122 @@ func (m *EnvVar) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *EnvVarSource) Marshal() (data []byte, err error) { +func (m *EndpointAddress) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *EnvVarSource) MarshalTo(data []byte) (int, error) { +func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.FieldRef != nil { - data[i] = 0xa + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i += copy(dAtA[i:], m.IP) + if m.TargetRef != nil { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size())) - n37, err := m.FieldRef.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetRef.Size())) + n37, err := m.TargetRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n37 } - if m.ResourceFieldRef != nil { - data[i] = 0x12 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + if m.NodeName != nil { + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(m.ResourceFieldRef.Size())) - n38, err := m.ResourceFieldRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n38 - } - if m.ConfigMapKeyRef != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.ConfigMapKeyRef.Size())) - n39, err := m.ConfigMapKeyRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n39 - } - if m.SecretKeyRef != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SecretKeyRef.Size())) - n40, err := m.SecretKeyRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n40 + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i += copy(dAtA[i:], *m.NodeName) } return i, nil } -func (m *Event) Marshal() (data []byte, err error) { +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Event) MarshalTo(data []byte) (int, error) { +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n41, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n41 - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(m.InvolvedObject.Size())) - n42, err := 
m.InvolvedObject.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n42 - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Source.Size())) - n43, err := m.Source.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n43 - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FirstTimestamp.Size())) - n44, err := m.FirstTimestamp.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n44 - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTimestamp.Size())) - n45, err := m.LastTimestamp.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n45 - data[i] = 0x40 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Count)) - data[i] = 0x4a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i += copy(dAtA[i:], m.Protocol) return i, nil } -func (m *EventList) Marshal() (data []byte, err error) { +func (m *EndpointSubset) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *EventList) MarshalTo(data []byte) (int, error) { +func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n46, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n46 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 + if len(m.Addresses) > 0 { + for _, msg := range m.Addresses { + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.NotReadyAddresses) > 0 { + for _, msg := range m.NotReadyAddresses { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -3000,344 +3196,826 @@ func (m *EventList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *EventSource) Marshal() (data []byte, err error) { +func (m *Endpoints) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *EventSource) MarshalTo(data []byte) (int, error) { +func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Component))) - i += copy(data[i:], m.Component) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, 
uint64(len(m.Host))) - i += copy(data[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n38, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + if len(m.Subsets) > 0 { + for _, msg := range m.Subsets { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } -func (m *ExecAction) Marshal() (data []byte, err error) { +func (m *EndpointsList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ExecAction) MarshalTo(data []byte) (int, error) { +func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n39, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EnvFromSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) + i += copy(dAtA[i:], m.Prefix) + if m.ConfigMapRef != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) + n40, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + } + if m.SecretRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n41, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + return i, nil +} + +func (m *EnvVar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + if m.ValueFrom != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ValueFrom.Size())) + n42, err := m.ValueFrom.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + } + return i, nil +} + +func (m *EnvVarSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.FieldRef != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) + n43, err := m.FieldRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + } + if 
m.ResourceFieldRef != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) + n44, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + } + if m.ConfigMapKeyRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapKeyRef.Size())) + n45, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + } + if m.SecretKeyRef != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretKeyRef.Size())) + n46, err := m.SecretKeyRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + } + return i, nil +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n47, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.InvolvedObject.Size())) + n48, err := m.InvolvedObject.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) + n49, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FirstTimestamp.Size())) + n50, err := m.FirstTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTimestamp.Size())) + n51, err := m.LastTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) + n52, err := m.EventTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + if m.Series != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) + n53, err := m.Series.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 + } + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i += copy(dAtA[i:], m.Action) + if m.Related != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) + n54, err := m.Related.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n54 + } + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i += copy(dAtA[i:], m.ReportingController) + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i += copy(dAtA[i:], m.ReportingInstance) + return i, nil +} + +func (m *EventList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n55, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n55 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EventSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) + n56, err := m.LastObservedTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n56 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i += copy(dAtA[i:], m.State) + return i, nil +} + +func (m *EventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Component))) + i += copy(dAtA[i:], m.Component) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i += copy(dAtA[i:], m.Host) + return i, nil +} + +func (m *ExecAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecAction) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Command) > 0 { for _, s := range m.Command { - data[i] = 0xa + dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } return i, nil } -func (m *FCVolumeSource) Marshal() (data []byte, err error) { +func (m *FCVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *FCVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.TargetWWNs) > 0 { for _, s := range m.TargetWWNs { - data[i] = 0xa + dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if m.Lun != nil { - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(*m.Lun)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.Lun)) } - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], 
m.FSType) - data[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x20 i++ if m.ReadOnly { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ + if len(m.WWIDs) > 0 { + for _, s := range m.WWIDs { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } -func (m *FlexVolumeSource) Marshal() (data []byte, err error) { +func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *FlexVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Driver))) - i += copy(data[i:], m.Driver) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) if m.SecretRef != nil { - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n47, err := m.SecretRef.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n57, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n47 + i += n57 } - data[i] = 0x20 + dAtA[i] = 0x20 i++ if m.ReadOnly { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) for k := range m.Options { - data[i] = 0x2a + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for _, k := range keysForOptions { + dAtA[i] = 0x2a i++ - v := m.Options[k] + v := m.Options[string(k)] mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } return i, nil } -func (m *FlockerVolumeSource) Marshal() (data []byte, err error) { +func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *FlockerVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.DatasetName))) - i += copy(data[i:], m.DatasetName) - data[i] = 0x12 + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.DatasetUUID))) - i += copy(data[i:], m.DatasetUUID) - return i, nil -} - -func (m *GCEPersistentDiskVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + if m.SecretRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n58, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 } - return data[:n], nil -} - -func (m *GCEPersistentDiskVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.PDName))) - i += copy(data[i:], m.PDName) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Partition)) - data[i] = 0x20 + dAtA[i] = 0x20 i++ if m.ReadOnly { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for _, k := range keysForOptions { + dAtA[i] = 0x2a + i++ + v := m.Options[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } return i, nil } -func (m *GitRepoVolumeSource) Marshal() (data []byte, err error) { +func (m *FlockerVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *GitRepoVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *FlockerVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Repository))) - i += copy(data[i:], m.Repository) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetName))) + i += copy(dAtA[i:], m.DatasetName) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Revision))) - i += copy(data[i:], m.Revision) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Directory))) - i += copy(data[i:], m.Directory) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetUUID))) + i += copy(dAtA[i:], m.DatasetUUID) return i, nil } -func (m *GlusterfsVolumeSource) Marshal() (data []byte, err error) { +func (m *GCEPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *GlusterfsVolumeSource) MarshalTo(data []byte) (int, 
error) { +func (m *GCEPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.EndpointsName))) - i += copy(data[i:], m.EndpointsName) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PDName))) + i += copy(dAtA[i:], m.PDName) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - data[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) + dAtA[i] = 0x20 i++ if m.ReadOnly { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ return i, nil } -func (m *HTTPGetAction) Marshal() (data []byte, err error) { +func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *HTTPGetAction) MarshalTo(data []byte) (int, error) { +func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repository))) + i += copy(dAtA[i:], m.Repository) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) - n48, err := m.Port.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision))) + i += copy(dAtA[i:], m.Revision) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory))) + i += copy(dAtA[i:], m.Directory) + return i, nil +} + +func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GlusterfsVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) + i += copy(dAtA[i:], m.EndpointsName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *HTTPGetAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) + n59, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n48 - data[i] = 0x1a + i += n59 + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Host))) - i += copy(data[i:], m.Host) - data[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i += copy(dAtA[i:], m.Host) + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Scheme))) - i += copy(data[i:], m.Scheme) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme))) + i += copy(dAtA[i:], m.Scheme) if len(m.HTTPHeaders) > 0 { for _, msg := range m.HTTPHeaders { - data[i] = 0x2a + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -3347,1006 +4025,638 @@ func (m *HTTPGetAction) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *HTTPHeader) Marshal() (data []byte, err error) { +func (m *HTTPHeader) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *HTTPHeader) MarshalTo(data []byte) (int, error) { +func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Value))) - i += copy(data[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) return i, nil } -func (m *Handler) Marshal() (data []byte, err error) { +func (m *Handler) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Handler) MarshalTo(data []byte) (int, error) { +func (m *Handler) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Exec != nil { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.Exec.Size())) - n49, err := m.Exec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size())) + n60, err := m.Exec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n49 + i += n60 } if m.HTTPGet != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.HTTPGet.Size())) - n50, err := m.HTTPGet.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size())) + n61, err := m.HTTPGet.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n50 + i += n61 } if m.TCPSocket != nil { - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.TCPSocket.Size())) - n51, err := m.TCPSocket.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size())) + n62, err := m.TCPSocket.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n62 } return i, nil } -func (m *HostPathVolumeSource) Marshal() (data []byte, err error) { +func (m *HostAlias) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *HostPathVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i += copy(dAtA[i:], m.IP) + if len(m.Hostnames) > 0 { + for _, s := range m.Hostnames { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } -func (m *ISCSIVolumeSource) Marshal() (data []byte, err error) { +func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ISCSIVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.TargetPortal))) - i += copy(data[i:], m.TargetPortal) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + if m.Type != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i += copy(dAtA[i:], *m.Type) + } + return i, nil +} + +func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.IQN))) - i += copy(data[i:], m.IQN) - data[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i += copy(dAtA[i:], m.TargetPortal) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Lun)) - data[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i += copy(dAtA[i:], m.IQN) + dAtA[i] = 0x18 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ISCSIInterface))) - i += copy(data[i:], m.ISCSIInterface) - data[i] = 0x2a + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - data[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i += copy(dAtA[i:], m.ISCSIInterface) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x30 i++ if m.ReadOnly { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ if len(m.Portals) > 0 { for _, s := range m.Portals { - data[i] = 0x3a + dAtA[i] = 0x3a i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } + dAtA[i] = 0x40 + i++ + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SecretRef != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n63, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n63 + } + dAtA[i] = 0x58 + i++ + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.InitiatorName != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i += copy(dAtA[i:], *m.InitiatorName) + } return i, 
nil } -func (m *KeyToPath) Marshal() (data []byte, err error) { +func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *KeyToPath) MarshalTo(data []byte) (int, error) { +func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i += copy(dAtA[i:], m.TargetPortal) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - if m.Mode != nil { - data[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i += copy(dAtA[i:], m.IQN) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i += copy(dAtA[i:], m.ISCSIInterface) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x30 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.Portals) > 0 { + for _, s := range m.Portals { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x40 + i++ + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SecretRef != nil { + dAtA[i] = 0x52 i++ - i = encodeVarintGenerated(data, i, uint64(*m.Mode)) + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n64, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n64 + } + dAtA[i] = 0x58 + i++ + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.InitiatorName != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i += copy(dAtA[i:], *m.InitiatorName) } return i, nil } -func (m *Lifecycle) Marshal() (data []byte, err error) { +func (m *KeyToPath) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Lifecycle) MarshalTo(data []byte) (int, error) { +func (m *KeyToPath) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + if m.Mode != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + } + return i, nil +} + +func (m *Lifecycle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.PostStart != nil { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.PostStart.Size())) - n52, err := 
m.PostStart.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size())) + n65, err := m.PostStart.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n65 } if m.PreStop != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.PreStop.Size())) - n53, err := m.PreStop.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size())) + n66, err := m.PreStop.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n66 } return i, nil } -func (m *LimitRange) Marshal() (data []byte, err error) { +func (m *LimitRange) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *LimitRange) MarshalTo(data []byte) (int, error) { +func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n54, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n54 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n55, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n55 - return i, nil -} - -func (m *LimitRangeItem) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - if len(m.Max) > 0 { - for k := range m.Max { - data[i] = 0x12 - i++ - v := m.Max[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n56, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n56 - } - } - if len(m.Min) > 0 { - for k := range m.Min { - data[i] = 0x1a - i++ - v := m.Min[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n57, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n57 - } - } - if len(m.Default) > 0 { - for k := range m.Default { - data[i] = 0x22 - i++ - v := m.Default[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n58, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n58 - } - } - if len(m.DefaultRequest) > 0 { - for k := range m.DefaultRequest { 
- data[i] = 0x2a - i++ - v := m.DefaultRequest[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n59, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n59 - } - } - if len(m.MaxLimitRequestRatio) > 0 { - for k := range m.MaxLimitRequestRatio { - data[i] = 0x32 - i++ - v := m.MaxLimitRequestRatio[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n60, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n60 - } - } - return i, nil -} - -func (m *LimitRangeList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LimitRangeList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n61, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n61 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LimitRangeSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LimitRangeSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Limits) > 0 { - for _, msg := range m.Limits { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *List) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *List) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n62, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n62 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ListOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ListOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.LabelSelector))) - i += 
copy(data[i:], m.LabelSelector) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FieldSelector))) - i += copy(data[i:], m.FieldSelector) - data[i] = 0x18 - i++ - if m.Watch { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) - i += copy(data[i:], m.ResourceVersion) - if m.TimeoutSeconds != nil { - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.TimeoutSeconds)) - } - return i, nil -} - -func (m *LoadBalancerIngress) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LoadBalancerIngress) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.IP))) - i += copy(data[i:], m.IP) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Hostname))) - i += copy(data[i:], m.Hostname) - return i, nil -} - -func (m *LoadBalancerStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LoadBalancerStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LocalObjectReference) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LocalObjectReference) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - return i, nil -} - -func (m *NFSVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NFSVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Server))) - i += copy(data[i:], m.Server) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - data[i] = 0x18 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *Namespace) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Namespace) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n63, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n63 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n64, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n64 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n65, err := m.Status.MarshalTo(data[i:]) 
- if err != nil { - return 0, err - } - i += n65 - return i, nil -} - -func (m *NamespaceList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NamespaceList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n66, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n66 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *NamespaceSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NamespaceSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *NamespaceStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NamespaceStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) - i += copy(data[i:], m.Phase) - return i, nil -} - -func (m *Node) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Node) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n67, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n67, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n67 - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n68, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n68, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n68 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n69, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n69 return i, nil } -func (m *NodeAddress) Marshal() (data []byte, err error) { +func (m *LimitRangeItem) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *NodeAddress) MarshalTo(data []byte) (int, error) { +func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, 
i, uint64(len(m.Address))) - i += copy(data[i:], m.Address) - return i, nil -} - -func (m *NodeAffinity) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeAffinity) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n70, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(data[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if len(m.Max) > 0 { + keysForMax := make([]string, 0, len(m.Max)) + for k := range m.Max { + keysForMax = append(keysForMax, string(k)) } - i += n70 - } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - data[i] = 0x12 + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + for _, k := range keysForMax { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + v := m.Max[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n69, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n + i += n69 + } + } + if len(m.Min) > 0 { + keysForMin := make([]string, 0, len(m.Min)) + for k := range m.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + for _, k := range keysForMin { + dAtA[i] = 0x1a + i++ + v := m.Min[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n70, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n70 + } + } + if len(m.Default) > 0 { + keysForDefault := make([]string, 0, len(m.Default)) + for k := range m.Default { + keysForDefault = append(keysForDefault, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + for _, k := range keysForDefault { + dAtA[i] = 0x22 + i++ + v := m.Default[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n71, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n71 + } + } + if len(m.DefaultRequest) > 0 { + keysForDefaultRequest := make([]string, 0, len(m.DefaultRequest)) + for 
k := range m.DefaultRequest { + keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + for _, k := range keysForDefaultRequest { + dAtA[i] = 0x2a + i++ + v := m.DefaultRequest[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n72, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n72 + } + } + if len(m.MaxLimitRequestRatio) > 0 { + keysForMaxLimitRequestRatio := make([]string, 0, len(m.MaxLimitRequestRatio)) + for k := range m.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + for _, k := range keysForMaxLimitRequestRatio { + dAtA[i] = 0x32 + i++ + v := m.MaxLimitRequestRatio[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n73, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n73 } } return i, nil } -func (m *NodeCondition) Marshal() (data []byte, err error) { +func (m *LimitRangeList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *NodeCondition) MarshalTo(data []byte) (int, error) { +func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastHeartbeatTime.Size())) - n71, err := m.LastHeartbeatTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n71 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n72, err := m.LastTransitionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n72 - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - return i, nil -} - -func (m *NodeDaemonEndpoints) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeDaemonEndpoints) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.KubeletEndpoint.Size())) - n73, err := 
m.KubeletEndpoint.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n73 - return i, nil -} - -func (m *NodeList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n74, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n74, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n74 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -4356,89 +4666,737 @@ func (m *NodeList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *NodeProxyOptions) Marshal() (data []byte, err error) { +func (m *LimitRangeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *NodeProxyOptions) MarshalTo(data []byte) (int, error) { +func (m *LimitRangeSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - return i, nil -} - -func (m *NodeResources) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeResources) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Capacity) > 0 { - for k := range m.Capacity { - data[i] = 0xa + if len(m.Limits) > 0 { + for _, msg := range m.Limits { + dAtA[i] = 0xa i++ - v := m.Capacity[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n75, err := (&v).MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n75 + i += n } } return i, nil } -func (m *NodeSelector) Marshal() (data []byte, err error) { +func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *NodeSelector) MarshalTo(data []byte) (int, error) { +func (m *List) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n75, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n75 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i += copy(dAtA[i:], m.IP) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + return i, nil +} + +func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ingress) > 0 { + for _, msg := range m.Ingress { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} + +func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + return i, nil +} + +func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Server))) + i += copy(dAtA[i:], m.Server) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *Namespace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n76, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n76 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n77, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n77 + dAtA[i] = 0x1a + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n78, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n78 + return i, nil +} + +func (m *NamespaceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n79, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n79 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NamespaceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamespaceSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *NamespaceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamespaceStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + return i, nil +} + +func (m *Node) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Node) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n80, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n80 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n81, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n81 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n82, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n82 + return i, nil +} + +func (m *NodeAddress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeAddress) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Address))) + i += copy(dAtA[i:], m.Address) + return i, nil +} + +func (m *NodeAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeAffinity) 
MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) + n83, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n83 + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size())) + n84, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n84 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n85, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n85 + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *NodeConfigSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ConfigMap != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) + n86, err := m.ConfigMap.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n86 + } + return i, nil +} + +func (m *NodeConfigStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Assigned != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Assigned.Size())) + n87, err := m.Assigned.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n87 + } + if m.Active != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Active.Size())) + n88, err := m.Active.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n88 + } + if m.LastKnownGood != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastKnownGood.Size())) + n89, err := m.LastKnownGood.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n89 + } + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + return i, nil 
+} + +func (m *NodeDaemonEndpoints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.KubeletEndpoint.Size())) + n90, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n90 + return i, nil +} + +func (m *NodeList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n91, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n91 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeProxyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeProxyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + return i, nil +} + +func (m *NodeResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for _, k := range keysForCapacity { + dAtA[i] = 0xa + i++ + v := m.Capacity[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n92, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n92 + } + } + return i, nil +} + +func (m *NodeSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSelector) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.NodeSelectorTerms) > 0 { for _, msg := range m.NodeSelectorTerms { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -4448,68 +5406,80 @@ func (m 
*NodeSelector) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *NodeSelectorRequirement) Marshal() (data []byte, err error) { +func (m *NodeSelectorRequirement) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *NodeSelectorRequirement) MarshalTo(data []byte) (int, error) { +func (m *NodeSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) - i += copy(data[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i += copy(dAtA[i:], m.Operator) if len(m.Values) > 0 { for _, s := range m.Values { - data[i] = 0x1a + dAtA[i] = 0x1a i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } return i, nil } -func (m *NodeSelectorTerm) Marshal() (data []byte, err error) { +func (m *NodeSelectorTerm) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *NodeSelectorTerm) MarshalTo(data []byte) (int, error) { +func (m *NodeSelectorTerm) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.MatchExpressions) > 0 { for _, msg := range m.MatchExpressions { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.MatchFields) > 0 { + for _, msg := range m.MatchFields { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -4519,125 +5489,153 @@ func (m *NodeSelectorTerm) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *NodeSpec) Marshal() (data []byte, err error) { +func (m *NodeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *NodeSpec) MarshalTo(data []byte) (int, error) { +func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.PodCIDR))) - i += copy(data[i:], m.PodCIDR) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDR))) + i += copy(dAtA[i:], m.PodCIDR) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ExternalID))) - i += copy(data[i:], m.ExternalID) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DoNotUse_ExternalID))) + i += copy(dAtA[i:], m.DoNotUse_ExternalID) + dAtA[i] = 0x1a i++ - i 
= encodeVarintGenerated(data, i, uint64(len(m.ProviderID))) - i += copy(data[i:], m.ProviderID) - data[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID))) + i += copy(dAtA[i:], m.ProviderID) + dAtA[i] = 0x20 i++ if m.Unschedulable { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ if len(m.Taints) > 0 { for _, msg := range m.Taints { - data[i] = 0x2a + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } + if m.ConfigSource != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigSource.Size())) + n93, err := m.ConfigSource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n93 + } return i, nil } -func (m *NodeStatus) Marshal() (data []byte, err error) { +func (m *NodeStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *NodeStatus) MarshalTo(data []byte) (int, error) { +func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) for k := range m.Capacity { - data[i] = 0xa + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for _, k := range keysForCapacity { + dAtA[i] = 0xa i++ - v := m.Capacity[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa + v := m.Capacity[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n76, err := (&v).MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n94, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n76 + i += n94 } } if len(m.Allocatable) > 0 { + keysForAllocatable := make([]string, 0, len(m.Allocatable)) for k := range m.Allocatable { - data[i] = 0x12 + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + for _, k := range keysForAllocatable { + dAtA[i] = 0x12 i++ - v := m.Allocatable[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa + v := m.Allocatable[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 + i = 
encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n77, err := (&v).MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n95, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n77 + i += n95 } } - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) - i += copy(data[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) if len(m.Conditions) > 0 { for _, msg := range m.Conditions { - data[i] = 0x22 + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -4646,38 +5644,38 @@ func (m *NodeStatus) MarshalTo(data []byte) (int, error) { } if len(m.Addresses) > 0 { for _, msg := range m.Addresses { - data[i] = 0x2a + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - data[i] = 0x32 + dAtA[i] = 0x32 i++ - i = encodeVarintGenerated(data, i, uint64(m.DaemonEndpoints.Size())) - n78, err := m.DaemonEndpoints.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.DaemonEndpoints.Size())) + n96, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n78 - data[i] = 0x3a + i += n96 + dAtA[i] = 0x3a i++ - i = encodeVarintGenerated(data, i, uint64(m.NodeInfo.Size())) - n79, err := m.NodeInfo.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size())) + n97, err := m.NodeInfo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n79 + i += n97 if len(m.Images) > 0 { for _, msg := range m.Images { - data[i] = 0x42 + dAtA[i] = 0x42 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -4686,1112 +5684,1053 @@ func (m *NodeStatus) MarshalTo(data []byte) (int, error) { } if len(m.VolumesInUse) > 0 { for _, s := range m.VolumesInUse { - data[i] = 0x4a + dAtA[i] = 0x4a i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if len(m.VolumesAttached) > 0 { for _, msg := range m.VolumesAttached { - data[i] = 0x52 + dAtA[i] = 0x52 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - return i, nil -} - -func (m *NodeSystemInfo) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeSystemInfo) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.MachineID))) - i += copy(data[i:], m.MachineID) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SystemUUID))) - i += 
copy(data[i:], m.SystemUUID) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.BootID))) - i += copy(data[i:], m.BootID) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.KernelVersion))) - i += copy(data[i:], m.KernelVersion) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.OSImage))) - i += copy(data[i:], m.OSImage) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContainerRuntimeVersion))) - i += copy(data[i:], m.ContainerRuntimeVersion) - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.KubeletVersion))) - i += copy(data[i:], m.KubeletVersion) - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.KubeProxyVersion))) - i += copy(data[i:], m.KubeProxyVersion) - data[i] = 0x4a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.OperatingSystem))) - i += copy(data[i:], m.OperatingSystem) - data[i] = 0x52 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Architecture))) - i += copy(data[i:], m.Architecture) - return i, nil -} - -func (m *ObjectFieldSelector) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ObjectFieldSelector) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FieldPath))) - i += copy(data[i:], m.FieldPath) - return i, nil -} - -func (m *ObjectMeta) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ObjectMeta) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.GenerateName))) - i += copy(data[i:], m.GenerateName) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) - i += copy(data[i:], m.Namespace) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SelfLink))) - i += copy(data[i:], m.SelfLink) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.UID))) - i += copy(data[i:], m.UID) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) - i += copy(data[i:], m.ResourceVersion) - data[i] = 0x38 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Generation)) - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CreationTimestamp.Size())) - n80, err := m.CreationTimestamp.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n80 - if m.DeletionTimestamp != nil { - data[i] = 0x4a + if m.Config != nil { + dAtA[i] = 0x5a i++ - i = encodeVarintGenerated(data, i, uint64(m.DeletionTimestamp.Size())) - n81, err := m.DeletionTimestamp.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n81 - } - if m.DeletionGracePeriodSeconds != nil { - data[i] = 0x50 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.DeletionGracePeriodSeconds)) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - data[i] = 0x5a - i++ - v := m.Labels[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 
len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - if len(m.Annotations) > 0 { - for k := range m.Annotations { - data[i] = 0x62 - i++ - v := m.Annotations[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - if len(m.OwnerReferences) > 0 { - for _, msg := range m.OwnerReferences { - data[i] = 0x6a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - data[i] = 0x72 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x7a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ClusterName))) - i += copy(data[i:], m.ClusterName) - return i, nil -} - -func (m *ObjectReference) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ObjectReference) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) - i += copy(data[i:], m.Namespace) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.UID))) - i += copy(data[i:], m.UID) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) - i += copy(data[i:], m.ResourceVersion) - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FieldPath))) - i += copy(data[i:], m.FieldPath) - return i, nil -} - -func (m *PersistentVolume) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolume) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n82, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n82 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n83, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n83 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n84, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n84 - return i, nil -} - -func (m *PersistentVolumeClaim) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, 
size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeClaim) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n85, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n85 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n86, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n86 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n87, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n87 - return i, nil -} - -func (m *PersistentVolumeClaimList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeClaimList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n88, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n88 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PersistentVolumeClaimSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeClaimSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) - n89, err := m.Resources.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n89 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.VolumeName))) - i += copy(data[i:], m.VolumeName) - if m.Selector != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n90, err := m.Selector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n90 - } - if m.StorageClassName != nil { - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(*m.StorageClassName))) - i += copy(data[i:], *m.StorageClassName) - } - return i, nil -} - -func (m *PersistentVolumeClaimStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeClaimStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) - i += copy(data[i:], m.Phase) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - data[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.Capacity) > 0 { - for k := range 
m.Capacity { - data[i] = 0x1a - i++ - v := m.Capacity[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n91, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n91 - } - } - return i, nil -} - -func (m *PersistentVolumeClaimVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeClaimVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ClaimName))) - i += copy(data[i:], m.ClaimName) - data[i] = 0x10 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *PersistentVolumeList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n92, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n92 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PersistentVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.GCEPersistentDisk != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size())) - n93, err := m.GCEPersistentDisk.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n93 - } - if m.AWSElasticBlockStore != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size())) - n94, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n94 - } - if m.HostPath != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.HostPath.Size())) - n95, err := m.HostPath.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n95 - } - if m.Glusterfs != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size())) - n96, err := m.Glusterfs.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n96 - } - if m.NFS != nil { - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(m.NFS.Size())) - n97, err := m.NFS.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n97 - } - if m.RBD != nil { - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(m.RBD.Size())) - n98, err := m.RBD.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Config.Size())) + n98, err := m.Config.MarshalTo(dAtA[i:]) if err != 
nil { return 0, err } i += n98 } - if m.ISCSI != nil { - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size())) - n99, err := m.ISCSI.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n99 + return i, nil +} + +func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - if m.Cinder != nil { - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Cinder.Size())) - n100, err := m.Cinder.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n100 + return dAtA[:n], nil +} + +func (m *NodeSystemInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MachineID))) + i += copy(dAtA[i:], m.MachineID) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SystemUUID))) + i += copy(dAtA[i:], m.SystemUUID) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BootID))) + i += copy(dAtA[i:], m.BootID) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KernelVersion))) + i += copy(dAtA[i:], m.KernelVersion) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OSImage))) + i += copy(dAtA[i:], m.OSImage) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerRuntimeVersion))) + i += copy(dAtA[i:], m.ContainerRuntimeVersion) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletVersion))) + i += copy(dAtA[i:], m.KubeletVersion) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeProxyVersion))) + i += copy(dAtA[i:], m.KubeProxyVersion) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OperatingSystem))) + i += copy(dAtA[i:], m.OperatingSystem) + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture))) + i += copy(dAtA[i:], m.Architecture) + return i, nil +} + +func (m *ObjectFieldSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - if m.CephFS != nil { - data[i] = 0x4a - i++ - i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size())) - n101, err := m.CephFS.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n101 + return dAtA[:n], nil +} + +func (m *ObjectFieldSelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i += copy(dAtA[i:], m.FieldPath) + return i, nil +} + +func (m *ObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - if m.FC != nil { - data[i] = 0x52 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FC.Size())) - n102, err := m.FC.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n102 + return dAtA[:n], nil +} + +func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += 
copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i += copy(dAtA[i:], m.FieldPath) + return i, nil +} + +func (m *PersistentVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - if m.Flocker != nil { - data[i] = 0x5a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Flocker.Size())) - n103, err := m.Flocker.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n103 + return dAtA[:n], nil +} + +func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n99, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if m.FlexVolume != nil { - data[i] = 0x62 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size())) - n104, err := m.FlexVolume.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n104 + i += n99 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n100, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if m.AzureFile != nil { - data[i] = 0x6a - i++ - i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size())) - n105, err := m.AzureFile.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n105 + i += n100 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n101, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if m.VsphereVolume != nil { - data[i] = 0x72 - i++ - i = encodeVarintGenerated(data, i, uint64(m.VsphereVolume.Size())) - n106, err := m.VsphereVolume.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n106 + i += n101 + return i, nil +} + +func (m *PersistentVolumeClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - if m.Quobyte != nil { - data[i] = 0x7a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Quobyte.Size())) - n107, err := m.Quobyte.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n107 + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n102, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if m.AzureDisk != nil { - data[i] = 0x82 - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.AzureDisk.Size())) - n108, err := m.AzureDisk.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n108 + i += n102 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n103, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if m.PhotonPersistentDisk != nil { - data[i] = 0x8a + i += n103 + dAtA[i] = 
0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n104, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n104 + return i, nil +} + +func (m *PersistentVolumeClaimCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) + n105, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n105 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n106, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n106 + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *PersistentVolumeClaimList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n107, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n107 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PersistentVolumeClaimSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) + n108, err := m.Resources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n108 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i += copy(dAtA[i:], m.VolumeName) + if m.Selector != nil { + dAtA[i] = 0x22 i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.PhotonPersistentDisk.Size())) - n109, err := m.PhotonPersistentDisk.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n109, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n109 } - if m.PortworxVolume != nil { - data[i] = 0x92 + if m.StorageClassName != nil { + dAtA[i] = 0x2a i++ - data[i] = 0x1 - i++ - i = 
encodeVarintGenerated(data, i, uint64(m.PortworxVolume.Size())) - n110, err := m.PortworxVolume.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n110 + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) + i += copy(dAtA[i:], *m.StorageClassName) } - if m.ScaleIO != nil { - data[i] = 0x9a + if m.VolumeMode != nil { + dAtA[i] = 0x32 i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ScaleIO.Size())) - n111, err := m.ScaleIO.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n111 + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i += copy(dAtA[i:], *m.VolumeMode) } return i, nil } -func (m *PersistentVolumeSpec) Marshal() (data []byte, err error) { +func (m *PersistentVolumeClaimStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PersistentVolumeSpec) MarshalTo(data []byte) (int, error) { +func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.Capacity) > 0 { - for k := range m.Capacity { - data[i] = 0xa - i++ - v := m.Capacity[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n112, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n112 - } - } - data[i] = 0x12 + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeSource.Size())) - n113, err := m.PersistentVolumeSource.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n113 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) if len(m.AccessModes) > 0 { for _, s := range m.AccessModes { - data[i] = 0x1a + dAtA[i] = 0x12 i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } - if m.ClaimRef != nil { - data[i] = 0x22 + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for _, k := range keysForCapacity { + dAtA[i] = 0x1a + i++ + v := m.Capacity[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n110, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n110 + } + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m 
*PersistentVolumeClaimVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClaimName))) + i += copy(dAtA[i:], m.ClaimName) + dAtA[i] = 0x10 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *PersistentVolumeList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n111, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n111 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.GCEPersistentDisk != nil { + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ClaimRef.Size())) - n114, err := m.ClaimRef.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) + n112, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n112 + } + if m.AWSElasticBlockStore != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) + n113, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n113 + } + if m.HostPath != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) + n114, err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n114 } - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.PersistentVolumeReclaimPolicy))) - i += copy(data[i:], m.PersistentVolumeReclaimPolicy) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.StorageClassName))) - i += copy(data[i:], m.StorageClassName) - return i, nil -} - -func (m *PersistentVolumeStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) - i += copy(data[i:], m.Phase) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - return i, nil -} - -func (m *PhotonPersistentDiskVolumeSource) Marshal() (data []byte, err error) { - size := 
m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PhotonPersistentDiskVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.PdID))) - i += copy(data[i:], m.PdID) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - return i, nil -} - -func (m *Pod) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Pod) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n115, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n115 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n116, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n116 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n117, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n117 - return i, nil -} - -func (m *PodAffinity) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodAffinity) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodAffinityTerm) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodAffinityTerm) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.LabelSelector != nil { - data[i] = 0xa + if m.Glusterfs != nil { + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(m.LabelSelector.Size())) - n118, err := m.LabelSelector.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) + n115, err := m.Glusterfs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n115 + } + if m.NFS != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) + n116, err := m.NFS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n116 + } + if m.RBD != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) + n117, err := m.RBD.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n117 + } + if m.ISCSI != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) + n118, err := m.ISCSI.MarshalTo(dAtA[i:]) if err != nil { 
return 0, err } i += n118 } - if len(m.Namespaces) > 0 { - for _, s := range m.Namespaces { - data[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) + if m.Cinder != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) + n119, err := m.Cinder.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n119 + } + if m.CephFS != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) + n120, err := m.CephFS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n120 + } + if m.FC != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) + n121, err := m.FC.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n121 + } + if m.Flocker != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) + n122, err := m.Flocker.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n122 + } + if m.FlexVolume != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) + n123, err := m.FlexVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n123 + } + if m.AzureFile != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) + n124, err := m.AzureFile.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n124 + } + if m.VsphereVolume != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) + n125, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n125 + } + if m.Quobyte != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) + n126, err := m.Quobyte.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n126 + } + if m.AzureDisk != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) + n127, err := m.AzureDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n127 + } + if m.PhotonPersistentDisk != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) + n128, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n128 + } + if m.PortworxVolume != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) + n129, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n129 + } + if m.ScaleIO != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) + n130, err := m.ScaleIO.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n130 + } + if m.Local != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Local.Size())) + n131, err := m.Local.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n131 + } + if m.StorageOS != nil { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) + n132, err := m.StorageOS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n132 + } + if m.CSI != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) + n133, err := 
m.CSI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n133 } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.TopologyKey))) - i += copy(data[i:], m.TopologyKey) return i, nil } -func (m *PodAntiAffinity) Marshal() (data []byte, err error) { +func (m *PersistentVolumeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodAntiAffinity) MarshalTo(data []byte) (int, error) { +func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for _, k := range keysForCapacity { + dAtA[i] = 0xa + i++ + v := m.Capacity[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n134, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n134 + } + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeSource.Size())) + n135, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n135 + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.ClaimRef != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ClaimRef.Size())) + n136, err := m.ClaimRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n136 + } + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PersistentVolumeReclaimPolicy))) + i += copy(dAtA[i:], m.PersistentVolumeReclaimPolicy) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageClassName))) + i += copy(dAtA[i:], m.StorageClassName) + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.VolumeMode != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i += copy(dAtA[i:], *m.VolumeMode) + } + if m.NodeAffinity != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size())) + n137, err := m.NodeAffinity.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n137 + } + return i, nil +} + +func (m *PersistentVolumeStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + return i, nil +} + +func (m *PhotonPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PhotonPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PdID))) + i += copy(dAtA[i:], m.PdID) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + return i, nil +} + +func (m *Pod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Pod) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n138, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n138 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n139, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n139 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n140, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n140 + return i, nil +} + +func (m *PodAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodAffinity) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -5800,10 +6739,10 @@ func (m *PodAntiAffinity) MarshalTo(data []byte) (int, error) { } if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -5813,439 +6752,655 @@ func (m *PodAntiAffinity) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *PodAttachOptions) Marshal() (data []byte, err error) { +func (m *PodAffinityTerm) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodAttachOptions) MarshalTo(data []byte) (int, error) { +func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l 
int _ = l - data[i] = 0x8 - i++ - if m.Stdin { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x10 - i++ - if m.Stdout { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x18 - i++ - if m.Stderr { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x20 - i++ - if m.TTY { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Container))) - i += copy(data[i:], m.Container) - return i, nil -} - -func (m *PodCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) - n119, err := m.LastProbeTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n119 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n120, err := m.LastTransitionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n120 - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - return i, nil -} - -func (m *PodExecOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodExecOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - if m.Stdin { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x10 - i++ - if m.Stdout { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x18 - i++ - if m.Stderr { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x20 - i++ - if m.TTY { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Container))) - i += copy(data[i:], m.Container) - if len(m.Command) > 0 { - for _, s := range m.Command { - data[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *PodList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n121, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n121 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodLogOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - 
n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodLogOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Container))) - i += copy(data[i:], m.Container) - data[i] = 0x10 - i++ - if m.Follow { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x18 - i++ - if m.Previous { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - if m.SinceSeconds != nil { - data[i] = 0x20 + if m.LabelSelector != nil { + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(*m.SinceSeconds)) - } - if m.SinceTime != nil { - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(m.SinceTime.Size())) - n122, err := m.SinceTime.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.LabelSelector.Size())) + n141, err := m.LabelSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n122 + i += n141 } - data[i] = 0x30 + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x1a i++ - if m.Timestamps { - data[i] = 1 - } else { - data[i] = 0 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) + i += copy(dAtA[i:], m.TopologyKey) + return i, nil +} + +func (m *PodAntiAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - i++ - if m.TailLines != nil { - data[i] = 0x38 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.TailLines)) + return dAtA[:n], nil +} + +func (m *PodAntiAffinity) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - if m.LimitBytes != nil { - data[i] = 0x40 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.LimitBytes)) + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } return i, nil } -func (m *PodPortForwardOptions) Marshal() (data []byte, err error) { +func (m *PodAttachOptions) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodPortForwardOptions) MarshalTo(data []byte) (int, error) { +func (m *PodAttachOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x10 + i++ + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x18 + i++ + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x20 + i++ + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, 
i, uint64(len(m.Container))) + i += copy(dAtA[i:], m.Container) + return i, nil +} + +func (m *PodCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) + n142, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n142 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n143, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n143 + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Searches) > 0 { + for _, s := range m.Searches { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Value != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i += copy(dAtA[i:], *m.Value) + } + return i, nil +} + +func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodExecOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x10 + i++ + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x18 + i++ + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x20 + i++ + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + 
dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i += copy(dAtA[i:], m.Container) + if len(m.Command) > 0 { + for _, s := range m.Command { + dAtA[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *PodList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n144, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n144 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodLogOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i += copy(dAtA[i:], m.Container) + dAtA[i] = 0x10 + i++ + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x18 + i++ + if m.Previous { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SinceSeconds != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) + n145, err := m.SinceTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n145 + } + dAtA[i] = 0x30 + i++ + if m.Timestamps { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.TailLines != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + } + return i, nil +} + +func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodPortForwardOptions) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Ports) > 0 { for _, num := range m.Ports { - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(num)) + i = encodeVarintGenerated(dAtA, i, uint64(num)) } } return i, nil } -func (m *PodProxyOptions) Marshal() (data []byte, err error) { +func (m *PodProxyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodProxyOptions) MarshalTo(data []byte) (int, error) { +func (m *PodProxyOptions) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) return i, nil } -func (m *PodSecurityContext) Marshal() (data []byte, err error) { +func (m *PodReadinessGate) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodSecurityContext) MarshalTo(data []byte) (int, error) { +func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType))) + i += copy(dAtA[i:], m.ConditionType) + return i, nil +} + +func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.SELinuxOptions != nil { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) - n123, err := m.SELinuxOptions.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) + n146, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n123 + i += n146 } if m.RunAsUser != nil { - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(*m.RunAsUser)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) } if m.RunAsNonRoot != nil { - data[i] = 0x18 + dAtA[i] = 0x18 i++ if *m.RunAsNonRoot { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ } if len(m.SupplementalGroups) > 0 { for _, num := range m.SupplementalGroups { - data[i] = 0x20 + dAtA[i] = 0x20 i++ - i = encodeVarintGenerated(data, i, uint64(num)) + i = encodeVarintGenerated(dAtA, i, uint64(num)) } } if m.FSGroup != nil { - data[i] = 0x28 + dAtA[i] = 0x28 i++ - i = encodeVarintGenerated(data, i, uint64(*m.FSGroup)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.FSGroup)) + } + if m.RunAsGroup != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) + } + if len(m.Sysctls) > 0 { + for _, msg := range m.Sysctls { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } return i, nil } -func (m *PodSignature) Marshal() (data []byte, err error) { +func (m *PodSignature) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodSignature) MarshalTo(data []byte) (int, error) { +func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.PodController != nil { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.PodController.Size())) - n124, err := m.PodController.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.PodController.Size())) + n147, err := m.PodController.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n124 + i += n147 } return i, nil } -func (m *PodSpec) Marshal() (data []byte, err error) { +func (m *PodSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = 
make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodSpec) MarshalTo(data []byte) (int, error) { +func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Volumes) > 0 { for _, msg := range m.Volumes { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -6254,147 +7409,152 @@ func (m *PodSpec) MarshalTo(data []byte) (int, error) { } if len(m.Containers) > 0 { for _, msg := range m.Containers { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.RestartPolicy))) - i += copy(data[i:], m.RestartPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) + i += copy(dAtA[i:], m.RestartPolicy) if m.TerminationGracePeriodSeconds != nil { - data[i] = 0x20 + dAtA[i] = 0x20 i++ - i = encodeVarintGenerated(data, i, uint64(*m.TerminationGracePeriodSeconds)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) } if m.ActiveDeadlineSeconds != nil { - data[i] = 0x28 + dAtA[i] = 0x28 i++ - i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) } - data[i] = 0x32 + dAtA[i] = 0x32 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.DNSPolicy))) - i += copy(data[i:], m.DNSPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy))) + i += copy(dAtA[i:], m.DNSPolicy) if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) for k := range m.NodeSelector { - data[i] = 0x3a + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for _, k := range keysForNodeSelector { + dAtA[i] = 0x3a i++ - v := m.NodeSelector[k] + v := m.NodeSelector[string(k)] mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } - data[i] = 0x42 + dAtA[i] = 0x42 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ServiceAccountName))) - i += copy(data[i:], m.ServiceAccountName) - data[i] = 0x4a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i += copy(dAtA[i:], m.ServiceAccountName) + dAtA[i] = 0x4a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.DeprecatedServiceAccount))) - i += copy(data[i:], m.DeprecatedServiceAccount) - data[i] = 0x52 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount))) + i += copy(dAtA[i:], 
m.DeprecatedServiceAccount) + dAtA[i] = 0x52 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.NodeName))) - i += copy(data[i:], m.NodeName) - data[i] = 0x58 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + dAtA[i] = 0x58 i++ if m.HostNetwork { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ - data[i] = 0x60 + dAtA[i] = 0x60 i++ if m.HostPID { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ - data[i] = 0x68 + dAtA[i] = 0x68 i++ if m.HostIPC { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ if m.SecurityContext != nil { - data[i] = 0x72 + dAtA[i] = 0x72 i++ - i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size())) - n125, err := m.SecurityContext.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) + n148, err := m.SecurityContext.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n125 + i += n148 } if len(m.ImagePullSecrets) > 0 { for _, msg := range m.ImagePullSecrets { - data[i] = 0x7a + dAtA[i] = 0x7a i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - data[i] = 0x82 + dAtA[i] = 0x82 i++ - data[i] = 0x1 + dAtA[i] = 0x1 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Hostname))) - i += copy(data[i:], m.Hostname) - data[i] = 0x8a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + dAtA[i] = 0x8a i++ - data[i] = 0x1 + dAtA[i] = 0x1 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Subdomain))) - i += copy(data[i:], m.Subdomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) + i += copy(dAtA[i:], m.Subdomain) if m.Affinity != nil { - data[i] = 0x92 + dAtA[i] = 0x92 i++ - data[i] = 0x1 + dAtA[i] = 0x1 i++ - i = encodeVarintGenerated(data, i, uint64(m.Affinity.Size())) - n126, err := m.Affinity.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) + n149, err := m.Affinity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n126 + i += n149 } - data[i] = 0x9a + dAtA[i] = 0x9a i++ - data[i] = 0x1 + dAtA[i] = 0x1 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SchedulerName))) - i += copy(data[i:], m.SchedulerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) + i += copy(dAtA[i:], m.SchedulerName) if len(m.InitContainers) > 0 { for _, msg := range m.InitContainers { - data[i] = 0xa2 + dAtA[i] = 0xa2 i++ - data[i] = 0x1 + dAtA[i] = 0x1 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -6402,219 +7562,288 @@ func (m *PodSpec) MarshalTo(data []byte) (int, error) { } } if m.AutomountServiceAccountToken != nil { - data[i] = 0xa8 + dAtA[i] = 0xa8 i++ - data[i] = 0x1 + dAtA[i] = 0x1 i++ if *m.AutomountServiceAccountToken { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ } if len(m.Tolerations) > 0 { for _, msg := range m.Tolerations { - data[i] = 0xb2 + dAtA[i] = 0xb2 i++ - data[i] = 0x1 + dAtA[i] = 0x1 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, 
err } i += n } } - return i, nil -} - -func (m *PodStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) - i += copy(data[i:], m.Phase) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0x12 + if len(m.HostAliases) > 0 { + for _, msg := range m.HostAliases { + dAtA[i] = 0xba i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - data[i] = 0x1a + dAtA[i] = 0xc2 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x22 + dAtA[i] = 0x1 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.HostIP))) - i += copy(data[i:], m.HostIP) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.PodIP))) - i += copy(data[i:], m.PodIP) - if m.StartTime != nil { - data[i] = 0x3a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) + i += copy(dAtA[i:], m.PriorityClassName) + if m.Priority != nil { + dAtA[i] = 0xc8 i++ - i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) - n127, err := m.StartTime.MarshalTo(data[i:]) + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + } + if m.DNSConfig != nil { + dAtA[i] = 0xd2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DNSConfig.Size())) + n150, err := m.DNSConfig.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n127 + i += n150 + } + if m.ShareProcessNamespace != nil { + dAtA[i] = 0xd8 + i++ + dAtA[i] = 0x1 + i++ + if *m.ShareProcessNamespace { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.ReadinessGates) > 0 { + for _, msg := range m.ReadinessGates { + dAtA[i] = 0xe2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i += copy(dAtA[i:], m.HostIP) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) + i += 
copy(dAtA[i:], m.PodIP) + if m.StartTime != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) + n151, err := m.StartTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n151 } if len(m.ContainerStatuses) > 0 { for _, msg := range m.ContainerStatuses { - data[i] = 0x42 + dAtA[i] = 0x42 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - data[i] = 0x4a + dAtA[i] = 0x4a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.QOSClass))) - i += copy(data[i:], m.QOSClass) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.QOSClass))) + i += copy(dAtA[i:], m.QOSClass) if len(m.InitContainerStatuses) > 0 { for _, msg := range m.InitContainerStatuses { - data[i] = 0x52 + dAtA[i] = 0x52 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NominatedNodeName))) + i += copy(dAtA[i:], m.NominatedNodeName) return i, nil } -func (m *PodStatusResult) Marshal() (data []byte, err error) { +func (m *PodStatusResult) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodStatusResult) MarshalTo(data []byte) (int, error) { +func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n128, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n152, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n128 - data[i] = 0x12 + i += n152 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n129, err := m.Status.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n153, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n129 + i += n153 return i, nil } -func (m *PodTemplate) Marshal() (data []byte, err error) { +func (m *PodTemplate) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodTemplate) MarshalTo(data []byte) (int, error) { +func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n130, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n154, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n130 - data[i] = 0x12 + i += n154 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n131, err := m.Template.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n155, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, 
err } - i += n131 + i += n155 return i, nil } -func (m *PodTemplateList) Marshal() (data []byte, err error) { +func (m *PodTemplateList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodTemplateList) MarshalTo(data []byte) (int, error) { +func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n132, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n156, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n132 + i += n156 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -6624,231 +7853,231 @@ func (m *PodTemplateList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *PodTemplateSpec) Marshal() (data []byte, err error) { +func (m *PodTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodTemplateSpec) MarshalTo(data []byte) (int, error) { +func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n133, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n157, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n133 - data[i] = 0x12 + i += n157 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n134, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n158, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n134 + i += n158 return i, nil } -func (m *PortworxVolumeSource) Marshal() (data []byte, err error) { +func (m *PortworxVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PortworxVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *PortworxVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.VolumeID))) - i += copy(data[i:], m.VolumeID) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i += copy(dAtA[i:], m.VolumeID) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - data[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x18 i++ if m.ReadOnly { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ return i, nil } -func (m 
*Preconditions) Marshal() (data []byte, err error) { +func (m *Preconditions) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Preconditions) MarshalTo(data []byte) (int, error) { +func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.UID != nil { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(*m.UID))) - i += copy(data[i:], *m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i += copy(dAtA[i:], *m.UID) } return i, nil } -func (m *PreferAvoidPodsEntry) Marshal() (data []byte, err error) { +func (m *PreferAvoidPodsEntry) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PreferAvoidPodsEntry) MarshalTo(data []byte) (int, error) { +func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.PodSignature.Size())) - n135, err := m.PodSignature.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.PodSignature.Size())) + n159, err := m.PodSignature.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n135 - data[i] = 0x12 + i += n159 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.EvictionTime.Size())) - n136, err := m.EvictionTime.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size())) + n160, err := m.EvictionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n136 - data[i] = 0x1a + i += n160 + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) return i, nil } -func (m *PreferredSchedulingTerm) Marshal() (data []byte, err error) { +func (m *PreferredSchedulingTerm) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PreferredSchedulingTerm) MarshalTo(data []byte) (int, error) { +func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(m.Weight)) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Preference.Size())) - n137, err := m.Preference.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Preference.Size())) + n161, err := m.Preference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n137 + i += n161 return i, nil } -func (m *Probe) Marshal() (data []byte, err error) { +func (m *Probe) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, 
size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Probe) MarshalTo(data []byte) (int, error) { +func (m *Probe) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.Handler.Size())) - n138, err := m.Handler.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Handler.Size())) + n162, err := m.Handler.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n138 - data[i] = 0x10 + i += n162 + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(m.InitialDelaySeconds)) - data[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds)) + dAtA[i] = 0x18 i++ - i = encodeVarintGenerated(data, i, uint64(m.TimeoutSeconds)) - data[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.TimeoutSeconds)) + dAtA[i] = 0x20 i++ - i = encodeVarintGenerated(data, i, uint64(m.PeriodSeconds)) - data[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) + dAtA[i] = 0x28 i++ - i = encodeVarintGenerated(data, i, uint64(m.SuccessThreshold)) - data[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.SuccessThreshold)) + dAtA[i] = 0x30 i++ - i = encodeVarintGenerated(data, i, uint64(m.FailureThreshold)) + i = encodeVarintGenerated(dAtA, i, uint64(m.FailureThreshold)) return i, nil } -func (m *ProjectedVolumeSource) Marshal() (data []byte, err error) { +func (m *ProjectedVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ProjectedVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *ProjectedVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Sources) > 0 { for _, msg := range m.Sources { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -6856,1190 +8085,268 @@ func (m *ProjectedVolumeSource) MarshalTo(data []byte) (int, error) { } } if m.DefaultMode != nil { - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(*m.DefaultMode)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) } return i, nil } -func (m *QuobyteVolumeSource) Marshal() (data []byte, err error) { +func (m *QuobyteVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *QuobyteVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Registry))) - i += copy(data[i:], m.Registry) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry))) + i += copy(dAtA[i:], m.Registry) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Volume))) - i += copy(data[i:], m.Volume) - data[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Volume))) + i += copy(dAtA[i:], m.Volume) + dAtA[i] = 0x18 i++ if m.ReadOnly { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ - data[i] = 0x22 + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.User))) - i += copy(data[i:], m.User) - data[i] = 0x2a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Group))) - i += copy(data[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) return i, nil } -func (m *RBDVolumeSource) Marshal() (data []byte, err error) { +func (m *RBDPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *RBDVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.CephMonitors) > 0 { for _, s := range m.CephMonitors { - data[i] = 0xa + dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.RBDImage))) - i += copy(data[i:], m.RBDImage) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) + i += copy(dAtA[i:], m.RBDImage) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - data[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.RBDPool))) - i += copy(data[i:], m.RBDPool) - data[i] = 0x2a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) + i += copy(dAtA[i:], m.RBDPool) + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.RadosUser))) - i += copy(data[i:], m.RadosUser) - data[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) + i += copy(dAtA[i:], m.RadosUser) + dAtA[i] = 0x32 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Keyring))) - i += copy(data[i:], m.Keyring) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) + i += copy(dAtA[i:], m.Keyring) if m.SecretRef != nil { - data[i] = 0x3a + dAtA[i] = 0x3a i++ - i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n139, err := m.SecretRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n139 - } - data[i] = 0x40 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *RangeAllocation) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *RangeAllocation) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n140, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n140 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Range))) - i += copy(data[i:], m.Range) - if 
m.Data != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Data))) - i += copy(data[i:], m.Data) - } - return i, nil -} - -func (m *ReplicationController) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicationController) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n141, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n141 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n142, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n142 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n143, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n143 - return i, nil -} - -func (m *ReplicationControllerCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicationControllerCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n144, err := m.LastTransitionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n144 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - return i, nil -} - -func (m *ReplicationControllerList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicationControllerList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n145, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n145 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ReplicationControllerSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicationControllerSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) - } - if len(m.Selector) > 0 { - for k := range m.Selector { - data[i] = 0x12 - i++ - v := m.Selector[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = 
encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - if m.Template != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n146, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n146 - } - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) - return i, nil -} - -func (m *ReplicationControllerStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicationControllerStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FullyLabeledReplicas)) - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ResourceFieldSelector) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceFieldSelector) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContainerName))) - i += copy(data[i:], m.ContainerName) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) - i += copy(data[i:], m.Resource) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Divisor.Size())) - n147, err := m.Divisor.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n147 - return i, nil -} - -func (m *ResourceQuota) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceQuota) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n148, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n148 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n149, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n149 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n150, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n150 - return i, nil -} - -func (m *ResourceQuotaList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceQuotaList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, 
uint64(m.ListMeta.Size())) - n151, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n151 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ResourceQuotaSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceQuotaSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Hard) > 0 { - for k := range m.Hard { - data[i] = 0xa - i++ - v := m.Hard[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n152, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n152 - } - } - if len(m.Scopes) > 0 { - for _, s := range m.Scopes { - data[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *ResourceQuotaStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceQuotaStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Hard) > 0 { - for k := range m.Hard { - data[i] = 0xa - i++ - v := m.Hard[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n153, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n153 - } - } - if len(m.Used) > 0 { - for k := range m.Used { - data[i] = 0x12 - i++ - v := m.Used[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n154, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n154 - } - } - return i, nil -} - -func (m *ResourceRequirements) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceRequirements) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Limits) > 0 { - for k := range m.Limits { - data[i] = 0xa - i++ - v := m.Limits[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = 
encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n155, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n155 - } - } - if len(m.Requests) > 0 { - for k := range m.Requests { - data[i] = 0x12 - i++ - v := m.Requests[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n156, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n156 - } - } - return i, nil -} - -func (m *SELinuxOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SELinuxOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.User))) - i += copy(data[i:], m.User) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Role))) - i += copy(data[i:], m.Role) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Level))) - i += copy(data[i:], m.Level) - return i, nil -} - -func (m *ScaleIOVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ScaleIOVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Gateway))) - i += copy(data[i:], m.Gateway) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.System))) - i += copy(data[i:], m.System) - if m.SecretRef != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n157, err := m.SecretRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n157 - } - data[i] = 0x20 - i++ - if m.SSLEnabled { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ProtectionDomain))) - i += copy(data[i:], m.ProtectionDomain) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.StoragePool))) - i += copy(data[i:], m.StoragePool) - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.StorageMode))) - i += copy(data[i:], m.StorageMode) - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.VolumeName))) - i += copy(data[i:], m.VolumeName) - data[i] = 0x4a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - data[i] = 0x50 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *Secret) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Secret) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = 
encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n158, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n158 - if len(m.Data) > 0 { - for k := range m.Data { - data[i] = 0x12 - i++ - v := m.Data[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - if len(m.StringData) > 0 { - for k := range m.StringData { - data[i] = 0x22 - i++ - v := m.StringData[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - return i, nil -} - -func (m *SecretEnvSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SecretEnvSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) - n159, err := m.LocalObjectReference.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n159 - if m.Optional != nil { - data[i] = 0x10 - i++ - if *m.Optional { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - return i, nil -} - -func (m *SecretKeySelector) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SecretKeySelector) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) - n160, err := m.LocalObjectReference.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n160 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - if m.Optional != nil { - data[i] = 0x18 - i++ - if *m.Optional { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - return i, nil -} - -func (m *SecretList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SecretList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n161, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n161 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *SecretProjection) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, 
err - } - return data[:n], nil -} - -func (m *SecretProjection) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) - n162, err := m.LocalObjectReference.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n162 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Optional != nil { - data[i] = 0x20 - i++ - if *m.Optional { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - return i, nil -} - -func (m *SecretVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SecretVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) - i += copy(data[i:], m.SecretName) - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.DefaultMode != nil { - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.DefaultMode)) - } - if m.Optional != nil { - data[i] = 0x20 - i++ - if *m.Optional { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - return i, nil -} - -func (m *SecurityContext) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SecurityContext) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Capabilities != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Capabilities.Size())) - n163, err := m.Capabilities.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n163, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n163 } - if m.Privileged != nil { - data[i] = 0x10 - i++ - if *m.Privileged { - data[i] = 1 - } else { - data[i] = 0 - } - i++ + dAtA[i] = 0x40 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.SELinuxOptions != nil { - data[i] = 0x1a + i++ + return i, nil +} + +func (m *RBDVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) + i += copy(dAtA[i:], m.RBDImage) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) + i += copy(dAtA[i:], m.RBDPool) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) + i 
+= copy(dAtA[i:], m.RadosUser) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) + i += copy(dAtA[i:], m.Keyring) + if m.SecretRef != nil { + dAtA[i] = 0x3a i++ - i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) - n164, err := m.SELinuxOptions.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n164, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n164 } - if m.RunAsUser != nil { - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - data[i] = 0x28 - i++ - if *m.RunAsNonRoot { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - if m.ReadOnlyRootFilesystem != nil { - data[i] = 0x30 - i++ - if *m.ReadOnlyRootFilesystem { - data[i] = 1 - } else { - data[i] = 0 - } - i++ + dAtA[i] = 0x40 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i++ return i, nil } -func (m *SerializedReference) Marshal() (data []byte, err error) { +func (m *RangeAllocation) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *SerializedReference) MarshalTo(data []byte) (int, error) { +func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.Reference.Size())) - n165, err := m.Reference.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n165, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n165 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) + i += copy(dAtA[i:], m.Range) + if m.Data != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } return i, nil } -func (m *Service) Marshal() (data []byte, err error) { +func (m *ReplicationController) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Service) MarshalTo(data []byte) (int, error) { +func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n166, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n166, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n166 - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n167, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n167, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n167 - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n168, err := m.Status.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n168, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -8047,35 +8354,1288 @@ func (m *Service) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *ServiceAccount) Marshal() 
(data []byte, err error) { +func (m *ReplicationControllerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ServiceAccount) MarshalTo(data []byte) (int, error) { +func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n169, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n169, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n169 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *ReplicationControllerList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n170, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n170 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicationControllerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for _, k := range keysForSelector { + dAtA[i] = 0x12 + i++ + v := m.Selector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.Template != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n171, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n171 + } + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + return i, nil +} + +func (m *ReplicationControllerStatus) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationControllerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceFieldSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i += copy(dAtA[i:], m.ContainerName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i += copy(dAtA[i:], m.Resource) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Divisor.Size())) + n172, err := m.Divisor.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n172 + return i, nil +} + +func (m *ResourceQuota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n173, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n173 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n174, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n174 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n175, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n175 + return i, nil +} + +func (m *ResourceQuotaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n176, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n176 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceQuotaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hard) > 0 { + keysForHard := make([]string, 0, len(m.Hard)) + for k := range m.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + for _, k := range keysForHard { + dAtA[i] = 0xa + i++ + v := m.Hard[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n177, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n177 + } + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.ScopeSelector != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ScopeSelector.Size())) + n178, err := m.ScopeSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n178 + } + return i, nil +} + +func (m *ResourceQuotaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hard) > 0 { + keysForHard := make([]string, 0, len(m.Hard)) + for k := range m.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + for _, k := range keysForHard { + dAtA[i] = 0xa + i++ + v := m.Hard[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n179, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n179 + } + } + if len(m.Used) > 0 { + keysForUsed := make([]string, 0, len(m.Used)) + for k := range m.Used { + keysForUsed = append(keysForUsed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + for _, k := range keysForUsed { + dAtA[i] = 0x12 + i++ + v := m.Used[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n180, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n180 + } + } + return i, nil +} + +func (m *ResourceRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, 
err + } + return dAtA[:n], nil +} + +func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Limits) > 0 { + keysForLimits := make([]string, 0, len(m.Limits)) + for k := range m.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + for _, k := range keysForLimits { + dAtA[i] = 0xa + i++ + v := m.Limits[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n181, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n181 + } + } + if len(m.Requests) > 0 { + keysForRequests := make([]string, 0, len(m.Requests)) + for k := range m.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + for _, k := range keysForRequests { + dAtA[i] = 0x12 + i++ + v := m.Requests[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n182, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n182 + } + } + return i, nil +} + +func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SELinuxOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i += copy(dAtA[i:], m.Level) + return i, nil +} + +func (m *ScaleIOPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) + i += copy(dAtA[i:], m.Gateway) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) + i += copy(dAtA[i:], m.System) + if m.SecretRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n183, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n183 + } + dAtA[i] = 0x20 + i++ + if m.SSLEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.ProtectionDomain))) + i += copy(dAtA[i:], m.ProtectionDomain) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) + i += copy(dAtA[i:], m.StoragePool) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) + i += copy(dAtA[i:], m.StorageMode) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i += copy(dAtA[i:], m.VolumeName) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x50 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *ScaleIOVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) + i += copy(dAtA[i:], m.Gateway) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) + i += copy(dAtA[i:], m.System) + if m.SecretRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n184, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n184 + } + dAtA[i] = 0x20 + i++ + if m.SSLEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) + i += copy(dAtA[i:], m.ProtectionDomain) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) + i += copy(dAtA[i:], m.StoragePool) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) + i += copy(dAtA[i:], m.StorageMode) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i += copy(dAtA[i:], m.VolumeName) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x50 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *ScopeSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopeSelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ScopedResourceSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedResourceSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ScopeName))) + i += copy(dAtA[i:], m.ScopeName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i += copy(dAtA[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + 
dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *Secret) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Secret) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n185, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n185 + if len(m.Data) > 0 { + keysForData := make([]string, 0, len(m.Data)) + for k := range m.Data { + keysForData = append(keysForData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + for _, k := range keysForData { + dAtA[i] = 0x12 + i++ + v := m.Data[string(k)] + byteSize := 0 + if v != nil { + byteSize = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + byteSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if len(m.StringData) > 0 { + keysForStringData := make([]string, 0, len(m.StringData)) + for k := range m.StringData { + keysForStringData = append(keysForStringData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + for _, k := range keysForStringData { + dAtA[i] = 0x22 + i++ + v := m.StringData[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *SecretEnvSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n186, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n186 + if m.Optional != nil { + dAtA[i] = 0x10 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecretKeySelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n187, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n187 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + if m.Optional != nil { + dAtA[i] = 0x18 + i++ + if *m.Optional { + dAtA[i] 
= 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecretList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n188, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n188 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SecretProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n189, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n189 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Optional != nil { + dAtA[i] = 0x20 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecretReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + return i, nil +} + +func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i += copy(dAtA[i:], m.SecretName) + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.DefaultMode != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + } + if m.Optional != nil { + dAtA[i] = 0x20 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Capabilities != nil { + dAtA[i] = 0xa + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size())) + n190, err := m.Capabilities.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n190 + } + if m.Privileged != nil { + dAtA[i] = 0x10 + i++ + if *m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.SELinuxOptions != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) + n191, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n191 + } + if m.RunAsUser != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + dAtA[i] = 0x28 + i++ + if *m.RunAsNonRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ReadOnlyRootFilesystem != nil { + dAtA[i] = 0x30 + i++ + if *m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.AllowPrivilegeEscalation != nil { + dAtA[i] = 0x38 + i++ + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.RunAsGroup != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) + } + return i, nil +} + +func (m *SerializedReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Reference.Size())) + n192, err := m.Reference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n192 + return i, nil +} + +func (m *Service) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Service) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n193, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n193 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n194, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n194 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n195, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n195 + return i, nil +} + +func (m *ServiceAccount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n196, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n196 if len(m.Secrets) > 0 { for _, msg := range m.Secrets { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -8084,10 +9644,10 @@ func (m *ServiceAccount) MarshalTo(data []byte) (int, error) { } if len(m.ImagePullSecrets) > 0 { for _, msg := range m.ImagePullSecrets { 
- data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -8095,47 +9655,47 @@ func (m *ServiceAccount) MarshalTo(data []byte) (int, error) { } } if m.AutomountServiceAccountToken != nil { - data[i] = 0x20 + dAtA[i] = 0x20 i++ if *m.AutomountServiceAccountToken { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ } return i, nil } -func (m *ServiceAccountList) Marshal() (data []byte, err error) { +func (m *ServiceAccountList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ServiceAccountList) MarshalTo(data []byte) (int, error) { +func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n170, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n197, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n170 + i += n197 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -8145,35 +9705,66 @@ func (m *ServiceAccountList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *ServiceList) Marshal() (data []byte, err error) { +func (m *ServiceAccountTokenProjection) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ServiceList) MarshalTo(data []byte) (int, error) { +func (m *ServiceAccountTokenProjection) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n171, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) + i += copy(dAtA[i:], m.Audience) + if m.ExpirationSeconds != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + return i, nil +} + +func (m *ServiceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n198, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n171 + i += n198 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -8183,89 +9774,89 @@ func (m *ServiceList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *ServicePort) Marshal() (data []byte, err error) { +func (m *ServicePort) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ServicePort) MarshalTo(data []byte) (int, error) { +func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Protocol))) - i += copy(data[i:], m.Protocol) - data[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i += copy(dAtA[i:], m.Protocol) + dAtA[i] = 0x18 i++ - i = encodeVarintGenerated(data, i, uint64(m.Port)) - data[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(m.TargetPort.Size())) - n172, err := m.TargetPort.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort.Size())) + n199, err := m.TargetPort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n172 - data[i] = 0x28 + i += n199 + dAtA[i] = 0x28 i++ - i = encodeVarintGenerated(data, i, uint64(m.NodePort)) + i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) return i, nil } -func (m *ServiceProxyOptions) Marshal() (data []byte, err error) { +func (m *ServiceProxyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ServiceProxyOptions) MarshalTo(data []byte) (int, error) { +func (m *ServiceProxyOptions) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) return i, nil } -func (m *ServiceSpec) Marshal() (data []byte, err error) { +func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ServiceSpec) MarshalTo(data []byte) (int, error) { +func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Ports) > 0 { for _, msg := range m.Ports { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -8273,741 +9864,1043 @@ func (m *ServiceSpec) MarshalTo(data []byte) (int, error) { } } if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { - data[i] = 0x12 + keysForSelector = 
append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for _, k := range keysForSelector { + dAtA[i] = 0x12 i++ - v := m.Selector[k] + v := m.Selector[string(k)] mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ClusterIP))) - i += copy(data[i:], m.ClusterIP) - data[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP))) + i += copy(dAtA[i:], m.ClusterIP) + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) if len(m.ExternalIPs) > 0 { for _, s := range m.ExternalIPs { - data[i] = 0x2a + dAtA[i] = 0x2a i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } - if len(m.DeprecatedPublicIPs) > 0 { - for _, s := range m.DeprecatedPublicIPs { - data[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x3a + dAtA[i] = 0x3a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SessionAffinity))) - i += copy(data[i:], m.SessionAffinity) - data[i] = 0x42 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SessionAffinity))) + i += copy(dAtA[i:], m.SessionAffinity) + dAtA[i] = 0x42 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.LoadBalancerIP))) - i += copy(data[i:], m.LoadBalancerIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerIP))) + i += copy(dAtA[i:], m.LoadBalancerIP) if len(m.LoadBalancerSourceRanges) > 0 { for _, s := range m.LoadBalancerSourceRanges { - data[i] = 0x4a + dAtA[i] = 0x4a i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } - data[i] = 0x52 + dAtA[i] = 0x52 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ExternalName))) - i += copy(data[i:], m.ExternalName) - return i, nil -} - -func (m *ServiceStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ServiceStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalName))) + i += copy(dAtA[i:], m.ExternalName) + dAtA[i] = 0x5a i++ - i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size())) - n173, err := m.LoadBalancer.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n173 - return i, nil -} - -func (m *Sysctl) Marshal() (data []byte, err error) { - 
size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Sysctl) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalTrafficPolicy))) + i += copy(dAtA[i:], m.ExternalTrafficPolicy) + dAtA[i] = 0x60 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.HealthCheckNodePort)) + dAtA[i] = 0x68 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Value))) - i += copy(data[i:], m.Value) - return i, nil -} - -func (m *TCPSocketAction) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *TCPSocketAction) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) - n174, err := m.Port.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n174 - return i, nil -} - -func (m *Taint) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Taint) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Value))) - i += copy(data[i:], m.Value) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Effect))) - i += copy(data[i:], m.Effect) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.TimeAdded.Size())) - n175, err := m.TimeAdded.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n175 - return i, nil -} - -func (m *Toleration) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Toleration) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) - i += copy(data[i:], m.Operator) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Value))) - i += copy(data[i:], m.Value) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Effect))) - i += copy(data[i:], m.Effect) - if m.TolerationSeconds != nil { - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.TolerationSeconds)) - } - return i, nil -} - -func (m *Volume) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Volume) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.VolumeSource.Size())) - n176, err := m.VolumeSource.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n176 - 
return i, nil -} - -func (m *VolumeMount) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *VolumeMount) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x10 - i++ - if m.ReadOnly { - data[i] = 1 + if m.PublishNotReadyAddresses { + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.MountPath))) - i += copy(data[i:], m.MountPath) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SubPath))) - i += copy(data[i:], m.SubPath) - return i, nil -} - -func (m *VolumeProjection) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *VolumeProjection) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Secret != nil { - data[i] = 0xa + if m.SessionAffinityConfig != nil { + dAtA[i] = 0x72 i++ - i = encodeVarintGenerated(data, i, uint64(m.Secret.Size())) - n177, err := m.Secret.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n177 - } - if m.DownwardAPI != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.DownwardAPI.Size())) - n178, err := m.DownwardAPI.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n178 - } - if m.ConfigMap != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.ConfigMap.Size())) - n179, err := m.ConfigMap.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n179 - } - return i, nil -} - -func (m *VolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *VolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.HostPath != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.HostPath.Size())) - n180, err := m.HostPath.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n180 - } - if m.EmptyDir != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.EmptyDir.Size())) - n181, err := m.EmptyDir.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n181 - } - if m.GCEPersistentDisk != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size())) - n182, err := m.GCEPersistentDisk.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n182 - } - if m.AWSElasticBlockStore != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size())) - n183, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n183 - } - if m.GitRepo != nil { - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(m.GitRepo.Size())) - n184, err := m.GitRepo.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n184 - } - if m.Secret != nil { - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Secret.Size())) - n185, err := m.Secret.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n185 - } - if m.NFS != nil { - data[i] = 0x3a - i++ - i = 
encodeVarintGenerated(data, i, uint64(m.NFS.Size())) - n186, err := m.NFS.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n186 - } - if m.ISCSI != nil { - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size())) - n187, err := m.ISCSI.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n187 - } - if m.Glusterfs != nil { - data[i] = 0x4a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size())) - n188, err := m.Glusterfs.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n188 - } - if m.PersistentVolumeClaim != nil { - data[i] = 0x52 - i++ - i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeClaim.Size())) - n189, err := m.PersistentVolumeClaim.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n189 - } - if m.RBD != nil { - data[i] = 0x5a - i++ - i = encodeVarintGenerated(data, i, uint64(m.RBD.Size())) - n190, err := m.RBD.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n190 - } - if m.FlexVolume != nil { - data[i] = 0x62 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size())) - n191, err := m.FlexVolume.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n191 - } - if m.Cinder != nil { - data[i] = 0x6a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Cinder.Size())) - n192, err := m.Cinder.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n192 - } - if m.CephFS != nil { - data[i] = 0x72 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size())) - n193, err := m.CephFS.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n193 - } - if m.Flocker != nil { - data[i] = 0x7a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Flocker.Size())) - n194, err := m.Flocker.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n194 - } - if m.DownwardAPI != nil { - data[i] = 0x82 - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.DownwardAPI.Size())) - n195, err := m.DownwardAPI.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n195 - } - if m.FC != nil { - data[i] = 0x8a - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FC.Size())) - n196, err := m.FC.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n196 - } - if m.AzureFile != nil { - data[i] = 0x92 - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size())) - n197, err := m.AzureFile.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n197 - } - if m.ConfigMap != nil { - data[i] = 0x9a - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ConfigMap.Size())) - n198, err := m.ConfigMap.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n198 - } - if m.VsphereVolume != nil { - data[i] = 0xa2 - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.VsphereVolume.Size())) - n199, err := m.VsphereVolume.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n199 - } - if m.Quobyte != nil { - data[i] = 0xaa - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Quobyte.Size())) - n200, err := m.Quobyte.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.SessionAffinityConfig.Size())) + n200, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n200 } - if m.AzureDisk != nil { - data[i] = 0xb2 - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.AzureDisk.Size())) - n201, err := 
m.AzureDisk.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n201 + return i, nil +} + +func (m *ServiceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - if m.PhotonPersistentDisk != nil { - data[i] = 0xba + return dAtA[:n], nil +} + +func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) + n201, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n201 + return i, nil +} + +func (m *SessionAffinityConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ClientIP != nil { + dAtA[i] = 0xa i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.PhotonPersistentDisk.Size())) - n202, err := m.PhotonPersistentDisk.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ClientIP.Size())) + n202, err := m.ClientIP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n202 } - if m.PortworxVolume != nil { - data[i] = 0xc2 + return i, nil +} + +func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i += copy(dAtA[i:], m.VolumeName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i += copy(dAtA[i:], m.VolumeNamespace) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SecretRef != nil { + dAtA[i] = 0x2a i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.PortworxVolume.Size())) - n203, err := m.PortworxVolume.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n203, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n203 } - if m.ScaleIO != nil { - data[i] = 0xca + return i, nil +} + +func (m *StorageOSVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i += copy(dAtA[i:], m.VolumeName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i += copy(dAtA[i:], m.VolumeNamespace) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SecretRef != nil { + dAtA[i] = 0x2a i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, 
uint64(m.ScaleIO.Size())) - n204, err := m.ScaleIO.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n204, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n204 } - if m.Projected != nil { - data[i] = 0xd2 - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Projected.Size())) - n205, err := m.Projected.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n205 - } return i, nil } -func (m *VsphereVirtualDiskVolumeSource) Marshal() (data []byte, err error) { +func (m *Sysctl) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *VsphereVirtualDiskVolumeSource) MarshalTo(data []byte) (int, error) { +func (m *Sysctl) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.VolumePath))) - i += copy(data[i:], m.VolumePath) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) return i, nil } -func (m *WeightedPodAffinityTerm) Marshal() (data []byte, err error) { +func (m *TCPSocketAction) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *WeightedPodAffinityTerm) MarshalTo(data []byte) (int, error) { +func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.Weight)) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.PodAffinityTerm.Size())) - n206, err := m.PodAffinityTerm.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) + n205, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n206 + i += n205 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i += copy(dAtA[i:], m.Host) return i, nil } -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func (m *Taint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Taint) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i += copy(dAtA[i:], m.Effect) + if m.TimeAdded != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.TimeAdded.Size())) + n206, err := m.TimeAdded.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n206 + } + return i, nil +} + +func (m *Toleration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Toleration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i += copy(dAtA[i:], m.Operator) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i += copy(dAtA[i:], m.Effect) + if m.TolerationSeconds != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) + } + return i, nil +} + +func (m *TopologySelectorLabelRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopologySelectorLabelRequirement) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + if len(m.Values) > 0 { + for _, s := range m.Values { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *TopologySelectorTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopologySelectorTerm) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MatchLabelExpressions) > 0 { + for _, msg := range m.MatchLabelExpressions { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Volume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Volume) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.VolumeSource.Size())) + n207, err := m.VolumeSource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n207 + return i, nil +} + +func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i += copy(dAtA[i:], m.DevicePath) + return i, nil +} + +func 
(m *VolumeMount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) + i += copy(dAtA[i:], m.MountPath) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath))) + i += copy(dAtA[i:], m.SubPath) + if m.MountPropagation != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation))) + i += copy(dAtA[i:], *m.MountPropagation) + } + return i, nil +} + +func (m *VolumeNodeAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Required != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Required.Size())) + n208, err := m.Required.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n208 + } + return i, nil +} + +func (m *VolumeProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) + n209, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n209 + } + if m.DownwardAPI != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) + n210, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n210 + } + if m.ConfigMap != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) + n211, err := m.ConfigMap.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n211 + } + if m.ServiceAccountToken != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ServiceAccountToken.Size())) + n212, err := m.ServiceAccountToken.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n212 + } + return i, nil +} + +func (m *VolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.HostPath != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) + n213, err := m.HostPath.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n213 + } + if m.EmptyDir != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) + n214, err := m.EmptyDir.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n214 + } + if m.GCEPersistentDisk != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.GCEPersistentDisk.Size())) + n215, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n215 + } + if m.AWSElasticBlockStore != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) + n216, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n216 + } + if m.GitRepo != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) + n217, err := m.GitRepo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n217 + } + if m.Secret != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) + n218, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n218 + } + if m.NFS != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) + n219, err := m.NFS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n219 + } + if m.ISCSI != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) + n220, err := m.ISCSI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n220 + } + if m.Glusterfs != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) + n221, err := m.Glusterfs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n221 + } + if m.PersistentVolumeClaim != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) + n222, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n222 + } + if m.RBD != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) + n223, err := m.RBD.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n223 + } + if m.FlexVolume != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) + n224, err := m.FlexVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n224 + } + if m.Cinder != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) + n225, err := m.Cinder.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n225 + } + if m.CephFS != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) + n226, err := m.CephFS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n226 + } + if m.Flocker != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) + n227, err := m.Flocker.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n227 + } + if m.DownwardAPI != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) + n228, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n228 + } + if m.FC != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) + n229, err := m.FC.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n229 + } + if m.AzureFile != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) + n230, err := m.AzureFile.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n230 + } + if m.ConfigMap != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) + n231, err 
:= m.ConfigMap.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n231 + } + if m.VsphereVolume != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) + n232, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n232 + } + if m.Quobyte != nil { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) + n233, err := m.Quobyte.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n233 + } + if m.AzureDisk != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) + n234, err := m.AzureDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n234 + } + if m.PhotonPersistentDisk != nil { + dAtA[i] = 0xba + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) + n235, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n235 + } + if m.PortworxVolume != nil { + dAtA[i] = 0xc2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) + n236, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n236 + } + if m.ScaleIO != nil { + dAtA[i] = 0xca + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) + n237, err := m.ScaleIO.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n237 + } + if m.Projected != nil { + dAtA[i] = 0xd2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) + n238, err := m.Projected.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n238 + } + if m.StorageOS != nil { + dAtA[i] = 0xda + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) + n239, err := m.StorageOS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n239 + } + return i, nil +} + +func (m *VsphereVirtualDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VsphereVirtualDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumePath))) + i += copy(dAtA[i:], m.VolumePath) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyName))) + i += copy(dAtA[i:], m.StoragePolicyName) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyID))) + i += copy(dAtA[i:], m.StoragePolicyID) + return i, nil +} + +func (m *WeightedPodAffinityTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) + n240, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n240 + return i, nil +} + 
+func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintGenerated(data []byte, offset int, v uint64) int { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { @@ -9080,6 +10973,25 @@ func (m *AzureDiskVolumeSource) Size() (n int) { if m.ReadOnly != nil { n += 2 } + if m.Kind != nil { + l = len(*m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AzureFilePersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretNamespace != nil { + l = len(*m.SecretNamespace) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -9104,6 +11016,39 @@ func (m *Binding) Size() (n int) { return n } +func (m *CSIPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeHandle) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeAttributes) > 0 { + for k, v := range m.VolumeAttributes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ControllerPublishSecretRef != nil { + l = m.ControllerPublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeStageSecretRef != nil { + l = m.NodeStageSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodePublishSecretRef != nil { + l = m.NodePublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *Capabilities) Size() (n int) { var l int _ = l @@ -9122,6 +11067,29 @@ func (m *Capabilities) Size() (n int) { return n } +func (m *CephFSPersistentVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SecretFile) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + func (m *CephFSVolumeSource) Size() (n int) { var l int _ = l @@ -9145,6 +11113,21 @@ func (m *CephFSVolumeSource) Size() (n int) { return n } +func (m *CinderPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *CinderVolumeSource) Size() (n int) { var l int _ = l @@ -9153,6 +11136,19 @@ func (m *CinderVolumeSource) Size() (n int) { l = len(m.FSType) n += 1 + l + sovGenerated(uint64(l)) n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ClientIPConfig) Size() (n int) { + var l int + _ = l + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } return n } @@ -9211,6 +11207,18 @@ func (m *ConfigMap) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if len(m.BinaryData) > 0 { + for k, v := range m.BinaryData { + _ = k + _ = v + l = 0 + if v != nil { + l = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } return n } @@ -9252,6 +11260,22 @@ func (m *ConfigMapList) Size() (n int) { return n } +func (m *ConfigMapNodeConfigSource) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletConfigKey) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ConfigMapProjection) Size() (n int) { var l int _ = l @@ -9361,6 +11385,12 @@ func (m *Container) Size() (n int) { } l = len(m.TerminationMessagePolicy) n += 2 + l + sovGenerated(uint64(l)) + if len(m.VolumeDevices) > 0 { + for _, e := range m.VolumeDevices { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -9472,26 +11502,6 @@ func (m *DaemonEndpoint) Size() (n int) { return n } -func (m *DeleteOptions) Size() (n int) { - var l int - _ = l - if m.GracePeriodSeconds != nil { - n += 1 + sovGenerated(uint64(*m.GracePeriodSeconds)) - } - if m.Preconditions != nil { - l = m.Preconditions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.OrphanDependents != nil { - n += 2 - } - if m.PropagationPolicy != nil { - l = len(*m.PropagationPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *DownwardAPIProjection) Size() (n int) { var l int _ = l @@ -9543,6 +11553,10 @@ func (m *EmptyDirVolumeSource) Size() (n int) { _ = l l = len(m.Medium) n += 1 + l + sovGenerated(uint64(l)) + if m.SizeLimit != nil { + l = m.SizeLimit.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -9699,6 +11713,22 @@ func (m *Event) Size() (n int) { n += 1 + sovGenerated(uint64(m.Count)) l = len(m.Type) n += 1 + l + sovGenerated(uint64(l)) + l = m.EventTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + if m.Related != nil { + l = m.Related.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ReportingController) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ReportingInstance) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -9716,6 +11746,17 @@ func (m *EventList) Size() (n int) { return n } +func (m *EventSeries) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Count)) + l = m.LastObservedTime.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *EventSource) Size() (n int) { var l int _ = l @@ -9753,6 +11794,35 @@ func (m *FCVolumeSource) Size() (n int) { l = len(m.FSType) n += 1 + l + sovGenerated(uint64(l)) n += 2 + if len(m.WWIDs) > 0 { + for _, s := range m.WWIDs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlexPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } return n } @@ -9872,11 +11942,61 @@ func (m *Handler) Size() (n int) { return n } +func (m *HostAlias) Size() (n int) { + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Hostnames) > 0 { + for _, s := range m.Hostnames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *HostPathVolumeSource) Size() (n int) { var l int _ = l l = len(m.Path) n += 1 + l + sovGenerated(uint64(l)) + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ISCSIPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.InitiatorName != nil { + l = len(*m.InitiatorName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -9899,6 +12019,16 @@ func (m *ISCSIVolumeSource) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.InitiatorName != nil { + l = len(*m.InitiatorName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -10032,22 +12162,6 @@ func (m *List) Size() (n int) { return n } -func (m *ListOptions) Size() (n int) { - var l int - _ = l - l = len(m.LabelSelector) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FieldSelector) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) - } - return n -} - func (m *LoadBalancerIngress) Size() (n int) { var l int _ = l @@ -10078,6 +12192,14 @@ func (m *LocalObjectReference) Size() (n int) { return n } +func (m *LocalVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *NFSVolumeSource) Size() (n int) { var l int _ = l @@ -10191,6 +12313,36 @@ func (m *NodeCondition) Size() (n int) { return n } +func (m *NodeConfigSource) Size() (n int) { + var l int + _ = l + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + return n +} + +func (m *NodeConfigStatus) Size() (n int) { + var l int + _ = l + if m.Assigned != nil { + l = m.Assigned.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Active != nil { + l = m.Active.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastKnownGood != nil { + l = m.LastKnownGood.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *NodeDaemonEndpoints) Size() (n int) { var l int _ = l @@ -10273,6 +12425,12 @@ func (m *NodeSelectorTerm) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.MatchFields) > 0 { + for _, e := range m.MatchFields { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -10281,7 +12439,7 @@ func (m *NodeSpec) Size() (n int) { _ = l l = len(m.PodCIDR) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ExternalID) + l = len(m.DoNotUse_ExternalID) n += 1 + l + sovGenerated(uint64(l)) l = len(m.ProviderID) n += 1 + l + sovGenerated(uint64(l)) @@ -10292,6 +12450,10 @@ func (m *NodeSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.ConfigSource != nil { + l = m.ConfigSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -10352,6 +12514,10 @@ func (m *NodeStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -10391,64 +12557,6 @@ func (m *ObjectFieldSelector) Size() (n int) { return n } -func (m *ObjectMeta) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.GenerateName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SelfLink) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Generation)) - l = m.CreationTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.DeletionTimestamp != nil { - l = m.DeletionTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DeletionGracePeriodSeconds != nil { - n += 1 + sovGenerated(uint64(*m.DeletionGracePeriodSeconds)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.OwnerReferences) > 0 { - for _, e := range m.OwnerReferences { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.ClusterName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *ObjectReference) Size() (n int) { var l int _ = l @@ -10493,6 +12601,24 @@ func (m *PersistentVolumeClaim) Size() (n int) { return n } +func (m *PersistentVolumeClaimCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *PersistentVolumeClaimList) Size() (n int) { var l int _ = l @@ -10528,6 +12654,10 @@ func (m *PersistentVolumeClaimSpec) Size() (n int) { l = len(*m.StorageClassName) n += 1 + l + sovGenerated(uint64(l)) } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -10551,6 +12681,12 @@ func (m *PersistentVolumeClaimStatus) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -10656,6 +12792,18 @@ func (m *PersistentVolumeSource) Size() (n int) { l = m.ScaleIO.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.Local != nil { + l = m.Local.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -10687,6 +12835,20 @@ func (m *PersistentVolumeSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.StorageClassName) n += 1 + l + sovGenerated(uint64(l)) + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -10808,6 +12970,42 @@ func (m *PodCondition) Size() (n int) { return n } +func (m *PodDNSConfig) Size() (n int) { + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Searches) > 0 { + for _, s := range m.Searches { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodDNSConfigOption) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *PodExecOptions) Size() (n int) { var l int _ = l @@ -10883,6 +13081,14 @@ func (m *PodProxyOptions) Size() (n int) { return n } +func (m *PodReadinessGate) Size() (n int) { + var l int + _ = l + l = len(m.ConditionType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *PodSecurityContext) Size() (n int) { var l int _ = l @@ -10904,6 +13110,15 @@ func (m *PodSecurityContext) Size() (n int) { if m.FSGroup != nil { n += 1 + sovGenerated(uint64(*m.FSGroup)) } + if m.RunAsGroup != nil { + n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + } + if len(m.Sysctls) > 0 { + for _, e := range m.Sysctls { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -10994,6 +13209,30 @@ func (m *PodSpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if len(m.HostAliases) > 0 { + for _, e := range m.HostAliases { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + l = len(m.PriorityClassName) + n += 2 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 2 + sovGenerated(uint64(*m.Priority)) 
+ } + if m.DNSConfig != nil { + l = m.DNSConfig.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ShareProcessNamespace != nil { + n += 3 + } + if len(m.ReadinessGates) > 0 { + for _, e := range m.ReadinessGates { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -11034,6 +13273,8 @@ func (m *PodStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + l = len(m.NominatedNodeName) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -11168,6 +13409,33 @@ func (m *QuobyteVolumeSource) Size() (n int) { return n } +func (m *RBDPersistentVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + func (m *RBDVolumeSource) Size() (n int) { var l int _ = l @@ -11346,6 +13614,10 @@ func (m *ResourceQuotaSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.ScopeSelector != nil { + l = m.ScopeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -11411,6 +13683,32 @@ func (m *SELinuxOptions) Size() (n int) { return n } +func (m *ScaleIOPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + func (m *ScaleIOVolumeSource) Size() (n int) { var l int _ = l @@ -11437,6 +13735,34 @@ func (m *ScaleIOVolumeSource) Size() (n int) { return n } +func (m *ScopeSelector) Size() (n int) { + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ScopedResourceSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.ScopeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *Secret) Size() (n int) { var l int _ = l @@ -11446,7 +13772,11 @@ func (m *Secret) Size() (n int) { for k, v := range m.Data { _ = k _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + l = 0 + if v != nil { + l = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } @@ -11518,6 +13848,16 @@ func (m *SecretProjection) Size() (n int) { return n } +func (m *SecretReference) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *SecretVolumeSource) Size() (n int) { var l int _ = l @@ -11561,6 +13901,12 @@ func (m *SecurityContext) Size() (n int) { if m.ReadOnlyRootFilesystem != nil { n += 2 } + if m.AllowPrivilegeEscalation != nil { + n += 2 + } + if m.RunAsGroup != nil { + n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + } return n } @@ -11621,6 +13967,19 @@ func (m *ServiceAccountList) Size() (n int) { return n } +func (m *ServiceAccountTokenProjection) Size() (n int) { + var l int + _ = l + l = len(m.Audience) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ServiceList) Size() (n int) { var l int _ = l @@ -11684,12 +14043,6 @@ func (m *ServiceSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - if len(m.DeprecatedPublicIPs) > 0 { - for _, s := range m.DeprecatedPublicIPs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } l = len(m.SessionAffinity) n += 1 + l + sovGenerated(uint64(l)) l = len(m.LoadBalancerIP) @@ -11702,6 +14055,14 @@ func (m *ServiceSpec) Size() (n int) { } l = len(m.ExternalName) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ExternalTrafficPolicy) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HealthCheckNodePort)) + n += 2 + if m.SessionAffinityConfig != nil { + l = m.SessionAffinityConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -11713,6 +14074,50 @@ func (m *ServiceStatus) Size() (n int) { return n } +func (m *SessionAffinityConfig) Size() (n int) { + var l int + _ = l + if m.ClientIP != nil { + l = m.ClientIP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *StorageOSPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *StorageOSVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *Sysctl) Size() (n int) { var l int _ = l @@ -11728,6 +14133,8 @@ func (m *TCPSocketAction) Size() (n int) { _ = l l = m.Port.Size() n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -11740,8 +14147,10 @@ func (m *Taint) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Effect) n += 1 + l + sovGenerated(uint64(l)) - l = m.TimeAdded.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.TimeAdded != nil { + l = m.TimeAdded.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -11762,6 +14171,32 @@ func (m *Toleration) Size() (n int) { return n } +func (m *TopologySelectorLabelRequirement) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TopologySelectorTerm) Size() (n int) { + var l int 
+ _ = l + if len(m.MatchLabelExpressions) > 0 { + for _, e := range m.MatchLabelExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *Volume) Size() (n int) { var l int _ = l @@ -11772,6 +14207,16 @@ func (m *Volume) Size() (n int) { return n } +func (m *VolumeDevice) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *VolumeMount) Size() (n int) { var l int _ = l @@ -11782,6 +14227,20 @@ func (m *VolumeMount) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.SubPath) n += 1 + l + sovGenerated(uint64(l)) + if m.MountPropagation != nil { + l = len(*m.MountPropagation) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeNodeAffinity) Size() (n int) { + var l int + _ = l + if m.Required != nil { + l = m.Required.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -11800,6 +14259,10 @@ func (m *VolumeProjection) Size() (n int) { l = m.ConfigMap.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ServiceAccountToken != nil { + l = m.ServiceAccountToken.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -11910,6 +14373,10 @@ func (m *VolumeSource) Size() (n int) { l = m.Projected.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -11920,6 +14387,10 @@ func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.FSType) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePolicyName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePolicyID) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -12001,6 +14472,20 @@ func (this *AzureDiskVolumeSource) String() string { `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, `FSType:` + valueToStringGenerated(this.FSType) + `,`, `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `Kind:` + valueToStringGenerated(this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *AzureFilePersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureFilePersistentVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretNamespace:` + valueToStringGenerated(this.SecretNamespace) + `,`, `}`, }, "") return s @@ -12028,6 +14513,33 @@ func (this *Binding) String() string { }, "") return s } +func (this *CSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) + for k := range this.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + mapStringForVolumeAttributes := "map[string]string{" + for _, k := range keysForVolumeAttributes { + mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + } + mapStringForVolumeAttributes += "}" + s := strings.Join([]string{`&CSIPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `VolumeAttributes:` + 
mapStringForVolumeAttributes + `,`, + `ControllerPublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.ControllerPublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, + `NodeStageSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodeStageSecretRef), "SecretReference", "SecretReference", 1) + `,`, + `NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, + `}`, + }, "") + return s +} func (this *Capabilities) String() string { if this == nil { return "nil" @@ -12039,6 +14551,21 @@ func (this *Capabilities) String() string { }, "") return s } +func (this *CephFSPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CephFSPersistentVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} func (this *CephFSVolumeSource) String() string { if this == nil { return "nil" @@ -12054,6 +14581,19 @@ func (this *CephFSVolumeSource) String() string { }, "") return s } +func (this *CinderPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CinderPersistentVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `}`, + }, "") + return s +} func (this *CinderVolumeSource) String() string { if this == nil { return "nil" @@ -12062,6 +14602,17 @@ func (this *CinderVolumeSource) String() string { `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClientIPConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClientIPConfig{`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, `}`, }, "") return s @@ -12115,9 +14666,20 @@ func (this *ConfigMap) String() string { mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) } mapStringForData += "}" + keysForBinaryData := make([]string, 0, len(this.BinaryData)) + for k := range this.BinaryData { + keysForBinaryData = append(keysForBinaryData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) + mapStringForBinaryData := "map[string][]byte{" + for _, k := range keysForBinaryData { + mapStringForBinaryData += fmt.Sprintf("%v: %v,", k, this.BinaryData[k]) + } + mapStringForBinaryData += "}" s := strings.Join([]string{`&ConfigMap{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Data:` + mapStringForData + `,`, + `BinaryData:` + mapStringForBinaryData + `,`, `}`, }, "") return s @@ -12156,6 +14718,20 @@ func (this *ConfigMapList) String() string { }, "") return s } +func (this 
*ConfigMapNodeConfigSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapNodeConfigSource{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `KubeletConfigKey:` + fmt.Sprintf("%v", this.KubeletConfigKey) + `,`, + `}`, + }, "") + return s +} func (this *ConfigMapProjection) String() string { if this == nil { return "nil" @@ -12206,6 +14782,7 @@ func (this *Container) String() string { `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + `,`, `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `VolumeDevices:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeDevices), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -12311,19 +14888,6 @@ func (this *DaemonEndpoint) String() string { }, "") return s } -func (this *DeleteOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteOptions{`, - `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, - `Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`, - `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, - `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, - `}`, - }, "") - return s -} func (this *DownwardAPIProjection) String() string { if this == nil { return "nil" @@ -12364,6 +14928,7 @@ func (this *EmptyDirVolumeSource) String() string { } s := strings.Join([]string{`&EmptyDirVolumeSource{`, `Medium:` + fmt.Sprintf("%v", this.Medium) + `,`, + `SizeLimit:` + strings.Replace(fmt.Sprintf("%v", this.SizeLimit), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -12478,6 +15043,12 @@ func (this *Event) String() string { `LastTimestamp:` + strings.Replace(strings.Replace(this.LastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Count:` + fmt.Sprintf("%v", this.Count) + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "ObjectReference", 1) + `,`, + `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, + `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, `}`, }, "") return s @@ -12493,6 +15064,18 @@ func (this *EventList) String() string { }, "") return s } +func (this *EventSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSeries{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} func (this *EventSource) String() string { if 
this == nil { return "nil" @@ -12523,6 +15106,31 @@ func (this *FCVolumeSource) String() string { `Lun:` + valueToStringGenerated(this.Lun) + `,`, `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `WWIDs:` + fmt.Sprintf("%v", this.WWIDs) + `,`, + `}`, + }, "") + return s +} +func (this *FlexPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, `}`, }, "") return s @@ -12636,12 +15244,44 @@ func (this *Handler) String() string { }, "") return s } +func (this *HostAlias) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostAlias{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostnames:` + fmt.Sprintf("%v", this.Hostnames) + `,`, + `}`, + }, "") + return s +} func (this *HostPathVolumeSource) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&HostPathVolumeSource{`, `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Type:` + valueToStringGenerated(this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *ISCSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIPersistentVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, `}`, }, "") return s @@ -12658,6 +15298,10 @@ func (this *ISCSIVolumeSource) String() string { `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, `}`, }, "") return s @@ -12793,20 +15437,6 @@ func (this *List) String() string { }, "") return s } -func (this *ListOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListOptions{`, - `LabelSelector:` + fmt.Sprintf("%v", 
this.LabelSelector) + `,`, - `FieldSelector:` + fmt.Sprintf("%v", this.FieldSelector) + `,`, - `Watch:` + fmt.Sprintf("%v", this.Watch) + `,`, - `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `}`, - }, "") - return s -} func (this *LoadBalancerIngress) String() string { if this == nil { return "nil" @@ -12838,6 +15468,16 @@ func (this *LocalObjectReference) String() string { }, "") return s } +func (this *LocalVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} func (this *NFSVolumeSource) String() string { if this == nil { return "nil" @@ -12942,6 +15582,29 @@ func (this *NodeCondition) String() string { }, "") return s } +func (this *NodeConfigSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeConfigSource{`, + `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapNodeConfigSource", "ConfigMapNodeConfigSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeConfigStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeConfigStatus{`, + `Assigned:` + strings.Replace(fmt.Sprintf("%v", this.Assigned), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `Active:` + strings.Replace(fmt.Sprintf("%v", this.Active), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `LastKnownGood:` + strings.Replace(fmt.Sprintf("%v", this.LastKnownGood), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} func (this *NodeDaemonEndpoints) String() string { if this == nil { return "nil" @@ -13021,6 +15684,7 @@ func (this *NodeSelectorTerm) String() string { } s := strings.Join([]string{`&NodeSelectorTerm{`, `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`, + `MatchFields:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchFields), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -13031,10 +15695,11 @@ func (this *NodeSpec) String() string { } s := strings.Join([]string{`&NodeSpec{`, `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, - `ExternalID:` + fmt.Sprintf("%v", this.ExternalID) + `,`, + `DoNotUse_ExternalID:` + fmt.Sprintf("%v", this.DoNotUse_ExternalID) + `,`, `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, `Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`, `Taints:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Taints), "Taint", "Taint", 1), `&`, ``, 1) + `,`, + `ConfigSource:` + strings.Replace(fmt.Sprintf("%v", this.ConfigSource), "NodeConfigSource", "NodeConfigSource", 1) + `,`, `}`, }, "") return s @@ -13074,6 +15739,7 @@ func (this *NodeStatus) String() string { `Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + `,`, `VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`, `VolumesAttached:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumesAttached), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + `,`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`, `}`, }, "") return s @@ 
-13108,50 +15774,6 @@ func (this *ObjectFieldSelector) String() string { }, "") return s } -func (this *ObjectMeta) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) - } - mapStringForAnnotations += "}" - s := strings.Join([]string{`&ObjectMeta{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, - `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, - `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, - `Labels:` + mapStringForLabels + `,`, - `Annotations:` + mapStringForAnnotations + `,`, - `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference", 1), `&`, ``, 1) + `,`, - `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, - `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, - `}`, - }, "") - return s -} func (this *ObjectReference) String() string { if this == nil { return "nil" @@ -13192,6 +15814,21 @@ func (this *PersistentVolumeClaim) String() string { }, "") return s } +func (this *PersistentVolumeClaimCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *PersistentVolumeClaimList) String() string { if this == nil { return "nil" @@ -13213,6 +15850,7 @@ func (this *PersistentVolumeClaimSpec) String() string { `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `StorageClassName:` + 
valueToStringGenerated(this.StorageClassName) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `}`, }, "") return s @@ -13235,6 +15873,7 @@ func (this *PersistentVolumeClaimStatus) String() string { `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, `Capacity:` + mapStringForCapacity + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "PersistentVolumeClaimCondition", "PersistentVolumeClaimCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -13271,20 +15910,23 @@ func (this *PersistentVolumeSource) String() string { `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, - `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, - `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, - `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, - `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderPersistentVolumeSource", "CinderPersistentVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`, `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, - `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, - `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexPersistentVolumeSource", "FlexPersistentVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`, `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, - `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, + 
`ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`, + `Local:` + strings.Replace(fmt.Sprintf("%v", this.Local), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`, `}`, }, "") return s @@ -13310,6 +15952,9 @@ func (this *PersistentVolumeSpec) String() string { `ClaimRef:` + strings.Replace(fmt.Sprintf("%v", this.ClaimRef), "ObjectReference", "ObjectReference", 1) + `,`, `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, + `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, + `NodeAffinity:` + strings.Replace(fmt.Sprintf("%v", this.NodeAffinity), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`, `}`, }, "") return s @@ -13412,6 +16057,29 @@ func (this *PodCondition) String() string { }, "") return s } +func (this *PodDNSConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDNSConfig{`, + `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, + `Searches:` + fmt.Sprintf("%v", this.Searches) + `,`, + `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "PodDNSConfigOption", "PodDNSConfigOption", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDNSConfigOption) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDNSConfigOption{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `}`, + }, "") + return s +} func (this *PodExecOptions) String() string { if this == nil { return "nil" @@ -13475,6 +16143,16 @@ func (this *PodProxyOptions) String() string { }, "") return s } +func (this *PodReadinessGate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodReadinessGate{`, + `ConditionType:` + fmt.Sprintf("%v", this.ConditionType) + `,`, + `}`, + }, "") + return s +} func (this *PodSecurityContext) String() string { if this == nil { return "nil" @@ -13485,6 +16163,8 @@ func (this *PodSecurityContext) String() string { `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, `FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`, + `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, + `Sysctls:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sysctls), "Sysctl", "Sysctl", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -13536,6 +16216,12 @@ func (this *PodSpec) String() string { `InitContainers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainers), "Container", "Container", 1), `&`, ``, 1) + `,`, `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, `Tolerations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tolerations), "Toleration", "Toleration", 1), `&`, ``, 1) + `,`, + `HostAliases:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostAliases), "HostAlias", "HostAlias", 1), `&`, ``, 1) + `,`, + `PriorityClassName:` + fmt.Sprintf("%v", 
this.PriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "PodDNSConfig", 1) + `,`, + `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, + `ReadinessGates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ReadinessGates), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -13555,6 +16241,7 @@ func (this *PodStatus) String() string { `ContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, `QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`, `InitContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, + `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, `}`, }, "") return s @@ -13689,6 +16376,23 @@ func (this *QuobyteVolumeSource) String() string { }, "") return s } +func (this *RBDPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RBDPersistentVolumeSource{`, + `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} func (this *RBDVolumeSource) String() string { if this == nil { return "nil" @@ -13845,6 +16549,7 @@ func (this *ResourceQuotaSpec) String() string { s := strings.Join([]string{`&ResourceQuotaSpec{`, `Hard:` + mapStringForHard + `,`, `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `ScopeSelector:` + strings.Replace(fmt.Sprintf("%v", this.ScopeSelector), "ScopeSelector", "ScopeSelector", 1) + `,`, `}`, }, "") return s @@ -13924,6 +16629,25 @@ func (this *SELinuxOptions) String() string { }, "") return s } +func (this *ScaleIOPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOPersistentVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} func (this *ScaleIOVolumeSource) String() string { if this == nil { return "nil" @@ -13943,6 +16667,28 @@ func (this *ScaleIOVolumeSource) String() string { }, "") return s } +func (this *ScopeSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScopeSelector{`, + `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.MatchExpressions), "ScopedResourceSelectorRequirement", "ScopedResourceSelectorRequirement", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScopedResourceSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScopedResourceSelectorRequirement{`, + `ScopeName:` + fmt.Sprintf("%v", this.ScopeName) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} func (this *Secret) String() string { if this == nil { return "nil" @@ -14022,6 +16768,17 @@ func (this *SecretProjection) String() string { }, "") return s } +func (this *SecretReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} func (this *SecretVolumeSource) String() string { if this == nil { return "nil" @@ -14046,6 +16803,8 @@ func (this *SecurityContext) String() string { `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, + `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, + `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, `}`, }, "") return s @@ -14096,6 +16855,18 @@ func (this *ServiceAccountList) String() string { }, "") return s } +func (this *ServiceAccountTokenProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountTokenProjection{`, + `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} func (this *ServiceList) String() string { if this == nil { return "nil" @@ -14151,11 +16922,14 @@ func (this *ServiceSpec) String() string { `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, - `DeprecatedPublicIPs:` + fmt.Sprintf("%v", this.DeprecatedPublicIPs) + `,`, `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, + `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, + `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, + `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, + `SessionAffinityConfig:` + strings.Replace(fmt.Sprintf("%v", this.SessionAffinityConfig), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, `}`, }, "") return s @@ -14170,6 +16944,44 @@ func (this *ServiceStatus) String() string { }, "") return s } +func (this *SessionAffinityConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SessionAffinityConfig{`, + `ClientIP:` + strings.Replace(fmt.Sprintf("%v", this.ClientIP), "ClientIPConfig", "ClientIPConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&StorageOSPersistentVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "ObjectReference", "ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageOSVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} func (this *Sysctl) String() string { if this == nil { return "nil" @@ -14187,6 +16999,7 @@ func (this *TCPSocketAction) String() string { } s := strings.Join([]string{`&TCPSocketAction{`, `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, `}`, }, "") return s @@ -14199,7 +17012,7 @@ func (this *Taint) String() string { `Key:` + fmt.Sprintf("%v", this.Key) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, - `TimeAdded:` + strings.Replace(strings.Replace(this.TimeAdded.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, `}`, }, "") return s @@ -14218,6 +17031,27 @@ func (this *Toleration) String() string { }, "") return s } +func (this *TopologySelectorLabelRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologySelectorLabelRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *TopologySelectorTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologySelectorTerm{`, + `MatchLabelExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchLabelExpressions), "TopologySelectorLabelRequirement", "TopologySelectorLabelRequirement", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *Volume) String() string { if this == nil { return "nil" @@ -14229,6 +17063,17 @@ func (this *Volume) String() string { }, "") return s } +func (this *VolumeDevice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeDevice{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} func (this *VolumeMount) String() string { if this == nil { return "nil" @@ -14238,6 +17083,17 @@ func (this *VolumeMount) String() string { `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, + `MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeNodeAffinity) String() string { + if this == 
nil { + return "nil" + } + s := strings.Join([]string{`&VolumeNodeAffinity{`, + `Required:` + strings.Replace(fmt.Sprintf("%v", this.Required), "NodeSelector", "NodeSelector", 1) + `,`, `}`, }, "") return s @@ -14250,6 +17106,7 @@ func (this *VolumeProjection) String() string { `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretProjection", "SecretProjection", 1) + `,`, `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, + `ServiceAccountToken:` + strings.Replace(fmt.Sprintf("%v", this.ServiceAccountToken), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, `}`, }, "") return s @@ -14285,6 +17142,7 @@ func (this *VolumeSource) String() string { `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, `Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, `}`, }, "") return s @@ -14296,6 +17154,8 @@ func (this *VsphereVirtualDiskVolumeSource) String() string { s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `StoragePolicyName:` + fmt.Sprintf("%v", this.StoragePolicyName) + `,`, + `StoragePolicyID:` + fmt.Sprintf("%v", this.StoragePolicyID) + `,`, `}`, }, "") return s @@ -14319,8 +17179,8 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -14332,7 +17192,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -14360,7 +17220,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -14375,7 +17235,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(data[iNdEx:postIndex]) + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -14389,7 +17249,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -14404,7 +17264,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(data[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -14418,7 +17278,7 @@ func (m 
*AWSElasticBlockStoreVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Partition |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -14437,7 +17297,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -14447,7 +17307,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(data []byte) error { m.ReadOnly = bool(v != 0) default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -14466,8 +17326,8 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *Affinity) Unmarshal(data []byte) error { - l := len(data) +func (m *Affinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -14479,7 +17339,7 @@ func (m *Affinity) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -14507,7 +17367,7 @@ func (m *Affinity) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -14524,7 +17384,7 @@ func (m *Affinity) Unmarshal(data []byte) error { if m.NodeAffinity == nil { m.NodeAffinity = &NodeAffinity{} } - if err := m.NodeAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14540,7 +17400,7 @@ func (m *Affinity) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -14557,7 +17417,7 @@ func (m *Affinity) Unmarshal(data []byte) error { if m.PodAffinity == nil { m.PodAffinity = &PodAffinity{} } - if err := m.PodAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PodAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14573,7 +17433,7 @@ func (m *Affinity) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -14590,13 +17450,13 @@ func (m *Affinity) Unmarshal(data []byte) error { if m.PodAntiAffinity == nil { m.PodAntiAffinity = &PodAntiAffinity{} } - if err := m.PodAntiAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PodAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -14615,8 +17475,8 @@ func (m *Affinity) Unmarshal(data []byte) error { } return nil } -func (m *AttachedVolume) Unmarshal(data []byte) error { - l := len(data) +func (m *AttachedVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -14628,7 +17488,7 @@ func (m *AttachedVolume) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -14656,7 +17516,7 @@ func (m *AttachedVolume) Unmarshal(data []byte) error { if 
iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -14671,7 +17531,7 @@ func (m *AttachedVolume) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = UniqueVolumeName(data[iNdEx:postIndex]) + m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -14685,7 +17545,7 @@ func (m *AttachedVolume) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -14700,11 +17560,11 @@ func (m *AttachedVolume) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DevicePath = string(data[iNdEx:postIndex]) + m.DevicePath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -14723,8 +17583,8 @@ func (m *AttachedVolume) Unmarshal(data []byte) error { } return nil } -func (m *AvoidPods) Unmarshal(data []byte) error { - l := len(data) +func (m *AvoidPods) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -14736,7 +17596,7 @@ func (m *AvoidPods) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -14764,7 +17624,7 @@ func (m *AvoidPods) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -14779,13 +17639,13 @@ func (m *AvoidPods) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) - if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -14804,8 +17664,8 @@ func (m *AvoidPods) Unmarshal(data []byte) error { } return nil } -func (m *AzureDiskVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -14817,7 +17677,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -14845,7 +17705,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -14860,7 +17720,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DiskName = string(data[iNdEx:postIndex]) + m.DiskName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -14874,7 +17734,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 
0x80 { @@ -14889,7 +17749,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DataDiskURI = string(data[iNdEx:postIndex]) + m.DataDiskURI = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -14903,7 +17763,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -14918,7 +17778,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := AzureDataDiskCachingMode(data[iNdEx:postIndex]) + s := AzureDataDiskCachingMode(dAtA[iNdEx:postIndex]) m.CachingMode = &s iNdEx = postIndex case 4: @@ -14933,7 +17793,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -14948,7 +17808,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.FSType = &s iNdEx = postIndex case 5: @@ -14963,7 +17823,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -14972,9 +17832,39 @@ func (m *AzureDiskVolumeSource) Unmarshal(data []byte) error { } b := bool(v != 0) m.ReadOnly = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskKind(dAtA[iNdEx:postIndex]) + m.Kind = &s + iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -14993,8 +17883,8 @@ func (m *AzureDiskVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *AzureFileVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -15006,7 +17896,165 @@ func (m *AzureFileVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShareName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.SecretNamespace = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15034,7 +18082,7 @@ func (m *AzureFileVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15049,7 +18097,7 @@ func (m *AzureFileVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(data[iNdEx:postIndex]) + m.SecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -15063,7 +18111,7 @@ func (m *AzureFileVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if 
b < 0x80 { @@ -15078,7 +18126,7 @@ func (m *AzureFileVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ShareName = string(data[iNdEx:postIndex]) + m.ShareName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -15092,7 +18140,7 @@ func (m *AzureFileVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -15102,7 +18150,7 @@ func (m *AzureFileVolumeSource) Unmarshal(data []byte) error { m.ReadOnly = bool(v != 0) default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -15121,8 +18169,8 @@ func (m *AzureFileVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *Binding) Unmarshal(data []byte) error { - l := len(data) +func (m *Binding) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -15134,7 +18182,7 @@ func (m *Binding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15162,7 +18210,7 @@ func (m *Binding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -15176,7 +18224,7 @@ func (m *Binding) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15192,7 +18240,7 @@ func (m *Binding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -15206,13 +18254,13 @@ func (m *Binding) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -15231,8 +18279,8 @@ func (m *Binding) Unmarshal(data []byte) error { } return nil } -func (m *Capabilities) Unmarshal(data []byte) error { - l := len(data) +func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -15244,7 +18292,379 @@ func (m *Capabilities) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeHandle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.VolumeAttributes == nil { + m.VolumeAttributes = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.VolumeAttributes[mapkey] = mapvalue + } else { + var mapvalue string + m.VolumeAttributes[mapkey] = mapvalue + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ControllerPublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ControllerPublishSecretRef == nil { + m.ControllerPublishSecretRef = &SecretReference{} + } + if err := m.ControllerPublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeStageSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeStageSecretRef == nil { + m.NodeStageSecretRef = &SecretReference{} + } + if err := m.NodeStageSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodePublishSecretRef == nil { + m.NodePublishSecretRef = &SecretReference{} + } + if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
+func (m *Capabilities) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15272,7 +18692,7 @@ func (m *Capabilities) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15287,7 +18707,7 @@ func (m *Capabilities) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Add = append(m.Add, Capability(data[iNdEx:postIndex])) + m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { @@ -15301,7 +18721,7 @@ func (m *Capabilities) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15316,11 +18736,11 @@ func (m *Capabilities) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Drop = append(m.Drop, Capability(data[iNdEx:postIndex])) + m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -15339,8 +18759,8 @@ func (m *Capabilities) Unmarshal(data []byte) error { } return nil } -func (m *CephFSVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -15352,7 +18772,226 @@ func (m *CephFSVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx 
+ intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15380,7 +19019,7 @@ func (m *CephFSVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15395,7 +19034,7 @@ func (m *CephFSVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Monitors = 
append(m.Monitors, string(data[iNdEx:postIndex])) + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { @@ -15409,7 +19048,7 @@ func (m *CephFSVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15424,7 +19063,7 @@ func (m *CephFSVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(data[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -15438,7 +19077,7 @@ func (m *CephFSVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15453,7 +19092,7 @@ func (m *CephFSVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.User = string(data[iNdEx:postIndex]) + m.User = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -15467,7 +19106,7 @@ func (m *CephFSVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15482,7 +19121,7 @@ func (m *CephFSVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretFile = string(data[iNdEx:postIndex]) + m.SecretFile = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -15496,7 +19135,7 @@ func (m *CephFSVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -15513,7 +19152,7 @@ func (m *CephFSVolumeSource) Unmarshal(data []byte) error { if m.SecretRef == nil { m.SecretRef = &LocalObjectReference{} } - if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15529,7 +19168,7 @@ func (m *CephFSVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -15539,7 +19178,7 @@ func (m *CephFSVolumeSource) Unmarshal(data []byte) error { m.ReadOnly = bool(v != 0) default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -15558,8 +19197,8 @@ func (m *CephFSVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *CinderVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -15571,7 +19210,168 @@ func (m *CinderVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 
1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15599,7 +19399,7 @@ func (m *CinderVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15614,7 +19414,7 @@ func (m *CinderVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(data[iNdEx:postIndex]) + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -15628,7 
+19428,7 @@ func (m *CinderVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15643,7 +19443,7 @@ func (m *CinderVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(data[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -15657,7 +19457,7 @@ func (m *CinderVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -15665,9 +19465,42 @@ func (m *CinderVolumeSource) Unmarshal(data []byte) error { } } m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -15686,8 +19519,8 @@ func (m *CinderVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *ComponentCondition) Unmarshal(data []byte) error { - l := len(data) +func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -15699,7 +19532,77 @@ func (m *ComponentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= 
(uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15727,7 +19630,7 @@ func (m *ComponentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15742,7 +19645,7 @@ func (m *ComponentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ComponentConditionType(data[iNdEx:postIndex]) + m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -15756,7 +19659,7 @@ func (m *ComponentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15771,7 +19674,7 @@ func (m *ComponentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = ConditionStatus(data[iNdEx:postIndex]) + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -15785,7 +19688,7 @@ func (m *ComponentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15800,7 +19703,7 @@ func (m *ComponentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -15814,7 +19717,7 @@ func (m *ComponentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15829,11 +19732,11 @@ func (m *ComponentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Error = string(data[iNdEx:postIndex]) + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -15852,8 +19755,8 @@ func (m *ComponentCondition) Unmarshal(data []byte) error { } return nil } -func (m *ComponentStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *ComponentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -15865,7 +19768,7 @@ func (m *ComponentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -15893,7 +19796,7 @@ func (m *ComponentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -15907,7 +19810,7 @@ func (m *ComponentStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15923,7 +19826,7 @@ func (m *ComponentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -15938,13 +19841,13 @@ func (m *ComponentStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } 
m.Conditions = append(m.Conditions, ComponentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -15963,8 +19866,8 @@ func (m *ComponentStatus) Unmarshal(data []byte) error { } return nil } -func (m *ComponentStatusList) Unmarshal(data []byte) error { - l := len(data) +func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -15976,7 +19879,7 @@ func (m *ComponentStatusList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -16004,7 +19907,7 @@ func (m *ComponentStatusList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16018,7 +19921,7 @@ func (m *ComponentStatusList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -16034,7 +19937,7 @@ func (m *ComponentStatusList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16049,13 +19952,13 @@ func (m *ComponentStatusList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, ComponentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -16074,8 +19977,8 @@ func (m *ComponentStatusList) Unmarshal(data []byte) error { } return nil } -func (m *ConfigMap) Unmarshal(data []byte) error { - l := len(data) +func (m *ConfigMap) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -16087,7 +19990,7 @@ func (m *ConfigMap) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -16115,7 +20018,7 @@ func (m *ConfigMap) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16129,7 +20032,7 @@ func (m *ConfigMap) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -16145,7 +20048,7 @@ func (m *ConfigMap) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16167,7 +20070,7 @@ func (m *ConfigMap) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b 
:= data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -16182,7 +20085,7 @@ func (m *ConfigMap) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -16197,56 +20100,178 @@ func (m *ConfigMap) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue if m.Data == nil { m.Data = make(map[string]string) } - m.Data[mapkey] = mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Data[mapkey] = mapvalue + } else { + var mapvalue string + m.Data[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinaryData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.BinaryData == nil { + m.BinaryData = make(map[string][]byte) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + m.BinaryData[mapkey] = mapvalue + } else { + var mapvalue []byte + m.BinaryData[mapkey] = mapvalue + } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -16265,8 +20290,8 @@ func (m *ConfigMap) Unmarshal(data []byte) error { } return nil } -func (m *ConfigMapEnvSource) Unmarshal(data []byte) error { - l := len(data) +func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -16278,7 +20303,7 @@ func (m *ConfigMapEnvSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -16306,7 +20331,7 @@ func (m *ConfigMapEnvSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16320,7 +20345,7 @@ func (m *ConfigMapEnvSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -16336,7 +20361,7 @@ func (m *ConfigMapEnvSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16347,7 +20372,7 @@ func (m *ConfigMapEnvSource) Unmarshal(data []byte) error { m.Optional = &b default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -16366,8 +20391,8 @@ func (m *ConfigMapEnvSource) Unmarshal(data []byte) error { } return nil } -func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { - l := len(data) +func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -16379,7 +20404,7 @@ func (m 
*ConfigMapKeySelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -16407,7 +20432,7 @@ func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16421,7 +20446,7 @@ func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -16437,7 +20462,7 @@ func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -16452,7 +20477,7 @@ func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(data[iNdEx:postIndex]) + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -16466,7 +20491,7 @@ func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16477,7 +20502,7 @@ func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { m.Optional = &b default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -16496,8 +20521,8 @@ func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { } return nil } -func (m *ConfigMapList) Unmarshal(data []byte) error { - l := len(data) +func (m *ConfigMapList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -16509,7 +20534,7 @@ func (m *ConfigMapList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -16537,7 +20562,7 @@ func (m *ConfigMapList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16551,7 +20576,7 @@ func (m *ConfigMapList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -16567,7 +20592,7 @@ func (m *ConfigMapList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16582,13 +20607,13 @@ func (m *ConfigMapList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, ConfigMap{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -16607,8 +20632,8 @@ func (m *ConfigMapList) Unmarshal(data []byte) error { } return nil } -func (m 
*ConfigMapProjection) Unmarshal(data []byte) error { - l := len(data) +func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -16620,7 +20645,202 @@ func (m *ConfigMapProjection) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapNodeConfigSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapNodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeletConfigKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubeletConfigKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -16648,7 +20868,7 @@ func (m *ConfigMapProjection) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16662,7 +20882,7 @@ func (m *ConfigMapProjection) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -16678,7 +20898,7 @@ func (m *ConfigMapProjection) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16693,7 +20913,7 @@ func (m *ConfigMapProjection) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -16709,7 +20929,7 @@ func (m *ConfigMapProjection) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16720,7 +20940,7 @@ func (m *ConfigMapProjection) Unmarshal(data []byte) error { m.Optional = &b default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -16739,8 +20959,8 @@ func (m *ConfigMapProjection) Unmarshal(data []byte) error { } return nil } -func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -16752,7 +20972,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -16780,7 +21000,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16794,7 +21014,7 @@ func (m 
*ConfigMapVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -16810,7 +21030,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16825,7 +21045,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -16841,7 +21061,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -16861,7 +21081,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -16872,7 +21092,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error { m.Optional = &b default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -16891,8 +21111,8 @@ func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *Container) Unmarshal(data []byte) error { - l := len(data) +func (m *Container) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -16904,7 +21124,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -16932,7 +21152,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -16947,7 +21167,7 @@ func (m *Container) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -16961,7 +21181,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -16976,7 +21196,7 @@ func (m *Container) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Image = string(data[iNdEx:postIndex]) + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -16990,7 +21210,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -17005,7 +21225,7 @@ func (m *Container) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Command = append(m.Command, string(data[iNdEx:postIndex])) + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 4: if wireType != 2 { @@ -17019,7 +21239,7 @@ 
func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -17034,7 +21254,7 @@ func (m *Container) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Args = append(m.Args, string(data[iNdEx:postIndex])) + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 5: if wireType != 2 { @@ -17048,7 +21268,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -17063,7 +21283,7 @@ func (m *Container) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.WorkingDir = string(data[iNdEx:postIndex]) + m.WorkingDir = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { @@ -17077,7 +21297,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17092,7 +21312,7 @@ func (m *Container) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Ports = append(m.Ports, ContainerPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17108,7 +21328,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17123,7 +21343,7 @@ func (m *Container) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Env = append(m.Env, EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17139,7 +21359,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17153,7 +21373,7 @@ func (m *Container) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17169,7 +21389,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17184,7 +21404,7 @@ func (m *Container) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) - if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17200,7 +21420,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17217,7 +21437,7 @@ func (m *Container) Unmarshal(data []byte) error { if m.LivenessProbe == nil { m.LivenessProbe = &Probe{} } - if err := 
m.LivenessProbe.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17233,7 +21453,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17250,7 +21470,7 @@ func (m *Container) Unmarshal(data []byte) error { if m.ReadinessProbe == nil { m.ReadinessProbe = &Probe{} } - if err := m.ReadinessProbe.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17266,7 +21486,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17283,7 +21503,7 @@ func (m *Container) Unmarshal(data []byte) error { if m.Lifecycle == nil { m.Lifecycle = &Lifecycle{} } - if err := m.Lifecycle.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17299,7 +21519,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -17314,7 +21534,7 @@ func (m *Container) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TerminationMessagePath = string(data[iNdEx:postIndex]) + m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 14: if wireType != 2 { @@ -17328,7 +21548,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -17343,7 +21563,7 @@ func (m *Container) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ImagePullPolicy = PullPolicy(data[iNdEx:postIndex]) + m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 15: if wireType != 2 { @@ -17357,7 +21577,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17374,7 +21594,7 @@ func (m *Container) Unmarshal(data []byte) error { if m.SecurityContext == nil { m.SecurityContext = &SecurityContext{} } - if err := m.SecurityContext.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17390,7 +21610,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17410,7 +21630,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17430,7 +21650,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17450,7 +21670,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := 
dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17465,7 +21685,7 @@ func (m *Container) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17481,7 +21701,7 @@ func (m *Container) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -17496,11 +21716,42 @@ func (m *Container) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TerminationMessagePolicy = TerminationMessagePolicy(data[iNdEx:postIndex]) + m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -17519,8 +21770,8 @@ func (m *Container) Unmarshal(data []byte) error { } return nil } -func (m *ContainerImage) Unmarshal(data []byte) error { - l := len(data) +func (m *ContainerImage) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -17532,7 +21783,7 @@ func (m *ContainerImage) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -17560,7 +21811,7 @@ func (m *ContainerImage) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -17575,7 +21826,7 @@ func (m *ContainerImage) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Names = append(m.Names, string(data[iNdEx:postIndex])) + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 0 { @@ -17589,7 +21840,7 @@ func (m *ContainerImage) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.SizeBytes |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -17598,7 +21849,7 @@ func (m *ContainerImage) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -17617,8 +21868,8 @@ func (m *ContainerImage) Unmarshal(data []byte) error { } return nil } -func (m *ContainerPort) Unmarshal(data []byte) error { - l := len(data) +func (m *ContainerPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { 
preIndex := iNdEx @@ -17630,7 +21881,7 @@ func (m *ContainerPort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -17658,7 +21909,7 @@ func (m *ContainerPort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -17673,7 +21924,7 @@ func (m *ContainerPort) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { @@ -17687,7 +21938,7 @@ func (m *ContainerPort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.HostPort |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -17706,7 +21957,7 @@ func (m *ContainerPort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.ContainerPort |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -17725,7 +21976,7 @@ func (m *ContainerPort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -17740,7 +21991,7 @@ func (m *ContainerPort) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = Protocol(data[iNdEx:postIndex]) + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -17754,7 +22005,7 @@ func (m *ContainerPort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -17769,11 +22020,11 @@ func (m *ContainerPort) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.HostIP = string(data[iNdEx:postIndex]) + m.HostIP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -17792,8 +22043,8 @@ func (m *ContainerPort) Unmarshal(data []byte) error { } return nil } -func (m *ContainerState) Unmarshal(data []byte) error { - l := len(data) +func (m *ContainerState) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -17805,7 +22056,7 @@ func (m *ContainerState) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -17833,7 +22084,7 @@ func (m *ContainerState) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17850,7 +22101,7 @@ func (m *ContainerState) Unmarshal(data []byte) error { if m.Waiting == nil { m.Waiting = &ContainerStateWaiting{} } - if err := m.Waiting.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Waiting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17866,7 +22117,7 @@ func (m *ContainerState) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17883,7 +22134,7 @@ func 
(m *ContainerState) Unmarshal(data []byte) error { if m.Running == nil { m.Running = &ContainerStateRunning{} } - if err := m.Running.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17899,7 +22150,7 @@ func (m *ContainerState) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17916,13 +22167,13 @@ func (m *ContainerState) Unmarshal(data []byte) error { if m.Terminated == nil { m.Terminated = &ContainerStateTerminated{} } - if err := m.Terminated.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Terminated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -17941,8 +22192,8 @@ func (m *ContainerState) Unmarshal(data []byte) error { } return nil } -func (m *ContainerStateRunning) Unmarshal(data []byte) error { - l := len(data) +func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -17954,7 +22205,7 @@ func (m *ContainerStateRunning) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -17982,7 +22233,7 @@ func (m *ContainerStateRunning) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -17996,13 +22247,13 @@ func (m *ContainerStateRunning) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.StartedAt.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -18021,8 +22272,8 @@ func (m *ContainerStateRunning) Unmarshal(data []byte) error { } return nil } -func (m *ContainerStateTerminated) Unmarshal(data []byte) error { - l := len(data) +func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -18034,7 +22285,7 @@ func (m *ContainerStateTerminated) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18062,7 +22313,7 @@ func (m *ContainerStateTerminated) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.ExitCode |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -18081,7 +22332,7 @@ func (m *ContainerStateTerminated) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Signal |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -18100,7 +22351,7 @@ func (m *ContainerStateTerminated) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18115,7 +22366,7 @@ func (m *ContainerStateTerminated) Unmarshal(data []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -18129,7 +22380,7 @@ func (m *ContainerStateTerminated) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18144,7 +22395,7 @@ func (m *ContainerStateTerminated) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -18158,7 +22409,7 @@ func (m *ContainerStateTerminated) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -18172,7 +22423,7 @@ func (m *ContainerStateTerminated) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.StartedAt.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -18188,7 +22439,7 @@ func (m *ContainerStateTerminated) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -18202,7 +22453,7 @@ func (m *ContainerStateTerminated) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.FinishedAt.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -18218,7 +22469,7 @@ func (m *ContainerStateTerminated) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18233,11 +22484,11 @@ func (m *ContainerStateTerminated) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerID = string(data[iNdEx:postIndex]) + m.ContainerID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -18256,8 +22507,8 @@ func (m *ContainerStateTerminated) Unmarshal(data []byte) error { } return nil } -func (m *ContainerStateWaiting) Unmarshal(data []byte) error { - l := len(data) +func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -18269,7 +22520,7 @@ func (m *ContainerStateWaiting) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18297,7 +22548,7 @@ func (m *ContainerStateWaiting) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18312,7 +22563,7 @@ func (m *ContainerStateWaiting) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -18326,7 +22577,7 @@ func (m *ContainerStateWaiting) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b 
:= data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18341,11 +22592,11 @@ func (m *ContainerStateWaiting) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -18364,8 +22615,8 @@ func (m *ContainerStateWaiting) Unmarshal(data []byte) error { } return nil } -func (m *ContainerStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *ContainerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -18377,7 +22628,7 @@ func (m *ContainerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18405,7 +22656,7 @@ func (m *ContainerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18420,7 +22671,7 @@ func (m *ContainerStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -18434,7 +22685,7 @@ func (m *ContainerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -18448,7 +22699,7 @@ func (m *ContainerStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.State.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -18464,7 +22715,7 @@ func (m *ContainerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -18478,7 +22729,7 @@ func (m *ContainerStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTerminationState.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastTerminationState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -18494,7 +22745,7 @@ func (m *ContainerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -18514,7 +22765,7 @@ func (m *ContainerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.RestartCount |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -18533,7 +22784,7 @@ func (m *ContainerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18548,7 +22799,7 @@ func (m *ContainerStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Image = string(data[iNdEx:postIndex]) + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 2 { @@ -18562,7 +22813,7 @@ func (m *ContainerStatus) Unmarshal(data []byte) 
error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18577,7 +22828,7 @@ func (m *ContainerStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ImageID = string(data[iNdEx:postIndex]) + m.ImageID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 8: if wireType != 2 { @@ -18591,7 +22842,7 @@ func (m *ContainerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18606,11 +22857,11 @@ func (m *ContainerStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerID = string(data[iNdEx:postIndex]) + m.ContainerID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -18629,8 +22880,8 @@ func (m *ContainerStatus) Unmarshal(data []byte) error { } return nil } -func (m *DaemonEndpoint) Unmarshal(data []byte) error { - l := len(data) +func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -18642,7 +22893,7 @@ func (m *DaemonEndpoint) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18670,7 +22921,7 @@ func (m *DaemonEndpoint) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Port |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -18679,7 +22930,7 @@ func (m *DaemonEndpoint) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -18698,8 +22949,8 @@ func (m *DaemonEndpoint) Unmarshal(data []byte) error { } return nil } -func (m *DeleteOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -18711,161 +22962,7 @@ func (m *DeleteOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GracePeriodSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.GracePeriodSeconds = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Preconditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Preconditions == nil { - m.Preconditions = &Preconditions{} - } - if err := m.Preconditions.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OrphanDependents", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.OrphanDependents = &b - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PropagationPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := DeletionPropagation(data[iNdEx:postIndex]) - m.PropagationPolicy = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DownwardAPIProjection) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18893,7 +22990,7 @@ func (m *DownwardAPIProjection) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -18908,13 +23005,13 @@ func (m *DownwardAPIProjection) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, DownwardAPIVolumeFile{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -18933,8 +23030,8 @@ func (m *DownwardAPIProjection) Unmarshal(data []byte) error { } return nil } -func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { - l := len(data) +func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -18946,7 +23043,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18974,7 +23071,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { if iNdEx 
>= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -18989,7 +23086,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(data[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -19003,7 +23100,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -19020,7 +23117,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { if m.FieldRef == nil { m.FieldRef = &ObjectFieldSelector{} } - if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -19036,7 +23133,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -19053,7 +23150,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { if m.ResourceFieldRef == nil { m.ResourceFieldRef = &ResourceFieldSelector{} } - if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -19069,7 +23166,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -19079,7 +23176,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { m.Mode = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -19098,8 +23195,8 @@ func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { } return nil } -func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -19111,7 +23208,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -19139,7 +23236,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -19154,7 +23251,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, DownwardAPIVolumeFile{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -19170,7 +23267,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -19180,7 +23277,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { m.DefaultMode = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := 
skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -19199,8 +23296,8 @@ func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *EmptyDirVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -19212,7 +23309,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -19240,7 +23337,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -19255,11 +23352,44 @@ func (m *EmptyDirVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Medium = StorageMedium(data[iNdEx:postIndex]) + m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SizeLimit == nil { + m.SizeLimit = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -19278,8 +23408,8 @@ func (m *EmptyDirVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *EndpointAddress) Unmarshal(data []byte) error { - l := len(data) +func (m *EndpointAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -19291,7 +23421,7 @@ func (m *EndpointAddress) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -19319,7 +23449,7 @@ func (m *EndpointAddress) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -19334,7 +23464,7 @@ func (m *EndpointAddress) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(data[iNdEx:postIndex]) + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -19348,7 +23478,7 @@ func (m *EndpointAddress) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -19365,7 +23495,7 @@ func (m *EndpointAddress) Unmarshal(data []byte) error { if m.TargetRef == nil { m.TargetRef = &ObjectReference{} } - if err := m.TargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -19381,7 +23511,7 @@ func (m *EndpointAddress) Unmarshal(data []byte) error 
{ if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -19396,7 +23526,7 @@ func (m *EndpointAddress) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(data[iNdEx:postIndex]) + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -19410,7 +23540,7 @@ func (m *EndpointAddress) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -19425,12 +23555,12 @@ func (m *EndpointAddress) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.NodeName = &s iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -19449,8 +23579,8 @@ func (m *EndpointAddress) Unmarshal(data []byte) error { } return nil } -func (m *EndpointPort) Unmarshal(data []byte) error { - l := len(data) +func (m *EndpointPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -19462,7 +23592,7 @@ func (m *EndpointPort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -19490,7 +23620,7 @@ func (m *EndpointPort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -19505,7 +23635,7 @@ func (m *EndpointPort) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { @@ -19519,7 +23649,7 @@ func (m *EndpointPort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Port |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -19538,7 +23668,7 @@ func (m *EndpointPort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -19553,11 +23683,11 @@ func (m *EndpointPort) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = Protocol(data[iNdEx:postIndex]) + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -19576,8 +23706,8 @@ func (m *EndpointPort) Unmarshal(data []byte) error { } return nil } -func (m *EndpointSubset) Unmarshal(data []byte) error { - l := len(data) +func (m *EndpointSubset) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -19589,7 +23719,7 @@ func (m *EndpointSubset) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -19617,7 +23747,7 @@ func (m *EndpointSubset) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 
0x80 { @@ -19632,7 +23762,7 @@ func (m *EndpointSubset) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Addresses = append(m.Addresses, EndpointAddress{}) - if err := m.Addresses[len(m.Addresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -19648,7 +23778,7 @@ func (m *EndpointSubset) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -19663,7 +23793,7 @@ func (m *EndpointSubset) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) - if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -19679,7 +23809,7 @@ func (m *EndpointSubset) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -19694,13 +23824,13 @@ func (m *EndpointSubset) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Ports = append(m.Ports, EndpointPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -19719,8 +23849,8 @@ func (m *EndpointSubset) Unmarshal(data []byte) error { } return nil } -func (m *Endpoints) Unmarshal(data []byte) error { - l := len(data) +func (m *Endpoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -19732,7 +23862,7 @@ func (m *Endpoints) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -19760,7 +23890,7 @@ func (m *Endpoints) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -19774,7 +23904,7 @@ func (m *Endpoints) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -19790,7 +23920,7 @@ func (m *Endpoints) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -19805,13 +23935,13 @@ func (m *Endpoints) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Subsets = append(m.Subsets, EndpointSubset{}) - if err := m.Subsets[len(m.Subsets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -19830,8 +23960,8 @@ func (m *Endpoints) Unmarshal(data []byte) error { } return nil 
} -func (m *EndpointsList) Unmarshal(data []byte) error { - l := len(data) +func (m *EndpointsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -19843,7 +23973,7 @@ func (m *EndpointsList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -19871,7 +24001,7 @@ func (m *EndpointsList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -19885,7 +24015,7 @@ func (m *EndpointsList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -19901,7 +24031,7 @@ func (m *EndpointsList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -19916,13 +24046,13 @@ func (m *EndpointsList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, Endpoints{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -19941,8 +24071,8 @@ func (m *EndpointsList) Unmarshal(data []byte) error { } return nil } -func (m *EnvFromSource) Unmarshal(data []byte) error { - l := len(data) +func (m *EnvFromSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -19954,7 +24084,7 @@ func (m *EnvFromSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -19982,7 +24112,7 @@ func (m *EnvFromSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -19997,7 +24127,7 @@ func (m *EnvFromSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Prefix = string(data[iNdEx:postIndex]) + m.Prefix = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -20011,7 +24141,7 @@ func (m *EnvFromSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -20028,7 +24158,7 @@ func (m *EnvFromSource) Unmarshal(data []byte) error { if m.ConfigMapRef == nil { m.ConfigMapRef = &ConfigMapEnvSource{} } - if err := m.ConfigMapRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -20044,7 +24174,7 @@ func (m *EnvFromSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -20061,13 +24191,13 @@ func (m *EnvFromSource) Unmarshal(data []byte) error { if m.SecretRef == nil { m.SecretRef = &SecretEnvSource{} } - if err := 
m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -20086,8 +24216,8 @@ func (m *EnvFromSource) Unmarshal(data []byte) error { } return nil } -func (m *EnvVar) Unmarshal(data []byte) error { - l := len(data) +func (m *EnvVar) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -20099,7 +24229,7 @@ func (m *EnvVar) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -20127,7 +24257,7 @@ func (m *EnvVar) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -20142,7 +24272,7 @@ func (m *EnvVar) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -20156,7 +24286,7 @@ func (m *EnvVar) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -20171,7 +24301,7 @@ func (m *EnvVar) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(data[iNdEx:postIndex]) + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -20185,7 +24315,7 @@ func (m *EnvVar) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -20202,13 +24332,13 @@ func (m *EnvVar) Unmarshal(data []byte) error { if m.ValueFrom == nil { m.ValueFrom = &EnvVarSource{} } - if err := m.ValueFrom.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -20227,8 +24357,8 @@ func (m *EnvVar) Unmarshal(data []byte) error { } return nil } -func (m *EnvVarSource) Unmarshal(data []byte) error { - l := len(data) +func (m *EnvVarSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -20240,7 +24370,7 @@ func (m *EnvVarSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -20268,7 +24398,7 @@ func (m *EnvVarSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -20285,7 +24415,7 @@ func (m *EnvVarSource) Unmarshal(data []byte) error { if m.FieldRef == nil { m.FieldRef = &ObjectFieldSelector{} } - if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -20301,7 +24431,7 @@ func (m *EnvVarSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := 
dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -20318,7 +24448,7 @@ func (m *EnvVarSource) Unmarshal(data []byte) error { if m.ResourceFieldRef == nil { m.ResourceFieldRef = &ResourceFieldSelector{} } - if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -20334,7 +24464,7 @@ func (m *EnvVarSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -20351,7 +24481,7 @@ func (m *EnvVarSource) Unmarshal(data []byte) error { if m.ConfigMapKeyRef == nil { m.ConfigMapKeyRef = &ConfigMapKeySelector{} } - if err := m.ConfigMapKeyRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -20367,7 +24497,7 @@ func (m *EnvVarSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -20384,13 +24514,13 @@ func (m *EnvVarSource) Unmarshal(data []byte) error { if m.SecretKeyRef == nil { m.SecretKeyRef = &SecretKeySelector{} } - if err := m.SecretKeyRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -20409,8 +24539,8 @@ func (m *EnvVarSource) Unmarshal(data []byte) error { } return nil } -func (m *Event) Unmarshal(data []byte) error { - l := len(data) +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -20422,7 +24552,7 @@ func (m *Event) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -20450,7 +24580,7 @@ func (m *Event) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -20464,7 +24594,7 @@ func (m *Event) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -20480,7 +24610,7 @@ func (m *Event) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -20494,7 +24624,7 @@ func (m *Event) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.InvolvedObject.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.InvolvedObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -20510,7 +24640,7 @@ func (m *Event) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -20525,7 +24655,7 @@ func (m *Event) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.Reason = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -20539,7 +24669,7 @@ func (m *Event) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -20554,7 +24684,7 @@ func (m *Event) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -20568,7 +24698,7 @@ func (m *Event) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -20582,7 +24712,7 @@ func (m *Event) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Source.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -20598,7 +24728,7 @@ func (m *Event) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -20612,7 +24742,7 @@ func (m *Event) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.FirstTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.FirstTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -20628,7 +24758,7 @@ func (m *Event) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -20642,7 +24772,7 @@ func (m *Event) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -20658,7 +24788,7 @@ func (m *Event) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Count |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -20677,7 +24807,7 @@ func (m *Event) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -20692,11 +24822,194 @@ func (m *Event) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(data[iNdEx:postIndex]) + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Series == nil { + m.Series = &EventSeries{} + } + if err := m.Series.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Related", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Related == nil { + m.Related = &ObjectReference{} + } + if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingController", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingController = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingInstance", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingInstance = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -20715,8 +25028,8 @@ func (m *Event) Unmarshal(data []byte) error { } return nil } -func (m *EventList) Unmarshal(data []byte) error { - l := len(data) +func (m *EventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -20728,7 +25041,7 @@ func 
(m *EventList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -20756,7 +25069,7 @@ func (m *EventList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -20770,7 +25083,7 @@ func (m *EventList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -20786,7 +25099,7 @@ func (m *EventList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -20801,13 +25114,13 @@ func (m *EventList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, Event{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -20826,8 +25139,8 @@ func (m *EventList) Unmarshal(data []byte) error { } return nil } -func (m *EventSource) Unmarshal(data []byte) error { - l := len(data) +func (m *EventSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -20839,7 +25152,135 @@ func (m *EventSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastObservedTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastObservedTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) 
& 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = EventSeriesState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -20867,7 +25308,7 @@ func (m *EventSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -20882,7 +25323,7 @@ func (m *EventSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Component = string(data[iNdEx:postIndex]) + m.Component = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -20896,7 +25337,7 @@ func (m *EventSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -20911,11 +25352,11 @@ func (m *EventSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(data[iNdEx:postIndex]) + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -20934,8 +25375,8 @@ func (m *EventSource) Unmarshal(data []byte) error { } return nil } -func (m *ExecAction) Unmarshal(data []byte) error { - l := len(data) +func (m *ExecAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -20947,7 +25388,7 @@ func (m *ExecAction) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -20975,7 +25416,7 @@ func (m *ExecAction) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -20990,11 +25431,11 @@ func (m *ExecAction) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Command = append(m.Command, string(data[iNdEx:postIndex])) + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -21013,8 +25454,8 @@ func (m *ExecAction) Unmarshal(data []byte) error { } return nil } -func (m *FCVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -21026,7 +25467,7 @@ func (m *FCVolumeSource) Unmarshal(data 
[]byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21054,7 +25495,7 @@ func (m *FCVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21069,7 +25510,7 @@ func (m *FCVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TargetWWNs = append(m.TargetWWNs, string(data[iNdEx:postIndex])) + m.TargetWWNs = append(m.TargetWWNs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 0 { @@ -21083,7 +25524,7 @@ func (m *FCVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -21103,7 +25544,7 @@ func (m *FCVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21118,7 +25559,7 @@ func (m *FCVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(data[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 0 { @@ -21132,7 +25573,7 @@ func (m *FCVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -21140,9 +25581,38 @@ func (m *FCVolumeSource) Unmarshal(data []byte) error { } } m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WWIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WWIDs = append(m.WWIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -21161,8 +25631,8 @@ func (m *FCVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *FlexVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -21174,7 +25644,284 @@ func (m *FlexVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlexPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlexPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = 
postStringIndexmapkey + if m.Options == nil { + m.Options = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Options[mapkey] = mapvalue + } else { + var mapvalue string + m.Options[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21202,7 +25949,7 @@ func (m *FlexVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21217,7 +25964,7 @@ func (m *FlexVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Driver = string(data[iNdEx:postIndex]) + m.Driver = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -21231,7 +25978,7 @@ func (m *FlexVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21246,7 +25993,7 @@ func (m *FlexVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(data[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -21260,7 +26007,7 @@ func (m *FlexVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -21277,7 +26024,7 @@ func (m *FlexVolumeSource) Unmarshal(data []byte) error { if m.SecretRef == nil { m.SecretRef = &LocalObjectReference{} } - if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -21293,7 +26040,7 @@ func (m *FlexVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 
{ @@ -21313,7 +26060,7 @@ func (m *FlexVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -21335,7 +26082,7 @@ func (m *FlexVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21350,7 +26097,7 @@ func (m *FlexVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21365,56 +26112,61 @@ func (m *FlexVolumeSource) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue if m.Options == nil { m.Options = make(map[string]string) } - m.Options[mapkey] = mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Options[mapkey] = mapvalue + } else { + var mapvalue string + m.Options[mapkey] = mapvalue + } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -21433,8 +26185,8 @@ func (m *FlexVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *FlockerVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -21446,7 +26198,7 @@ func (m *FlockerVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return 
io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21474,7 +26226,7 @@ func (m *FlockerVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21489,7 +26241,7 @@ func (m *FlockerVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DatasetName = string(data[iNdEx:postIndex]) + m.DatasetName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -21503,7 +26255,7 @@ func (m *FlockerVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21518,11 +26270,11 @@ func (m *FlockerVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DatasetUUID = string(data[iNdEx:postIndex]) + m.DatasetUUID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -21541,8 +26293,8 @@ func (m *FlockerVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *GCEPersistentDiskVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -21554,7 +26306,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21582,7 +26334,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21597,7 +26349,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PDName = string(data[iNdEx:postIndex]) + m.PDName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -21611,7 +26363,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21626,7 +26378,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(data[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -21640,7 +26392,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Partition |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -21659,7 +26411,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -21669,7 +26421,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(data []byte) error { m.ReadOnly = bool(v != 0) default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err 
!= nil { return err } @@ -21688,8 +26440,8 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *GitRepoVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -21701,7 +26453,7 @@ func (m *GitRepoVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21729,7 +26481,7 @@ func (m *GitRepoVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21744,7 +26496,7 @@ func (m *GitRepoVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Repository = string(data[iNdEx:postIndex]) + m.Repository = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -21758,7 +26510,7 @@ func (m *GitRepoVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21773,7 +26525,7 @@ func (m *GitRepoVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Revision = string(data[iNdEx:postIndex]) + m.Revision = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -21787,7 +26539,7 @@ func (m *GitRepoVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21802,11 +26554,11 @@ func (m *GitRepoVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Directory = string(data[iNdEx:postIndex]) + m.Directory = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -21825,8 +26577,8 @@ func (m *GitRepoVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *GlusterfsVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -21838,7 +26590,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21866,7 +26618,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21881,7 +26633,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.EndpointsName = string(data[iNdEx:postIndex]) + m.EndpointsName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -21895,7 +26647,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21910,7 +26662,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(data []byte) error { if 
postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(data[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -21924,7 +26676,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -21934,7 +26686,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(data []byte) error { m.ReadOnly = bool(v != 0) default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -21953,8 +26705,8 @@ func (m *GlusterfsVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *HTTPGetAction) Unmarshal(data []byte) error { - l := len(data) +func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -21966,7 +26718,7 @@ func (m *HTTPGetAction) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -21994,7 +26746,7 @@ func (m *HTTPGetAction) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22009,7 +26761,7 @@ func (m *HTTPGetAction) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(data[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -22023,7 +26775,7 @@ func (m *HTTPGetAction) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -22037,7 +26789,7 @@ func (m *HTTPGetAction) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -22053,7 +26805,7 @@ func (m *HTTPGetAction) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22068,7 +26820,7 @@ func (m *HTTPGetAction) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(data[iNdEx:postIndex]) + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -22082,7 +26834,7 @@ func (m *HTTPGetAction) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22097,7 +26849,7 @@ func (m *HTTPGetAction) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Scheme = URIScheme(data[iNdEx:postIndex]) + m.Scheme = URIScheme(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -22111,7 +26863,7 @@ func (m *HTTPGetAction) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -22126,13 +26878,13 @@ func (m *HTTPGetAction) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.HTTPHeaders = append(m.HTTPHeaders, HTTPHeader{}) - if err := 
m.HTTPHeaders[len(m.HTTPHeaders)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.HTTPHeaders[len(m.HTTPHeaders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -22151,8 +26903,8 @@ func (m *HTTPGetAction) Unmarshal(data []byte) error { } return nil } -func (m *HTTPHeader) Unmarshal(data []byte) error { - l := len(data) +func (m *HTTPHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -22164,7 +26916,7 @@ func (m *HTTPHeader) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22192,7 +26944,7 @@ func (m *HTTPHeader) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22207,7 +26959,7 @@ func (m *HTTPHeader) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -22221,7 +26973,7 @@ func (m *HTTPHeader) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22236,11 +26988,11 @@ func (m *HTTPHeader) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(data[iNdEx:postIndex]) + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -22259,8 +27011,8 @@ func (m *HTTPHeader) Unmarshal(data []byte) error { } return nil } -func (m *Handler) Unmarshal(data []byte) error { - l := len(data) +func (m *Handler) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -22272,7 +27024,7 @@ func (m *Handler) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22300,7 +27052,7 @@ func (m *Handler) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -22317,7 +27069,7 @@ func (m *Handler) Unmarshal(data []byte) error { if m.Exec == nil { m.Exec = &ExecAction{} } - if err := m.Exec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Exec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -22333,7 +27085,7 @@ func (m *Handler) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -22350,7 +27102,7 @@ func (m *Handler) Unmarshal(data []byte) error { if m.HTTPGet == nil { m.HTTPGet = &HTTPGetAction{} } - if err := m.HTTPGet.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.HTTPGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -22366,7 +27118,7 @@ func (m *Handler) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := 
dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -22383,13 +27135,13 @@ func (m *Handler) Unmarshal(data []byte) error { if m.TCPSocket == nil { m.TCPSocket = &TCPSocketAction{} } - if err := m.TCPSocket.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.TCPSocket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -22408,8 +27160,8 @@ func (m *Handler) Unmarshal(data []byte) error { } return nil } -func (m *HostPathVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *HostAlias) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -22421,7 +27173,115 @@ func (m *HostPathVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostAlias: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostAlias: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostnames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostnames = append(m.Hostnames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22449,7 +27309,7 @@ func (m *HostPathVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 
0x80 { @@ -22464,11 +27324,41 @@ func (m *HostPathVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(data[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := HostPathType(dAtA[iNdEx:postIndex]) + m.Type = &s iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -22487,8 +27377,8 @@ func (m *HostPathVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -22500,7 +27390,344 @@ func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetPortal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IQN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + m.Lun = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lun |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + 
} + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ISCSIInterface = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DiscoveryCHAPAuth = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", 
wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SessionCHAPAuth = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.InitiatorName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22528,7 +27755,7 @@ func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22543,7 +27770,7 @@ func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TargetPortal = string(data[iNdEx:postIndex]) + m.TargetPortal = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -22557,7 +27784,7 @@ func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22572,7 +27799,7 @@ func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IQN = string(data[iNdEx:postIndex]) + m.IQN = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -22586,7 +27813,7 @@ func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Lun |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -22605,7 +27832,7 @@ func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22620,7 +27847,7 @@ func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ISCSIInterface = string(data[iNdEx:postIndex]) + m.ISCSIInterface = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -22634,7 +27861,7 @@ func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen 
|= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22649,7 +27876,7 @@ func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(data[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 0 { @@ -22663,7 +27890,7 @@ func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -22683,7 +27910,7 @@ func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22698,11 +27925,114 @@ func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Portals = append(m.Portals, string(data[iNdEx:postIndex])) + m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DiscoveryCHAPAuth = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SessionCHAPAuth = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.InitiatorName = &s iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -22721,8 +28051,8 @@ func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *KeyToPath) Unmarshal(data []byte) error { - l := len(data) +func (m *KeyToPath) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -22734,7 +28064,7 @@ func (m *KeyToPath) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22762,7 +28092,7 @@ func (m *KeyToPath) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22777,7 +28107,7 @@ func (m *KeyToPath) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(data[iNdEx:postIndex]) + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -22791,7 +28121,7 @@ func (m *KeyToPath) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22806,7 +28136,7 @@ func (m *KeyToPath) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(data[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -22820,7 +28150,7 @@ func (m *KeyToPath) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -22830,7 +28160,7 @@ func (m *KeyToPath) Unmarshal(data []byte) error { m.Mode = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -22849,8 +28179,8 @@ func (m *KeyToPath) Unmarshal(data []byte) error { } return nil } -func (m *Lifecycle) Unmarshal(data []byte) error { - l := len(data) +func (m *Lifecycle) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -22862,7 +28192,7 @@ func (m *Lifecycle) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -22890,7 +28220,7 @@ func (m *Lifecycle) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -22907,7 +28237,7 @@ func (m *Lifecycle) Unmarshal(data []byte) error { if m.PostStart == nil { m.PostStart = &Handler{} } - if err := m.PostStart.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PostStart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -22923,7 +28253,7 @@ func (m *Lifecycle) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -22940,13 +28270,13 @@ func (m *Lifecycle) Unmarshal(data []byte) error { if m.PreStop == nil { m.PreStop = &Handler{} } - if err := m.PreStop.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PreStop.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -22965,8 +28295,8 @@ func (m *Lifecycle) Unmarshal(data []byte) error { } return nil } -func (m *LimitRange) Unmarshal(data []byte) error { - l := len(data) +func (m *LimitRange) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -22978,7 +28308,7 @@ func (m *LimitRange) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23006,7 +28336,7 @@ func (m *LimitRange) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -23020,7 +28350,7 @@ func (m *LimitRange) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -23036,7 +28366,7 @@ func (m *LimitRange) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -23050,13 +28380,13 @@ func (m *LimitRange) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -23075,8 +28405,8 @@ func (m *LimitRange) Unmarshal(data []byte) error { } return nil } -func (m *LimitRangeItem) Unmarshal(data []byte) error { - l := len(data) +func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -23088,7 +28418,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23116,7 +28446,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23131,7 +28461,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = LimitType(data[iNdEx:postIndex]) + m.Type = LimitType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -23145,7 +28475,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -23167,7 +28497,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23182,7 +28512,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23197,57 +28527,62 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.Max == nil { m.Max = make(ResourceList) } - m.Max[ResourceName(mapkey)] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Max[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Max[ResourceName(mapkey)] = mapvalue + } iNdEx = postIndex case 3: if wireType != 2 { @@ -23261,7 +28596,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -23283,7 +28618,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23298,7 +28633,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23313,57 +28648,62 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - 
return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.Min == nil { m.Min = make(ResourceList) } - m.Min[ResourceName(mapkey)] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Min[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Min[ResourceName(mapkey)] = mapvalue + } iNdEx = postIndex case 4: if wireType != 2 { @@ -23377,7 +28717,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -23399,7 +28739,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23414,7 +28754,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23429,57 +28769,62 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.Default == nil { m.Default = make(ResourceList) } 
- m.Default[ResourceName(mapkey)] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Default[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Default[ResourceName(mapkey)] = mapvalue + } iNdEx = postIndex case 5: if wireType != 2 { @@ -23493,7 +28838,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -23515,7 +28860,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23530,7 +28875,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23545,57 +28890,62 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.DefaultRequest == nil { m.DefaultRequest = make(ResourceList) } - m.DefaultRequest[ResourceName(mapkey)] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.DefaultRequest[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.DefaultRequest[ResourceName(mapkey)] = mapvalue + } iNdEx = postIndex case 6: if wireType != 2 { @@ -23609,7 +28959,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -23631,7 +28981,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23646,7 +28996,7 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23661,61 +29011,66 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.MaxLimitRequestRatio == nil { m.MaxLimitRequestRatio = make(ResourceList) } - m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 
{ + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.MaxLimitRequestRatio[ResourceName(mapkey)] = mapvalue + } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -23734,8 +29089,8 @@ func (m *LimitRangeItem) Unmarshal(data []byte) error { } return nil } -func (m *LimitRangeList) Unmarshal(data []byte) error { - l := len(data) +func (m *LimitRangeList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -23747,7 +29102,7 @@ func (m *LimitRangeList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23775,7 +29130,7 @@ func (m *LimitRangeList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -23789,7 +29144,7 @@ func (m *LimitRangeList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -23805,7 +29160,7 @@ func (m *LimitRangeList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -23820,13 +29175,13 @@ func (m *LimitRangeList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, LimitRange{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -23845,8 +29200,8 @@ func (m *LimitRangeList) Unmarshal(data []byte) error { } return nil } -func (m *LimitRangeSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -23858,7 +29213,7 @@ func (m *LimitRangeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23886,7 +29241,7 @@ func (m *LimitRangeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -23901,13 +29256,13 @@ func (m *LimitRangeSpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Limits = append(m.Limits, LimitRangeItem{}) - if err := m.Limits[len(m.Limits)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Limits[len(m.Limits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := 
skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -23926,8 +29281,8 @@ func (m *LimitRangeSpec) Unmarshal(data []byte) error { } return nil } -func (m *List) Unmarshal(data []byte) error { - l := len(data) +func (m *List) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -23939,7 +29294,7 @@ func (m *List) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -23967,7 +29322,7 @@ func (m *List) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -23981,7 +29336,7 @@ func (m *List) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -23997,7 +29352,7 @@ func (m *List) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -24012,13 +29367,13 @@ func (m *List) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -24037,8 +29392,8 @@ func (m *List) Unmarshal(data []byte) error { } return nil } -func (m *ListOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -24050,184 +29405,7 @@ func (m *ListOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LabelSelector = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FieldSelector = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Watch = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TimeoutSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LoadBalancerIngress) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -24255,7 +29433,7 @@ func (m *LoadBalancerIngress) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -24270,7 +29448,7 @@ func (m *LoadBalancerIngress) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(data[iNdEx:postIndex]) + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -24284,7 +29462,7 @@ func (m *LoadBalancerIngress) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -24299,11 +29477,11 @@ func (m *LoadBalancerIngress) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(data[iNdEx:postIndex]) + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return 
err } @@ -24322,8 +29500,8 @@ func (m *LoadBalancerIngress) Unmarshal(data []byte) error { } return nil } -func (m *LoadBalancerStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -24335,7 +29513,7 @@ func (m *LoadBalancerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -24363,7 +29541,7 @@ func (m *LoadBalancerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -24378,13 +29556,13 @@ func (m *LoadBalancerStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Ingress = append(m.Ingress, LoadBalancerIngress{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -24403,8 +29581,8 @@ func (m *LoadBalancerStatus) Unmarshal(data []byte) error { } return nil } -func (m *LocalObjectReference) Unmarshal(data []byte) error { - l := len(data) +func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -24416,7 +29594,7 @@ func (m *LocalObjectReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -24444,7 +29622,7 @@ func (m *LocalObjectReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -24459,11 +29637,11 @@ func (m *LocalObjectReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -24482,8 +29660,8 @@ func (m *LocalObjectReference) Unmarshal(data []byte) error { } return nil } -func (m *NFSVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -24495,7 +29673,86 @@ func (m *NFSVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -24523,7 +29780,7 @@ func (m *NFSVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -24538,7 +29795,7 @@ func (m *NFSVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Server = string(data[iNdEx:postIndex]) + m.Server = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -24552,7 +29809,7 @@ func (m *NFSVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -24567,7 +29824,7 @@ func (m *NFSVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(data[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -24581,7 +29838,7 @@ func (m *NFSVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -24591,7 +29848,7 @@ func (m *NFSVolumeSource) Unmarshal(data []byte) error { m.ReadOnly = bool(v != 0) default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -24610,8 +29867,8 @@ func (m *NFSVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *Namespace) Unmarshal(data []byte) error { - l := len(data) +func (m *Namespace) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -24623,7 +29880,7 @@ func (m *Namespace) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -24651,7 +29908,7 @@ func (m *Namespace) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -24665,7 +29922,7 @@ func (m *Namespace) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -24681,7 +29938,7 @@ func (m *Namespace) Unmarshal(data []byte) error { 
if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -24695,7 +29952,7 @@ func (m *Namespace) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -24711,7 +29968,7 @@ func (m *Namespace) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -24725,13 +29982,13 @@ func (m *Namespace) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -24750,8 +30007,8 @@ func (m *Namespace) Unmarshal(data []byte) error { } return nil } -func (m *NamespaceList) Unmarshal(data []byte) error { - l := len(data) +func (m *NamespaceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -24763,7 +30020,7 @@ func (m *NamespaceList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -24791,7 +30048,7 @@ func (m *NamespaceList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -24805,7 +30062,7 @@ func (m *NamespaceList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -24821,7 +30078,7 @@ func (m *NamespaceList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -24836,13 +30093,13 @@ func (m *NamespaceList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, Namespace{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -24861,8 +30118,8 @@ func (m *NamespaceList) Unmarshal(data []byte) error { } return nil } -func (m *NamespaceSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -24874,7 +30131,7 @@ func (m *NamespaceSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -24902,7 +30159,7 @@ func (m *NamespaceSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -24917,11 
+30174,11 @@ func (m *NamespaceSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Finalizers = append(m.Finalizers, FinalizerName(data[iNdEx:postIndex])) + m.Finalizers = append(m.Finalizers, FinalizerName(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -24940,8 +30197,8 @@ func (m *NamespaceSpec) Unmarshal(data []byte) error { } return nil } -func (m *NamespaceStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -24953,7 +30210,7 @@ func (m *NamespaceStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -24981,7 +30238,7 @@ func (m *NamespaceStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -24996,11 +30253,11 @@ func (m *NamespaceStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Phase = NamespacePhase(data[iNdEx:postIndex]) + m.Phase = NamespacePhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -25019,8 +30276,8 @@ func (m *NamespaceStatus) Unmarshal(data []byte) error { } return nil } -func (m *Node) Unmarshal(data []byte) error { - l := len(data) +func (m *Node) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -25032,7 +30289,7 @@ func (m *Node) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25060,7 +30317,7 @@ func (m *Node) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -25074,7 +30331,7 @@ func (m *Node) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25090,7 +30347,7 @@ func (m *Node) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -25104,7 +30361,7 @@ func (m *Node) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25120,7 +30377,7 @@ func (m *Node) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -25134,13 +30391,13 @@ func (m *Node) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = 
preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -25159,8 +30416,8 @@ func (m *Node) Unmarshal(data []byte) error { } return nil } -func (m *NodeAddress) Unmarshal(data []byte) error { - l := len(data) +func (m *NodeAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -25172,7 +30429,7 @@ func (m *NodeAddress) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25200,7 +30457,7 @@ func (m *NodeAddress) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25215,7 +30472,7 @@ func (m *NodeAddress) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = NodeAddressType(data[iNdEx:postIndex]) + m.Type = NodeAddressType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -25229,7 +30486,7 @@ func (m *NodeAddress) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25244,11 +30501,11 @@ func (m *NodeAddress) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Address = string(data[iNdEx:postIndex]) + m.Address = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -25267,8 +30524,8 @@ func (m *NodeAddress) Unmarshal(data []byte) error { } return nil } -func (m *NodeAffinity) Unmarshal(data []byte) error { - l := len(data) +func (m *NodeAffinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -25280,7 +30537,7 @@ func (m *NodeAffinity) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25308,7 +30565,7 @@ func (m *NodeAffinity) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -25325,7 +30582,7 @@ func (m *NodeAffinity) Unmarshal(data []byte) error { if m.RequiredDuringSchedulingIgnoredDuringExecution == nil { m.RequiredDuringSchedulingIgnoredDuringExecution = &NodeSelector{} } - if err := m.RequiredDuringSchedulingIgnoredDuringExecution.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RequiredDuringSchedulingIgnoredDuringExecution.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25341,7 +30598,7 @@ func (m *NodeAffinity) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -25356,13 +30613,13 @@ func (m *NodeAffinity) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, PreferredSchedulingTerm{}) - if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := 
m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -25381,8 +30638,8 @@ func (m *NodeAffinity) Unmarshal(data []byte) error { } return nil } -func (m *NodeCondition) Unmarshal(data []byte) error { - l := len(data) +func (m *NodeCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -25394,7 +30651,7 @@ func (m *NodeCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25422,7 +30679,7 @@ func (m *NodeCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25437,7 +30694,7 @@ func (m *NodeCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = NodeConditionType(data[iNdEx:postIndex]) + m.Type = NodeConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -25451,7 +30708,7 @@ func (m *NodeCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25466,7 +30723,7 @@ func (m *NodeCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = ConditionStatus(data[iNdEx:postIndex]) + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -25480,7 +30737,7 @@ func (m *NodeCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -25494,7 +30751,7 @@ func (m *NodeCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastHeartbeatTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastHeartbeatTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25510,7 +30767,7 @@ func (m *NodeCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -25524,7 +30781,7 @@ func (m *NodeCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25540,7 +30797,7 @@ func (m *NodeCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25555,7 +30812,7 @@ func (m *NodeCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { @@ -25569,7 +30826,7 @@ func (m *NodeCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] 
iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25584,11 +30841,11 @@ func (m *NodeCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -25607,8 +30864,8 @@ func (m *NodeCondition) Unmarshal(data []byte) error { } return nil } -func (m *NodeDaemonEndpoints) Unmarshal(data []byte) error { - l := len(data) +func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -25620,7 +30877,268 @@ func (m *NodeDaemonEndpoints) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeConfigSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &ConfigMapNodeConfigSource{} + } + if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeConfigStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Assigned", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Assigned == nil { + 
m.Assigned = &NodeConfigSource{} + } + if err := m.Assigned.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Active == nil { + m.Active = &NodeConfigSource{} + } + if err := m.Active.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastKnownGood", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastKnownGood == nil { + m.LastKnownGood = &NodeConfigSource{} + } + if err := m.LastKnownGood.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25648,7 +31166,7 @@ func (m *NodeDaemonEndpoints) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -25662,13 +31180,13 @@ func (m *NodeDaemonEndpoints) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.KubeletEndpoint.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.KubeletEndpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ 
-25687,8 +31205,8 @@ func (m *NodeDaemonEndpoints) Unmarshal(data []byte) error { } return nil } -func (m *NodeList) Unmarshal(data []byte) error { - l := len(data) +func (m *NodeList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -25700,7 +31218,7 @@ func (m *NodeList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25728,7 +31246,7 @@ func (m *NodeList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -25742,7 +31260,7 @@ func (m *NodeList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25758,7 +31276,7 @@ func (m *NodeList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -25773,13 +31291,13 @@ func (m *NodeList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, Node{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -25798,8 +31316,8 @@ func (m *NodeList) Unmarshal(data []byte) error { } return nil } -func (m *NodeProxyOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -25811,7 +31329,7 @@ func (m *NodeProxyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25839,7 +31357,7 @@ func (m *NodeProxyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25854,11 +31372,11 @@ func (m *NodeProxyOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(data[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -25877,8 +31395,8 @@ func (m *NodeProxyOptions) Unmarshal(data []byte) error { } return nil } -func (m *NodeResources) Unmarshal(data []byte) error { - l := len(data) +func (m *NodeResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -25890,7 +31408,7 @@ func (m *NodeResources) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25918,7 +31436,7 @@ func (m *NodeResources) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -25940,7 
+31458,7 @@ func (m *NodeResources) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25955,7 +31473,7 @@ func (m *NodeResources) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -25970,61 +31488,66 @@ func (m *NodeResources) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.Capacity == nil { m.Capacity = make(ResourceList) } - m.Capacity[ResourceName(mapkey)] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Capacity[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Capacity[ResourceName(mapkey)] = mapvalue + } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -26043,8 +31566,8 @@ func (m *NodeResources) Unmarshal(data []byte) error { } return nil } -func (m *NodeSelector) Unmarshal(data []byte) error { - l := len(data) +func (m *NodeSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -26056,7 +31579,7 @@ func (m *NodeSelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ 
-26084,7 +31607,7 @@ func (m *NodeSelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -26099,13 +31622,13 @@ func (m *NodeSelector) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.NodeSelectorTerms = append(m.NodeSelectorTerms, NodeSelectorTerm{}) - if err := m.NodeSelectorTerms[len(m.NodeSelectorTerms)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.NodeSelectorTerms[len(m.NodeSelectorTerms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -26124,8 +31647,8 @@ func (m *NodeSelector) Unmarshal(data []byte) error { } return nil } -func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { - l := len(data) +func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -26137,7 +31660,7 @@ func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -26165,7 +31688,7 @@ func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -26180,7 +31703,7 @@ func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(data[iNdEx:postIndex]) + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -26194,7 +31717,7 @@ func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -26209,7 +31732,7 @@ func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Operator = NodeSelectorOperator(data[iNdEx:postIndex]) + m.Operator = NodeSelectorOperator(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -26223,7 +31746,7 @@ func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -26238,11 +31761,11 @@ func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Values = append(m.Values, string(data[iNdEx:postIndex])) + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -26261,8 +31784,8 @@ func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { } return nil } -func (m *NodeSelectorTerm) Unmarshal(data []byte) error { - l := len(data) +func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -26274,7 +31797,7 @@ func (m *NodeSelectorTerm) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -26302,7 +31825,7 
@@ func (m *NodeSelectorTerm) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -26317,13 +31840,44 @@ func (m *NodeSelectorTerm) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.MatchExpressions = append(m.MatchExpressions, NodeSelectorRequirement{}) - if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchFields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchFields = append(m.MatchFields, NodeSelectorRequirement{}) + if err := m.MatchFields[len(m.MatchFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -26342,8 +31896,8 @@ func (m *NodeSelectorTerm) Unmarshal(data []byte) error { } return nil } -func (m *NodeSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *NodeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -26355,7 +31909,7 @@ func (m *NodeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -26383,7 +31937,7 @@ func (m *NodeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -26398,11 +31952,11 @@ func (m *NodeSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PodCIDR = string(data[iNdEx:postIndex]) + m.PodCIDR = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DoNotUse_ExternalID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26412,7 +31966,7 @@ func (m *NodeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -26427,7 +31981,7 @@ func (m *NodeSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalID = string(data[iNdEx:postIndex]) + m.DoNotUse_ExternalID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -26441,7 +31995,7 @@ func (m *NodeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -26456,7 +32010,7 @@ func (m *NodeSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ProviderID = 
string(data[iNdEx:postIndex]) + m.ProviderID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 0 { @@ -26470,7 +32024,7 @@ func (m *NodeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -26490,7 +32044,7 @@ func (m *NodeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -26505,13 +32059,46 @@ func (m *NodeSpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Taints = append(m.Taints, Taint{}) - if err := m.Taints[len(m.Taints)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigSource == nil { + m.ConfigSource = &NodeConfigSource{} + } + if err := m.ConfigSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -26530,8 +32117,8 @@ func (m *NodeSpec) Unmarshal(data []byte) error { } return nil } -func (m *NodeStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *NodeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -26543,7 +32130,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -26571,7 +32158,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -26593,7 +32180,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -26608,7 +32195,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -26623,57 +32210,62 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.Capacity == nil { m.Capacity = make(ResourceList) } - m.Capacity[ResourceName(mapkey)] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Capacity[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Capacity[ResourceName(mapkey)] = mapvalue + } iNdEx = postIndex case 2: if wireType != 2 { @@ -26687,7 +32279,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -26709,7 +32301,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -26724,7 +32316,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -26739,57 +32331,62 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := 
&k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.Allocatable == nil { m.Allocatable = make(ResourceList) } - m.Allocatable[ResourceName(mapkey)] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Allocatable[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Allocatable[ResourceName(mapkey)] = mapvalue + } iNdEx = postIndex case 3: if wireType != 2 { @@ -26803,7 +32400,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -26818,7 +32415,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Phase = NodePhase(data[iNdEx:postIndex]) + m.Phase = NodePhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -26832,7 +32429,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -26847,7 +32444,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Conditions = append(m.Conditions, NodeCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -26863,7 +32460,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -26878,7 +32475,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Addresses = append(m.Addresses, NodeAddress{}) - if err := m.Addresses[len(m.Addresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -26894,7 +32491,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -26908,7 +32505,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.DaemonEndpoints.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := 
m.DaemonEndpoints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -26924,7 +32521,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -26938,7 +32535,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.NodeInfo.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.NodeInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -26954,7 +32551,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -26969,7 +32566,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Images = append(m.Images, ContainerImage{}) - if err := m.Images[len(m.Images)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -26985,7 +32582,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -27000,7 +32597,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumesInUse = append(m.VolumesInUse, UniqueVolumeName(data[iNdEx:postIndex])) + m.VolumesInUse = append(m.VolumesInUse, UniqueVolumeName(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 10: if wireType != 2 { @@ -27014,7 +32611,7 @@ func (m *NodeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -27029,13 +32626,46 @@ func (m *NodeStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.VolumesAttached = append(m.VolumesAttached, AttachedVolume{}) - if err := m.VolumesAttached[len(m.VolumesAttached)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.VolumesAttached[len(m.VolumesAttached)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &NodeConfigStatus{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -27054,8 +32684,8 @@ func (m *NodeStatus) Unmarshal(data []byte) error { } return nil } -func (m *NodeSystemInfo) Unmarshal(data []byte) error { - l := len(data) +func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -27067,7 +32697,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if iNdEx 
>= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -27095,7 +32725,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -27110,7 +32740,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MachineID = string(data[iNdEx:postIndex]) + m.MachineID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -27124,7 +32754,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -27139,7 +32769,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SystemUUID = string(data[iNdEx:postIndex]) + m.SystemUUID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -27153,7 +32783,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -27168,7 +32798,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.BootID = string(data[iNdEx:postIndex]) + m.BootID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -27182,7 +32812,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -27197,7 +32827,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.KernelVersion = string(data[iNdEx:postIndex]) + m.KernelVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -27211,7 +32841,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -27226,7 +32856,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.OSImage = string(data[iNdEx:postIndex]) + m.OSImage = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { @@ -27240,7 +32870,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -27255,7 +32885,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerRuntimeVersion = string(data[iNdEx:postIndex]) + m.ContainerRuntimeVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 2 { @@ -27269,7 +32899,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -27284,7 +32914,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.KubeletVersion = string(data[iNdEx:postIndex]) + m.KubeletVersion = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 8: if wireType != 2 { @@ -27298,7 +32928,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -27313,7 +32943,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.KubeProxyVersion = string(data[iNdEx:postIndex]) + m.KubeProxyVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 9: if wireType != 2 { @@ -27327,7 +32957,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -27342,7 +32972,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.OperatingSystem = string(data[iNdEx:postIndex]) + m.OperatingSystem = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 10: if wireType != 2 { @@ -27356,7 +32986,7 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -27371,11 +33001,11 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Architecture = string(data[iNdEx:postIndex]) + m.Architecture = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -27394,8 +33024,8 @@ func (m *NodeSystemInfo) Unmarshal(data []byte) error { } return nil } -func (m *ObjectFieldSelector) Unmarshal(data []byte) error { - l := len(data) +func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -27407,7 +33037,7 @@ func (m *ObjectFieldSelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -27435,7 +33065,7 @@ func (m *ObjectFieldSelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -27450,7 +33080,7 @@ func (m *ObjectFieldSelector) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(data[iNdEx:postIndex]) + m.APIVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -27464,7 +33094,7 @@ func (m *ObjectFieldSelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -27479,11 +33109,11 @@ func (m *ObjectFieldSelector) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FieldPath = string(data[iNdEx:postIndex]) + m.FieldPath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -27502,8 +33132,8 @@ func (m *ObjectFieldSelector) Unmarshal(data []byte) error { } return nil } -func (m *ObjectMeta) Unmarshal(data []byte) error { - l := len(data) +func (m *ObjectReference) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -27515,644 +33145,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GenerateName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SelfLink = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) - } - m.Generation = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Generation |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CreationTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeletionTimestamp == nil { - m.DeletionTimestamp = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - } - if err := m.DeletionTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DeletionGracePeriodSeconds = &v - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } 
- postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.Labels == nil { - m.Labels = make(map[string]string) - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - 
iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OwnerReferences = append(m.OwnerReferences, k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{}) - if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Finalizers = append(m.Finalizers, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClusterName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectReference) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] + b := 
dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -28180,7 +33173,7 @@ func (m *ObjectReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -28195,7 +33188,7 @@ func (m *ObjectReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(data[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -28209,7 +33202,7 @@ func (m *ObjectReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -28224,7 +33217,7 @@ func (m *ObjectReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(data[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -28238,7 +33231,7 @@ func (m *ObjectReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -28253,7 +33246,7 @@ func (m *ObjectReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -28267,7 +33260,7 @@ func (m *ObjectReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -28282,7 +33275,7 @@ func (m *ObjectReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.UID = k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -28296,7 +33289,7 @@ func (m *ObjectReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -28311,7 +33304,7 @@ func (m *ObjectReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(data[iNdEx:postIndex]) + m.APIVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { @@ -28325,7 +33318,7 @@ func (m *ObjectReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -28340,7 +33333,7 @@ func (m *ObjectReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(data[iNdEx:postIndex]) + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 2 { @@ -28354,7 +33347,7 @@ func (m *ObjectReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -28369,11 +33362,11 @@ func (m *ObjectReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FieldPath = string(data[iNdEx:postIndex]) + m.FieldPath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = 
preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -28392,8 +33385,8 @@ func (m *ObjectReference) Unmarshal(data []byte) error { } return nil } -func (m *PersistentVolume) Unmarshal(data []byte) error { - l := len(data) +func (m *PersistentVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -28405,7 +33398,7 @@ func (m *PersistentVolume) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -28433,7 +33426,7 @@ func (m *PersistentVolume) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -28447,7 +33440,7 @@ func (m *PersistentVolume) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -28463,7 +33456,7 @@ func (m *PersistentVolume) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -28477,7 +33470,7 @@ func (m *PersistentVolume) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -28493,7 +33486,7 @@ func (m *PersistentVolume) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -28507,13 +33500,13 @@ func (m *PersistentVolume) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -28532,8 +33525,8 @@ func (m *PersistentVolume) Unmarshal(data []byte) error { } return nil } -func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { - l := len(data) +func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -28545,7 +33538,7 @@ func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -28573,7 +33566,7 @@ func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -28587,7 +33580,7 @@ func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -28603,7 +33596,7 @@ func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - 
b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -28617,7 +33610,7 @@ func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -28633,7 +33626,7 @@ func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -28647,13 +33640,13 @@ func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -28672,8 +33665,8 @@ func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { } return nil } -func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { - l := len(data) +func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -28685,7 +33678,233 @@ func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PersistentVolumeClaimConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -28713,7 +33932,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -28727,7 +33946,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -28743,7 +33962,7 @@ 
func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -28758,13 +33977,13 @@ func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, PersistentVolumeClaim{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -28783,8 +34002,8 @@ func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { } return nil } -func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -28796,7 +34015,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -28824,7 +34043,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -28839,7 +34058,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { @@ -28853,7 +34072,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -28867,7 +34086,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -28883,7 +34102,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -28898,7 +34117,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeName = string(data[iNdEx:postIndex]) + m.VolumeName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -28912,7 +34131,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -28929,7 +34148,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { if m.Selector == nil { m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -28945,7 +34164,7 @@ func (m 
*PersistentVolumeClaimSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -28960,12 +34179,42 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.StorageClassName = &s iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PersistentVolumeMode(dAtA[iNdEx:postIndex]) + m.VolumeMode = &s + iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -28984,8 +34233,8 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { } return nil } -func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -28997,7 +34246,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -29025,7 +34274,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -29040,7 +34289,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Phase = PersistentVolumeClaimPhase(data[iNdEx:postIndex]) + m.Phase = PersistentVolumeClaimPhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -29054,7 +34303,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -29069,7 +34318,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { @@ -29083,7 +34332,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29105,7 +34354,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -29120,7 +34369,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { if 
iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -29135,61 +34384,97 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.Capacity == nil { m.Capacity = make(ResourceList) } - m.Capacity[ResourceName(mapkey)] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Capacity[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Capacity[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PersistentVolumeClaimCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -29208,8 +34493,8 @@ func (m *PersistentVolumeClaimStatus) 
Unmarshal(data []byte) error { } return nil } -func (m *PersistentVolumeClaimVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -29221,7 +34506,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -29249,7 +34534,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -29264,7 +34549,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ClaimName = string(data[iNdEx:postIndex]) + m.ClaimName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { @@ -29278,7 +34563,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29288,7 +34573,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(data []byte) error { m.ReadOnly = bool(v != 0) default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -29307,8 +34592,8 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *PersistentVolumeList) Unmarshal(data []byte) error { - l := len(data) +func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -29320,7 +34605,7 @@ func (m *PersistentVolumeList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -29348,7 +34633,7 @@ func (m *PersistentVolumeList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29362,7 +34647,7 @@ func (m *PersistentVolumeList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29378,7 +34663,7 @@ func (m *PersistentVolumeList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29393,13 +34678,13 @@ func (m *PersistentVolumeList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, PersistentVolume{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -29418,8 +34703,8 @@ func (m *PersistentVolumeList) Unmarshal(data []byte) error { } return nil } -func (m *PersistentVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m 
*PersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -29431,7 +34716,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -29459,7 +34744,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29476,7 +34761,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if m.GCEPersistentDisk == nil { m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} } - if err := m.GCEPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.GCEPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29492,7 +34777,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29509,7 +34794,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if m.AWSElasticBlockStore == nil { m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} } - if err := m.AWSElasticBlockStore.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.AWSElasticBlockStore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29525,7 +34810,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29542,7 +34827,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if m.HostPath == nil { m.HostPath = &HostPathVolumeSource{} } - if err := m.HostPath.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.HostPath.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29558,7 +34843,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29575,7 +34860,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if m.Glusterfs == nil { m.Glusterfs = &GlusterfsVolumeSource{} } - if err := m.Glusterfs.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29591,7 +34876,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29608,7 +34893,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if m.NFS == nil { m.NFS = &NFSVolumeSource{} } - if err := m.NFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.NFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29624,7 +34909,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29639,9 +34924,9 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.RBD == nil { - m.RBD = 
&RBDVolumeSource{} + m.RBD = &RBDPersistentVolumeSource{} } - if err := m.RBD.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RBD.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29657,7 +34942,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29672,9 +34957,9 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.ISCSI == nil { - m.ISCSI = &ISCSIVolumeSource{} + m.ISCSI = &ISCSIPersistentVolumeSource{} } - if err := m.ISCSI.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ISCSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29690,7 +34975,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29705,9 +34990,9 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.Cinder == nil { - m.Cinder = &CinderVolumeSource{} + m.Cinder = &CinderPersistentVolumeSource{} } - if err := m.Cinder.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Cinder.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29723,7 +35008,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29738,9 +35023,9 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.CephFS == nil { - m.CephFS = &CephFSVolumeSource{} + m.CephFS = &CephFSPersistentVolumeSource{} } - if err := m.CephFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.CephFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29756,7 +35041,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29773,7 +35058,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if m.FC == nil { m.FC = &FCVolumeSource{} } - if err := m.FC.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.FC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29789,7 +35074,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29806,7 +35091,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if m.Flocker == nil { m.Flocker = &FlockerVolumeSource{} } - if err := m.Flocker.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Flocker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29822,7 +35107,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29837,9 +35122,9 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.FlexVolume == nil { - m.FlexVolume = &FlexVolumeSource{} + m.FlexVolume = 
&FlexPersistentVolumeSource{} } - if err := m.FlexVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.FlexVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29855,7 +35140,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29870,9 +35155,9 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.AzureFile == nil { - m.AzureFile = &AzureFileVolumeSource{} + m.AzureFile = &AzureFilePersistentVolumeSource{} } - if err := m.AzureFile.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.AzureFile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29888,7 +35173,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29905,7 +35190,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if m.VsphereVolume == nil { m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} } - if err := m.VsphereVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.VsphereVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29921,7 +35206,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29938,7 +35223,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if m.Quobyte == nil { m.Quobyte = &QuobyteVolumeSource{} } - if err := m.Quobyte.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Quobyte.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29954,7 +35239,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -29971,7 +35256,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if m.AzureDisk == nil { m.AzureDisk = &AzureDiskVolumeSource{} } - if err := m.AzureDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29987,7 +35272,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -30004,7 +35289,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if m.PhotonPersistentDisk == nil { m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{} } - if err := m.PhotonPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PhotonPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -30020,7 +35305,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -30037,7 +35322,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if m.PortworxVolume == nil { m.PortworxVolume = &PortworxVolumeSource{} } - if err := 
m.PortworxVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PortworxVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -30053,7 +35338,7 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -30068,15 +35353,114 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.ScaleIO == nil { - m.ScaleIO = &ScaleIOVolumeSource{} + m.ScaleIO = &ScaleIOPersistentVolumeSource{} } - if err := m.ScaleIO.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ScaleIO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Local", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Local == nil { + m.Local = &LocalVolumeSource{} + } + if err := m.Local.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageOS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StorageOS == nil { + m.StorageOS = &StorageOSPersistentVolumeSource{} + } + if err := m.StorageOS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CSI == nil { + m.CSI = &CSIPersistentVolumeSource{} + } + if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -30095,8 +35479,8 @@ func (m *PersistentVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -30108,7 +35492,7 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30136,7 
+35520,7 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -30158,7 +35542,7 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30173,7 +35557,7 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30188,57 +35572,62 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.Capacity == nil { m.Capacity = make(ResourceList) } - m.Capacity[ResourceName(mapkey)] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Capacity[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Capacity[ResourceName(mapkey)] = mapvalue + } iNdEx = postIndex case 2: if wireType != 2 { @@ -30252,7 +35641,7 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -30266,7 +35655,7 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.PersistentVolumeSource.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PersistentVolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -30282,7 +35671,7 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30297,7 +35686,7 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 4: if wireType != 2 { @@ -30311,7 +35700,7 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -30328,7 +35717,7 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if m.ClaimRef == nil { m.ClaimRef = &ObjectReference{} } - if err := m.ClaimRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ClaimRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -30344,7 +35733,7 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30359,7 +35748,7 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(data[iNdEx:postIndex]) + m.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { @@ -30373,7 +35762,7 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30388,11 +35777,103 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.StorageClassName = string(data[iNdEx:postIndex]) + m.StorageClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated 
+ } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PersistentVolumeMode(dAtA[iNdEx:postIndex]) + m.VolumeMode = &s + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeAffinity == nil { + m.NodeAffinity = &VolumeNodeAffinity{} + } + if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -30411,8 +35892,8 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { } return nil } -func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -30424,7 +35905,7 @@ func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30452,7 +35933,7 @@ func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30467,7 +35948,7 @@ func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Phase = PersistentVolumePhase(data[iNdEx:postIndex]) + m.Phase = PersistentVolumePhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -30481,7 +35962,7 @@ func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30496,7 +35977,7 @@ func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -30510,7 +35991,7 @@ func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30525,11 +36006,11 @@ func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -30548,8 +36029,8 @@ func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { } return nil } -func (m *PhotonPersistentDiskVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := 
iNdEx @@ -30561,7 +36042,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30589,7 +36070,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30604,7 +36085,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PdID = string(data[iNdEx:postIndex]) + m.PdID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -30618,7 +36099,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30633,11 +36114,11 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(data[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -30656,8 +36137,8 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *Pod) Unmarshal(data []byte) error { - l := len(data) +func (m *Pod) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -30669,7 +36150,7 @@ func (m *Pod) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30697,7 +36178,7 @@ func (m *Pod) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -30711,7 +36192,7 @@ func (m *Pod) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -30727,7 +36208,7 @@ func (m *Pod) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -30741,7 +36222,7 @@ func (m *Pod) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -30757,7 +36238,7 @@ func (m *Pod) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -30771,13 +36252,13 @@ func (m *Pod) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -30796,8 +36277,8 @@ 
func (m *Pod) Unmarshal(data []byte) error { } return nil } -func (m *PodAffinity) Unmarshal(data []byte) error { - l := len(data) +func (m *PodAffinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -30809,7 +36290,7 @@ func (m *PodAffinity) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30837,7 +36318,7 @@ func (m *PodAffinity) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -30852,7 +36333,7 @@ func (m *PodAffinity) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) - if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -30868,7 +36349,7 @@ func (m *PodAffinity) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -30883,13 +36364,13 @@ func (m *PodAffinity) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) - if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -30908,8 +36389,8 @@ func (m *PodAffinity) Unmarshal(data []byte) error { } return nil } -func (m *PodAffinityTerm) Unmarshal(data []byte) error { - l := len(data) +func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -30921,7 +36402,7 @@ func (m *PodAffinityTerm) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -30949,7 +36430,7 @@ func (m *PodAffinityTerm) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -30966,7 +36447,7 @@ func (m *PodAffinityTerm) Unmarshal(data []byte) error { if m.LabelSelector == nil { m.LabelSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } - if err := m.LabelSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -30982,7 +36463,7 @@ func (m *PodAffinityTerm) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift 
if b < 0x80 { @@ -30997,7 +36478,7 @@ func (m *PodAffinityTerm) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespaces = append(m.Namespaces, string(data[iNdEx:postIndex])) + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { @@ -31011,7 +36492,7 @@ func (m *PodAffinityTerm) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -31026,11 +36507,11 @@ func (m *PodAffinityTerm) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TopologyKey = string(data[iNdEx:postIndex]) + m.TopologyKey = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -31049,8 +36530,8 @@ func (m *PodAffinityTerm) Unmarshal(data []byte) error { } return nil } -func (m *PodAntiAffinity) Unmarshal(data []byte) error { - l := len(data) +func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -31062,7 +36543,7 @@ func (m *PodAntiAffinity) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -31090,7 +36571,7 @@ func (m *PodAntiAffinity) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31105,7 +36586,7 @@ func (m *PodAntiAffinity) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) - if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -31121,7 +36602,7 @@ func (m *PodAntiAffinity) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31136,13 +36617,13 @@ func (m *PodAntiAffinity) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) - if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -31161,8 +36642,8 @@ func (m *PodAntiAffinity) Unmarshal(data []byte) error { } return nil } -func (m *PodAttachOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ 
-31174,7 +36655,7 @@ func (m *PodAttachOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -31202,7 +36683,7 @@ func (m *PodAttachOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31222,7 +36703,7 @@ func (m *PodAttachOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31242,7 +36723,7 @@ func (m *PodAttachOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31262,7 +36743,7 @@ func (m *PodAttachOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31282,7 +36763,7 @@ func (m *PodAttachOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -31297,11 +36778,11 @@ func (m *PodAttachOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Container = string(data[iNdEx:postIndex]) + m.Container = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -31320,8 +36801,8 @@ func (m *PodAttachOptions) Unmarshal(data []byte) error { } return nil } -func (m *PodCondition) Unmarshal(data []byte) error { - l := len(data) +func (m *PodCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -31333,7 +36814,7 @@ func (m *PodCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -31361,7 +36842,7 @@ func (m *PodCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -31376,7 +36857,7 @@ func (m *PodCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = PodConditionType(data[iNdEx:postIndex]) + m.Type = PodConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -31390,7 +36871,7 @@ func (m *PodCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -31405,7 +36886,7 @@ func (m *PodCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = ConditionStatus(data[iNdEx:postIndex]) + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -31419,7 +36900,7 @@ func (m *PodCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31433,7 +36914,7 @@ func (m *PodCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if 
err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -31449,7 +36930,7 @@ func (m *PodCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31463,7 +36944,7 @@ func (m *PodCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -31479,7 +36960,7 @@ func (m *PodCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -31494,7 +36975,7 @@ func (m *PodCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { @@ -31508,7 +36989,7 @@ func (m *PodCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -31523,11 +37004,11 @@ func (m *PodCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -31546,8 +37027,8 @@ func (m *PodCondition) Unmarshal(data []byte) error { } return nil } -func (m *PodExecOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -31559,7 +37040,255 @@ func (m *PodExecOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDNSConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDNSConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nameservers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nameservers = append(m.Nameservers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Searches", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Searches = append(m.Searches, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, PodDNSConfigOption{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDNSConfigOption: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDNSConfigOption: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } 
+ if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodExecOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -31587,7 +37316,7 @@ func (m *PodExecOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31607,7 +37336,7 @@ func (m *PodExecOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31627,7 +37356,7 @@ func (m *PodExecOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31647,7 +37376,7 @@ func (m *PodExecOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31667,7 +37396,7 @@ func (m *PodExecOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -31682,7 +37411,7 @@ func (m *PodExecOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Container = string(data[iNdEx:postIndex]) + m.Container = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { @@ -31696,7 +37425,7 @@ func (m *PodExecOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -31711,11 +37440,11 @@ func (m *PodExecOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Command = append(m.Command, string(data[iNdEx:postIndex])) + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -31734,8 +37463,8 @@ func (m *PodExecOptions) Unmarshal(data []byte) error { } return nil } -func (m *PodList) Unmarshal(data []byte) error { - l := len(data) +func (m *PodList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -31747,7 +37476,7 @@ func (m *PodList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -31775,7 +37504,7 @@ func (m *PodList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31789,7 +37518,7 @@ func (m *PodList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -31805,7 +37534,7 @@ func (m *PodList) 
Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31820,13 +37549,13 @@ func (m *PodList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, Pod{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -31845,8 +37574,8 @@ func (m *PodList) Unmarshal(data []byte) error { } return nil } -func (m *PodLogOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *PodLogOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -31858,7 +37587,7 @@ func (m *PodLogOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -31886,7 +37615,7 @@ func (m *PodLogOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -31901,7 +37630,7 @@ func (m *PodLogOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Container = string(data[iNdEx:postIndex]) + m.Container = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { @@ -31915,7 +37644,7 @@ func (m *PodLogOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31935,7 +37664,7 @@ func (m *PodLogOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31955,7 +37684,7 @@ func (m *PodLogOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -31975,7 +37704,7 @@ func (m *PodLogOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -31992,7 +37721,7 @@ func (m *PodLogOptions) Unmarshal(data []byte) error { if m.SinceTime == nil { m.SinceTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } - if err := m.SinceTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -32008,7 +37737,7 @@ func (m *PodLogOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -32028,7 +37757,7 @@ func (m *PodLogOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -32048,7 +37777,7 @@ func (m *PodLogOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -32058,7 +37787,7 @@ func (m *PodLogOptions) Unmarshal(data []byte) 
error { m.LimitBytes = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -32077,8 +37806,8 @@ func (m *PodLogOptions) Unmarshal(data []byte) error { } return nil } -func (m *PodPortForwardOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -32090,7 +37819,7 @@ func (m *PodPortForwardOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -32107,28 +37836,70 @@ func (m *PodPortForwardOptions) Unmarshal(data []byte) error { } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + m.Ports = append(m.Ports, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex > l { return io.ErrUnexpectedEOF } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - m.Ports = append(m.Ports, v) default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -32147,8 +37918,8 @@ func (m *PodPortForwardOptions) Unmarshal(data []byte) error { } return nil } -func (m *PodProxyOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -32160,7 +37931,7 @@ func (m *PodProxyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -32188,7 +37959,7 @@ func (m *PodProxyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -32203,11 +37974,11 @@ func (m *PodProxyOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(data[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != 
nil { return err } @@ -32226,8 +37997,8 @@ func (m *PodProxyOptions) Unmarshal(data []byte) error { } return nil } -func (m *PodSecurityContext) Unmarshal(data []byte) error { - l := len(data) +func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -32239,7 +38010,86 @@ func (m *PodSecurityContext) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodReadinessGate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodReadinessGate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConditionType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConditionType = PodConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -32267,7 +38117,7 @@ func (m *PodSecurityContext) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -32284,7 +38134,7 @@ func (m *PodSecurityContext) Unmarshal(data []byte) error { if m.SELinuxOptions == nil { m.SELinuxOptions = &SELinuxOptions{} } - if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -32300,7 +38150,7 @@ func (m *PodSecurityContext) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -32320,7 +38170,7 @@ func (m *PodSecurityContext) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -32330,25 +38180,67 @@ func (m *PodSecurityContext) Unmarshal(data []byte) error { b := bool(v != 0) m.RunAsNonRoot = &b case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + m.SupplementalGroups = append(m.SupplementalGroups, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex > l { return io.ErrUnexpectedEOF } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) } - m.SupplementalGroups = append(m.SupplementalGroups, v) case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) @@ -32361,7 +38253,7 @@ func (m *PodSecurityContext) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -32369,9 +38261,60 @@ func (m *PodSecurityContext) Unmarshal(data []byte) error { } } m.FSGroup = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsGroup = &v + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sysctls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sysctls = append(m.Sysctls, Sysctl{}) + if err := m.Sysctls[len(m.Sysctls)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -32390,8 +38333,8 @@ func (m *PodSecurityContext) Unmarshal(data []byte) error { } return nil } -func (m *PodSignature) Unmarshal(data []byte) error { - l := len(data) +func (m *PodSignature) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -32403,7 +38346,7 @@ func (m *PodSignature) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ 
-32431,7 +38374,7 @@ func (m *PodSignature) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -32448,13 +38391,13 @@ func (m *PodSignature) Unmarshal(data []byte) error { if m.PodController == nil { m.PodController = &k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{} } - if err := m.PodController.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PodController.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -32473,8 +38416,8 @@ func (m *PodSignature) Unmarshal(data []byte) error { } return nil } -func (m *PodSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *PodSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -32486,7 +38429,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -32514,7 +38457,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -32529,7 +38472,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Volumes = append(m.Volumes, Volume{}) - if err := m.Volumes[len(m.Volumes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -32545,7 +38488,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -32560,7 +38503,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Containers = append(m.Containers, Container{}) - if err := m.Containers[len(m.Containers)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -32576,7 +38519,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -32591,7 +38534,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RestartPolicy = RestartPolicy(data[iNdEx:postIndex]) + m.RestartPolicy = RestartPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 0 { @@ -32605,7 +38548,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -32625,7 +38568,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -32645,7 +38588,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -32660,7 +38603,7 @@ func (m *PodSpec) Unmarshal(data []byte) 
error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DNSPolicy = DNSPolicy(data[iNdEx:postIndex]) + m.DNSPolicy = DNSPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 2 { @@ -32674,7 +38617,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -32696,7 +38639,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -32711,7 +38654,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -32726,52 +38669,57 @@ func (m *PodSpec) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue if m.NodeSelector == nil { m.NodeSelector = make(map[string]string) } - m.NodeSelector[mapkey] = mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.NodeSelector[mapkey] = mapvalue + } else { + var mapvalue string + m.NodeSelector[mapkey] = mapvalue + } iNdEx = postIndex case 8: if wireType != 2 { @@ -32785,7 +38733,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -32800,7 +38748,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - 
m.ServiceAccountName = string(data[iNdEx:postIndex]) + m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 9: if wireType != 2 { @@ -32814,7 +38762,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -32829,7 +38777,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DeprecatedServiceAccount = string(data[iNdEx:postIndex]) + m.DeprecatedServiceAccount = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 10: if wireType != 2 { @@ -32843,7 +38791,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -32858,7 +38806,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.NodeName = string(data[iNdEx:postIndex]) + m.NodeName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 11: if wireType != 0 { @@ -32872,7 +38820,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -32892,7 +38840,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -32912,7 +38860,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -32932,7 +38880,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -32949,7 +38897,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if m.SecurityContext == nil { m.SecurityContext = &PodSecurityContext{} } - if err := m.SecurityContext.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -32965,7 +38913,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -32980,7 +38928,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) - if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -32996,7 +38944,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -33011,7 +38959,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(data[iNdEx:postIndex]) + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 17: if wireType != 2 { @@ -33025,7 +38973,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := 
dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -33040,7 +38988,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Subdomain = string(data[iNdEx:postIndex]) + m.Subdomain = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 18: if wireType != 2 { @@ -33054,7 +39002,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -33071,7 +39019,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if m.Affinity == nil { m.Affinity = &Affinity{} } - if err := m.Affinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -33087,7 +39035,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -33102,7 +39050,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SchedulerName = string(data[iNdEx:postIndex]) + m.SchedulerName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 20: if wireType != 2 { @@ -33116,7 +39064,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -33131,7 +39079,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.InitContainers = append(m.InitContainers, Container{}) - if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -33147,7 +39095,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -33168,7 +39116,7 @@ func (m *PodSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -33183,13 +39131,178 @@ func (m *PodSpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Tolerations = append(m.Tolerations, Toleration{}) - if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostAliases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostAliases = append(m.HostAliases, HostAlias{}) + if err := m.HostAliases[len(m.HostAliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PriorityClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 25: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Priority = &v + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DNSConfig == nil { + m.DNSConfig = &PodDNSConfig{} + } + if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareProcessNamespace", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ShareProcessNamespace = &b + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessGates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReadinessGates = append(m.ReadinessGates, PodReadinessGate{}) + if err := m.ReadinessGates[len(m.ReadinessGates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -33208,8 +39321,8 @@ func (m *PodSpec) Unmarshal(data []byte) error { } return nil } -func (m *PodStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *PodStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -33221,7 +39334,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -33249,7 +39362,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := 
dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -33264,7 +39377,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Phase = PodPhase(data[iNdEx:postIndex]) + m.Phase = PodPhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -33278,7 +39391,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -33293,7 +39406,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Conditions = append(m.Conditions, PodCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -33309,7 +39422,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -33324,7 +39437,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -33338,7 +39451,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -33353,7 +39466,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -33367,7 +39480,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -33382,7 +39495,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.HostIP = string(data[iNdEx:postIndex]) + m.HostIP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { @@ -33396,7 +39509,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -33411,7 +39524,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PodIP = string(data[iNdEx:postIndex]) + m.PodIP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 2 { @@ -33425,7 +39538,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -33442,7 +39555,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if m.StartTime == nil { m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } - if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -33458,7 +39571,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 
0x7F) << shift if b < 0x80 { @@ -33473,7 +39586,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.ContainerStatuses = append(m.ContainerStatuses, ContainerStatus{}) - if err := m.ContainerStatuses[len(m.ContainerStatuses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ContainerStatuses[len(m.ContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -33489,7 +39602,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -33504,7 +39617,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.QOSClass = PodQOSClass(data[iNdEx:postIndex]) + m.QOSClass = PodQOSClass(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 10: if wireType != 2 { @@ -33518,7 +39631,7 @@ func (m *PodStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -33533,13 +39646,42 @@ func (m *PodStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.InitContainerStatuses = append(m.InitContainerStatuses, ContainerStatus{}) - if err := m.InitContainerStatuses[len(m.InitContainerStatuses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.InitContainerStatuses[len(m.InitContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NominatedNodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NominatedNodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -33558,8 +39700,8 @@ func (m *PodStatus) Unmarshal(data []byte) error { } return nil } -func (m *PodStatusResult) Unmarshal(data []byte) error { - l := len(data) +func (m *PodStatusResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -33571,7 +39713,7 @@ func (m *PodStatusResult) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -33599,7 +39741,7 @@ func (m *PodStatusResult) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -33613,7 +39755,7 @@ func (m *PodStatusResult) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -33629,7 +39771,7 @@ func (m *PodStatusResult) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF 
} - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -33643,13 +39785,13 @@ func (m *PodStatusResult) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -33668,8 +39810,8 @@ func (m *PodStatusResult) Unmarshal(data []byte) error { } return nil } -func (m *PodTemplate) Unmarshal(data []byte) error { - l := len(data) +func (m *PodTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -33681,7 +39823,7 @@ func (m *PodTemplate) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -33709,7 +39851,7 @@ func (m *PodTemplate) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -33723,7 +39865,7 @@ func (m *PodTemplate) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -33739,7 +39881,7 @@ func (m *PodTemplate) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -33753,13 +39895,13 @@ func (m *PodTemplate) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -33778,8 +39920,8 @@ func (m *PodTemplate) Unmarshal(data []byte) error { } return nil } -func (m *PodTemplateList) Unmarshal(data []byte) error { - l := len(data) +func (m *PodTemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -33791,7 +39933,7 @@ func (m *PodTemplateList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -33819,7 +39961,7 @@ func (m *PodTemplateList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -33833,7 +39975,7 @@ func (m *PodTemplateList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -33849,7 +39991,7 @@ func (m *PodTemplateList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -33864,13 +40006,13 @@ func (m *PodTemplateList) Unmarshal(data []byte) error { 
return io.ErrUnexpectedEOF } m.Items = append(m.Items, PodTemplate{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -33889,8 +40031,8 @@ func (m *PodTemplateList) Unmarshal(data []byte) error { } return nil } -func (m *PodTemplateSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -33902,7 +40044,7 @@ func (m *PodTemplateSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -33930,7 +40072,7 @@ func (m *PodTemplateSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -33944,7 +40086,7 @@ func (m *PodTemplateSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -33960,7 +40102,7 @@ func (m *PodTemplateSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -33974,13 +40116,13 @@ func (m *PodTemplateSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -33999,8 +40141,8 @@ func (m *PodTemplateSpec) Unmarshal(data []byte) error { } return nil } -func (m *PortworxVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -34012,7 +40154,7 @@ func (m *PortworxVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34040,7 +40182,7 @@ func (m *PortworxVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34055,7 +40197,7 @@ func (m *PortworxVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(data[iNdEx:postIndex]) + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -34069,7 +40211,7 @@ func (m *PortworxVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34084,7 +40226,7 @@ func (m *PortworxVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(data[iNdEx:postIndex]) + 
m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -34098,7 +40240,7 @@ func (m *PortworxVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -34108,7 +40250,7 @@ func (m *PortworxVolumeSource) Unmarshal(data []byte) error { m.ReadOnly = bool(v != 0) default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -34127,8 +40269,8 @@ func (m *PortworxVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *Preconditions) Unmarshal(data []byte) error { - l := len(data) +func (m *Preconditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -34140,7 +40282,7 @@ func (m *Preconditions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34168,7 +40310,7 @@ func (m *Preconditions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34183,12 +40325,12 @@ func (m *Preconditions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) m.UID = &s iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -34207,8 +40349,8 @@ func (m *Preconditions) Unmarshal(data []byte) error { } return nil } -func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { - l := len(data) +func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -34220,7 +40362,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34248,7 +40390,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -34262,7 +40404,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.PodSignature.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PodSignature.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -34278,7 +40420,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -34292,7 +40434,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.EvictionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.EvictionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -34308,7 +40450,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift 
if b < 0x80 { @@ -34323,7 +40465,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -34337,7 +40479,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34352,11 +40494,11 @@ func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -34375,8 +40517,8 @@ func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { } return nil } -func (m *PreferredSchedulingTerm) Unmarshal(data []byte) error { - l := len(data) +func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -34388,7 +40530,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34416,7 +40558,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Weight |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -34435,7 +40577,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -34449,13 +40591,13 @@ func (m *PreferredSchedulingTerm) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Preference.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Preference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -34474,8 +40616,8 @@ func (m *PreferredSchedulingTerm) Unmarshal(data []byte) error { } return nil } -func (m *Probe) Unmarshal(data []byte) error { - l := len(data) +func (m *Probe) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -34487,7 +40629,7 @@ func (m *Probe) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34515,7 +40657,7 @@ func (m *Probe) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -34529,7 +40671,7 @@ func (m *Probe) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Handler.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Handler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -34545,7 +40687,7 @@ func (m *Probe) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.InitialDelaySeconds |= (int32(b) & 
0x7F) << shift if b < 0x80 { @@ -34564,7 +40706,7 @@ func (m *Probe) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.TimeoutSeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -34583,7 +40725,7 @@ func (m *Probe) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.PeriodSeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -34602,7 +40744,7 @@ func (m *Probe) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.SuccessThreshold |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -34621,7 +40763,7 @@ func (m *Probe) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.FailureThreshold |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -34630,7 +40772,7 @@ func (m *Probe) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -34649,8 +40791,8 @@ func (m *Probe) Unmarshal(data []byte) error { } return nil } -func (m *ProjectedVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -34662,7 +40804,7 @@ func (m *ProjectedVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34690,7 +40832,7 @@ func (m *ProjectedVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -34705,7 +40847,7 @@ func (m *ProjectedVolumeSource) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, VolumeProjection{}) - if err := m.Sources[len(m.Sources)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -34721,7 +40863,7 @@ func (m *ProjectedVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -34731,7 +40873,7 @@ func (m *ProjectedVolumeSource) Unmarshal(data []byte) error { m.DefaultMode = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -34750,8 +40892,8 @@ func (m *ProjectedVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *QuobyteVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -34763,7 +40905,7 @@ func (m *QuobyteVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34791,7 +40933,7 @@ func (m *QuobyteVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34806,7 +40948,7 @@ func (m *QuobyteVolumeSource) 
Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Registry = string(data[iNdEx:postIndex]) + m.Registry = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -34820,7 +40962,7 @@ func (m *QuobyteVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34835,7 +40977,7 @@ func (m *QuobyteVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Volume = string(data[iNdEx:postIndex]) + m.Volume = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -34849,7 +40991,7 @@ func (m *QuobyteVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -34869,7 +41011,7 @@ func (m *QuobyteVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34884,7 +41026,7 @@ func (m *QuobyteVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.User = string(data[iNdEx:postIndex]) + m.User = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -34898,7 +41040,7 @@ func (m *QuobyteVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34913,11 +41055,11 @@ func (m *QuobyteVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Group = string(data[iNdEx:postIndex]) + m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -34936,8 +41078,8 @@ func (m *QuobyteVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *RBDVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -34949,7 +41091,284 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RBDPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RBDPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephMonitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CephMonitors = append(m.CephMonitors, 
string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDPool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDPool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RadosUser", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RadosUser = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyring", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyring = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34977,7 +41396,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -34992,7 +41411,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.CephMonitors = append(m.CephMonitors, string(data[iNdEx:postIndex])) + m.CephMonitors = append(m.CephMonitors, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { @@ -35006,7 +41425,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35021,7 +41440,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RBDImage = string(data[iNdEx:postIndex]) + m.RBDImage = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -35035,7 +41454,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35050,7 +41469,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(data[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -35064,7 +41483,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35079,7 +41498,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RBDPool = string(data[iNdEx:postIndex]) + m.RBDPool = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -35093,7 +41512,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if 
iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35108,7 +41527,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RadosUser = string(data[iNdEx:postIndex]) + m.RadosUser = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { @@ -35122,7 +41541,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35137,7 +41556,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyring = string(data[iNdEx:postIndex]) + m.Keyring = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 2 { @@ -35151,7 +41570,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -35168,7 +41587,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if m.SecretRef == nil { m.SecretRef = &LocalObjectReference{} } - if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -35184,7 +41603,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -35194,7 +41613,7 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { m.ReadOnly = bool(v != 0) default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -35213,8 +41632,8 @@ func (m *RBDVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *RangeAllocation) Unmarshal(data []byte) error { - l := len(data) +func (m *RangeAllocation) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -35226,7 +41645,7 @@ func (m *RangeAllocation) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35254,7 +41673,7 @@ func (m *RangeAllocation) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -35268,7 +41687,7 @@ func (m *RangeAllocation) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -35284,7 +41703,7 @@ func (m *RangeAllocation) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35299,7 +41718,7 @@ func (m *RangeAllocation) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Range = string(data[iNdEx:postIndex]) + m.Range = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -35313,7 +41732,7 @@ func (m *RangeAllocation) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } 
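(Editor's note on this regenerated block: the only systematic changes in these hunks are the rename of the byte-slice parameter from `data` to `dAtA` — presumably gogo/protobuf's way of avoiding clashes with generated fields such as `RangeAllocation.Data` above — the guarded map-entry decoding that falls back to the zero value when an entry carries no value field (see the `NodeSelector` and `Selector` hunks), and the new 1.11 fields such as `HostAliases`, `PriorityClassName`, `Priority`, `DNSConfig`, `ShareProcessNamespace`, `ReadinessGates`, `NominatedNodeName`, and the new `RBDPersistentVolumeSource` type. Every one of these fields is read with the same inlined base-128 varint loop. The following standalone sketch — not part of the vendored file, helper name and example bytes are my own — shows what that loop computes.)

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the inlined base-128 varint loop that the generated
// Unmarshal methods in this patch repeat for every tag and length prefix:
// each byte contributes its low 7 bits, and the high bit marks continuation.
func decodeVarint(dAtA []byte, iNdEx int) (v uint64, next int, err error) {
	l := len(dAtA)
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			// ErrIntOverflowGenerated in the vendored file
			return 0, iNdEx, errors.New("integer overflow")
		}
		if iNdEx >= l {
			// io.ErrUnexpectedEOF in the vendored file
			return 0, iNdEx, errors.New("unexpected EOF")
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, iNdEx, nil
		}
	}
}

func main() {
	// 0x96 0x01 encodes 150: (0x96 & 0x7F) | (0x01 << 7) = 22 + 128.
	v, next, err := decodeVarint([]byte{0x96, 0x01}, 0)
	fmt.Println(v, next, err) // 150 2 <nil>
}
```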
- b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -35327,14 +41746,14 @@ func (m *RangeAllocation) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) if m.Data == nil { m.Data = []byte{} } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -35353,8 +41772,8 @@ func (m *RangeAllocation) Unmarshal(data []byte) error { } return nil } -func (m *ReplicationController) Unmarshal(data []byte) error { - l := len(data) +func (m *ReplicationController) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -35366,7 +41785,7 @@ func (m *ReplicationController) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35394,7 +41813,7 @@ func (m *ReplicationController) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -35408,7 +41827,7 @@ func (m *ReplicationController) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -35424,7 +41843,7 @@ func (m *ReplicationController) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -35438,7 +41857,7 @@ func (m *ReplicationController) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -35454,7 +41873,7 @@ func (m *ReplicationController) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -35468,13 +41887,13 @@ func (m *ReplicationController) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -35493,8 +41912,8 @@ func (m *ReplicationController) Unmarshal(data []byte) error { } return nil } -func (m *ReplicationControllerCondition) Unmarshal(data []byte) error { - l := len(data) +func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -35506,7 +41925,7 @@ func (m *ReplicationControllerCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35534,7 +41953,7 @@ func (m *ReplicationControllerCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] 
iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35549,7 +41968,7 @@ func (m *ReplicationControllerCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ReplicationControllerConditionType(data[iNdEx:postIndex]) + m.Type = ReplicationControllerConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -35563,7 +41982,7 @@ func (m *ReplicationControllerCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35578,7 +41997,7 @@ func (m *ReplicationControllerCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = ConditionStatus(data[iNdEx:postIndex]) + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -35592,7 +42011,7 @@ func (m *ReplicationControllerCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -35606,7 +42025,7 @@ func (m *ReplicationControllerCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -35622,7 +42041,7 @@ func (m *ReplicationControllerCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35637,7 +42056,7 @@ func (m *ReplicationControllerCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -35651,7 +42070,7 @@ func (m *ReplicationControllerCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35666,11 +42085,11 @@ func (m *ReplicationControllerCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -35689,8 +42108,8 @@ func (m *ReplicationControllerCondition) Unmarshal(data []byte) error { } return nil } -func (m *ReplicationControllerList) Unmarshal(data []byte) error { - l := len(data) +func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -35702,7 +42121,7 @@ func (m *ReplicationControllerList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35730,7 +42149,7 @@ func (m *ReplicationControllerList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -35744,7 +42163,7 @@ func (m *ReplicationControllerList) Unmarshal(data []byte) error { if postIndex > l { 
return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -35760,7 +42179,7 @@ func (m *ReplicationControllerList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -35775,13 +42194,13 @@ func (m *ReplicationControllerList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, ReplicationController{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -35800,8 +42219,8 @@ func (m *ReplicationControllerList) Unmarshal(data []byte) error { } return nil } -func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -35813,7 +42232,7 @@ func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35841,7 +42260,7 @@ func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -35861,7 +42280,7 @@ func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -35883,7 +42302,7 @@ func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35898,7 +42317,7 @@ func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -35913,52 +42332,57 @@ func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := 
string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue if m.Selector == nil { m.Selector = make(map[string]string) } - m.Selector[mapkey] = mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue + } iNdEx = postIndex case 3: if wireType != 2 { @@ -35972,7 +42396,7 @@ func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -35989,7 +42413,7 @@ func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { if m.Template == nil { m.Template = &PodTemplateSpec{} } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -36005,7 +42429,7 @@ func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -36014,7 +42438,7 @@ func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -36033,8 +42457,8 @@ func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { } return nil } -func (m *ReplicationControllerStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -36046,7 +42470,7 @@ func (m *ReplicationControllerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -36074,7 +42498,7 @@ func (m *ReplicationControllerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -36093,7 +42517,7 @@ func (m *ReplicationControllerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -36112,7 +42536,7 @@ func (m *ReplicationControllerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.ObservedGeneration |= (int64(b) & 0x7F) << 
shift if b < 0x80 { @@ -36131,7 +42555,7 @@ func (m *ReplicationControllerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -36150,7 +42574,7 @@ func (m *ReplicationControllerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.AvailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -36169,7 +42593,7 @@ func (m *ReplicationControllerStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -36184,13 +42608,13 @@ func (m *ReplicationControllerStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Conditions = append(m.Conditions, ReplicationControllerCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -36209,8 +42633,8 @@ func (m *ReplicationControllerStatus) Unmarshal(data []byte) error { } return nil } -func (m *ResourceFieldSelector) Unmarshal(data []byte) error { - l := len(data) +func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -36222,7 +42646,7 @@ func (m *ResourceFieldSelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -36250,7 +42674,7 @@ func (m *ResourceFieldSelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -36265,7 +42689,7 @@ func (m *ResourceFieldSelector) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerName = string(data[iNdEx:postIndex]) + m.ContainerName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -36279,7 +42703,7 @@ func (m *ResourceFieldSelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -36294,7 +42718,7 @@ func (m *ResourceFieldSelector) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Resource = string(data[iNdEx:postIndex]) + m.Resource = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -36308,7 +42732,7 @@ func (m *ResourceFieldSelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -36322,13 +42746,13 @@ func (m *ResourceFieldSelector) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Divisor.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Divisor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -36347,8 
+42771,8 @@ func (m *ResourceFieldSelector) Unmarshal(data []byte) error { } return nil } -func (m *ResourceQuota) Unmarshal(data []byte) error { - l := len(data) +func (m *ResourceQuota) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -36360,7 +42784,7 @@ func (m *ResourceQuota) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -36388,7 +42812,7 @@ func (m *ResourceQuota) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -36402,7 +42826,7 @@ func (m *ResourceQuota) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -36418,7 +42842,7 @@ func (m *ResourceQuota) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -36432,7 +42856,7 @@ func (m *ResourceQuota) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -36448,7 +42872,7 @@ func (m *ResourceQuota) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -36462,13 +42886,13 @@ func (m *ResourceQuota) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -36487,8 +42911,8 @@ func (m *ResourceQuota) Unmarshal(data []byte) error { } return nil } -func (m *ResourceQuotaList) Unmarshal(data []byte) error { - l := len(data) +func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -36500,7 +42924,7 @@ func (m *ResourceQuotaList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -36528,7 +42952,7 @@ func (m *ResourceQuotaList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -36542,7 +42966,7 @@ func (m *ResourceQuotaList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -36558,7 +42982,7 @@ func (m *ResourceQuotaList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -36573,13 +42997,13 @@ func (m *ResourceQuotaList) Unmarshal(data []byte) error { return 
io.ErrUnexpectedEOF } m.Items = append(m.Items, ResourceQuota{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -36598,8 +43022,8 @@ func (m *ResourceQuotaList) Unmarshal(data []byte) error { } return nil } -func (m *ResourceQuotaSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -36611,7 +43035,7 @@ func (m *ResourceQuotaSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -36639,7 +43063,7 @@ func (m *ResourceQuotaSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -36661,7 +43085,7 @@ func (m *ResourceQuotaSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -36676,7 +43100,7 @@ func (m *ResourceQuotaSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -36691,57 +43115,62 @@ func (m *ResourceQuotaSpec) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.Hard == nil { m.Hard = make(ResourceList) } - m.Hard[ResourceName(mapkey)] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + 
postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Hard[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Hard[ResourceName(mapkey)] = mapvalue + } iNdEx = postIndex case 2: if wireType != 2 { @@ -36755,7 +43184,7 @@ func (m *ResourceQuotaSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -36770,11 +43199,44 @@ func (m *ResourceQuotaSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Scopes = append(m.Scopes, ResourceQuotaScope(data[iNdEx:postIndex])) + m.Scopes = append(m.Scopes, ResourceQuotaScope(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScopeSelector == nil { + m.ScopeSelector = &ScopeSelector{} + } + if err := m.ScopeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -36793,8 +43255,8 @@ func (m *ResourceQuotaSpec) Unmarshal(data []byte) error { } return nil } -func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -36806,7 +43268,7 @@ func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -36834,7 +43296,7 @@ func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -36856,7 +43318,7 @@ func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -36871,7 +43333,7 @@ func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -36886,57 +43348,62 @@ func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { 
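The map hunks in this region (Selector, Hard, Used, and the Limits/Requests and Secret data maps further down) all replace code that assumed every map entry carries both a key and a value field with a guard on the value decode, falling back to the zero value when the value field is absent on the wire. The following is a minimal sketch of that behaviour only, not the generated code itself; the entry type, the apply helper, and hasValue are illustrative stand-ins, with hasValue corresponding to the iNdEx < postIndex check shown above.

package main

import "fmt"

// entry models a decoded map<string,string> entry message: field 1 is the key,
// field 2 is the value, and the value field may be missing on the wire.
// (Illustrative type; the generated Unmarshal methods inline all of the parsing.)
type entry struct {
	key      string
	value    string
	hasValue bool // stands in for the iNdEx < postIndex guard in the hunks above
}

// apply mirrors the regenerated behaviour: the key is always inserted, and an
// absent value field yields the zero value rather than reading past the entry.
func apply(m map[string]string, e entry) {
	if e.hasValue {
		m[e.key] = e.value
	} else {
		var zero string
		m[e.key] = zero
	}
}

func main() {
	sel := map[string]string{}
	apply(sel, entry{key: "app", value: "nginx", hasValue: true})
	apply(sel, entry{key: "tier", hasValue: false}) // value field omitted on the wire
	fmt.Println(len(sel), sel["app"], sel["tier"] == "") // 2 nginx true
}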
- return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.Hard == nil { m.Hard = make(ResourceList) } - m.Hard[ResourceName(mapkey)] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Hard[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Hard[ResourceName(mapkey)] = mapvalue + } iNdEx = postIndex case 2: if wireType != 2 { @@ -36950,7 +43417,7 @@ func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -36972,7 +43439,7 @@ func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -36987,7 +43454,7 @@ func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37002,61 +43469,66 @@ func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break 
- } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.Used == nil { m.Used = make(ResourceList) } - m.Used[ResourceName(mapkey)] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Used[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Used[ResourceName(mapkey)] = mapvalue + } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -37075,8 +43547,8 @@ func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { } return nil } -func (m *ResourceRequirements) Unmarshal(data []byte) error { - l := len(data) +func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -37088,7 +43560,7 @@ func (m *ResourceRequirements) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37116,7 +43588,7 @@ func (m *ResourceRequirements) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -37138,7 +43610,7 @@ func (m *ResourceRequirements) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37153,7 +43625,7 @@ func (m *ResourceRequirements) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37168,57 +43640,62 @@ func (m *ResourceRequirements) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } 
- var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.Limits == nil { m.Limits = make(ResourceList) } - m.Limits[ResourceName(mapkey)] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Limits[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Limits[ResourceName(mapkey)] = mapvalue + } iNdEx = postIndex case 2: if wireType != 2 { @@ -37232,7 +43709,7 @@ func (m *ResourceRequirements) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -37254,7 +43731,7 @@ func (m *ResourceRequirements) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37269,7 +43746,7 @@ func (m *ResourceRequirements) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37284,61 +43761,66 @@ func (m *ResourceRequirements) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 
0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex if m.Requests == nil { m.Requests = make(ResourceList) } - m.Requests[ResourceName(mapkey)] = *mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Requests[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Requests[ResourceName(mapkey)] = mapvalue + } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -37357,8 +43839,8 @@ func (m *ResourceRequirements) Unmarshal(data []byte) error { } return nil } -func (m *SELinuxOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -37370,7 +43852,7 @@ func (m *SELinuxOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37398,7 +43880,7 @@ func (m *SELinuxOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37413,7 +43895,7 @@ func (m *SELinuxOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.User = string(data[iNdEx:postIndex]) + m.User = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -37427,7 +43909,7 @@ func (m *SELinuxOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37442,7 +43924,7 @@ func (m *SELinuxOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Role = string(data[iNdEx:postIndex]) + m.Role = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -37456,7 +43938,7 @@ func (m *SELinuxOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37471,7 +43953,7 @@ func (m *SELinuxOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(data[iNdEx:postIndex]) + m.Type = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -37485,7 +43967,7 @@ func (m *SELinuxOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37500,11 +43982,11 @@ func (m *SELinuxOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Level = string(data[iNdEx:postIndex]) + m.Level = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -37523,8 +44005,8 @@ func (m *SELinuxOptions) Unmarshal(data []byte) error { } return nil } -func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -37536,7 +44018,333 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleIOPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleIOPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateway = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field System", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.System = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 
0 { + return fmt.Errorf("proto: wrong wireType = %d for field SSLEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SSLEnabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtectionDomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProtectionDomain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoragePool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageMode = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37564,7 +44372,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37579,7 +44387,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Gateway = string(data[iNdEx:postIndex]) + m.Gateway = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -37593,7 +44401,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37608,7 +44416,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.System = string(data[iNdEx:postIndex]) + m.System = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -37622,7 +44430,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -37639,7 +44447,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if m.SecretRef == nil { m.SecretRef = &LocalObjectReference{} } - if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -37655,7 +44463,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -37675,7 +44483,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37690,7 +44498,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ProtectionDomain = string(data[iNdEx:postIndex]) + m.ProtectionDomain = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { @@ -37704,7 +44512,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := 
data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37719,7 +44527,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.StoragePool = string(data[iNdEx:postIndex]) + m.StoragePool = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 2 { @@ -37733,7 +44541,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37748,7 +44556,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.StorageMode = string(data[iNdEx:postIndex]) + m.StorageMode = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 8: if wireType != 2 { @@ -37762,7 +44570,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37777,7 +44585,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeName = string(data[iNdEx:postIndex]) + m.VolumeName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 9: if wireType != 2 { @@ -37791,7 +44599,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37806,7 +44614,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(data[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 10: if wireType != 0 { @@ -37820,7 +44628,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -37830,7 +44638,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { m.ReadOnly = bool(v != 0) default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -37849,8 +44657,8 @@ func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *Secret) Unmarshal(data []byte) error { - l := len(data) +func (m *ScopeSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -37862,7 +44670,225 @@ func (m *Secret) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopeSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScopeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, ScopedResourceSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopedResourceSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScopedResourceSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScopeName = ResourceQuotaScope(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = ScopeSelectorOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Secret) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37890,7 +44916,7 @@ func (m *Secret) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -37904,7 +44930,7 @@ func (m *Secret) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -37920,7 +44946,7 @@ func (m *Secret) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -37942,7 +44968,7 @@ func (m *Secret) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37957,7 +44983,7 @@ func (m *Secret) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -37972,53 +44998,58 @@ func (m *Secret) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := make([]byte, mapbyteLen) - copy(mapvalue, data[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex if m.Data == nil { m.Data = make(map[string][]byte) } - m.Data[mapkey] = mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return 
ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + m.Data[mapkey] = mapvalue + } else { + var mapvalue []byte + m.Data[mapkey] = mapvalue + } iNdEx = postIndex case 3: if wireType != 2 { @@ -38032,7 +45063,7 @@ func (m *Secret) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -38047,7 +45078,7 @@ func (m *Secret) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = SecretType(data[iNdEx:postIndex]) + m.Type = SecretType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -38061,7 +45092,7 @@ func (m *Secret) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38083,7 +45114,7 @@ func (m *Secret) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -38098,7 +45129,7 @@ func (m *Secret) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -38113,56 +45144,61 @@ func (m *Secret) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue if m.StringData == nil { m.StringData = make(map[string]string) } - m.StringData[mapkey] = mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if 
postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.StringData[mapkey] = mapvalue + } else { + var mapvalue string + m.StringData[mapkey] = mapvalue + } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -38181,8 +45217,8 @@ func (m *Secret) Unmarshal(data []byte) error { } return nil } -func (m *SecretEnvSource) Unmarshal(data []byte) error { - l := len(data) +func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -38194,7 +45230,7 @@ func (m *SecretEnvSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -38222,7 +45258,7 @@ func (m *SecretEnvSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38236,7 +45272,7 @@ func (m *SecretEnvSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -38252,7 +45288,7 @@ func (m *SecretEnvSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38263,7 +45299,7 @@ func (m *SecretEnvSource) Unmarshal(data []byte) error { m.Optional = &b default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -38282,8 +45318,8 @@ func (m *SecretEnvSource) Unmarshal(data []byte) error { } return nil } -func (m *SecretKeySelector) Unmarshal(data []byte) error { - l := len(data) +func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -38295,7 +45331,7 @@ func (m *SecretKeySelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -38323,7 +45359,7 @@ func (m *SecretKeySelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38337,7 +45373,7 @@ func (m *SecretKeySelector) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -38353,7 +45389,7 @@ func (m *SecretKeySelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -38368,7 +45404,7 @@ func (m *SecretKeySelector) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(data[iNdEx:postIndex]) + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -38382,7 +45418,7 @@ func (m *SecretKeySelector) Unmarshal(data 
[]byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38393,7 +45429,7 @@ func (m *SecretKeySelector) Unmarshal(data []byte) error { m.Optional = &b default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -38412,8 +45448,8 @@ func (m *SecretKeySelector) Unmarshal(data []byte) error { } return nil } -func (m *SecretList) Unmarshal(data []byte) error { - l := len(data) +func (m *SecretList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -38425,7 +45461,7 @@ func (m *SecretList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -38453,7 +45489,7 @@ func (m *SecretList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38467,7 +45503,7 @@ func (m *SecretList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -38483,7 +45519,7 @@ func (m *SecretList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38498,13 +45534,13 @@ func (m *SecretList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, Secret{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -38523,8 +45559,8 @@ func (m *SecretList) Unmarshal(data []byte) error { } return nil } -func (m *SecretProjection) Unmarshal(data []byte) error { - l := len(data) +func (m *SecretProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -38536,7 +45572,7 @@ func (m *SecretProjection) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -38564,7 +45600,7 @@ func (m *SecretProjection) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38578,7 +45614,7 @@ func (m *SecretProjection) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -38594,7 +45630,7 @@ func (m *SecretProjection) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38609,7 +45645,7 @@ func (m *SecretProjection) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, KeyToPath{}) - if err := 
m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -38625,7 +45661,7 @@ func (m *SecretProjection) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38636,7 +45672,7 @@ func (m *SecretProjection) Unmarshal(data []byte) error { m.Optional = &b default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -38655,8 +45691,8 @@ func (m *SecretProjection) Unmarshal(data []byte) error { } return nil } -func (m *SecretVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *SecretReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -38668,7 +45704,115 @@ func (m *SecretVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -38696,7 +45840,7 @@ func (m *SecretVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { 
return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -38711,7 +45855,7 @@ func (m *SecretVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(data[iNdEx:postIndex]) + m.SecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -38725,7 +45869,7 @@ func (m *SecretVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38740,7 +45884,7 @@ func (m *SecretVolumeSource) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -38756,7 +45900,7 @@ func (m *SecretVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -38776,7 +45920,7 @@ func (m *SecretVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38787,7 +45931,7 @@ func (m *SecretVolumeSource) Unmarshal(data []byte) error { m.Optional = &b default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -38806,8 +45950,8 @@ func (m *SecretVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *SecurityContext) Unmarshal(data []byte) error { - l := len(data) +func (m *SecurityContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -38819,7 +45963,7 @@ func (m *SecurityContext) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -38847,7 +45991,7 @@ func (m *SecurityContext) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38864,7 +46008,7 @@ func (m *SecurityContext) Unmarshal(data []byte) error { if m.Capabilities == nil { m.Capabilities = &Capabilities{} } - if err := m.Capabilities.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Capabilities.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -38880,7 +46024,7 @@ func (m *SecurityContext) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38901,7 +46045,7 @@ func (m *SecurityContext) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38918,7 +46062,7 @@ func (m *SecurityContext) Unmarshal(data []byte) error { if m.SELinuxOptions == nil { m.SELinuxOptions = &SELinuxOptions{} } - if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -38934,7 +46078,7 @@ func (m *SecurityContext) Unmarshal(data 
[]byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -38954,7 +46098,7 @@ func (m *SecurityContext) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38975,7 +46119,7 @@ func (m *SecurityContext) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -38984,9 +46128,50 @@ func (m *SecurityContext) Unmarshal(data []byte) error { } b := bool(v != 0) m.ReadOnlyRootFilesystem = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowPrivilegeEscalation = &b + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsGroup = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -39005,8 +46190,8 @@ func (m *SecurityContext) Unmarshal(data []byte) error { } return nil } -func (m *SerializedReference) Unmarshal(data []byte) error { - l := len(data) +func (m *SerializedReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -39018,7 +46203,7 @@ func (m *SerializedReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -39046,7 +46231,7 @@ func (m *SerializedReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -39060,13 +46245,13 @@ func (m *SerializedReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Reference.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Reference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -39085,8 +46270,8 @@ func (m *SerializedReference) Unmarshal(data []byte) error { } return nil } -func (m *Service) Unmarshal(data []byte) error { - l := len(data) +func (m *Service) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -39098,7 +46283,7 @@ func (m *Service) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -39126,7 +46311,7 @@ func (m *Service) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen 
|= (int(b) & 0x7F) << shift if b < 0x80 { @@ -39140,7 +46325,7 @@ func (m *Service) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39156,7 +46341,7 @@ func (m *Service) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -39170,7 +46355,7 @@ func (m *Service) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39186,7 +46371,7 @@ func (m *Service) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -39200,13 +46385,13 @@ func (m *Service) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -39225,8 +46410,8 @@ func (m *Service) Unmarshal(data []byte) error { } return nil } -func (m *ServiceAccount) Unmarshal(data []byte) error { - l := len(data) +func (m *ServiceAccount) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -39238,7 +46423,7 @@ func (m *ServiceAccount) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -39266,7 +46451,7 @@ func (m *ServiceAccount) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -39280,7 +46465,7 @@ func (m *ServiceAccount) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39296,7 +46481,7 @@ func (m *ServiceAccount) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -39311,7 +46496,7 @@ func (m *ServiceAccount) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Secrets = append(m.Secrets, ObjectReference{}) - if err := m.Secrets[len(m.Secrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39327,7 +46512,7 @@ func (m *ServiceAccount) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -39342,7 +46527,7 @@ func (m *ServiceAccount) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) - if err := 
m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39358,7 +46543,7 @@ func (m *ServiceAccount) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -39369,7 +46554,7 @@ func (m *ServiceAccount) Unmarshal(data []byte) error { m.AutomountServiceAccountToken = &b default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -39388,8 +46573,8 @@ func (m *ServiceAccount) Unmarshal(data []byte) error { } return nil } -func (m *ServiceAccountList) Unmarshal(data []byte) error { - l := len(data) +func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -39401,7 +46586,7 @@ func (m *ServiceAccountList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -39429,7 +46614,7 @@ func (m *ServiceAccountList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -39443,7 +46628,7 @@ func (m *ServiceAccountList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39459,7 +46644,7 @@ func (m *ServiceAccountList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -39474,13 +46659,13 @@ func (m *ServiceAccountList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, ServiceAccount{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -39499,8 +46684,8 @@ func (m *ServiceAccountList) Unmarshal(data []byte) error { } return nil } -func (m *ServiceList) Unmarshal(data []byte) error { - l := len(data) +func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -39512,7 +46697,135 @@ func (m *ServiceList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountTokenProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountTokenProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audience", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audience = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ExpirationSeconds = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -39540,7 +46853,7 @@ func (m *ServiceList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -39554,7 +46867,7 @@ func (m *ServiceList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39570,7 +46883,7 @@ func (m *ServiceList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -39585,13 +46898,13 @@ func (m *ServiceList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, Service{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -39610,8 +46923,8 @@ func (m *ServiceList) Unmarshal(data []byte) error { } return nil } -func (m *ServicePort) Unmarshal(data []byte) error { - l := len(data) +func (m 
*ServicePort) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -39623,7 +46936,7 @@ func (m *ServicePort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -39651,7 +46964,7 @@ func (m *ServicePort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -39666,7 +46979,7 @@ func (m *ServicePort) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -39680,7 +46993,7 @@ func (m *ServicePort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -39695,7 +47008,7 @@ func (m *ServicePort) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = Protocol(data[iNdEx:postIndex]) + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -39709,7 +47022,7 @@ func (m *ServicePort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Port |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -39728,7 +47041,7 @@ func (m *ServicePort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -39742,7 +47055,7 @@ func (m *ServicePort) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.TargetPort.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.TargetPort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39758,7 +47071,7 @@ func (m *ServicePort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.NodePort |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -39767,7 +47080,7 @@ func (m *ServicePort) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -39786,8 +47099,8 @@ func (m *ServicePort) Unmarshal(data []byte) error { } return nil } -func (m *ServiceProxyOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -39799,7 +47112,7 @@ func (m *ServiceProxyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -39827,7 +47140,7 @@ func (m *ServiceProxyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -39842,11 +47155,11 @@ func (m *ServiceProxyOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(data[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := 
skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -39865,8 +47178,8 @@ func (m *ServiceProxyOptions) Unmarshal(data []byte) error { } return nil } -func (m *ServiceSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *ServiceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -39878,7 +47191,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -39906,7 +47219,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -39921,7 +47234,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Ports = append(m.Ports, ServicePort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -39937,7 +47250,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -39959,7 +47272,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -39974,7 +47287,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -39989,52 +47302,57 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue if m.Selector == nil { m.Selector = make(map[string]string) } - m.Selector[mapkey] = mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue + } iNdEx = postIndex case 3: if wireType != 2 { @@ -40048,7 +47366,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40063,7 +47381,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterIP = string(data[iNdEx:postIndex]) + m.ClusterIP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -40077,7 +47395,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40092,7 +47410,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ServiceType(data[iNdEx:postIndex]) + m.Type = ServiceType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -40106,7 +47424,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40121,36 +47439,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalIPs = append(m.ExternalIPs, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedPublicIPs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeprecatedPublicIPs = append(m.DeprecatedPublicIPs, string(data[iNdEx:postIndex])) + m.ExternalIPs = append(m.ExternalIPs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 7: if wireType != 2 { @@ -40164,7 +47453,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40179,7 +47468,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SessionAffinity = ServiceAffinity(data[iNdEx:postIndex]) + m.SessionAffinity = ServiceAffinity(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 8: if wireType != 2 { @@ -40193,7 +47482,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40208,7 +47497,7 @@ func (m 
*ServiceSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.LoadBalancerIP = string(data[iNdEx:postIndex]) + m.LoadBalancerIP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 9: if wireType != 2 { @@ -40222,7 +47511,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40237,7 +47526,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(data[iNdEx:postIndex])) + m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 10: if wireType != 2 { @@ -40251,7 +47540,7 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40266,11 +47555,112 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalName = string(data[iNdEx:postIndex]) + m.ExternalName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalTrafficPolicy = ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HealthCheckNodePort", wireType) + } + m.HealthCheckNodePort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HealthCheckNodePort |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PublishNotReadyAddresses", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PublishNotReadyAddresses = bool(v != 0) + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinityConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SessionAffinityConfig == nil { + m.SessionAffinityConfig = &SessionAffinityConfig{} + } + if err := m.SessionAffinityConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } 
iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -40289,8 +47679,8 @@ func (m *ServiceSpec) Unmarshal(data []byte) error { } return nil } -func (m *ServiceStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *ServiceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -40302,7 +47692,7 @@ func (m *ServiceStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40330,7 +47720,7 @@ func (m *ServiceStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -40344,13 +47734,13 @@ func (m *ServiceStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -40369,8 +47759,8 @@ func (m *ServiceStatus) Unmarshal(data []byte) error { } return nil } -func (m *Sysctl) Unmarshal(data []byte) error { - l := len(data) +func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -40382,7 +47772,470 @@ func (m *Sysctl) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionAffinityConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionAffinityConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientIP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientIP == nil { + m.ClientIP = &ClientIPConfig{} + } + if err := m.ClientIP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageOSPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageOSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &ObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageOSVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageOSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + 
} + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sysctl) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40410,7 +48263,7 @@ func (m *Sysctl) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40425,7 +48278,7 @@ func (m *Sysctl) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -40439,7 +48292,7 @@ func (m *Sysctl) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40454,11 +48307,11 @@ func (m *Sysctl) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(data[iNdEx:postIndex]) + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -40477,8 +48330,8 @@ func (m *Sysctl) Unmarshal(data []byte) error { } return nil } -func (m *TCPSocketAction) Unmarshal(data []byte) error { - l := len(data) +func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -40490,7 +48343,7 @@ func (m *TCPSocketAction) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40518,7 +48371,7 @@ func (m *TCPSocketAction) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -40532,13 +48385,42 @@ func (m *TCPSocketAction) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } 
+ postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -40557,8 +48439,8 @@ func (m *TCPSocketAction) Unmarshal(data []byte) error { } return nil } -func (m *Taint) Unmarshal(data []byte) error { - l := len(data) +func (m *Taint) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -40570,7 +48452,7 @@ func (m *Taint) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40598,7 +48480,7 @@ func (m *Taint) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40613,7 +48495,7 @@ func (m *Taint) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(data[iNdEx:postIndex]) + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -40627,7 +48509,7 @@ func (m *Taint) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40642,7 +48524,7 @@ func (m *Taint) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(data[iNdEx:postIndex]) + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -40656,7 +48538,7 @@ func (m *Taint) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40671,7 +48553,7 @@ func (m *Taint) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Effect = TaintEffect(data[iNdEx:postIndex]) + m.Effect = TaintEffect(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -40685,7 +48567,7 @@ func (m *Taint) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -40699,13 +48581,16 @@ func (m *Taint) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.TimeAdded.Unmarshal(data[iNdEx:postIndex]); err != nil { + if m.TimeAdded == nil { + m.TimeAdded = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -40724,8 +48609,8 @@ func (m *Taint) Unmarshal(data []byte) error { } return nil } -func (m *Toleration) Unmarshal(data []byte) error { - l := len(data) +func (m *Toleration) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -40737,7 +48622,7 @@ func (m *Toleration) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40765,7 +48650,7 @@ func (m *Toleration) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b 
:= dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40780,7 +48665,7 @@ func (m *Toleration) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(data[iNdEx:postIndex]) + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -40794,7 +48679,7 @@ func (m *Toleration) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40809,7 +48694,7 @@ func (m *Toleration) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Operator = TolerationOperator(data[iNdEx:postIndex]) + m.Operator = TolerationOperator(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -40823,7 +48708,7 @@ func (m *Toleration) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40838,7 +48723,7 @@ func (m *Toleration) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(data[iNdEx:postIndex]) + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -40852,7 +48737,7 @@ func (m *Toleration) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40867,7 +48752,7 @@ func (m *Toleration) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Effect = TaintEffect(data[iNdEx:postIndex]) + m.Effect = TaintEffect(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 0 { @@ -40881,7 +48766,7 @@ func (m *Toleration) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -40891,7 +48776,7 @@ func (m *Toleration) Unmarshal(data []byte) error { m.TolerationSeconds = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -40910,8 +48795,8 @@ func (m *Toleration) Unmarshal(data []byte) error { } return nil } -func (m *Volume) Unmarshal(data []byte) error { - l := len(data) +func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -40923,7 +48808,196 @@ func (m *Volume) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopologySelectorLabelRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopologySelectorLabelRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen 
< 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopologySelectorTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopologySelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabelExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchLabelExpressions = append(m.MatchLabelExpressions, TopologySelectorLabelRequirement{}) + if err := m.MatchLabelExpressions[len(m.MatchLabelExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Volume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -40951,7 +49025,7 @@ func (m *Volume) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ 
-40966,7 +49040,7 @@ func (m *Volume) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -40980,7 +49054,7 @@ func (m *Volume) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -40994,13 +49068,13 @@ func (m *Volume) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.VolumeSource.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.VolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -41019,8 +49093,8 @@ func (m *Volume) Unmarshal(data []byte) error { } return nil } -func (m *VolumeMount) Unmarshal(data []byte) error { - l := len(data) +func (m *VolumeDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -41032,7 +49106,115 @@ func (m *VolumeMount) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DevicePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeMount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -41060,7 +49242,7 @@ func (m *VolumeMount) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -41075,7 +49257,7 @@ func (m *VolumeMount) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { @@ -41089,7 +49271,7 @@ func (m *VolumeMount) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41109,7 +49291,7 @@ func (m *VolumeMount) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -41124,7 +49306,7 @@ func (m *VolumeMount) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MountPath = string(data[iNdEx:postIndex]) + m.MountPath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -41138,7 +49320,7 @@ func (m *VolumeMount) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -41153,11 +49335,41 @@ func (m *VolumeMount) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SubPath = string(data[iNdEx:postIndex]) + m.SubPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPropagation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MountPropagationMode(dAtA[iNdEx:postIndex]) + m.MountPropagation = &s iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -41176,8 +49388,8 @@ func (m *VolumeMount) Unmarshal(data []byte) error { } return nil } -func (m *VolumeProjection) Unmarshal(data []byte) error { - l := len(data) +func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -41189,7 +49401,90 @@ func (m *VolumeProjection) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeNodeAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeNodeAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Required == nil { + m.Required = &NodeSelector{} + } + if err := m.Required.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -41217,7 +49512,7 @@ func (m *VolumeProjection) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41234,7 +49529,7 @@ func (m *VolumeProjection) Unmarshal(data []byte) error { if m.Secret == nil { m.Secret = &SecretProjection{} } - if err := m.Secret.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41250,7 +49545,7 @@ func (m *VolumeProjection) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41267,7 +49562,7 @@ func (m *VolumeProjection) Unmarshal(data []byte) error { if m.DownwardAPI == nil { m.DownwardAPI = &DownwardAPIProjection{} } - if err := m.DownwardAPI.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.DownwardAPI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41283,7 +49578,7 @@ func (m *VolumeProjection) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41300,13 +49595,46 @@ func (m *VolumeProjection) Unmarshal(data []byte) error { if m.ConfigMap == nil { m.ConfigMap = &ConfigMapProjection{} } - if err := m.ConfigMap.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountToken", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccountToken == nil { + m.ServiceAccountToken = &ServiceAccountTokenProjection{} + } + if err := 
m.ServiceAccountToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -41325,8 +49653,8 @@ func (m *VolumeProjection) Unmarshal(data []byte) error { } return nil } -func (m *VolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *VolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -41338,7 +49666,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -41366,7 +49694,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41383,7 +49711,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.HostPath == nil { m.HostPath = &HostPathVolumeSource{} } - if err := m.HostPath.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.HostPath.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41399,7 +49727,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41416,7 +49744,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.EmptyDir == nil { m.EmptyDir = &EmptyDirVolumeSource{} } - if err := m.EmptyDir.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.EmptyDir.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41432,7 +49760,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41449,7 +49777,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.GCEPersistentDisk == nil { m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} } - if err := m.GCEPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.GCEPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41465,7 +49793,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41482,7 +49810,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.AWSElasticBlockStore == nil { m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} } - if err := m.AWSElasticBlockStore.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.AWSElasticBlockStore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41498,7 +49826,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41515,7 +49843,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.GitRepo == nil { m.GitRepo = &GitRepoVolumeSource{} } - if err := m.GitRepo.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.GitRepo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41531,7 +49859,7 @@ func (m *VolumeSource) 
Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41548,7 +49876,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.Secret == nil { m.Secret = &SecretVolumeSource{} } - if err := m.Secret.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41564,7 +49892,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41581,7 +49909,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.NFS == nil { m.NFS = &NFSVolumeSource{} } - if err := m.NFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.NFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41597,7 +49925,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41614,7 +49942,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.ISCSI == nil { m.ISCSI = &ISCSIVolumeSource{} } - if err := m.ISCSI.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ISCSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41630,7 +49958,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41647,7 +49975,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.Glusterfs == nil { m.Glusterfs = &GlusterfsVolumeSource{} } - if err := m.Glusterfs.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41663,7 +49991,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41680,7 +50008,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.PersistentVolumeClaim == nil { m.PersistentVolumeClaim = &PersistentVolumeClaimVolumeSource{} } - if err := m.PersistentVolumeClaim.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PersistentVolumeClaim.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41696,7 +50024,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41713,7 +50041,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.RBD == nil { m.RBD = &RBDVolumeSource{} } - if err := m.RBD.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RBD.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41729,7 +50057,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41746,7 +50074,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.FlexVolume == nil { m.FlexVolume = &FlexVolumeSource{} } - if err := m.FlexVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + 
if err := m.FlexVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41762,7 +50090,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41779,7 +50107,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.Cinder == nil { m.Cinder = &CinderVolumeSource{} } - if err := m.Cinder.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Cinder.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41795,7 +50123,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41812,7 +50140,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.CephFS == nil { m.CephFS = &CephFSVolumeSource{} } - if err := m.CephFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.CephFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41828,7 +50156,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41845,7 +50173,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.Flocker == nil { m.Flocker = &FlockerVolumeSource{} } - if err := m.Flocker.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Flocker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41861,7 +50189,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41878,7 +50206,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.DownwardAPI == nil { m.DownwardAPI = &DownwardAPIVolumeSource{} } - if err := m.DownwardAPI.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.DownwardAPI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41894,7 +50222,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41911,7 +50239,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.FC == nil { m.FC = &FCVolumeSource{} } - if err := m.FC.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.FC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41927,7 +50255,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41944,7 +50272,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.AzureFile == nil { m.AzureFile = &AzureFileVolumeSource{} } - if err := m.AzureFile.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.AzureFile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41960,7 +50288,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -41977,7 +50305,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.ConfigMap == 
nil { m.ConfigMap = &ConfigMapVolumeSource{} } - if err := m.ConfigMap.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -41993,7 +50321,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -42010,7 +50338,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.VsphereVolume == nil { m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} } - if err := m.VsphereVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.VsphereVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -42026,7 +50354,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -42043,7 +50371,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.Quobyte == nil { m.Quobyte = &QuobyteVolumeSource{} } - if err := m.Quobyte.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Quobyte.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -42059,7 +50387,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -42076,7 +50404,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.AzureDisk == nil { m.AzureDisk = &AzureDiskVolumeSource{} } - if err := m.AzureDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -42092,7 +50420,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -42109,7 +50437,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.PhotonPersistentDisk == nil { m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{} } - if err := m.PhotonPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PhotonPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -42125,7 +50453,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -42142,7 +50470,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.PortworxVolume == nil { m.PortworxVolume = &PortworxVolumeSource{} } - if err := m.PortworxVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PortworxVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -42158,7 +50486,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -42175,7 +50503,7 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.ScaleIO == nil { m.ScaleIO = &ScaleIOVolumeSource{} } - if err := m.ScaleIO.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ScaleIO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -42191,7 +50519,7 @@ func (m *VolumeSource) 
Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -42208,13 +50536,46 @@ func (m *VolumeSource) Unmarshal(data []byte) error { if m.Projected == nil { m.Projected = &ProjectedVolumeSource{} } - if err := m.Projected.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Projected.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageOS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StorageOS == nil { + m.StorageOS = &StorageOSVolumeSource{} + } + if err := m.StorageOS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -42233,8 +50594,8 @@ func (m *VolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *VsphereVirtualDiskVolumeSource) Unmarshal(data []byte) error { - l := len(data) +func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -42246,7 +50607,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -42274,7 +50635,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -42289,7 +50650,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumePath = string(data[iNdEx:postIndex]) + m.VolumePath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -42303,7 +50664,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -42318,11 +50679,69 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(data[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePolicyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoragePolicyName = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePolicyID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoragePolicyID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -42341,8 +50760,8 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(data []byte) error { } return nil } -func (m *WeightedPodAffinityTerm) Unmarshal(data []byte) error { - l := len(data) +func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -42354,7 +50773,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -42382,7 +50801,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Weight |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -42401,7 +50820,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -42415,13 +50834,13 @@ func (m *WeightedPodAffinityTerm) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.PodAffinityTerm.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PodAffinityTerm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -42440,8 +50859,8 @@ func (m *WeightedPodAffinityTerm) Unmarshal(data []byte) error { } return nil } -func skipGenerated(data []byte) (n int, err error) { - l := len(data) +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -42452,7 +50871,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -42470,7 +50889,7 @@ func skipGenerated(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -42487,7 +50906,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -42510,7 +50929,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -42521,7 +50940,7 @@ func skipGenerated(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := 
skipGenerated(data[start:]) + next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } @@ -42545,694 +50964,804 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -var fileDescriptorGenerated = []byte{ - // 11000 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0xbd, 0x7d, 0x70, 0x24, 0xc7, - 0x75, 0x18, 0xae, 0xd9, 0xc5, 0xd7, 0x3e, 0x7c, 0x37, 0x70, 0x47, 0x10, 0x22, 0x0f, 0xc7, 0xa1, - 0x48, 0x1d, 0xc9, 0x23, 0x20, 0x1e, 0x49, 0xf1, 0x24, 0xea, 0x47, 0x09, 0xc0, 0x02, 0x77, 0xd0, - 0x7d, 0x2d, 0x7b, 0x71, 0x77, 0x14, 0xc5, 0x9f, 0xc8, 0xb9, 0x9d, 0x06, 0x30, 0xbc, 0xc1, 0xcc, - 0x72, 0x66, 0x16, 0x77, 0x90, 0xa2, 0x2a, 0x5b, 0x51, 0xc9, 0x49, 0x59, 0x49, 0xe4, 0x72, 0x54, - 0x95, 0x72, 0x52, 0xa5, 0x94, 0xab, 0xe2, 0x28, 0x9f, 0x8e, 0xa2, 0xb2, 0x24, 0x97, 0xe5, 0xa4, - 0xe2, 0x58, 0x8e, 0x5c, 0x95, 0x38, 0xaa, 0x72, 0x25, 0x76, 0xca, 0x15, 0xd8, 0x82, 0x2a, 0xfe, - 0x23, 0x7f, 0xe4, 0x8f, 0xf8, 0x3f, 0x24, 0x95, 0x4a, 0xf5, 0xe7, 0x74, 0xcf, 0xee, 0x62, 0x66, - 0xc1, 0x03, 0x7c, 0x52, 0xe5, 0xbf, 0xdd, 0x7e, 0xaf, 0x5f, 0x7f, 0x4c, 0xf7, 0xeb, 0xf7, 0x5e, - 0xbf, 0xf7, 0x1a, 0xce, 0xdf, 0xbd, 0x18, 0xcf, 0x7b, 0xe1, 0xc2, 0xdd, 0xd6, 0x1d, 0x12, 0x05, - 0x24, 0x21, 0xf1, 0x42, 0xf3, 0xee, 0xe6, 0x82, 0xd3, 0xf4, 0x16, 0x76, 0x5e, 0x58, 0xd8, 0x24, - 0x01, 0x89, 0x9c, 0x84, 0xb8, 0xf3, 0xcd, 0x28, 0x4c, 0x42, 0xf4, 0x18, 0xc7, 0x9e, 0x4f, 0xb1, - 0xe7, 0x9b, 0x77, 0x37, 0xe7, 0x9d, 0xa6, 0x37, 0xbf, 0xf3, 0xc2, 0xec, 0xf3, 0x9b, 0x5e, 0xb2, - 0xd5, 0xba, 0x33, 0xdf, 0x08, 0xb7, 0x17, 0x36, 0xc3, 0xcd, 0x70, 0x81, 0x55, 0xba, 0xd3, 0xda, - 0x60, 0xff, 0xd8, 0x1f, 0xf6, 0x8b, 0x13, 0x9b, 0x7d, 0x49, 0x34, 0xed, 0x34, 0xbd, 0x6d, 0xa7, - 0xb1, 0xe5, 0x05, 0x24, 0xda, 0x55, 0x8d, 0x47, 0x24, 0x0e, 0x5b, 0x51, 0x83, 0x64, 0xbb, 0x70, - 0x68, 0xad, 0x78, 0x61, 0x9b, 0x24, 0x4e, 0x87, 0x8e, 0xcf, 0x2e, 0x74, 0xab, 0x15, 0xb5, 0x82, - 0xc4, 0xdb, 0x6e, 0x6f, 0xe6, 0xa3, 0x79, 0x15, 0xe2, 0xc6, 0x16, 0xd9, 0x76, 0xda, 0xea, 0xbd, - 0xd8, 0xad, 0x5e, 0x2b, 0xf1, 0xfc, 0x05, 0x2f, 0x48, 0xe2, 0x24, 0x3a, 0x6c, 0x4c, 0x31, 0x89, - 0x76, 0x48, 0x94, 0x0e, 0x88, 0xdc, 0x77, 0xb6, 0x9b, 0x3e, 0xe9, 0x30, 0x26, 0xfb, 0x8f, 0x2c, - 0x38, 0xbb, 0x78, 0xbb, 0xbe, 0xe2, 0x3b, 0x71, 0xe2, 0x35, 0x96, 0xfc, 0xb0, 0x71, 0xb7, 0x9e, - 0x84, 0x11, 0xb9, 0x15, 0xfa, 0xad, 0x6d, 0x52, 0x67, 0xd3, 0x87, 0xce, 0xc3, 0xd0, 0x0e, 0xfb, - 0xbf, 0x56, 0x9d, 0xb1, 0xce, 0x5a, 0xe7, 0x2a, 0x4b, 0x13, 0x3f, 0xdc, 0x9b, 0xfb, 0xc0, 0xfe, - 0xde, 0xdc, 0xd0, 0x2d, 0x51, 0x8e, 0x15, 0x06, 0x7a, 0x1a, 0x06, 0x36, 0xe2, 0xf5, 0xdd, 0x26, - 0x99, 0x29, 0x31, 0xdc, 0x31, 0x81, 0x3b, 0xb0, 0x5a, 0xa7, 0xa5, 0x58, 0x40, 0xd1, 0x02, 0x54, - 0x9a, 0x4e, 0x94, 0x78, 0x89, 0x17, 0x06, 0x33, 0xe5, 0xb3, 0xd6, 0xb9, 0xfe, 0xa5, 0x49, 0x81, - 0x5a, 0xa9, 0x49, 0x00, 0x4e, 0x71, 0x68, 0x37, 0x22, 0xe2, 0xb8, 0x37, 0x02, 0x7f, 0x77, 0xa6, - 0xef, 0xac, 0x75, 0x6e, 0x28, 0xed, 0x06, 0x16, 0xe5, 0x58, 0x61, 0xd8, 0xdf, 0x2b, 0xc1, 0xd0, - 0xe2, 0xc6, 0x86, 0x17, 0x78, 0xc9, 0x2e, 0x7a, 0x07, 0x46, 0x82, 0xd0, 0x25, 0xf2, 0x3f, 0x1b, - 0xc5, 0xf0, 0x85, 0x67, 0xe7, 0x0f, 0x5b, 0x8a, 0xf3, 0xd7, 0xb5, 0x1a, 0x4b, 0x13, 0xfb, 0x7b, - 0x73, 0x23, 0x7a, 0x09, 0x36, 0x28, 0xa2, 0xb7, 0x60, 0xb8, 0x19, 0xba, 0xaa, 0x81, 0x12, 0x6b, - 0xe0, 0x99, 0xc3, 0x1b, 0xa8, 0xa5, 0x15, 0x96, 0xc6, 0xf7, 0xf7, 0xe6, 0x86, 0xb5, 0x02, 0xac, - 0x93, 0x43, 0x3e, 0x8c, 0xd3, 0xbf, 0x41, 0xe2, 0xa9, 0x16, 0xca, 0xac, 0x85, 0xe7, 0xf3, 0x5b, - 0xd0, 0x2a, 0x2d, 0x4d, 
0xed, 0xef, 0xcd, 0x8d, 0x67, 0x0a, 0x71, 0x96, 0xb4, 0xfd, 0x79, 0x18, - 0x5b, 0x4c, 0x12, 0xa7, 0xb1, 0x45, 0x5c, 0xfe, 0x7d, 0xd1, 0x4b, 0xd0, 0x17, 0x38, 0xdb, 0x44, - 0x7c, 0xfd, 0xb3, 0x62, 0xda, 0xfb, 0xae, 0x3b, 0xdb, 0xe4, 0x60, 0x6f, 0x6e, 0xe2, 0x66, 0xe0, - 0xbd, 0xd7, 0x12, 0x6b, 0x86, 0x96, 0x61, 0x86, 0x8d, 0x2e, 0x00, 0xb8, 0x64, 0xc7, 0x6b, 0x90, - 0x9a, 0x93, 0x6c, 0x89, 0xd5, 0x80, 0x44, 0x5d, 0xa8, 0x2a, 0x08, 0xd6, 0xb0, 0xec, 0x2f, 0x59, - 0x50, 0x59, 0xdc, 0x09, 0x3d, 0xb7, 0x16, 0xba, 0x31, 0x6a, 0xc1, 0x78, 0x33, 0x22, 0x1b, 0x24, - 0x52, 0x45, 0x33, 0xd6, 0xd9, 0xf2, 0xb9, 0xe1, 0x0b, 0x17, 0x72, 0xc6, 0x6d, 0x56, 0x5a, 0x09, - 0x92, 0x68, 0x77, 0xe9, 0x11, 0xd1, 0xf4, 0x78, 0x06, 0x8a, 0xb3, 0x6d, 0xd8, 0xbf, 0x54, 0x82, - 0x53, 0x8b, 0x9f, 0x6f, 0x45, 0xa4, 0xea, 0xc5, 0x77, 0xb3, 0x5b, 0xc1, 0xf5, 0xe2, 0xbb, 0xd7, - 0xd3, 0xc9, 0x50, 0x6b, 0xb0, 0x2a, 0xca, 0xb1, 0xc2, 0x40, 0xcf, 0xc3, 0x20, 0xfd, 0x7d, 0x13, - 0xaf, 0x89, 0xd1, 0x4f, 0x09, 0xe4, 0xe1, 0xaa, 0x93, 0x38, 0x55, 0x0e, 0xc2, 0x12, 0x07, 0x5d, - 0x83, 0xe1, 0x06, 0xdb, 0xef, 0x9b, 0xd7, 0x42, 0x97, 0xb0, 0x2f, 0x5c, 0x59, 0x7a, 0x8e, 0xa2, - 0x2f, 0xa7, 0xc5, 0x07, 0x7b, 0x73, 0x33, 0xbc, 0x6f, 0x82, 0x84, 0x06, 0xc3, 0x7a, 0x7d, 0x64, - 0xab, 0x8d, 0xd8, 0xc7, 0x28, 0x41, 0x87, 0x4d, 0x78, 0x4e, 0xdb, 0x53, 0xfd, 0x6c, 0x4f, 0x8d, - 0x74, 0xd9, 0x4f, 0xff, 0xd8, 0x12, 0x73, 0xb2, 0xea, 0xf9, 0x26, 0x7b, 0xb8, 0x00, 0x10, 0x93, - 0x46, 0x44, 0x12, 0x6d, 0x56, 0xd4, 0x67, 0xae, 0x2b, 0x08, 0xd6, 0xb0, 0xe8, 0xe6, 0x8f, 0xb7, - 0x9c, 0x88, 0xad, 0x16, 0x31, 0x37, 0x6a, 0xf3, 0xd7, 0x25, 0x00, 0xa7, 0x38, 0xc6, 0xe6, 0x2f, - 0xe7, 0x6e, 0xfe, 0xdf, 0xb1, 0x60, 0x70, 0xc9, 0x0b, 0x5c, 0x2f, 0xd8, 0x44, 0xef, 0xc0, 0x10, - 0xe5, 0xe8, 0xae, 0x93, 0x38, 0x62, 0xdf, 0x7f, 0x44, 0x2e, 0x1e, 0x9d, 0xc1, 0xca, 0xe5, 0x13, - 0xcf, 0x53, 0x6c, 0xba, 0x88, 0x6e, 0xdc, 0x79, 0x97, 0x34, 0x92, 0x6b, 0x24, 0x71, 0xd2, 0xe1, - 0xa4, 0x65, 0x58, 0x51, 0x45, 0x37, 0x61, 0x20, 0x71, 0xa2, 0x4d, 0x92, 0x88, 0x6d, 0x9f, 0xb3, - 0x29, 0x39, 0x0d, 0x4c, 0x97, 0x1c, 0x09, 0x1a, 0x24, 0x65, 0x90, 0xeb, 0x8c, 0x08, 0x16, 0xc4, - 0xec, 0x06, 0x8c, 0x2c, 0x3b, 0x4d, 0xe7, 0x8e, 0xe7, 0x7b, 0x89, 0x47, 0x62, 0xf4, 0x61, 0x28, - 0x3b, 0xae, 0xcb, 0x36, 0x40, 0x65, 0xe9, 0xd4, 0xfe, 0xde, 0x5c, 0x79, 0xd1, 0x75, 0x0f, 0xf6, - 0xe6, 0x40, 0x61, 0xed, 0x62, 0x8a, 0x81, 0x9e, 0x85, 0x3e, 0x37, 0x0a, 0x9b, 0x33, 0x25, 0x86, - 0x79, 0x9a, 0xee, 0xd4, 0x6a, 0x14, 0x36, 0x33, 0xa8, 0x0c, 0xc7, 0xfe, 0x41, 0x09, 0xd0, 0x32, - 0x69, 0x6e, 0xad, 0xd6, 0x8d, 0x6f, 0x7a, 0x0e, 0x86, 0xb6, 0xc3, 0xc0, 0x4b, 0xc2, 0x28, 0x16, - 0x0d, 0xb2, 0x75, 0x71, 0x4d, 0x94, 0x61, 0x05, 0x45, 0x67, 0xa1, 0xaf, 0x99, 0x6e, 0xef, 0x11, - 0xc9, 0x1a, 0xd8, 0xc6, 0x66, 0x10, 0x8a, 0xd1, 0x8a, 0x49, 0x24, 0xd6, 0xb3, 0xc2, 0xb8, 0x19, - 0x93, 0x08, 0x33, 0x48, 0xba, 0x82, 0xe8, 0xda, 0x12, 0xab, 0x35, 0xb3, 0x82, 0x28, 0x04, 0x6b, - 0x58, 0xe8, 0x6d, 0xa8, 0xf0, 0x7f, 0x98, 0x6c, 0xb0, 0xa5, 0x9b, 0xcb, 0x14, 0xae, 0x86, 0x0d, - 0xc7, 0xcf, 0x4e, 0xfe, 0x28, 0x5b, 0x71, 0x92, 0x10, 0x4e, 0x69, 0x1a, 0x2b, 0x6e, 0x20, 0x77, - 0xc5, 0xfd, 0x1d, 0x0b, 0xd0, 0xb2, 0x17, 0xb8, 0x24, 0x3a, 0x81, 0xa3, 0xb3, 0xb7, 0xcd, 0xf0, - 0x27, 0xb4, 0x6b, 0xe1, 0x76, 0x33, 0x0c, 0x48, 0x90, 0x2c, 0x87, 0x81, 0xcb, 0x8f, 0xd3, 0x8f, - 0x43, 0x5f, 0x42, 0x9b, 0xe2, 0xdd, 0x7a, 0x5a, 0x7e, 0x16, 0xda, 0xc0, 0xc1, 0xde, 0xdc, 0xe9, - 0xf6, 0x1a, 0xac, 0x0b, 0xac, 0x0e, 0xfa, 0x18, 0x0c, 0xc4, 0x89, 0x93, 0xb4, 0x62, 0xd1, 0xd1, - 0x27, 0x64, 0x47, 0xeb, 0xac, 0xf4, 0x60, 0x6f, 
0x6e, 0x5c, 0x55, 0xe3, 0x45, 0x58, 0x54, 0x40, - 0xcf, 0xc0, 0xe0, 0x36, 0x89, 0x63, 0x67, 0x53, 0x32, 0xb8, 0x71, 0x51, 0x77, 0xf0, 0x1a, 0x2f, - 0xc6, 0x12, 0x8e, 0x9e, 0x84, 0x7e, 0x12, 0x45, 0x61, 0x24, 0x56, 0xc4, 0xa8, 0x40, 0xec, 0x5f, - 0xa1, 0x85, 0x98, 0xc3, 0xec, 0xff, 0x62, 0xc1, 0xb8, 0xea, 0x2b, 0x6f, 0xeb, 0x04, 0xb6, 0xbc, - 0x0b, 0xd0, 0x90, 0x03, 0x8c, 0xd9, 0x46, 0xd3, 0xda, 0xe8, 0xbc, 0xfc, 0xda, 0x27, 0x34, 0x6d, - 0x43, 0x15, 0xc5, 0x58, 0xa3, 0x6b, 0xff, 0x3b, 0x0b, 0xa6, 0x32, 0x63, 0xbb, 0xea, 0xc5, 0x09, - 0x7a, 0xab, 0x6d, 0x7c, 0xf3, 0xc5, 0xc6, 0x47, 0x6b, 0xb3, 0xd1, 0xa9, 0xf5, 0x22, 0x4b, 0xb4, - 0xb1, 0x61, 0xe8, 0xf7, 0x12, 0xb2, 0x2d, 0x87, 0xf5, 0x7c, 0xc1, 0x61, 0xf1, 0xfe, 0xa5, 0x5f, - 0x69, 0x8d, 0xd2, 0xc0, 0x9c, 0x94, 0xfd, 0xbf, 0x2c, 0xa8, 0x2c, 0x87, 0xc1, 0x86, 0xb7, 0x79, - 0xcd, 0x69, 0x9e, 0xc0, 0xf7, 0xa9, 0x43, 0x1f, 0xa3, 0xce, 0x87, 0xf0, 0x42, 0xde, 0x10, 0x44, - 0xc7, 0xe6, 0xe9, 0x99, 0xca, 0x85, 0x05, 0xc5, 0xa6, 0x68, 0x11, 0x66, 0xc4, 0x66, 0x5f, 0x81, - 0x8a, 0x42, 0x40, 0x13, 0x50, 0xbe, 0x4b, 0xb8, 0x24, 0x59, 0xc1, 0xf4, 0x27, 0x9a, 0x86, 0xfe, - 0x1d, 0xc7, 0x6f, 0x89, 0xcd, 0x8b, 0xf9, 0x9f, 0x8f, 0x97, 0x2e, 0x5a, 0xf6, 0x0f, 0xd8, 0x0e, - 0x14, 0x8d, 0xac, 0x04, 0x3b, 0x82, 0x39, 0x7c, 0xd9, 0x82, 0x69, 0xbf, 0x03, 0x53, 0x12, 0x73, - 0x72, 0x14, 0x76, 0xf6, 0x98, 0xe8, 0xf6, 0x74, 0x27, 0x28, 0xee, 0xd8, 0x1a, 0xe5, 0xf5, 0x61, - 0x93, 0x2e, 0x38, 0xc7, 0x67, 0x5d, 0x17, 0x32, 0xc0, 0x0d, 0x51, 0x86, 0x15, 0xd4, 0xfe, 0x73, - 0x0b, 0xa6, 0xd5, 0x38, 0xae, 0x90, 0xdd, 0x3a, 0xf1, 0x49, 0x23, 0x09, 0xa3, 0x87, 0x65, 0x24, - 0x8f, 0xf3, 0x6f, 0xc2, 0x79, 0xd2, 0xb0, 0x20, 0x50, 0xbe, 0x42, 0x76, 0xf9, 0x07, 0xd2, 0x07, - 0x5a, 0x3e, 0x74, 0xa0, 0xbf, 0x65, 0xc1, 0xa8, 0x1a, 0xe8, 0x09, 0x6c, 0xb9, 0xab, 0xe6, 0x96, - 0xfb, 0x70, 0xc1, 0xf5, 0xda, 0x65, 0xb3, 0xfd, 0xed, 0x12, 0x65, 0x1b, 0x02, 0xa7, 0x16, 0x85, - 0x74, 0x92, 0x28, 0xc7, 0x7f, 0x48, 0xbe, 0x52, 0x6f, 0x83, 0xbd, 0x42, 0x76, 0xd7, 0x43, 0x2a, - 0x4d, 0x74, 0x1e, 0xac, 0xf1, 0x51, 0xfb, 0x0e, 0xfd, 0xa8, 0xbf, 0x5f, 0x82, 0x53, 0x6a, 0x5a, - 0x8c, 0x53, 0xfa, 0x67, 0x72, 0x62, 0x5e, 0x80, 0x61, 0x97, 0x6c, 0x38, 0x2d, 0x3f, 0x51, 0xda, - 0x44, 0x3f, 0x57, 0x33, 0xab, 0x69, 0x31, 0xd6, 0x71, 0x7a, 0x98, 0xcb, 0x6f, 0x0c, 0x33, 0x7e, - 0x9e, 0x38, 0x74, 0xd5, 0x53, 0x09, 0x4f, 0x53, 0x0f, 0x47, 0x74, 0xf5, 0x50, 0xa8, 0x82, 0x4f, - 0x42, 0xbf, 0xb7, 0x4d, 0xcf, 0xfc, 0x92, 0x79, 0x94, 0xaf, 0xd1, 0x42, 0xcc, 0x61, 0xe8, 0x29, - 0x18, 0x6c, 0x84, 0xdb, 0xdb, 0x4e, 0xe0, 0xce, 0x94, 0x99, 0xcc, 0x39, 0x4c, 0xc5, 0x82, 0x65, - 0x5e, 0x84, 0x25, 0x0c, 0x3d, 0x06, 0x7d, 0x4e, 0xb4, 0x19, 0xcf, 0xf4, 0x31, 0x9c, 0x21, 0xda, - 0xd2, 0x62, 0xb4, 0x19, 0x63, 0x56, 0x4a, 0x65, 0xc9, 0x7b, 0x61, 0x74, 0xd7, 0x0b, 0x36, 0xab, - 0x5e, 0xc4, 0x04, 0x43, 0x4d, 0x96, 0xbc, 0xad, 0x20, 0x58, 0xc3, 0x42, 0x35, 0xe8, 0x6f, 0x86, - 0x51, 0x12, 0xcf, 0x0c, 0xb0, 0x89, 0x7f, 0x2e, 0x77, 0xfb, 0xf1, 0x71, 0xd7, 0xc2, 0x28, 0x49, - 0x87, 0x42, 0xff, 0xc5, 0x98, 0x13, 0x42, 0xcb, 0x50, 0x26, 0xc1, 0xce, 0xcc, 0x20, 0xa3, 0xf7, - 0xa1, 0xc3, 0xe9, 0xad, 0x04, 0x3b, 0xb7, 0x9c, 0x28, 0xe5, 0x57, 0x2b, 0xc1, 0x0e, 0xa6, 0xb5, - 0x51, 0x03, 0x2a, 0xd2, 0x84, 0x15, 0xcf, 0x0c, 0x15, 0x59, 0x8a, 0x58, 0xa0, 0x63, 0xf2, 0x5e, - 0xcb, 0x8b, 0xc8, 0x36, 0x09, 0x92, 0x38, 0x55, 0xac, 0x24, 0x34, 0xc6, 0x29, 0x5d, 0xd4, 0x80, - 0x11, 0x2e, 0x7f, 0x5e, 0x0b, 0x5b, 0x41, 0x12, 0xcf, 0x54, 0x58, 0x97, 0x73, 0x2c, 0x17, 0xb7, - 0xd2, 0x1a, 0x4b, 0xd3, 0x82, 0xfc, 0x88, 0x56, 0x18, 0x63, 0x83, 0x28, 
0x7a, 0x0b, 0x46, 0x7d, - 0x6f, 0x87, 0x04, 0x24, 0x8e, 0x6b, 0x51, 0x78, 0x87, 0xcc, 0x00, 0x1b, 0xcd, 0x93, 0x79, 0x5a, - 0x7c, 0x78, 0x87, 0x2c, 0x4d, 0xee, 0xef, 0xcd, 0x8d, 0x5e, 0xd5, 0x6b, 0x63, 0x93, 0x18, 0x7a, - 0x1b, 0xc6, 0xa8, 0xb0, 0xeb, 0xa5, 0xe4, 0x87, 0x8b, 0x93, 0x47, 0xfb, 0x7b, 0x73, 0x63, 0xd8, - 0xa8, 0x8e, 0x33, 0xe4, 0xd0, 0x3a, 0x54, 0x7c, 0x6f, 0x83, 0x34, 0x76, 0x1b, 0x3e, 0x99, 0x19, - 0x61, 0xb4, 0x73, 0x36, 0xe7, 0x55, 0x89, 0xce, 0x15, 0x0c, 0xf5, 0x17, 0xa7, 0x84, 0xd0, 0x2d, - 0x38, 0x9d, 0x90, 0x68, 0xdb, 0x0b, 0x1c, 0xba, 0xa9, 0x84, 0xf4, 0xcb, 0x4c, 0x25, 0xa3, 0x6c, - 0xd5, 0x9e, 0x11, 0x13, 0x7b, 0x7a, 0xbd, 0x23, 0x16, 0xee, 0x52, 0x1b, 0xdd, 0x80, 0x71, 0xb6, - 0x9f, 0x6a, 0x2d, 0xdf, 0xaf, 0x85, 0xbe, 0xd7, 0xd8, 0x9d, 0x19, 0x63, 0x04, 0x9f, 0x92, 0x06, - 0x90, 0x35, 0x13, 0x4c, 0x15, 0xc3, 0xf4, 0x1f, 0xce, 0xd6, 0x46, 0x3e, 0x8c, 0xc7, 0xa4, 0xd1, - 0x8a, 0xbc, 0x64, 0x97, 0xae, 0x7d, 0x72, 0x3f, 0x99, 0x19, 0x2f, 0xa2, 0xe8, 0xd6, 0xcd, 0x4a, - 0xdc, 0xfa, 0x94, 0x29, 0xc4, 0x59, 0xd2, 0x94, 0x55, 0xc4, 0x89, 0xeb, 0x05, 0x33, 0x13, 0x8c, - 0x03, 0xa9, 0xfd, 0x55, 0xa7, 0x85, 0x98, 0xc3, 0x98, 0xfd, 0x80, 0xfe, 0xb8, 0x41, 0xb9, 0xf4, - 0x24, 0x43, 0x4c, 0xed, 0x07, 0x12, 0x80, 0x53, 0x1c, 0x2a, 0x1a, 0x24, 0xc9, 0xee, 0x0c, 0x62, - 0xa8, 0x6a, 0xab, 0xad, 0xaf, 0x7f, 0x06, 0xd3, 0x72, 0x74, 0x0b, 0x06, 0x49, 0xb0, 0xb3, 0x1a, - 0x85, 0xdb, 0x33, 0x53, 0x45, 0x78, 0xc0, 0x0a, 0x47, 0xe6, 0xe7, 0x47, 0xaa, 0xc2, 0x88, 0x62, - 0x2c, 0x89, 0xa1, 0xfb, 0x30, 0xd3, 0xe1, 0x2b, 0xf1, 0x8f, 0x32, 0xcd, 0x3e, 0xca, 0x27, 0x44, - 0xdd, 0x99, 0xf5, 0x2e, 0x78, 0x07, 0x87, 0xc0, 0x70, 0x57, 0xea, 0xf6, 0x1d, 0x18, 0x53, 0x8c, - 0x8a, 0x7d, 0x6f, 0x34, 0x07, 0xfd, 0x94, 0x17, 0x4b, 0x85, 0xbe, 0x42, 0x27, 0x95, 0xb2, 0xe8, - 0x18, 0xf3, 0x72, 0x36, 0xa9, 0xde, 0xe7, 0xc9, 0xd2, 0x6e, 0x42, 0xb8, 0x62, 0x57, 0xd6, 0x26, - 0x55, 0x02, 0x70, 0x8a, 0x63, 0xff, 0x1f, 0x2e, 0x26, 0xa5, 0xdc, 0xb0, 0xc0, 0x49, 0x70, 0x1e, - 0x86, 0xb6, 0xc2, 0x38, 0xa1, 0xd8, 0xac, 0x8d, 0xfe, 0x54, 0x30, 0xba, 0x2c, 0xca, 0xb1, 0xc2, - 0x40, 0xaf, 0xc2, 0x68, 0x43, 0x6f, 0x40, 0x1c, 0x63, 0xa7, 0x44, 0x15, 0xb3, 0x75, 0x6c, 0xe2, - 0xa2, 0x8b, 0x30, 0xc4, 0xac, 0xdc, 0x8d, 0xd0, 0x17, 0x2a, 0xa4, 0x3c, 0x95, 0x87, 0x6a, 0xa2, - 0xfc, 0x40, 0xfb, 0x8d, 0x15, 0x36, 0x55, 0xc4, 0x69, 0x17, 0xd6, 0x6a, 0xe2, 0x00, 0x51, 0x8a, - 0xf8, 0x65, 0x56, 0x8a, 0x05, 0xd4, 0xfe, 0x17, 0x25, 0x6d, 0x96, 0xa9, 0x02, 0x44, 0xd0, 0x9b, - 0x30, 0x78, 0xcf, 0xf1, 0x12, 0x2f, 0xd8, 0x14, 0xd2, 0xc3, 0x8b, 0x05, 0x4f, 0x13, 0x56, 0xfd, - 0x36, 0xaf, 0xca, 0x4f, 0x3e, 0xf1, 0x07, 0x4b, 0x82, 0x94, 0x76, 0xd4, 0x0a, 0x02, 0x4a, 0xbb, - 0xd4, 0x3b, 0x6d, 0xcc, 0xab, 0x72, 0xda, 0xe2, 0x0f, 0x96, 0x04, 0xd1, 0x06, 0x80, 0x5c, 0x4b, - 0xc4, 0x15, 0xd6, 0xe5, 0x8f, 0xf6, 0x42, 0x7e, 0x5d, 0xd5, 0x5e, 0x1a, 0xa3, 0x67, 0x6d, 0xfa, - 0x1f, 0x6b, 0x94, 0xed, 0x84, 0x09, 0x61, 0xed, 0xdd, 0x42, 0x9f, 0xa5, 0x5b, 0xda, 0x89, 0x12, - 0xe2, 0x2e, 0x26, 0x59, 0x03, 0xfd, 0xe1, 0x22, 0xf6, 0xba, 0xb7, 0x4d, 0xf4, 0xed, 0x2f, 0x88, - 0xe0, 0x94, 0x9e, 0xfd, 0xdd, 0x32, 0xcc, 0x74, 0xeb, 0x2e, 0x5d, 0x92, 0xe4, 0xbe, 0x97, 0x2c, - 0x53, 0x31, 0xc9, 0x32, 0x97, 0xe4, 0x8a, 0x28, 0xc7, 0x0a, 0x83, 0xae, 0x8d, 0xd8, 0xdb, 0x94, - 0xca, 0x52, 0x7f, 0xba, 0x36, 0xea, 0xac, 0x14, 0x0b, 0x28, 0xc5, 0x8b, 0x88, 0x13, 0x8b, 0xcb, - 0x0d, 0x6d, 0x0d, 0x61, 0x56, 0x8a, 0x05, 0x54, 0x37, 0x88, 0xf4, 0xe5, 0x18, 0x44, 0x8c, 0x29, - 0xea, 0x7f, 0xb0, 0x53, 0x84, 0x3e, 0x07, 0xb0, 0xe1, 0x05, 0x5e, 0xbc, 0xc5, 0xa8, 0x0f, 0xf4, - 
0x4c, 0x5d, 0x09, 0x59, 0xab, 0x8a, 0x0a, 0xd6, 0x28, 0xa2, 0x97, 0x61, 0x58, 0x6d, 0xcf, 0xb5, - 0xea, 0xcc, 0xa0, 0x69, 0x10, 0x4f, 0x79, 0x55, 0x15, 0xeb, 0x78, 0xf6, 0xbb, 0xd9, 0xf5, 0x22, - 0x76, 0x85, 0x36, 0xbf, 0x56, 0xd1, 0xf9, 0x2d, 0x1d, 0x3e, 0xbf, 0xf6, 0x7f, 0x2e, 0xc3, 0xb8, - 0xd1, 0x58, 0x2b, 0x2e, 0xc0, 0xd1, 0x5e, 0xa7, 0x07, 0x96, 0x93, 0x10, 0xb1, 0x27, 0xcf, 0xf7, - 0xb2, 0x69, 0xf4, 0xe3, 0x8d, 0xee, 0x05, 0x4e, 0x09, 0x6d, 0x41, 0xc5, 0x77, 0x62, 0x66, 0x52, - 0x21, 0x62, 0x2f, 0xf6, 0x46, 0x36, 0x55, 0x3f, 0x9c, 0x38, 0xd1, 0x4e, 0x0f, 0xde, 0x4a, 0x4a, - 0x9c, 0x9e, 0xb6, 0x54, 0xd8, 0x91, 0x37, 0x6a, 0xaa, 0x3b, 0x54, 0x22, 0xda, 0xc5, 0x1c, 0x86, - 0x2e, 0xc2, 0x48, 0x44, 0xd8, 0x4a, 0x59, 0xa6, 0xf2, 0x1c, 0x5b, 0x7a, 0xfd, 0xa9, 0xe0, 0x87, - 0x35, 0x18, 0x36, 0x30, 0x53, 0xb9, 0x7f, 0xe0, 0x10, 0xb9, 0xff, 0x19, 0x18, 0x64, 0x3f, 0xd4, - 0xaa, 0x50, 0x5f, 0x68, 0x8d, 0x17, 0x63, 0x09, 0xcf, 0x2e, 0xa2, 0xa1, 0x82, 0x8b, 0xe8, 0x59, - 0x18, 0xab, 0x3a, 0x64, 0x3b, 0x0c, 0x56, 0x02, 0xb7, 0x19, 0x7a, 0x41, 0x82, 0x66, 0xa0, 0x8f, - 0x9d, 0x27, 0x7c, 0xbf, 0xf7, 0x51, 0x0a, 0xb8, 0x8f, 0xca, 0xee, 0xf6, 0x9f, 0x94, 0x60, 0xb4, - 0x4a, 0x7c, 0x92, 0x10, 0xae, 0xf7, 0xc4, 0x68, 0x15, 0xd0, 0x66, 0xe4, 0x34, 0x48, 0x8d, 0x44, - 0x5e, 0xe8, 0xd6, 0x49, 0x23, 0x0c, 0xd8, 0x45, 0x14, 0x3d, 0x20, 0x4f, 0xef, 0xef, 0xcd, 0xa1, - 0x4b, 0x6d, 0x50, 0xdc, 0xa1, 0x06, 0x72, 0x61, 0xb4, 0x19, 0x11, 0xc3, 0x6e, 0x68, 0xe5, 0x8b, - 0x1a, 0x35, 0xbd, 0x0a, 0x97, 0x86, 0x8d, 0x22, 0x6c, 0x12, 0x45, 0x9f, 0x82, 0x89, 0x30, 0x6a, - 0x6e, 0x39, 0x41, 0x95, 0x34, 0x49, 0xe0, 0x52, 0x15, 0x40, 0x58, 0x3b, 0xa6, 0xf7, 0xf7, 0xe6, - 0x26, 0x6e, 0x64, 0x60, 0xb8, 0x0d, 0x1b, 0xbd, 0x09, 0x93, 0xcd, 0x28, 0x6c, 0x3a, 0x9b, 0x6c, - 0xc9, 0x08, 0x69, 0x85, 0xf3, 0xa6, 0xf3, 0xfb, 0x7b, 0x73, 0x93, 0xb5, 0x2c, 0xf0, 0x60, 0x6f, - 0x6e, 0x8a, 0x4d, 0x19, 0x2d, 0x49, 0x81, 0xb8, 0x9d, 0x8c, 0xfd, 0x1e, 0x9c, 0xaa, 0x86, 0xf7, - 0x82, 0x7b, 0x4e, 0xe4, 0x2e, 0xd6, 0xd6, 0x34, 0xe3, 0xc4, 0x1b, 0x52, 0xf9, 0xe5, 0x17, 0x7c, - 0x39, 0x27, 0x9b, 0x46, 0x83, 0xab, 0x1d, 0xab, 0x9e, 0x4f, 0xba, 0x98, 0x43, 0xfe, 0x49, 0xc9, - 0x68, 0x33, 0xc5, 0x57, 0x77, 0x17, 0x56, 0xd7, 0xbb, 0x8b, 0xcf, 0xc2, 0xd0, 0x86, 0x47, 0x7c, - 0x17, 0x93, 0x0d, 0xf1, 0xb5, 0x5e, 0x28, 0x72, 0xb9, 0xb3, 0x4a, 0xeb, 0x48, 0xeb, 0x18, 0x57, - 0xa2, 0x57, 0x05, 0x19, 0xac, 0x08, 0xa2, 0x16, 0x4c, 0x48, 0x3d, 0x4c, 0x42, 0xc5, 0x66, 0x7f, - 0xb1, 0x98, 0x9a, 0x67, 0x36, 0xc3, 0x3e, 0x2f, 0xce, 0x10, 0xc4, 0x6d, 0x4d, 0x50, 0xfd, 0x79, - 0x9b, 0x1e, 0x75, 0x7d, 0x6c, 0xe9, 0x33, 0xfd, 0x99, 0x99, 0x02, 0x58, 0xa9, 0xfd, 0x6b, 0x16, - 0x3c, 0xd2, 0x36, 0x5b, 0xc2, 0x4e, 0x72, 0x6c, 0xdf, 0x28, 0x6b, 0xac, 0x28, 0xe5, 0x1b, 0x2b, - 0xec, 0x1b, 0x30, 0xbd, 0xb2, 0xdd, 0x4c, 0x76, 0xab, 0x9e, 0x79, 0xe5, 0xf2, 0x0a, 0x0c, 0x6c, - 0x13, 0xd7, 0x6b, 0x6d, 0x8b, 0xcf, 0x3a, 0x27, 0xcf, 0x85, 0x6b, 0xac, 0xf4, 0x60, 0x6f, 0x6e, - 0xb4, 0x9e, 0x84, 0x91, 0xb3, 0x49, 0x78, 0x01, 0x16, 0xe8, 0xf6, 0x8f, 0x2d, 0x18, 0x97, 0xfc, - 0x61, 0xd1, 0x75, 0x23, 0x12, 0xc7, 0x68, 0x16, 0x4a, 0x5e, 0x53, 0x10, 0x02, 0x41, 0xa8, 0xb4, - 0x56, 0xc3, 0x25, 0xaf, 0x89, 0xde, 0x84, 0x0a, 0xbf, 0xa9, 0x4b, 0x17, 0x47, 0x8f, 0x37, 0x7f, - 0x4c, 0x37, 0x5c, 0x97, 0x34, 0x70, 0x4a, 0x4e, 0x4a, 0xc9, 0xec, 0xe4, 0x29, 0x9b, 0xf7, 0x46, - 0x97, 0x45, 0x39, 0x56, 0x18, 0xe8, 0x1c, 0x0c, 0x05, 0xa1, 0xcb, 0x2f, 0x53, 0xf9, 0x3e, 0x65, - 0x4b, 0xee, 0xba, 0x28, 0xc3, 0x0a, 0x6a, 0x7f, 0xd5, 0x82, 0x11, 0x39, 0xc6, 0x82, 0x02, 0x3b, - 0xdd, 0x24, 0xa9, 0xb0, 
-	(remaining bytes of the removed gzipped FileDescriptorProto omitted)
-	0x5f, 0x32, 0x87, 0xa4, 0xbe, 0xc8, 0x00, 0x00,
+func init() {
+	proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto", fileDescriptorGenerated)
+}
+
+var fileDescriptorGenerated = []byte{
+	// 12696 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x6b, 0x6c, 0x24, 0x57,
+	(remaining bytes of the regenerated gzipped FileDescriptorProto omitted)
0x6f, 0xb7, 0x7d, 0x4c, + 0xcb, 0x34, 0x0b, 0xc1, 0x62, 0xdf, 0x16, 0x82, 0x3d, 0xdd, 0xab, 0x50, 0x05, 0x06, 0xee, 0xdf, + 0x77, 0x9b, 0xdc, 0x88, 0x5d, 0xe8, 0xfd, 0xdf, 0x7a, 0x6b, 0xb5, 0x1a, 0x61, 0x5e, 0x6e, 0x7f, + 0xb5, 0x08, 0xb3, 0x2b, 0x1e, 0x79, 0xf0, 0x01, 0x0d, 0xf9, 0xfb, 0xb5, 0x6f, 0x3c, 0x19, 0xbf, + 0x78, 0x52, 0x1b, 0xd6, 0xde, 0xe3, 0xb1, 0x05, 0x43, 0xfc, 0x31, 0x5b, 0x9a, 0xf5, 0xbf, 0x96, + 0xd5, 0x7a, 0xfe, 0x80, 0xcc, 0xf1, 0x47, 0x71, 0x61, 0xce, 0xaf, 0x6e, 0x5a, 0x51, 0x8a, 0x25, + 0xf1, 0xd9, 0xcf, 0xc0, 0xa8, 0x8e, 0x79, 0x22, 0x6b, 0xf2, 0xbf, 0x54, 0x84, 0x49, 0xda, 0x83, + 0x47, 0x3a, 0x11, 0x77, 0x3b, 0x27, 0xe2, 0xb4, 0x2d, 0x8a, 0x7b, 0xcf, 0xc6, 0x3b, 0xe9, 0xd9, + 0x78, 0x21, 0x6f, 0x36, 0xce, 0x7a, 0x0e, 0xbe, 0xd7, 0x82, 0x73, 0x2b, 0x5e, 0xd0, 0xd8, 0x4d, + 0x59, 0xfd, 0xbe, 0x0c, 0x23, 0xf4, 0x1c, 0x8f, 0x0c, 0x2f, 0x22, 0xc3, 0xaf, 0x4c, 0x80, 0xb0, + 0x8e, 0xa7, 0x55, 0xbb, 0x7b, 0x77, 0xb5, 0x9a, 0xe5, 0x8e, 0x26, 0x40, 0x58, 0xc7, 0xb3, 0xbf, + 0x61, 0xc1, 0xa5, 0x1b, 0x4b, 0xcb, 0xc9, 0x52, 0xec, 0xf0, 0x88, 0xa3, 0x52, 0x60, 0x53, 0xeb, + 0x4a, 0x22, 0x05, 0x56, 0x59, 0x2f, 0x04, 0xf4, 0xa3, 0xe2, 0xed, 0xf9, 0xb3, 0x16, 0x9c, 0xbb, + 0xe1, 0xc6, 0xf4, 0x5a, 0x4e, 0xfb, 0x66, 0xd1, 0x7b, 0x39, 0x72, 0xe3, 0x20, 0x3c, 0x48, 0xfb, + 0x66, 0x61, 0x05, 0xc1, 0x1a, 0x16, 0x6f, 0x79, 0xdf, 0x65, 0x66, 0x54, 0x05, 0x53, 0x15, 0x85, + 0x45, 0x39, 0x56, 0x18, 0xf4, 0xc3, 0x9a, 0x6e, 0xc8, 0x44, 0x89, 0x03, 0x71, 0xc2, 0xaa, 0x0f, + 0xab, 0x4a, 0x00, 0x4e, 0x70, 0xec, 0x9f, 0xb0, 0x60, 0xfa, 0x86, 0xd7, 0x8e, 0x62, 0x12, 0x6e, + 0x45, 0x46, 0x67, 0x5f, 0x84, 0x32, 0x91, 0xe2, 0xba, 0xe8, 0xab, 0x62, 0x30, 0x95, 0x1c, 0xcf, + 0x1d, 0xc3, 0x14, 0x5e, 0x1f, 0x9e, 0x03, 0x27, 0x73, 0x1d, 0xfb, 0xc5, 0x02, 0x8c, 0xdd, 0xdc, + 0xd8, 0xa8, 0xdd, 0x20, 0xb1, 0xb8, 0xc5, 0x7a, 0xab, 0x9a, 0xb1, 0xa6, 0x31, 0xeb, 0x26, 0x14, + 0xb5, 0x63, 0xd7, 0x9b, 0xe3, 0x9e, 0xc8, 0x73, 0xab, 0x7e, 0x7c, 0x27, 0xac, 0xc7, 0xa1, 0xeb, + 0x6f, 0x67, 0xea, 0xd8, 0xe4, 0x5d, 0x5b, 0xcc, 0xbb, 0x6b, 0xd1, 0x8b, 0x30, 0xc8, 0x5c, 0xa1, + 0xa5, 0x78, 0xf2, 0xb8, 0x92, 0x29, 0x58, 0xe9, 0xf1, 0x61, 0xa5, 0x7c, 0x17, 0xaf, 0xf2, 0x3f, + 0x58, 0xa0, 0xa2, 0xbb, 0x30, 0xb2, 0x13, 0xc7, 0xad, 0x9b, 0xc4, 0x69, 0x92, 0x50, 0x9e, 0x0e, + 0x97, 0xb3, 0x4e, 0x07, 0x3a, 0x08, 0x1c, 0x2d, 0xd9, 0x50, 0x49, 0x59, 0x84, 0x75, 0x3a, 0x76, + 0x1d, 0x20, 0x81, 0x9d, 0x92, 0x7e, 0xc1, 0xfe, 0x03, 0x0b, 0x86, 0xb8, 0x57, 0x5a, 0x88, 0x3e, + 0x0b, 0x25, 0xf2, 0x80, 0x34, 0x04, 0xe7, 0x98, 0xd9, 0xe1, 0x84, 0xf1, 0xe0, 0xda, 0x72, 0xfa, + 0x1f, 0xb3, 0x5a, 0xe8, 0x26, 0x0c, 0xd1, 0xde, 0xde, 0x50, 0x2e, 0x7a, 0x4f, 0xe6, 0x7d, 0xb1, + 0x9a, 0x76, 0xce, 0xab, 0x88, 0x22, 0x2c, 0xab, 0x33, 0xcd, 0x6f, 0xa3, 0x55, 0xa7, 0x07, 0x58, + 0xdc, 0xed, 0x9e, 0xdd, 0x58, 0xaa, 0x71, 0x24, 0x41, 0x8d, 0x6b, 0x7e, 0x65, 0x21, 0x4e, 0x88, + 0xd8, 0x1b, 0x50, 0xa6, 0x93, 0xba, 0xe0, 0xb9, 0x4e, 0x77, 0xa5, 0xf3, 0xb3, 0x50, 0x96, 0x0a, + 0xe0, 0x48, 0x38, 0x36, 0x31, 0xaa, 0x52, 0x3f, 0x1c, 0xe1, 0x04, 0x6e, 0x6f, 0xc1, 0x79, 0xf6, + 0xf2, 0xef, 0xc4, 0x3b, 0xc6, 0x1e, 0xeb, 0xbd, 0x98, 0x9f, 0x13, 0x82, 0x18, 0x9f, 0x99, 0x19, + 0xcd, 0x77, 0x60, 0x54, 0x52, 0x4c, 0x84, 0x32, 0xfb, 0x8f, 0x4a, 0xf0, 0xf8, 0x6a, 0x3d, 0xdf, + 0x61, 0xf1, 0x55, 0x18, 0xe5, 0x6c, 0x1a, 0x5d, 0xda, 0x8e, 0x27, 0xda, 0x55, 0xef, 0x62, 0x1b, + 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x12, 0x14, 0xdd, 0xf7, 0xfc, 0xb4, 0x19, 0xee, 0xea, 0x9b, 0xeb, + 0x98, 0x96, 0x53, 0x30, 0xe5, 0xf8, 0xf8, 0x51, 0xaa, 0xc0, 0x8a, 0xeb, 0x7b, 0x1d, 0xc6, 0xdd, + 
0xa8, 0x11, 0xb9, 0xab, 0x3e, 0x3d, 0x67, 0x12, 0x67, 0xd7, 0x44, 0x49, 0x40, 0x3b, 0xad, 0xa0, + 0x38, 0x85, 0xad, 0x9d, 0xeb, 0x03, 0x7d, 0x73, 0x8d, 0x3d, 0x3d, 0x7d, 0x28, 0x43, 0xdc, 0x62, + 0x5f, 0x17, 0x31, 0xa3, 0x36, 0xc1, 0x10, 0xf3, 0x0f, 0x8e, 0xb0, 0x84, 0x51, 0x09, 0xac, 0xb1, + 0xe3, 0xb4, 0x16, 0xda, 0xf1, 0x4e, 0xd5, 0x8d, 0x1a, 0xc1, 0x3e, 0x09, 0x0f, 0x98, 0xf0, 0x3c, + 0x9c, 0x48, 0x60, 0x0a, 0xb0, 0x74, 0x73, 0xa1, 0x46, 0x31, 0x71, 0x67, 0x1d, 0x93, 0x2b, 0x84, + 0xd3, 0xe0, 0x0a, 0x17, 0x60, 0x42, 0x36, 0x53, 0x27, 0x11, 0xbb, 0x23, 0x46, 0x58, 0xc7, 0x94, + 0xa9, 0xad, 0x28, 0x56, 0xdd, 0x4a, 0xe3, 0xa3, 0x57, 0x60, 0xcc, 0xf5, 0xdd, 0xd8, 0x75, 0xe2, + 0x20, 0x64, 0x37, 0x2c, 0x97, 0x93, 0x99, 0x25, 0xdb, 0xaa, 0x0e, 0xc0, 0x26, 0x9e, 0xfd, 0x87, + 0x25, 0x98, 0x62, 0xd3, 0xf6, 0xad, 0x15, 0xf6, 0x91, 0x59, 0x61, 0x77, 0x3b, 0x57, 0xd8, 0x69, + 0xb0, 0xbb, 0x1f, 0xe6, 0x32, 0x7b, 0x17, 0xca, 0xca, 0x16, 0x58, 0x3a, 0x03, 0x58, 0x39, 0xce, + 0x00, 0xbd, 0xb9, 0x0f, 0xf9, 0x8c, 0x5b, 0xcc, 0x7c, 0xc6, 0xfd, 0xdb, 0x16, 0x24, 0x26, 0x91, + 0xe8, 0x26, 0x94, 0x5b, 0x01, 0x33, 0x3b, 0x08, 0xa5, 0x2d, 0xcf, 0xe3, 0x99, 0x17, 0x15, 0xbf, + 0x14, 0xf9, 0xf8, 0xd5, 0x64, 0x0d, 0x9c, 0x54, 0x46, 0x8b, 0x30, 0xd4, 0x0a, 0x49, 0x3d, 0x66, + 0x2e, 0xb0, 0x3d, 0xe9, 0xf0, 0x35, 0xc2, 0xf1, 0xb1, 0xac, 0x68, 0xff, 0x92, 0x05, 0xc0, 0x5f, + 0x4a, 0x1d, 0x7f, 0x9b, 0x9c, 0x81, 0xf6, 0xb7, 0x0a, 0xa5, 0xa8, 0x45, 0x1a, 0xdd, 0x0c, 0x42, + 0x92, 0xfe, 0xd4, 0x5b, 0xa4, 0x91, 0x0c, 0x38, 0xfd, 0x87, 0x59, 0x6d, 0xfb, 0xfb, 0x00, 0xc6, + 0x13, 0xb4, 0xd5, 0x98, 0xec, 0xa1, 0xe7, 0x0d, 0x97, 0xb8, 0x8b, 0x29, 0x97, 0xb8, 0x32, 0xc3, + 0xd6, 0x14, 0x8d, 0xef, 0x42, 0x71, 0xcf, 0x79, 0x20, 0x34, 0x49, 0xcf, 0x76, 0xef, 0x06, 0xa5, + 0x3f, 0xb7, 0xe6, 0x3c, 0xe0, 0x32, 0xd3, 0xb3, 0x72, 0x81, 0xac, 0x39, 0x0f, 0x8e, 0xb9, 0xd9, + 0x07, 0x3b, 0xa4, 0x6e, 0xbb, 0x51, 0xfc, 0x95, 0xff, 0x94, 0xfc, 0x67, 0xcb, 0x8e, 0x36, 0xc2, + 0xda, 0x72, 0x7d, 0xf1, 0x6e, 0xd8, 0x57, 0x5b, 0xae, 0x9f, 0x6e, 0xcb, 0xf5, 0xfb, 0x68, 0xcb, + 0xf5, 0xd1, 0xfb, 0x30, 0x24, 0xde, 0xe8, 0x99, 0xad, 0xb7, 0xa9, 0xa5, 0xca, 0x6b, 0x4f, 0x3c, + 0xf1, 0xf3, 0x36, 0xe7, 0xa5, 0x4c, 0x28, 0x4a, 0x7b, 0xb6, 0x2b, 0x1b, 0x44, 0x7f, 0xc3, 0x82, + 0x71, 0xf1, 0x1b, 0x93, 0xf7, 0xda, 0x24, 0x8a, 0x05, 0xef, 0xf9, 0xe9, 0xfe, 0xfb, 0x20, 0x2a, + 0xf2, 0xae, 0x7c, 0x5a, 0x1e, 0xb3, 0x26, 0xb0, 0x67, 0x8f, 0x52, 0xbd, 0x40, 0xff, 0xc8, 0x82, + 0xf3, 0x7b, 0xce, 0x03, 0xde, 0x22, 0x2f, 0xc3, 0x4e, 0xec, 0x06, 0xc2, 0x76, 0xfd, 0xb3, 0xfd, + 0x4d, 0x7f, 0x47, 0x75, 0xde, 0x49, 0x69, 0xe6, 0x7a, 0x3e, 0x0b, 0xa5, 0x67, 0x57, 0x33, 0xfb, + 0x35, 0xbb, 0x05, 0xc3, 0x72, 0xbd, 0x65, 0x48, 0xde, 0x55, 0x9d, 0xb1, 0x3e, 0xb1, 0x89, 0x84, + 0xee, 0x97, 0x46, 0xdb, 0x11, 0x6b, 0xed, 0x91, 0xb6, 0xf3, 0x2e, 0x8c, 0xea, 0x6b, 0xec, 0x91, + 0xb6, 0xf5, 0x1e, 0x9c, 0xcb, 0x58, 0x4b, 0x8f, 0xb4, 0xc9, 0xfb, 0x70, 0x31, 0x77, 0x7d, 0x3c, + 0xca, 0x86, 0xed, 0x5f, 0xb4, 0xf4, 0x73, 0xf0, 0x0c, 0x54, 0xf0, 0x4b, 0xa6, 0x0a, 0xfe, 0x72, + 0xf7, 0x9d, 0x93, 0xa3, 0x87, 0x7f, 0x47, 0xef, 0x34, 0x3d, 0xd5, 0xd1, 0x1b, 0x30, 0xe8, 0xd1, + 0x12, 0x69, 0x1c, 0x62, 0xf7, 0xde, 0x91, 0x09, 0x2f, 0xc5, 0xca, 0x23, 0x2c, 0x28, 0xd8, 0xbf, + 0x6a, 0x41, 0xe9, 0x0c, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x7c, 0x2e, 0x69, 0x11, 0xd2, 0x6c, 0x0e, + 0x3b, 0xf7, 0x97, 0x65, 0xd8, 0xb6, 0x9c, 0x81, 0xf9, 0x2e, 0x38, 0x77, 0x3b, 0x70, 0x9a, 0x8b, + 0x8e, 0xe7, 0xf8, 0x0d, 0x12, 0xae, 0xfa, 0xdb, 0x3d, 0xad, 0x94, 0x74, 0x9b, 0xa2, 0x42, 0x2f, + 0x9b, 0x22, 0x7b, 0x07, 
0x90, 0xde, 0x80, 0xb0, 0xe3, 0xc4, 0x30, 0xe4, 0xf2, 0xa6, 0xc4, 0xf0, + 0x3f, 0x9d, 0xcd, 0xdd, 0x75, 0xf4, 0x4c, 0xb3, 0x50, 0xe4, 0x05, 0x58, 0x12, 0xb2, 0x5f, 0x85, + 0x4c, 0xdf, 0xad, 0xde, 0x6a, 0x03, 0xfb, 0x65, 0x98, 0x62, 0x35, 0x4f, 0x26, 0xd2, 0xda, 0x3f, + 0x60, 0xc1, 0xc4, 0x7a, 0x2a, 0x36, 0xc5, 0x55, 0xf6, 0xd6, 0x97, 0xa1, 0xf7, 0xad, 0xb3, 0x52, + 0x2c, 0xa0, 0xa7, 0xae, 0x5f, 0xfa, 0x33, 0x0b, 0x12, 0x57, 0xc9, 0x33, 0x60, 0xaa, 0x96, 0x0c, + 0xa6, 0x2a, 0x53, 0xef, 0xa1, 0xba, 0x93, 0xc7, 0x53, 0xa1, 0x5b, 0x2a, 0x2e, 0x40, 0x17, 0x95, + 0x47, 0x42, 0x86, 0x7b, 0x91, 0x8f, 0x9b, 0xc1, 0x03, 0x64, 0xa4, 0x00, 0x66, 0x26, 0xa4, 0x70, + 0x3f, 0x22, 0x66, 0x42, 0xaa, 0x3f, 0x39, 0xbb, 0xaf, 0xa6, 0x75, 0x99, 0x9d, 0x4a, 0xdf, 0xce, + 0xcc, 0xbe, 0x1d, 0xcf, 0x7d, 0x9f, 0xa8, 0xe0, 0x26, 0x15, 0x61, 0xc6, 0x2d, 0x4a, 0x8f, 0x0f, + 0x2b, 0x63, 0xea, 0x1f, 0x8f, 0x80, 0x95, 0x54, 0xb1, 0x6f, 0xc2, 0x44, 0x6a, 0xc0, 0xd0, 0xcb, + 0x30, 0xd0, 0xda, 0x71, 0x22, 0x92, 0x32, 0x8d, 0x1c, 0xa8, 0xd1, 0xc2, 0xe3, 0xc3, 0xca, 0xb8, + 0xaa, 0xc0, 0x4a, 0x30, 0xc7, 0xb6, 0xff, 0xa7, 0x05, 0xa5, 0xf5, 0xa0, 0x79, 0x16, 0x8b, 0xe9, + 0x75, 0x63, 0x31, 0x3d, 0x91, 0x17, 0x3f, 0x30, 0x77, 0x1d, 0xad, 0xa4, 0xd6, 0xd1, 0xe5, 0x5c, + 0x0a, 0xdd, 0x97, 0xd0, 0x1e, 0x8c, 0xb0, 0xa8, 0x84, 0xc2, 0x54, 0xf3, 0x45, 0x83, 0xbf, 0xaf, + 0xa4, 0xf8, 0xfb, 0x09, 0x0d, 0x55, 0xe3, 0xf2, 0x9f, 0x81, 0x21, 0x61, 0x2e, 0x98, 0x36, 0x70, + 0x17, 0xb8, 0x58, 0xc2, 0xed, 0x9f, 0x2c, 0x82, 0x11, 0x05, 0x11, 0xfd, 0x9a, 0x05, 0x73, 0x21, + 0xf7, 0x18, 0x6c, 0x56, 0xdb, 0xa1, 0xeb, 0x6f, 0xd7, 0x1b, 0x3b, 0xa4, 0xd9, 0xf6, 0x5c, 0x7f, + 0x7b, 0x75, 0xdb, 0x0f, 0x54, 0xf1, 0xf2, 0x03, 0xd2, 0x68, 0x33, 0x9d, 0x7f, 0x8f, 0x90, 0x8b, + 0xca, 0x1c, 0xe7, 0xfa, 0xd1, 0x61, 0x65, 0x0e, 0x9f, 0x88, 0x36, 0x3e, 0x61, 0x5f, 0xd0, 0x37, + 0x2c, 0x98, 0xe7, 0xc1, 0x01, 0xfb, 0xef, 0x7f, 0x17, 0x69, 0xa8, 0x26, 0x49, 0x25, 0x44, 0x36, + 0x48, 0xb8, 0xb7, 0xf8, 0x8a, 0x18, 0xd0, 0xf9, 0xda, 0xc9, 0xda, 0xc2, 0x27, 0xed, 0x9c, 0xfd, + 0x2f, 0x8b, 0x30, 0x26, 0x9c, 0xd5, 0x45, 0x14, 0x94, 0x97, 0x8d, 0x25, 0xf1, 0x64, 0x6a, 0x49, + 0x4c, 0x19, 0xc8, 0xa7, 0x13, 0x00, 0x25, 0x82, 0x29, 0xcf, 0x89, 0xe2, 0x9b, 0xc4, 0x09, 0xe3, + 0x4d, 0xe2, 0x70, 0x33, 0x95, 0xe2, 0x89, 0x4d, 0x6a, 0x94, 0xfa, 0xe5, 0x76, 0x9a, 0x18, 0xee, + 0xa4, 0x8f, 0xf6, 0x01, 0x31, 0x5b, 0x9b, 0xd0, 0xf1, 0x23, 0xfe, 0x2d, 0xae, 0x78, 0x0f, 0x38, + 0x59, 0xab, 0xb3, 0xa2, 0x55, 0x74, 0xbb, 0x83, 0x1a, 0xce, 0x68, 0x41, 0xb3, 0xa1, 0x1a, 0xe8, + 0xd7, 0x86, 0x6a, 0xb0, 0x87, 0x17, 0x89, 0x0f, 0x93, 0x1d, 0xf1, 0x06, 0xde, 0x86, 0xb2, 0xb2, + 0x75, 0x13, 0x87, 0x4e, 0xf7, 0xb0, 0x1d, 0x69, 0x0a, 0x5c, 0x45, 0x92, 0xd8, 0x59, 0x26, 0xe4, + 0xec, 0x7f, 0x5c, 0x30, 0x1a, 0xe4, 0x93, 0xb8, 0x0e, 0xc3, 0x4e, 0x14, 0xb9, 0xdb, 0x3e, 0x69, + 0x8a, 0x1d, 0xfb, 0xf1, 0xbc, 0x1d, 0x6b, 0x34, 0xc3, 0xec, 0x0d, 0x17, 0x44, 0x4d, 0xac, 0x68, + 0xa0, 0x9b, 0xdc, 0x18, 0x68, 0x5f, 0xf2, 0xf3, 0xfd, 0x51, 0x03, 0x69, 0x2e, 0xb4, 0x4f, 0xb0, + 0xa8, 0x8f, 0xbe, 0xc8, 0xad, 0xb5, 0x6e, 0xf9, 0xc1, 0x7d, 0xff, 0x46, 0x10, 0x48, 0x0f, 0xb3, + 0xfe, 0x08, 0x4e, 0x49, 0x1b, 0x2d, 0x55, 0x1d, 0x9b, 0xd4, 0xfa, 0x8b, 0xc9, 0xf3, 0xdd, 0x70, + 0x8e, 0x92, 0x36, 0xfd, 0x44, 0x22, 0x44, 0x60, 0x42, 0x44, 0x42, 0x90, 0x65, 0x62, 0xec, 0x32, + 0x59, 0x75, 0xb3, 0x76, 0xa2, 0xd0, 0xbb, 0x65, 0x92, 0xc0, 0x69, 0x9a, 0xf6, 0xcf, 0x58, 0xc0, + 0x2c, 0xdc, 0xcf, 0x80, 0x65, 0xf8, 0x9c, 0xc9, 0x32, 0xcc, 0xe4, 0x0d, 0x72, 0x0e, 0xb7, 0xf0, + 0x12, 0x5f, 0x59, 0xb5, 0x30, 0x78, 0x70, 0x20, 
0x5e, 0xca, 0xfb, 0xe0, 0x52, 0xff, 0x8f, 0xc5, + 0x0f, 0x31, 0xe5, 0x74, 0x8e, 0xbe, 0x07, 0x86, 0x1b, 0x4e, 0xcb, 0x69, 0xf0, 0x90, 0xbd, 0xb9, + 0x1a, 0x1b, 0xa3, 0xd2, 0xdc, 0x92, 0xa8, 0xc1, 0x35, 0x10, 0x32, 0xa2, 0xc6, 0xb0, 0x2c, 0xee, + 0xa9, 0x75, 0x50, 0x4d, 0xce, 0xee, 0xc2, 0x98, 0x41, 0xec, 0x91, 0x8a, 0xab, 0xdf, 0xc3, 0xaf, + 0x58, 0x15, 0x01, 0x66, 0x0f, 0xa6, 0x7c, 0xed, 0x3f, 0xbd, 0x50, 0xa4, 0x08, 0xf2, 0xf1, 0x5e, + 0x97, 0x28, 0xbb, 0x7d, 0x34, 0x0b, 0xfe, 0x14, 0x19, 0xdc, 0x49, 0xd9, 0xfe, 0x29, 0x0b, 0x1e, + 0xd3, 0x11, 0xb5, 0x78, 0x00, 0xbd, 0x74, 0xc0, 0x55, 0x18, 0x0e, 0x5a, 0x24, 0x74, 0xe2, 0x20, + 0x14, 0xb7, 0xc6, 0x35, 0x39, 0xe8, 0x77, 0x44, 0xf9, 0xb1, 0x88, 0x9d, 0x28, 0xa9, 0xcb, 0x72, + 0xac, 0x6a, 0x22, 0x1b, 0x06, 0xd9, 0x60, 0x44, 0x22, 0x56, 0x03, 0x3b, 0x03, 0xd8, 0x73, 0x68, + 0x84, 0x05, 0xc4, 0xfe, 0x23, 0x8b, 0x2f, 0x2c, 0xbd, 0xeb, 0xe8, 0x3d, 0x98, 0xdc, 0x73, 0xe2, + 0xc6, 0xce, 0xf2, 0x83, 0x56, 0xc8, 0x55, 0xdf, 0x72, 0x9c, 0x9e, 0xed, 0x35, 0x4e, 0xda, 0x47, + 0x26, 0x06, 0x68, 0x6b, 0x29, 0x62, 0xb8, 0x83, 0x3c, 0xda, 0x84, 0x11, 0x56, 0xc6, 0x2c, 0x9d, + 0xa3, 0x6e, 0xac, 0x41, 0x5e, 0x6b, 0xea, 0x45, 0x79, 0x2d, 0xa1, 0x83, 0x75, 0xa2, 0xf6, 0x57, + 0x8a, 0x7c, 0xb7, 0x33, 0x6e, 0xfb, 0x19, 0x18, 0x6a, 0x05, 0xcd, 0xa5, 0xd5, 0x2a, 0x16, 0xb3, + 0xa0, 0xae, 0x91, 0x1a, 0x2f, 0xc6, 0x12, 0x8e, 0x5e, 0x03, 0x20, 0x0f, 0x62, 0x12, 0xfa, 0x8e, + 0xa7, 0x0c, 0x42, 0x94, 0x09, 0x64, 0x35, 0x58, 0x0f, 0xe2, 0xbb, 0x11, 0xf9, 0xae, 0x65, 0x85, + 0x82, 0x35, 0x74, 0x74, 0x1d, 0xa0, 0x15, 0x06, 0xfb, 0x6e, 0x93, 0xb9, 0xce, 0x15, 0x4d, 0x73, + 0x89, 0x9a, 0x82, 0x60, 0x0d, 0x0b, 0xbd, 0x06, 0x63, 0x6d, 0x3f, 0xe2, 0x1c, 0x8a, 0xb3, 0x29, + 0x22, 0x0f, 0x0e, 0x27, 0x96, 0x0b, 0x77, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x02, 0x0c, 0xc6, 0x0e, + 0xb3, 0x77, 0x18, 0xc8, 0xb7, 0x5b, 0xdc, 0xa0, 0x18, 0x7a, 0xc0, 0x58, 0x5a, 0x01, 0x8b, 0x8a, + 0xe8, 0x6d, 0xe9, 0x87, 0xc0, 0xcf, 0x7a, 0x61, 0x30, 0xdc, 0xdf, 0xbd, 0xa0, 0x79, 0x21, 0x08, + 0x43, 0x64, 0x83, 0x96, 0xfd, 0x8d, 0x32, 0x40, 0xc2, 0x8e, 0xa3, 0xf7, 0x3b, 0xce, 0xa3, 0xe7, + 0xba, 0x33, 0xf0, 0xa7, 0x77, 0x18, 0xa1, 0xef, 0xb7, 0x60, 0xc4, 0xf1, 0xbc, 0xa0, 0xe1, 0xc4, + 0x6c, 0x94, 0x0b, 0xdd, 0xcf, 0x43, 0xd1, 0xfe, 0x42, 0x52, 0x83, 0x77, 0xe1, 0x45, 0xb9, 0xf0, + 0x34, 0x48, 0xcf, 0x5e, 0xe8, 0x0d, 0xa3, 0x4f, 0x49, 0x29, 0x8d, 0x2f, 0x8f, 0xd9, 0xb4, 0x94, + 0x56, 0x66, 0x47, 0xbf, 0x26, 0xa0, 0xa1, 0xbb, 0x46, 0x50, 0xb9, 0x52, 0x7e, 0x7c, 0x05, 0x83, + 0x2b, 0xed, 0x15, 0x4f, 0x0e, 0xd5, 0x74, 0xc7, 0xa9, 0x81, 0xfc, 0x20, 0x24, 0x9a, 0xf8, 0xd3, + 0xc3, 0x69, 0xea, 0x5d, 0x98, 0x68, 0x9a, 0x77, 0xbb, 0x58, 0x4d, 0x4f, 0xe7, 0xd1, 0x4d, 0xb1, + 0x02, 0xc9, 0x6d, 0x9e, 0x02, 0xe0, 0x34, 0x61, 0x54, 0xe3, 0x2e, 0x6c, 0xab, 0xfe, 0x56, 0x20, + 0x0c, 0xcf, 0xed, 0xdc, 0xb9, 0x3c, 0x88, 0x62, 0xb2, 0x47, 0x31, 0x93, 0x4b, 0x7b, 0x5d, 0xd4, + 0xc5, 0x8a, 0x0a, 0x7a, 0x03, 0x06, 0x99, 0x0f, 0x6c, 0x34, 0x33, 0x9c, 0xaf, 0x28, 0x34, 0xc3, + 0x37, 0x24, 0x9b, 0x8a, 0xfd, 0x8d, 0xb0, 0xa0, 0x80, 0x6e, 0xca, 0x18, 0x2f, 0xd1, 0xaa, 0x7f, + 0x37, 0x22, 0x2c, 0xc6, 0x4b, 0x79, 0xf1, 0xe3, 0x49, 0xf8, 0x16, 0x5e, 0x9e, 0x19, 0x1a, 0xde, + 0xa8, 0x49, 0x99, 0x23, 0xf1, 0x5f, 0x46, 0x9c, 0x9f, 0x81, 0xfc, 0xee, 0x99, 0x51, 0xe9, 0x93, + 0xe1, 0xbc, 0x67, 0x92, 0xc0, 0x69, 0x9a, 0x94, 0xd1, 0xe4, 0x3b, 0x57, 0x98, 0xae, 0xf7, 0xda, + 0xff, 0x5c, 0xbe, 0x66, 0x97, 0x0c, 0x2f, 0xc1, 0xa2, 0xfe, 0x99, 0xde, 0xfa, 0xb3, 0x3e, 0x4c, + 0xa6, 0xb7, 0xe8, 0x23, 0xe5, 0x32, 0xfe, 0xa0, 0x04, 0xe3, 0xe6, 0x92, 
0x42, 0xf3, 0x50, 0x16, + 0x44, 0x54, 0xc0, 0x51, 0xb5, 0x4b, 0xd6, 0x24, 0x00, 0x27, 0x38, 0x2c, 0xce, 0x2c, 0xab, 0xae, + 0x99, 0x1c, 0x26, 0x71, 0x66, 0x15, 0x04, 0x6b, 0x58, 0x54, 0x5e, 0xda, 0x0c, 0x82, 0x58, 0x5d, + 0x2a, 0x6a, 0xdd, 0x2d, 0xb2, 0x52, 0x2c, 0xa0, 0xf4, 0x32, 0xd9, 0x25, 0xa1, 0x4f, 0x3c, 0x33, + 0x8e, 0x99, 0xba, 0x4c, 0x6e, 0xe9, 0x40, 0x6c, 0xe2, 0xd2, 0x5b, 0x32, 0x88, 0xd8, 0x42, 0x16, + 0x52, 0x59, 0x62, 0xc2, 0x59, 0xe7, 0xde, 0xe4, 0x12, 0x8e, 0xbe, 0x00, 0x8f, 0x29, 0xe7, 0x6f, + 0xcc, 0x95, 0xd0, 0xb2, 0xc5, 0x41, 0x43, 0x89, 0xf2, 0xd8, 0x52, 0x36, 0x1a, 0xce, 0xab, 0x8f, + 0x5e, 0x87, 0x71, 0xc1, 0xb9, 0x4b, 0x8a, 0x43, 0xa6, 0x5d, 0xc4, 0x2d, 0x03, 0x8a, 0x53, 0xd8, + 0x32, 0x12, 0x1b, 0x63, 0x9e, 0x25, 0x85, 0xe1, 0xce, 0x48, 0x6c, 0x3a, 0x1c, 0x77, 0xd4, 0x40, + 0x0b, 0x30, 0xc1, 0x59, 0x2b, 0xd7, 0xdf, 0xe6, 0x73, 0x22, 0x3c, 0x4b, 0xd4, 0x96, 0xba, 0x63, + 0x82, 0x71, 0x1a, 0x1f, 0xbd, 0x0a, 0xa3, 0x4e, 0xd8, 0xd8, 0x71, 0x63, 0xd2, 0x88, 0xdb, 0x21, + 0x77, 0x39, 0xd1, 0x0c, 0x4b, 0x16, 0x34, 0x18, 0x36, 0x30, 0xed, 0xf7, 0xe1, 0x5c, 0x86, 0x53, + 0x1a, 0x5d, 0x38, 0x4e, 0xcb, 0x95, 0xdf, 0x94, 0x32, 0xc6, 0x5c, 0xa8, 0xad, 0xca, 0xaf, 0xd1, + 0xb0, 0xe8, 0xea, 0x64, 0xce, 0x6b, 0x5a, 0x82, 0x09, 0xb5, 0x3a, 0x57, 0x24, 0x00, 0x27, 0x38, + 0xf6, 0xff, 0x2a, 0xc0, 0x44, 0x86, 0x62, 0x9d, 0x25, 0x39, 0x48, 0xc9, 0x1e, 0x49, 0x4e, 0x03, + 0x33, 0xb0, 0x5f, 0xe1, 0x04, 0x81, 0xfd, 0x8a, 0xbd, 0x02, 0xfb, 0x95, 0x3e, 0x48, 0x60, 0x3f, + 0x73, 0xc4, 0x06, 0xfa, 0x1a, 0xb1, 0x8c, 0x60, 0x80, 0x83, 0x27, 0x0c, 0x06, 0x68, 0x0c, 0xfa, + 0x50, 0x1f, 0x83, 0xfe, 0xa3, 0x05, 0x98, 0x4c, 0x1b, 0xc0, 0x9d, 0x81, 0x3a, 0xf6, 0x0d, 0x43, + 0x1d, 0x9b, 0x9d, 0x32, 0x24, 0x6d, 0x96, 0x97, 0xa7, 0x9a, 0xc5, 0x29, 0xd5, 0xec, 0x27, 0xfb, + 0xa2, 0xd6, 0x5d, 0x4d, 0xfb, 0x77, 0x0b, 0x30, 0x9d, 0xae, 0xb2, 0xe4, 0x39, 0xee, 0xde, 0x19, + 0x8c, 0xcd, 0x1d, 0x63, 0x6c, 0x9e, 0xef, 0xe7, 0x6b, 0x58, 0xd7, 0x72, 0x07, 0xe8, 0xad, 0xd4, + 0x00, 0xcd, 0xf7, 0x4f, 0xb2, 0xfb, 0x28, 0x7d, 0xb3, 0x08, 0x97, 0x33, 0xeb, 0x25, 0xda, 0xcc, + 0x15, 0x43, 0x9b, 0x79, 0x3d, 0xa5, 0xcd, 0xb4, 0xbb, 0xd7, 0x3e, 0x1d, 0xf5, 0xa6, 0xf0, 0x16, + 0x64, 0xc1, 0xdf, 0x1e, 0x52, 0xb5, 0x69, 0x78, 0x0b, 0x2a, 0x42, 0xd8, 0xa4, 0xfb, 0x17, 0x49, + 0xa5, 0xf9, 0x6f, 0x2c, 0xb8, 0x98, 0x39, 0x37, 0x67, 0xa0, 0xc2, 0x5a, 0x37, 0x55, 0x58, 0xcf, + 0xf4, 0xbd, 0x5a, 0x73, 0x74, 0x5a, 0x7f, 0x58, 0xcc, 0xf9, 0x16, 0x26, 0xa0, 0xdf, 0x81, 0x11, + 0xa7, 0xd1, 0x20, 0x51, 0xb4, 0x16, 0x34, 0x55, 0x30, 0xb4, 0xe7, 0x99, 0x9c, 0x95, 0x14, 0x1f, + 0x1f, 0x56, 0x66, 0xd3, 0x24, 0x12, 0x30, 0xd6, 0x29, 0x98, 0xf1, 0x1b, 0x0b, 0xa7, 0x1a, 0xbf, + 0xf1, 0x3a, 0xc0, 0xbe, 0xe2, 0xd6, 0xd3, 0x42, 0xbe, 0xc6, 0xc7, 0x6b, 0x58, 0xe8, 0x8b, 0x30, + 0x1c, 0x89, 0x6b, 0x5c, 0x2c, 0xc5, 0x17, 0xfb, 0x9c, 0x2b, 0x67, 0x93, 0x78, 0xa6, 0x5b, 0xba, + 0xd2, 0x87, 0x28, 0x92, 0xe8, 0x3b, 0x60, 0x32, 0xe2, 0x51, 0x4f, 0x96, 0x3c, 0x27, 0x62, 0x3e, + 0x0e, 0x62, 0x15, 0x32, 0x5f, 0xf3, 0x7a, 0x0a, 0x86, 0x3b, 0xb0, 0xd1, 0x8a, 0xfc, 0x28, 0x16, + 0xa2, 0x85, 0x2f, 0xcc, 0xab, 0xc9, 0x07, 0x89, 0x14, 0x4b, 0xe7, 0xd3, 0xc3, 0xcf, 0x06, 0x5e, + 0xab, 0x69, 0xff, 0x68, 0x09, 0x1e, 0xef, 0x72, 0x88, 0xa1, 0x05, 0xf3, 0x8d, 0xf2, 0xd9, 0xb4, + 0xf4, 0x3b, 0x9b, 0x59, 0xd9, 0x10, 0x87, 0x53, 0x6b, 0xa5, 0xf0, 0x81, 0xd7, 0xca, 0x0f, 0x59, + 0x9a, 0x5e, 0x82, 0x5b, 0xd2, 0x7d, 0xee, 0x84, 0x87, 0xf3, 0x29, 0x2a, 0x2a, 0xb6, 0x32, 0xa4, + 0xfd, 0xeb, 0x7d, 0x77, 0xa7, 0x6f, 0xf1, 0xff, 0x6c, 0xb5, 0xb3, 0x5f, 0xb1, 0xe0, 0xc9, 0xcc, + 
0xfe, 0x1a, 0x36, 0x15, 0xf3, 0x50, 0x6e, 0xd0, 0x42, 0xcd, 0x6f, 0x2a, 0x71, 0x28, 0x95, 0x00, + 0x9c, 0xe0, 0x18, 0xa6, 0x13, 0x85, 0x9e, 0xa6, 0x13, 0xff, 0xc2, 0x82, 0x8e, 0x05, 0x7c, 0x06, + 0x27, 0xe9, 0xaa, 0x79, 0x92, 0x7e, 0xbc, 0x9f, 0xb9, 0xcc, 0x39, 0x44, 0xff, 0xf3, 0x04, 0x5c, + 0xc8, 0x71, 0x94, 0xd8, 0x87, 0xa9, 0xed, 0x06, 0x31, 0x3d, 0xd2, 0xc4, 0xc7, 0x64, 0x3a, 0xef, + 0x75, 0x75, 0x5f, 0x63, 0xb9, 0x71, 0xa6, 0x3a, 0x50, 0x70, 0x67, 0x13, 0xe8, 0x2b, 0x16, 0x9c, + 0x77, 0xee, 0x47, 0x1d, 0x19, 0x10, 0xc5, 0x9a, 0x79, 0x29, 0x53, 0x4b, 0xd1, 0x23, 0x63, 0x22, + 0x4f, 0x16, 0x94, 0x85, 0x85, 0x33, 0xdb, 0x42, 0x58, 0xc4, 0xaf, 0xa4, 0xfc, 0x76, 0x17, 0x9f, + 0xc9, 0x2c, 0x8f, 0x16, 0x7e, 0xa6, 0x4a, 0x08, 0x56, 0x74, 0xd0, 0x3d, 0x28, 0x6f, 0x4b, 0x37, + 0x33, 0x71, 0x66, 0x67, 0x5e, 0x82, 0x99, 0xbe, 0x68, 0xfc, 0xdd, 0x50, 0x81, 0x70, 0x42, 0x0a, + 0xbd, 0x0e, 0x45, 0x7f, 0x2b, 0xea, 0x96, 0x65, 0x27, 0x65, 0x6a, 0xc4, 0xfd, 0x91, 0xd7, 0x57, + 0xea, 0x98, 0x56, 0x44, 0x37, 0xa1, 0x18, 0x6e, 0x36, 0x85, 0x62, 0x2d, 0x93, 0x2f, 0xc5, 0x8b, + 0xd5, 0xec, 0x45, 0xc2, 0x29, 0xe1, 0xc5, 0x2a, 0xa6, 0x24, 0x50, 0x0d, 0x06, 0x98, 0x4f, 0x81, + 0xd0, 0x9f, 0x65, 0x32, 0xa4, 0x5d, 0x7c, 0x73, 0xb8, 0xd3, 0x32, 0x43, 0xc0, 0x9c, 0x10, 0xda, + 0x80, 0xc1, 0x06, 0xcb, 0xc8, 0x22, 0x42, 0x26, 0x7f, 0x2a, 0x53, 0x85, 0xd6, 0x25, 0x55, 0x8d, + 0xd0, 0x28, 0x31, 0x0c, 0x2c, 0x68, 0x31, 0xaa, 0xa4, 0xb5, 0xb3, 0x15, 0x31, 0x11, 0x3c, 0x8f, + 0x6a, 0x97, 0x0c, 0x4c, 0x82, 0x2a, 0xc3, 0xc0, 0x82, 0x16, 0xfa, 0x0c, 0x14, 0xb6, 0x1a, 0xc2, + 0xe5, 0x20, 0x53, 0x97, 0x66, 0xba, 0x94, 0x2f, 0x0e, 0x1e, 0x1d, 0x56, 0x0a, 0x2b, 0x4b, 0xb8, + 0xb0, 0xd5, 0x40, 0xeb, 0x30, 0xb4, 0xc5, 0x9d, 0x50, 0x85, 0xba, 0xec, 0xe9, 0x6c, 0xff, 0xd8, + 0x0e, 0x3f, 0x55, 0x6e, 0x2a, 0x2f, 0x00, 0x58, 0x12, 0x61, 0x41, 0x20, 0x95, 0x33, 0xad, 0x88, + 0x86, 0x3c, 0x77, 0x32, 0x07, 0x68, 0xee, 0xde, 0x9e, 0xb8, 0xe4, 0x62, 0x8d, 0x22, 0xfa, 0x32, + 0x94, 0x1d, 0x99, 0x7b, 0x4f, 0x44, 0x8b, 0x78, 0x31, 0x73, 0x63, 0x76, 0x4f, 0x4b, 0xc8, 0x57, + 0xb5, 0x42, 0xc2, 0x09, 0x51, 0xb4, 0x0b, 0x63, 0xfb, 0x51, 0x6b, 0x87, 0xc8, 0x8d, 0xcc, 0x82, + 0x47, 0xe4, 0x5c, 0x5c, 0xf7, 0x04, 0xa2, 0x1b, 0xc6, 0x6d, 0xc7, 0xeb, 0x38, 0x7b, 0xd8, 0x63, + 0xf3, 0x3d, 0x9d, 0x18, 0x36, 0x69, 0xd3, 0xe1, 0x7f, 0xaf, 0x1d, 0x6c, 0x1e, 0xc4, 0x44, 0x84, + 0x4f, 0xce, 0x1c, 0xfe, 0x37, 0x39, 0x4a, 0xe7, 0xf0, 0x0b, 0x00, 0x96, 0x44, 0xe8, 0x56, 0x77, + 0x64, 0x5e, 0x4b, 0x16, 0x36, 0x39, 0x67, 0xab, 0x67, 0x26, 0xbf, 0xd4, 0x06, 0x85, 0x9d, 0x91, + 0x09, 0x29, 0x76, 0x36, 0xb6, 0x76, 0x82, 0x38, 0xf0, 0x53, 0xe7, 0xf2, 0x54, 0xfe, 0xd9, 0x58, + 0xcb, 0xc0, 0xef, 0x3c, 0x1b, 0xb3, 0xb0, 0x70, 0x66, 0x5b, 0xa8, 0x09, 0xe3, 0xad, 0x20, 0x8c, + 0xef, 0x07, 0xa1, 0x5c, 0x5f, 0xa8, 0x8b, 0xb8, 0x6f, 0x60, 0x8a, 0x16, 0x59, 0x38, 0x6f, 0x13, + 0x82, 0x53, 0x34, 0xd1, 0xe7, 0x61, 0x28, 0x6a, 0x38, 0x1e, 0x59, 0xbd, 0x33, 0x73, 0x2e, 0xff, + 0xd2, 0xa9, 0x73, 0x94, 0x9c, 0xd5, 0xc5, 0x26, 0x47, 0xa0, 0x60, 0x49, 0x0e, 0xad, 0xc0, 0x00, + 0x8b, 0xc9, 0xcf, 0x22, 0x3f, 0xe7, 0x44, 0x25, 0xea, 0x30, 0xea, 0xe4, 0x67, 0x13, 0x2b, 0xc6, + 0xbc, 0x3a, 0xdd, 0x03, 0x82, 0xeb, 0x0d, 0xa2, 0x99, 0xe9, 0xfc, 0x3d, 0x20, 0x98, 0xe5, 0x3b, + 0xf5, 0x6e, 0x7b, 0x40, 0x21, 0xe1, 0x84, 0x28, 0x3d, 0x99, 0xe9, 0x69, 0x7a, 0xa1, 0x8b, 0x9d, + 0x49, 0xee, 0x59, 0xca, 0x4e, 0x66, 0x7a, 0x92, 0x52, 0x12, 0xf6, 0xef, 0x0d, 0x75, 0x72, 0x2a, + 0x4c, 0x4e, 0xfa, 0xcb, 0x56, 0xc7, 0x13, 0xda, 0xa7, 0xfb, 0x55, 0xdb, 0x9c, 0x22, 0x8f, 0xfa, + 0x15, 0x0b, 0x2e, 0xb4, 
0x32, 0x3f, 0x44, 0x5c, 0xfb, 0xfd, 0x69, 0x7f, 0xf8, 0xa7, 0xab, 0xe8, + 0xec, 0xd9, 0x70, 0x9c, 0xd3, 0x52, 0x5a, 0x0e, 0x28, 0x7e, 0x60, 0x39, 0x60, 0x0d, 0x86, 0x19, + 0x6b, 0xd9, 0x23, 0x43, 0x59, 0xda, 0x0b, 0x8d, 0x31, 0x10, 0x4b, 0xa2, 0x22, 0x56, 0x24, 0xd0, + 0x0f, 0x5b, 0x70, 0x29, 0xdd, 0x75, 0x4c, 0x18, 0x58, 0xc4, 0x32, 0xe7, 0x22, 0xda, 0x8a, 0xf8, + 0xfe, 0x4b, 0xb5, 0x6e, 0xc8, 0xc7, 0xbd, 0x10, 0x70, 0xf7, 0xc6, 0x50, 0x35, 0x43, 0x46, 0x1c, + 0x34, 0xf5, 0xe2, 0x7d, 0xc8, 0x89, 0x2f, 0xc1, 0xe8, 0x5e, 0xd0, 0xf6, 0x63, 0x61, 0x96, 0x22, + 0x9c, 0x04, 0xd9, 0x3b, 0xf0, 0x9a, 0x56, 0x8e, 0x0d, 0xac, 0x94, 0x74, 0x39, 0xfc, 0xb0, 0xd2, + 0x25, 0x7a, 0x27, 0x95, 0x87, 0xba, 0x9c, 0x1f, 0x33, 0x4f, 0x08, 0xe2, 0x27, 0xc8, 0x46, 0x7d, + 0xb6, 0x12, 0xd1, 0xcf, 0x59, 0x19, 0xac, 0x3c, 0x97, 0x91, 0x3f, 0x6b, 0xca, 0xc8, 0x57, 0xd3, + 0x32, 0x72, 0x87, 0x4e, 0xd4, 0x10, 0x8f, 0xfb, 0x0f, 0xbc, 0xdc, 0x6f, 0x24, 0x33, 0xdb, 0x83, + 0x2b, 0xbd, 0xae, 0x25, 0x66, 0x9f, 0xd4, 0x54, 0x2f, 0x60, 0x89, 0x7d, 0x52, 0x73, 0xb5, 0x8a, + 0x19, 0xa4, 0xdf, 0x50, 0x17, 0xf6, 0x7f, 0xb3, 0xa0, 0x58, 0x0b, 0x9a, 0x67, 0xa0, 0xe3, 0xfd, + 0x9c, 0xa1, 0xe3, 0x7d, 0x3c, 0x27, 0x3f, 0x78, 0xae, 0x46, 0x77, 0x39, 0xa5, 0xd1, 0xbd, 0x94, + 0x47, 0xa0, 0xbb, 0xfe, 0xf6, 0xa7, 0x8b, 0xa0, 0x67, 0x33, 0x47, 0xff, 0xea, 0x61, 0x8c, 0x83, + 0x8b, 0xdd, 0x12, 0x9c, 0x0b, 0xca, 0xcc, 0xac, 0x49, 0xfa, 0xbd, 0xfd, 0x39, 0xb3, 0x11, 0x7e, + 0x8b, 0xb8, 0xdb, 0x3b, 0x31, 0x69, 0xa6, 0x3f, 0xe7, 0xec, 0x6c, 0x84, 0xff, 0x8b, 0x05, 0x13, + 0xa9, 0xd6, 0x91, 0x07, 0x63, 0x9e, 0xae, 0xa0, 0x13, 0xeb, 0xf4, 0xa1, 0x74, 0x7b, 0xc2, 0xc6, + 0x52, 0x2b, 0xc2, 0x26, 0x71, 0x34, 0x07, 0xa0, 0x1e, 0xd0, 0xa4, 0xde, 0x8b, 0x71, 0xfd, 0xea, + 0x85, 0x2d, 0xc2, 0x1a, 0x06, 0x7a, 0x19, 0x46, 0xe2, 0xa0, 0x15, 0x78, 0xc1, 0xf6, 0xc1, 0x2d, + 0x22, 0x83, 0xab, 0x28, 0xcb, 0xa9, 0x8d, 0x04, 0x84, 0x75, 0x3c, 0xfb, 0x67, 0x8b, 0x90, 0xce, + 0x80, 0xff, 0xad, 0x35, 0xf9, 0xd1, 0x5c, 0x93, 0xdf, 0xb4, 0x60, 0x92, 0xb6, 0xce, 0xac, 0x38, + 0xe4, 0x65, 0xab, 0x12, 0xc0, 0x58, 0x5d, 0x12, 0xc0, 0x5c, 0xa5, 0x67, 0x57, 0x33, 0x68, 0xc7, + 0x42, 0x6f, 0xa6, 0x1d, 0x4e, 0xb4, 0x14, 0x0b, 0xa8, 0xc0, 0x23, 0x61, 0x28, 0x5c, 0x93, 0x74, + 0x3c, 0x12, 0x86, 0x58, 0x40, 0x65, 0x7e, 0x98, 0x52, 0x4e, 0x7e, 0x18, 0x16, 0x2a, 0x4e, 0xbc, + 0xf7, 0x0b, 0xb6, 0x47, 0x0b, 0x15, 0x27, 0x0d, 0x01, 0x12, 0x1c, 0xfb, 0x17, 0x8b, 0x30, 0x5a, + 0x0b, 0x9a, 0xc9, 0x13, 0xd6, 0x4b, 0xc6, 0x13, 0xd6, 0x95, 0xd4, 0x13, 0xd6, 0xa4, 0x8e, 0xfb, + 0xad, 0x07, 0xab, 0x0f, 0xeb, 0xc1, 0xea, 0x9f, 0x5b, 0x6c, 0xd6, 0xaa, 0xeb, 0x75, 0x91, 0x9f, + 0xf6, 0x05, 0x18, 0x61, 0x07, 0x12, 0xf3, 0x85, 0x93, 0xef, 0x3a, 0x2c, 0xf4, 0xfb, 0x7a, 0x52, + 0x8c, 0x75, 0x1c, 0x74, 0x0d, 0x86, 0x23, 0xe2, 0x84, 0x8d, 0x1d, 0x75, 0xc6, 0x89, 0x57, 0x0f, + 0x5e, 0x86, 0x15, 0x14, 0xbd, 0x99, 0x44, 0x29, 0x2b, 0xe6, 0x67, 0x5a, 0xd5, 0xfb, 0xc3, 0xb7, + 0x48, 0x7e, 0x68, 0x32, 0xfb, 0x2d, 0x40, 0x9d, 0xf8, 0x7d, 0xc4, 0x23, 0xaa, 0x98, 0xf1, 0x88, + 0xca, 0x1d, 0xb1, 0x88, 0xfe, 0xd4, 0x82, 0xf1, 0x5a, 0xd0, 0xa4, 0x5b, 0xf7, 0x2f, 0xd2, 0x3e, + 0xd5, 0x43, 0x34, 0x0e, 0x76, 0x09, 0xd1, 0xf8, 0xf7, 0x2c, 0x18, 0xaa, 0x05, 0xcd, 0x33, 0xd0, + 0xb6, 0x7f, 0xd6, 0xd4, 0xb6, 0x3f, 0x96, 0xb3, 0x24, 0x72, 0x14, 0xec, 0xbf, 0x5c, 0x84, 0x31, + 0xda, 0xcf, 0x60, 0x5b, 0xce, 0x92, 0x31, 0x22, 0x56, 0x1f, 0x23, 0x42, 0xd9, 0xdc, 0xc0, 0xf3, + 0x82, 0xfb, 0xe9, 0x19, 0x5b, 0x61, 0xa5, 0x58, 0x40, 0xd1, 0x73, 0x30, 0xdc, 0x0a, 0xc9, 0xbe, + 0x1b, 0x08, 0xfe, 0x51, 0x7b, 0xbb, 0xa8, 0x89, 
0x72, 0xac, 0x30, 0xa8, 0xdc, 0x15, 0xb9, 0x7e, + 0x83, 0xc8, 0x34, 0xcf, 0x25, 0x96, 0x09, 0x8a, 0xc7, 0x5e, 0xd6, 0xca, 0xb1, 0x81, 0x85, 0xde, + 0x82, 0x32, 0xfb, 0xcf, 0x4e, 0x94, 0x93, 0x67, 0xae, 0x11, 0x09, 0x0f, 0x04, 0x01, 0x9c, 0xd0, + 0x42, 0xd7, 0x01, 0x62, 0x19, 0x9f, 0x37, 0x12, 0x61, 0x65, 0x14, 0xaf, 0xad, 0x22, 0xf7, 0x46, + 0x58, 0xc3, 0x42, 0xcf, 0x42, 0x39, 0x76, 0x5c, 0xef, 0xb6, 0xeb, 0x93, 0x88, 0xa9, 0x9c, 0x8b, + 0x32, 0x9f, 0x81, 0x28, 0xc4, 0x09, 0x9c, 0xf2, 0x3a, 0xcc, 0xe7, 0x9a, 0xe7, 0xbd, 0x1a, 0x66, + 0xd8, 0x8c, 0xd7, 0xb9, 0xad, 0x4a, 0xb1, 0x86, 0x61, 0xbf, 0x0a, 0xd3, 0xb5, 0xa0, 0x59, 0x0b, + 0xc2, 0x78, 0x25, 0x08, 0xef, 0x3b, 0x61, 0x53, 0xce, 0x5f, 0x45, 0x86, 0xd6, 0xa7, 0x67, 0xcf, + 0x00, 0xdf, 0x99, 0x46, 0xd0, 0xfc, 0x17, 0x19, 0xb7, 0x73, 0x42, 0x5f, 0x8b, 0x06, 0xbb, 0x77, + 0x55, 0x8a, 0xbb, 0x1b, 0x4e, 0x4c, 0xd0, 0x1d, 0x96, 0x16, 0x2b, 0xb9, 0x82, 0x44, 0xf5, 0x67, + 0xb4, 0xb4, 0x58, 0x09, 0x30, 0xf3, 0xce, 0x32, 0xeb, 0xdb, 0xbf, 0x5e, 0x64, 0xa7, 0x51, 0x2a, + 0xe3, 0x1b, 0xfa, 0x12, 0x8c, 0x47, 0xe4, 0xb6, 0xeb, 0xb7, 0x1f, 0x48, 0x21, 0xbc, 0x8b, 0xb7, + 0x4c, 0x7d, 0x59, 0xc7, 0xe4, 0xaa, 0x3c, 0xb3, 0x0c, 0xa7, 0xa8, 0xd1, 0x79, 0x0a, 0xdb, 0xfe, + 0x42, 0x74, 0x37, 0x22, 0xa1, 0xc8, 0x38, 0xc6, 0xe6, 0x09, 0xcb, 0x42, 0x9c, 0xc0, 0xe9, 0xba, + 0x64, 0x7f, 0xd6, 0x03, 0x1f, 0x07, 0x41, 0x2c, 0x57, 0x32, 0xcb, 0x59, 0xa3, 0x95, 0x63, 0x03, + 0x0b, 0xad, 0x00, 0x8a, 0xda, 0xad, 0x96, 0xc7, 0xde, 0xdb, 0x1d, 0xef, 0x46, 0x18, 0xb4, 0x5b, + 0xfc, 0xad, 0xb3, 0xb8, 0x78, 0x81, 0x5e, 0x61, 0xf5, 0x0e, 0x28, 0xce, 0xa8, 0x41, 0x4f, 0x9f, + 0xad, 0x88, 0xfd, 0x66, 0xab, 0xbb, 0x28, 0xd4, 0xeb, 0x75, 0x56, 0x84, 0x25, 0x8c, 0x2e, 0x26, + 0xd6, 0x3c, 0xc7, 0x1c, 0x4c, 0x16, 0x13, 0x56, 0xa5, 0x58, 0xc3, 0x40, 0xcb, 0x30, 0x14, 0x1d, + 0x44, 0x8d, 0x58, 0x04, 0x41, 0xca, 0xc9, 0x1d, 0x59, 0x67, 0x28, 0x5a, 0x3e, 0x03, 0x5e, 0x05, + 0xcb, 0xba, 0xf6, 0xf7, 0xb0, 0xcb, 0x90, 0xe5, 0xa7, 0x8a, 0xdb, 0x21, 0x41, 0x7b, 0x30, 0xd6, + 0x62, 0x53, 0x2e, 0xa2, 0x27, 0x8b, 0x79, 0x7b, 0xa9, 0x4f, 0xa9, 0xf6, 0x3e, 0x3d, 0x68, 0x94, + 0xd6, 0x89, 0x89, 0x0b, 0x35, 0x9d, 0x1c, 0x36, 0xa9, 0xdb, 0x3f, 0x38, 0xc5, 0xce, 0xdc, 0x3a, + 0x17, 0x55, 0x87, 0x84, 0xc5, 0xaf, 0xe0, 0xcb, 0x67, 0xf3, 0x75, 0x26, 0xc9, 0x17, 0x09, 0xab, + 0x61, 0x2c, 0xeb, 0xa2, 0x37, 0xd9, 0xdb, 0x34, 0x3f, 0xe8, 0x7a, 0xa5, 0x09, 0xe6, 0x58, 0xc6, + 0x33, 0xb4, 0xa8, 0x88, 0x35, 0x22, 0xe8, 0x36, 0x8c, 0x89, 0x74, 0x46, 0x42, 0x29, 0x56, 0x34, + 0x94, 0x1e, 0x63, 0x58, 0x07, 0x1e, 0xa7, 0x0b, 0xb0, 0x59, 0x19, 0x6d, 0xc3, 0x25, 0x2d, 0xb7, + 0xdf, 0x8d, 0xd0, 0x61, 0xef, 0x95, 0x2e, 0xdb, 0x44, 0xda, 0xb9, 0xf9, 0xe4, 0xd1, 0x61, 0xe5, + 0xd2, 0x46, 0x37, 0x44, 0xdc, 0x9d, 0x0e, 0xba, 0x03, 0xd3, 0xdc, 0xb1, 0xae, 0x4a, 0x9c, 0xa6, + 0xe7, 0xfa, 0xea, 0x60, 0xe6, 0xeb, 0xf0, 0xe2, 0xd1, 0x61, 0x65, 0x7a, 0x21, 0x0b, 0x01, 0x67, + 0xd7, 0x43, 0x9f, 0x85, 0x72, 0xd3, 0x8f, 0xc4, 0x18, 0x0c, 0x1a, 0x69, 0x2b, 0xcb, 0xd5, 0xf5, + 0xba, 0xfa, 0xfe, 0xe4, 0x0f, 0x4e, 0x2a, 0xa0, 0x6d, 0xae, 0x18, 0x53, 0x72, 0xe8, 0x50, 0x7e, + 0x8a, 0x72, 0xb1, 0x24, 0x0c, 0xd7, 0x1a, 0xae, 0x11, 0x56, 0xa6, 0xa9, 0x86, 0xd7, 0x8d, 0x41, + 0x18, 0xbd, 0x01, 0x88, 0x32, 0x6a, 0x6e, 0x83, 0x2c, 0x34, 0x58, 0x10, 0x6b, 0xa6, 0x47, 0x1c, + 0x36, 0x5c, 0x19, 0x50, 0xbd, 0x03, 0x03, 0x67, 0xd4, 0x42, 0x37, 0xe9, 0x41, 0xa6, 0x97, 0x0a, + 0x13, 0x5b, 0xc9, 0xdc, 0xcf, 0x54, 0x49, 0x2b, 0x24, 0x0d, 0x27, 0x26, 0x4d, 0x93, 0x22, 0x4e, + 0xd5, 0xa3, 0x77, 0xa9, 0xca, 0x67, 0x03, 0x66, 0xa4, 0x8a, 0xce, 0x9c, 
0x36, 0x54, 0x2e, 0xde, + 0x09, 0xa2, 0x78, 0x9d, 0xc4, 0xf7, 0x83, 0x70, 0x57, 0x04, 0x06, 0x4b, 0x62, 0x54, 0x26, 0x20, + 0xac, 0xe3, 0x51, 0x3e, 0x98, 0x3d, 0x0e, 0xaf, 0x56, 0xd9, 0x0b, 0xdd, 0x70, 0xb2, 0x4f, 0x6e, + 0xf2, 0x62, 0x2c, 0xe1, 0x12, 0x75, 0xb5, 0xb6, 0xc4, 0x5e, 0xdb, 0x52, 0xa8, 0xab, 0xb5, 0x25, + 0x2c, 0xe1, 0x88, 0x74, 0xa6, 0x04, 0x1d, 0xcf, 0xd7, 0x6a, 0x76, 0x5e, 0x07, 0x7d, 0x66, 0x05, + 0xf5, 0x61, 0x52, 0x25, 0x23, 0xe5, 0x11, 0xd3, 0xa2, 0x99, 0x09, 0xb6, 0x48, 0xfa, 0x0f, 0xb7, + 0xa6, 0xf4, 0xc4, 0xab, 0x29, 0x4a, 0xb8, 0x83, 0xb6, 0x11, 0x3b, 0x64, 0xb2, 0x67, 0x3e, 0xa2, + 0x79, 0x28, 0x47, 0xed, 0xcd, 0x66, 0xb0, 0xe7, 0xb8, 0x3e, 0x7b, 0x1c, 0xd3, 0x98, 0xac, 0xba, + 0x04, 0xe0, 0x04, 0x07, 0xad, 0xc0, 0xb0, 0x23, 0x95, 0xc0, 0x28, 0x3f, 0x98, 0x80, 0x52, 0xfd, + 0x72, 0xff, 0x5a, 0xa9, 0xf6, 0x55, 0x75, 0xd1, 0x6b, 0x30, 0x26, 0xdc, 0xa9, 0x78, 0x88, 0x05, + 0xf6, 0x78, 0xa5, 0xd9, 0xcb, 0xd7, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x17, 0x61, 0x9c, 0x52, 0x49, + 0x0e, 0xb6, 0x99, 0xf3, 0xfd, 0x9c, 0x88, 0x5a, 0x9e, 0x09, 0xbd, 0x32, 0x4e, 0x11, 0x43, 0x4d, + 0x78, 0xc2, 0x69, 0xc7, 0x01, 0x53, 0xa4, 0x9b, 0xeb, 0x7f, 0x23, 0xd8, 0x25, 0x3e, 0x7b, 0xc3, + 0x1a, 0x5e, 0xbc, 0x72, 0x74, 0x58, 0x79, 0x62, 0xa1, 0x0b, 0x1e, 0xee, 0x4a, 0x05, 0xdd, 0x85, + 0x91, 0x38, 0xf0, 0x98, 0xe5, 0x3a, 0x65, 0x25, 0x2e, 0xe4, 0xc7, 0xde, 0xd9, 0x50, 0x68, 0xba, + 0x12, 0x49, 0x55, 0xc5, 0x3a, 0x1d, 0xb4, 0xc1, 0xf7, 0x18, 0x8b, 0x4a, 0x4a, 0xa2, 0x99, 0xc7, + 0xf2, 0x07, 0x46, 0x05, 0x2f, 0x35, 0xb7, 0xa0, 0xa8, 0x89, 0x75, 0x32, 0xe8, 0x06, 0x4c, 0xb5, + 0x42, 0x37, 0x60, 0x0b, 0x5b, 0x3d, 0x62, 0xcc, 0x98, 0xa9, 0x05, 0x6a, 0x69, 0x04, 0xdc, 0x59, + 0x87, 0x0a, 0x99, 0xb2, 0x70, 0xe6, 0x22, 0xcf, 0x53, 0xc5, 0x19, 0x6f, 0x5e, 0x86, 0x15, 0x14, + 0xad, 0xb1, 0x73, 0x99, 0x8b, 0x83, 0x33, 0xb3, 0xf9, 0x41, 0x18, 0x74, 0xb1, 0x91, 0xf3, 0x4b, + 0xea, 0x2f, 0x4e, 0x28, 0xd0, 0x7b, 0x23, 0xda, 0x71, 0x42, 0x52, 0x0b, 0x83, 0x06, 0xe1, 0x9d, + 0xe1, 0x46, 0xf3, 0x8f, 0xf3, 0xe0, 0x89, 0xf4, 0xde, 0xa8, 0x67, 0x21, 0xe0, 0xec, 0x7a, 0xa8, + 0xa9, 0xa5, 0x67, 0xa6, 0x6c, 0x68, 0x34, 0xf3, 0x44, 0x17, 0x33, 0xa3, 0x14, 0xcf, 0x9a, 0xac, + 0x45, 0xa3, 0x38, 0xc2, 0x29, 0x9a, 0xb3, 0xdf, 0x0e, 0x53, 0x1d, 0xf7, 0xc5, 0x89, 0xe2, 0x76, + 0xff, 0xc9, 0x00, 0x94, 0x95, 0x2a, 0x1c, 0xcd, 0x9b, 0x2f, 0x1c, 0x17, 0xd3, 0x2f, 0x1c, 0xc3, + 0x94, 0x23, 0xd7, 0x1f, 0x35, 0x36, 0x0c, 0xa3, 0xb8, 0x42, 0x7e, 0x96, 0x2c, 0x9d, 0xa7, 0xee, + 0xe9, 0x01, 0xa7, 0x69, 0x36, 0x8a, 0x7d, 0x3f, 0x95, 0x94, 0xba, 0x2a, 0x4b, 0xfa, 0x4c, 0x52, + 0x4b, 0x85, 0xff, 0x56, 0xd0, 0x5c, 0xad, 0xa5, 0xb3, 0x36, 0xd6, 0x68, 0x21, 0xe6, 0x30, 0x26, + 0xbe, 0x51, 0xe6, 0x86, 0x89, 0x6f, 0x43, 0x0f, 0x29, 0xbe, 0x49, 0x02, 0x38, 0xa1, 0x85, 0x3c, + 0x98, 0x6a, 0x98, 0x09, 0x37, 0x95, 0xd7, 0xdb, 0x53, 0x3d, 0x53, 0x5f, 0xb6, 0xb5, 0xec, 0x66, + 0x4b, 0x69, 0x2a, 0xb8, 0x93, 0x30, 0x7a, 0x0d, 0x86, 0xdf, 0x0b, 0x22, 0xb6, 0xf9, 0xc4, 0x0d, + 0x2f, 0xbd, 0x83, 0x86, 0xdf, 0xbc, 0x53, 0x67, 0xe5, 0xc7, 0x87, 0x95, 0x91, 0x5a, 0xd0, 0x94, + 0x7f, 0xb1, 0xaa, 0x80, 0x1e, 0xc0, 0xb4, 0x71, 0x2e, 0xaa, 0xee, 0x42, 0xff, 0xdd, 0xbd, 0x24, + 0x9a, 0x9b, 0x5e, 0xcd, 0xa2, 0x84, 0xb3, 0x1b, 0xa0, 0x87, 0x8d, 0x1f, 0x88, 0x64, 0xb5, 0x92, + 0x8b, 0x60, 0xcc, 0x42, 0x59, 0xf7, 0x0d, 0x4f, 0x21, 0xe0, 0xce, 0x3a, 0xf6, 0xd7, 0xf8, 0xcb, + 0x81, 0xd0, 0x2f, 0x92, 0xa8, 0xed, 0x9d, 0x45, 0x2e, 0xa4, 0x65, 0x43, 0xf5, 0xf9, 0xd0, 0xaf, + 0x53, 0xbf, 0x69, 0xb1, 0xd7, 0xa9, 0x0d, 0xb2, 0xd7, 0xf2, 0xa8, 0x94, 0xfb, 0xe8, 0x3b, 0xfe, + 
0x26, 0x0c, 0xc7, 0xa2, 0xb5, 0x6e, 0xe9, 0x9b, 0xb4, 0x4e, 0xb1, 0x17, 0x3a, 0xc5, 0x5f, 0xc8, + 0x52, 0xac, 0xc8, 0xd8, 0xff, 0x94, 0xcf, 0x80, 0x84, 0x9c, 0x81, 0x1a, 0xaa, 0x6a, 0xaa, 0xa1, + 0x2a, 0x3d, 0xbe, 0x20, 0x47, 0x1d, 0xf5, 0x4f, 0xcc, 0x7e, 0x33, 0x51, 0xee, 0xa3, 0xfe, 0x2c, + 0x6a, 0xff, 0x98, 0x05, 0xe7, 0xb3, 0xec, 0x88, 0x28, 0x4f, 0xc8, 0x05, 0x49, 0xf5, 0x4c, 0xac, + 0x46, 0xf0, 0x9e, 0x28, 0xc7, 0x0a, 0xa3, 0xef, 0xcc, 0x08, 0x27, 0x0b, 0x9f, 0x76, 0x07, 0xc6, + 0x6a, 0x21, 0xd1, 0xee, 0x80, 0xd7, 0xb9, 0x9b, 0x19, 0xef, 0xcf, 0x73, 0x27, 0x76, 0x31, 0xb3, + 0x7f, 0xbe, 0x00, 0xe7, 0xf9, 0x3b, 0xcf, 0xc2, 0x7e, 0xe0, 0x36, 0x6b, 0x41, 0x53, 0x64, 0xb5, + 0x78, 0x1b, 0x46, 0x5b, 0x9a, 0xf4, 0xdf, 0x2d, 0x80, 0x93, 0xae, 0x25, 0x48, 0xa4, 0x30, 0xbd, + 0x14, 0x1b, 0xb4, 0x50, 0x13, 0x46, 0xc9, 0xbe, 0xdb, 0x50, 0x8f, 0x05, 0x85, 0x13, 0xdf, 0x0d, + 0xaa, 0x95, 0x65, 0x8d, 0x0e, 0x36, 0xa8, 0x3e, 0x82, 0x44, 0x67, 0xf6, 0x8f, 0x5b, 0xf0, 0x58, + 0x4e, 0xb8, 0x27, 0xda, 0xdc, 0x7d, 0xf6, 0xa2, 0x26, 0x72, 0x26, 0xa9, 0xe6, 0xf8, 0x3b, 0x1b, + 0x16, 0x50, 0xf4, 0x79, 0x00, 0xfe, 0x4e, 0x46, 0x85, 0x92, 0x5e, 0x71, 0x71, 0x8c, 0x90, 0x1e, + 0x5a, 0x28, 0x06, 0x59, 0x1f, 0x6b, 0xb4, 0xec, 0x9f, 0x29, 0xc2, 0x00, 0x7b, 0x97, 0x41, 0x2b, + 0x30, 0xb4, 0xc3, 0x83, 0x1b, 0xf7, 0x13, 0x47, 0x39, 0x91, 0xee, 0x78, 0x01, 0x96, 0x95, 0xd1, + 0x1a, 0x9c, 0xe3, 0xc1, 0xa1, 0xbd, 0x2a, 0xf1, 0x9c, 0x03, 0xa9, 0x24, 0xe0, 0x79, 0x86, 0x54, + 0x58, 0x89, 0xd5, 0x4e, 0x14, 0x9c, 0x55, 0x0f, 0xbd, 0x0e, 0xe3, 0xb1, 0xbb, 0x47, 0x82, 0x76, + 0x2c, 0x29, 0xf1, 0xb0, 0xd0, 0x8a, 0x8d, 0xdb, 0x30, 0xa0, 0x38, 0x85, 0x4d, 0xc5, 0x9d, 0x56, + 0x87, 0x3a, 0x44, 0xcb, 0xdc, 0x6f, 0xaa, 0x40, 0x4c, 0x5c, 0x66, 0x40, 0xd4, 0x66, 0xe6, 0x52, + 0x1b, 0x3b, 0x21, 0x89, 0x76, 0x02, 0xaf, 0x29, 0xd2, 0x54, 0x27, 0x06, 0x44, 0x29, 0x38, 0xee, + 0xa8, 0x41, 0xa9, 0x6c, 0x39, 0xae, 0xd7, 0x0e, 0x49, 0x42, 0x65, 0xd0, 0xa4, 0xb2, 0x92, 0x82, + 0xe3, 0x8e, 0x1a, 0x74, 0x1d, 0x4d, 0x8b, 0xbc, 0xd1, 0xd2, 0xd9, 0x5d, 0x59, 0x85, 0x0d, 0x49, + 0xb7, 0x9f, 0x2e, 0xd1, 0x5e, 0x84, 0xdd, 0x8c, 0xca, 0x3c, 0xad, 0x69, 0xf1, 0x84, 0xc3, 0x8f, + 0xa4, 0xf2, 0x30, 0xd9, 0x8b, 0x7f, 0xcf, 0x82, 0x73, 0x19, 0xd6, 0xa7, 0xfc, 0xa8, 0xda, 0x76, + 0xa3, 0x58, 0xe5, 0x52, 0xd1, 0x8e, 0x2a, 0x5e, 0x8e, 0x15, 0x06, 0xdd, 0x0f, 0xfc, 0x30, 0x4c, + 0x1f, 0x80, 0xc2, 0xba, 0x4b, 0x40, 0x4f, 0x76, 0x00, 0xa2, 0x2b, 0x50, 0x6a, 0x47, 0x44, 0xc6, + 0x69, 0x52, 0xe7, 0x37, 0xd3, 0xeb, 0x32, 0x08, 0x65, 0x4d, 0xb7, 0x95, 0x4a, 0x55, 0x63, 0x4d, + 0xb9, 0x9e, 0x94, 0xc3, 0xec, 0xaf, 0x16, 0xe1, 0x62, 0xae, 0x9d, 0x39, 0xed, 0xd2, 0x5e, 0xe0, + 0xbb, 0x71, 0xa0, 0xde, 0xfc, 0x78, 0xa4, 0x10, 0xd2, 0xda, 0x59, 0x13, 0xe5, 0x58, 0x61, 0xa0, + 0xab, 0x32, 0x83, 0x79, 0x3a, 0x5b, 0xcc, 0x62, 0xd5, 0x48, 0x62, 0xde, 0x6f, 0x26, 0xae, 0xa7, + 0xa0, 0xd4, 0x0a, 0x02, 0x2f, 0x7d, 0x18, 0xd1, 0xee, 0x06, 0x81, 0x87, 0x19, 0x10, 0x7d, 0x42, + 0x8c, 0x43, 0xea, 0x91, 0x0b, 0x3b, 0xcd, 0x20, 0xd2, 0x06, 0xe3, 0x19, 0x18, 0xda, 0x25, 0x07, + 0xa1, 0xeb, 0x6f, 0xa7, 0x1f, 0x3f, 0x6f, 0xf1, 0x62, 0x2c, 0xe1, 0x66, 0xb2, 0x84, 0xa1, 0xd3, + 0x4e, 0xa1, 0x35, 0xdc, 0xf3, 0x6a, 0xfb, 0xa1, 0x22, 0x4c, 0xe0, 0xc5, 0xea, 0xb7, 0x26, 0xe2, + 0x6e, 0xe7, 0x44, 0x9c, 0x76, 0x0a, 0xad, 0xde, 0xb3, 0xf1, 0xcb, 0x16, 0x4c, 0xb0, 0x80, 0xc2, + 0x22, 0x3e, 0x85, 0x1b, 0xf8, 0x67, 0xc0, 0xba, 0x3d, 0x05, 0x03, 0x21, 0x6d, 0x34, 0x9d, 0x17, + 0x87, 0xf5, 0x04, 0x73, 0x18, 0x7a, 0x02, 0x4a, 0xac, 0x0b, 0x74, 0xf2, 0x46, 0x79, 0x4a, 0x81, + 0xaa, 0x13, 0x3b, 0x98, 
0x95, 0x32, 0xa7, 0x6b, 0x4c, 0x5a, 0x9e, 0xcb, 0x3b, 0x9d, 0x3c, 0x28, + 0x7c, 0x34, 0x9c, 0xae, 0x33, 0xbb, 0xf6, 0xc1, 0x9c, 0xae, 0xb3, 0x49, 0x76, 0x17, 0x8b, 0xfe, + 0x7b, 0x01, 0x2e, 0x67, 0xd6, 0xeb, 0xdb, 0xe9, 0xba, 0x7b, 0xed, 0xd3, 0xb1, 0x61, 0xc9, 0x36, + 0x2d, 0x29, 0x9e, 0xa1, 0x69, 0x49, 0xa9, 0x5f, 0xce, 0x71, 0xa0, 0x0f, 0x5f, 0xe8, 0xcc, 0x21, + 0xfb, 0x88, 0xf8, 0x42, 0x67, 0xf6, 0x2d, 0x47, 0xac, 0xfb, 0xb3, 0x42, 0xce, 0xb7, 0x30, 0x01, + 0xef, 0x1a, 0x3d, 0x67, 0x18, 0x30, 0x12, 0x9c, 0xf0, 0x28, 0x3f, 0x63, 0x78, 0x19, 0x56, 0x50, + 0xe4, 0x6a, 0x5e, 0xc5, 0x85, 0xfc, 0xac, 0x89, 0xb9, 0x4d, 0xcd, 0x99, 0xef, 0x3f, 0x6a, 0x08, + 0x32, 0x3c, 0x8c, 0xd7, 0x34, 0xa1, 0xbc, 0xd8, 0xbf, 0x50, 0x3e, 0x9a, 0x2d, 0x90, 0xa3, 0x05, + 0x98, 0xd8, 0x73, 0x7d, 0x96, 0x05, 0xdf, 0x64, 0x45, 0x55, 0x90, 0x8d, 0x35, 0x13, 0x8c, 0xd3, + 0xf8, 0xb3, 0xaf, 0xc1, 0xd8, 0xc3, 0xab, 0x23, 0xbf, 0x59, 0x84, 0xc7, 0xbb, 0x6c, 0x7b, 0x7e, + 0xd6, 0x1b, 0x73, 0xa0, 0x9d, 0xf5, 0x1d, 0xf3, 0x50, 0x83, 0xf3, 0x5b, 0x6d, 0xcf, 0x3b, 0x60, + 0xd6, 0x9b, 0xa4, 0x29, 0x31, 0x04, 0xaf, 0xf8, 0x84, 0x4c, 0xe2, 0xb0, 0x92, 0x81, 0x83, 0x33, + 0x6b, 0xa2, 0x37, 0x00, 0x05, 0x22, 0x65, 0xeb, 0x0d, 0xe2, 0x0b, 0xad, 0x3a, 0x1b, 0xf8, 0x62, + 0xb2, 0x19, 0xef, 0x74, 0x60, 0xe0, 0x8c, 0x5a, 0x94, 0xe9, 0xa7, 0xb7, 0xd2, 0x81, 0xea, 0x56, + 0x8a, 0xe9, 0xc7, 0x3a, 0x10, 0x9b, 0xb8, 0xe8, 0x06, 0x4c, 0x39, 0xfb, 0x8e, 0xcb, 0x83, 0xcf, + 0x49, 0x02, 0x9c, 0xeb, 0x57, 0x4a, 0xb0, 0x85, 0x34, 0x02, 0xee, 0xac, 0x93, 0x72, 0x6b, 0x1e, + 0xcc, 0x77, 0x6b, 0xee, 0x7e, 0x2e, 0xf6, 0xd2, 0xe9, 0xda, 0xff, 0xd1, 0xa2, 0xd7, 0x57, 0x46, + 0xda, 0x75, 0x3a, 0x0e, 0x4a, 0x37, 0xa9, 0x79, 0x18, 0x4f, 0x6b, 0xf6, 0x19, 0x09, 0x10, 0x9b, + 0xb8, 0x7c, 0x41, 0x44, 0x89, 0x8b, 0x8b, 0xc1, 0xba, 0x8b, 0x10, 0x02, 0x0a, 0x03, 0x7d, 0x01, + 0x86, 0x9a, 0xee, 0xbe, 0x1b, 0x05, 0xa1, 0xd8, 0x2c, 0x27, 0x74, 0x14, 0x48, 0xce, 0xc1, 0x2a, + 0x27, 0x83, 0x25, 0x3d, 0xfb, 0x87, 0x0a, 0x30, 0x26, 0x5b, 0x7c, 0xb3, 0x1d, 0xc4, 0xce, 0x19, + 0x5c, 0xcb, 0x37, 0x8c, 0x6b, 0xf9, 0x13, 0xdd, 0xe2, 0x28, 0xb0, 0x2e, 0xe5, 0x5e, 0xc7, 0x77, + 0x52, 0xd7, 0xf1, 0xd3, 0xbd, 0x49, 0x75, 0xbf, 0x86, 0xff, 0x99, 0x05, 0x53, 0x06, 0xfe, 0x19, + 0xdc, 0x06, 0x2b, 0xe6, 0x6d, 0xf0, 0x64, 0xcf, 0x6f, 0xc8, 0xb9, 0x05, 0xbe, 0xaf, 0x98, 0xea, + 0x3b, 0x3b, 0xfd, 0xdf, 0x83, 0xd2, 0x8e, 0x13, 0x36, 0xbb, 0xc5, 0x6b, 0xed, 0xa8, 0x34, 0x77, + 0xd3, 0x09, 0x9b, 0xfc, 0x0c, 0x7f, 0x4e, 0x25, 0x7a, 0x74, 0xc2, 0x66, 0x4f, 0x8f, 0x2e, 0xd6, + 0x14, 0x7a, 0x15, 0x06, 0xa3, 0x46, 0xd0, 0x52, 0xf6, 0x96, 0x57, 0x78, 0x12, 0x48, 0x5a, 0x72, + 0x7c, 0x58, 0x41, 0x66, 0x73, 0xb4, 0x18, 0x0b, 0x7c, 0xf4, 0x36, 0x8c, 0xb1, 0x5f, 0xca, 0xee, + 0xa0, 0x98, 0x9f, 0x25, 0xa0, 0xae, 0x23, 0x72, 0xf3, 0x15, 0xa3, 0x08, 0x9b, 0xa4, 0x66, 0xb7, + 0xa1, 0xac, 0x3e, 0xeb, 0x91, 0x7a, 0xe2, 0xfc, 0xbb, 0x22, 0x9c, 0xcb, 0x58, 0x73, 0x28, 0x32, + 0x66, 0xe2, 0x85, 0x3e, 0x97, 0xea, 0x07, 0x9c, 0x8b, 0x88, 0x49, 0x43, 0x4d, 0xb1, 0xb6, 0xfa, + 0x6e, 0xf4, 0x6e, 0x44, 0xd2, 0x8d, 0xd2, 0xa2, 0xde, 0x8d, 0xd2, 0xc6, 0xce, 0x6c, 0xa8, 0x69, + 0x43, 0xaa, 0xa7, 0x8f, 0x74, 0x4e, 0xff, 0xb8, 0x08, 0xe7, 0xb3, 0x42, 0xbb, 0xa0, 0xef, 0x4e, + 0x65, 0x83, 0x79, 0xa9, 0xdf, 0xa0, 0x30, 0x3c, 0x45, 0x8c, 0xc8, 0x6d, 0x3c, 0x67, 0xe6, 0x87, + 0xe9, 0x39, 0xcc, 0xa2, 0x4d, 0xe6, 0xbe, 0x19, 0xf2, 0x2c, 0x3e, 0xf2, 0xf8, 0xf8, 0x74, 0xdf, + 0x1d, 0x10, 0xe9, 0x7f, 0xa2, 0x94, 0xfb, 0xa6, 0x2c, 0xee, 0xed, 0xbe, 0x29, 0x5b, 0x9e, 0x75, + 0x61, 0x44, 0xfb, 0x9a, 0x47, 0x3a, 0xe3, 0xbb, 
0xf4, 0xb6, 0xd2, 0xfa, 0xfd, 0x48, 0x67, 0xfd, + 0xc7, 0x2d, 0x48, 0x19, 0x37, 0x2a, 0x75, 0x97, 0x95, 0xab, 0xee, 0xba, 0x02, 0xa5, 0x30, 0xf0, + 0x48, 0x3a, 0x41, 0x0b, 0x0e, 0x3c, 0x82, 0x19, 0x84, 0x62, 0xc4, 0x89, 0xb2, 0x63, 0x54, 0x17, + 0xe4, 0x84, 0x88, 0xf6, 0x14, 0x0c, 0x78, 0x64, 0x9f, 0x78, 0xe9, 0xe8, 0xe7, 0xb7, 0x69, 0x21, + 0xe6, 0x30, 0xfb, 0x97, 0x4b, 0x70, 0xa9, 0xab, 0x03, 0x34, 0x15, 0x87, 0xb6, 0x9d, 0x98, 0xdc, + 0x77, 0x0e, 0xd2, 0x61, 0x8a, 0x6f, 0xf0, 0x62, 0x2c, 0xe1, 0xcc, 0xde, 0x9b, 0x87, 0x25, 0x4c, + 0x29, 0x07, 0x45, 0x34, 0x42, 0x01, 0x7d, 0x04, 0x79, 0xdd, 0xaf, 0x03, 0x44, 0x91, 0xb7, 0xec, + 0x53, 0xee, 0xae, 0x29, 0x0c, 0xc9, 0x93, 0xf0, 0x95, 0xf5, 0xdb, 0x02, 0x82, 0x35, 0x2c, 0x54, + 0x85, 0xc9, 0x56, 0x18, 0xc4, 0x5c, 0xd7, 0x5a, 0xe5, 0x66, 0x3e, 0x03, 0xa6, 0xef, 0x69, 0x2d, + 0x05, 0xc7, 0x1d, 0x35, 0xd0, 0xcb, 0x30, 0x22, 0xfc, 0x51, 0x6b, 0x41, 0xe0, 0x09, 0x35, 0x90, + 0x32, 0x1a, 0xa9, 0x27, 0x20, 0xac, 0xe3, 0x69, 0xd5, 0x98, 0x02, 0x77, 0x28, 0xb3, 0x1a, 0x57, + 0xe2, 0x6a, 0x78, 0xa9, 0x30, 0x4f, 0xc3, 0x7d, 0x85, 0x79, 0x4a, 0x14, 0x63, 0xe5, 0xbe, 0xdf, + 0xac, 0xa0, 0xa7, 0x2a, 0xe9, 0x17, 0x4a, 0x70, 0x4e, 0x2c, 0x9c, 0x47, 0xbd, 0x5c, 0x1e, 0x51, + 0xf6, 0xf9, 0x6f, 0xad, 0x99, 0xb3, 0x5e, 0x33, 0x3f, 0x6c, 0x81, 0xc9, 0x5e, 0xa1, 0xff, 0x2f, + 0x37, 0xce, 0xfb, 0xcb, 0xb9, 0xec, 0x5a, 0x53, 0x5e, 0x20, 0x1f, 0x30, 0xe2, 0xbb, 0xfd, 0x1f, + 0x2c, 0x78, 0xb2, 0x27, 0x45, 0xb4, 0x0c, 0x65, 0xc6, 0x03, 0x6a, 0xd2, 0xd9, 0xd3, 0xca, 0x0c, + 0x50, 0x02, 0x72, 0x58, 0xd2, 0xa4, 0x26, 0x5a, 0xee, 0x08, 0xa8, 0xff, 0x4c, 0x46, 0x40, 0xfd, + 0x69, 0x63, 0x78, 0x1e, 0x32, 0xa2, 0xfe, 0xd7, 0x8a, 0x30, 0xc8, 0x57, 0xfc, 0x19, 0x88, 0x61, + 0x2b, 0x42, 0x6f, 0xdb, 0x25, 0x8e, 0x14, 0xef, 0xcb, 0x5c, 0xd5, 0x89, 0x1d, 0xce, 0x26, 0xa8, + 0xdb, 0x2a, 0xd1, 0xf0, 0xa2, 0x39, 0xe3, 0x3e, 0x9b, 0x4d, 0x29, 0x26, 0x81, 0xd3, 0xd0, 0x6e, + 0xb7, 0x2f, 0x01, 0x44, 0x2c, 0xd1, 0x3c, 0xa5, 0x21, 0x22, 0x92, 0x7d, 0xb2, 0x4b, 0xeb, 0x75, + 0x85, 0xcc, 0xfb, 0x90, 0xec, 0x74, 0x05, 0xc0, 0x1a, 0xc5, 0xd9, 0x57, 0xa0, 0xac, 0x90, 0x7b, + 0x69, 0x71, 0x46, 0x75, 0xe6, 0xe2, 0x73, 0x30, 0x91, 0x6a, 0xeb, 0x44, 0x4a, 0xa0, 0x5f, 0xb1, + 0x60, 0x82, 0x77, 0x79, 0xd9, 0xdf, 0x17, 0x67, 0xea, 0xfb, 0x70, 0xde, 0xcb, 0x38, 0xdb, 0xc4, + 0x8c, 0xf6, 0x7f, 0x16, 0x2a, 0xa5, 0x4f, 0x16, 0x14, 0x67, 0xb6, 0x81, 0xae, 0xd1, 0x75, 0x4b, + 0xcf, 0x2e, 0xc7, 0x13, 0xbe, 0x43, 0xa3, 0x7c, 0xcd, 0xf2, 0x32, 0xac, 0xa0, 0xf6, 0xef, 0x58, + 0x30, 0xc5, 0x7b, 0x7e, 0x8b, 0x1c, 0xa8, 0x1d, 0xfe, 0x61, 0xf6, 0x5d, 0xe4, 0xb8, 0x28, 0xe4, + 0xe4, 0xb8, 0xd0, 0x3f, 0xad, 0xd8, 0xf5, 0xd3, 0x7e, 0xde, 0x02, 0xb1, 0x02, 0xcf, 0x40, 0x94, + 0xff, 0x76, 0x53, 0x94, 0x9f, 0xcd, 0x5f, 0xd4, 0x39, 0x32, 0xfc, 0x9f, 0x5a, 0x30, 0xc9, 0x11, + 0x92, 0xb7, 0xe4, 0x0f, 0x75, 0x1e, 0xfa, 0x49, 0x56, 0xa7, 0xb2, 0x53, 0x67, 0x7f, 0x94, 0x31, + 0x59, 0xa5, 0xae, 0x93, 0xd5, 0x94, 0x1b, 0xe8, 0x04, 0x49, 0x18, 0x4f, 0x1c, 0x2b, 0xda, 0xfe, + 0x23, 0x0b, 0x10, 0x6f, 0xc6, 0x60, 0x7f, 0x28, 0x53, 0xc1, 0x4a, 0xb5, 0xeb, 0x22, 0x39, 0x6a, + 0x14, 0x04, 0x6b, 0x58, 0xa7, 0x32, 0x3c, 0x29, 0x83, 0x80, 0x62, 0x6f, 0x83, 0x80, 0x13, 0x8c, + 0xe8, 0xd7, 0x4a, 0x90, 0x36, 0xe6, 0x47, 0xf7, 0x60, 0xb4, 0xe1, 0xb4, 0x9c, 0x4d, 0xd7, 0x73, + 0x63, 0x97, 0x44, 0xdd, 0x2c, 0x89, 0x96, 0x34, 0x3c, 0xf1, 0xd4, 0xab, 0x95, 0x60, 0x83, 0x0e, + 0x9a, 0x03, 0x68, 0x85, 0xee, 0xbe, 0xeb, 0x91, 0x6d, 0xa6, 0x71, 0x60, 0xde, 0x8a, 0xdc, 0x3c, + 0x46, 0x96, 0x62, 0x0d, 0x23, 0xc3, 0xf1, 0xac, 0xf8, 0xe8, 0x1c, 0xcf, 
0x4a, 0x27, 0x74, 0x3c, + 0x1b, 0xe8, 0xcb, 0xf1, 0x0c, 0xc3, 0x05, 0xc9, 0x22, 0xd1, 0xff, 0x2b, 0xae, 0x47, 0x04, 0x5f, + 0xcc, 0x7d, 0x18, 0x67, 0x8f, 0x0e, 0x2b, 0x17, 0x70, 0x26, 0x06, 0xce, 0xa9, 0x89, 0x3e, 0x0f, + 0x33, 0x8e, 0xe7, 0x05, 0xf7, 0xd5, 0xa8, 0x2d, 0x47, 0x0d, 0xc7, 0xe3, 0x1a, 0xfb, 0x21, 0x46, + 0xf5, 0x89, 0xa3, 0xc3, 0xca, 0xcc, 0x42, 0x0e, 0x0e, 0xce, 0xad, 0x9d, 0xf2, 0x5b, 0x1b, 0xee, + 0xe5, 0xb7, 0x66, 0xef, 0xc2, 0xb9, 0x3a, 0x09, 0x5d, 0x96, 0x22, 0xb2, 0x99, 0x6c, 0xc9, 0x0d, + 0x28, 0x87, 0xa9, 0x43, 0xa8, 0xaf, 0xc0, 0x46, 0x5a, 0x18, 0x5c, 0x79, 0xe8, 0x24, 0x84, 0xec, + 0x3f, 0xb1, 0x60, 0x48, 0x38, 0x14, 0x9c, 0x01, 0xef, 0xb3, 0x60, 0xa8, 0xa0, 0x2b, 0xd9, 0x07, + 0x35, 0xeb, 0x4c, 0xae, 0xf2, 0x79, 0x35, 0xa5, 0x7c, 0x7e, 0xb2, 0x1b, 0x91, 0xee, 0x6a, 0xe7, + 0xbf, 0x55, 0x84, 0x71, 0xd3, 0x99, 0xe2, 0x0c, 0x86, 0x60, 0x1d, 0x86, 0x22, 0xe1, 0xb9, 0x53, + 0xc8, 0xb7, 0x7d, 0x4e, 0x4f, 0x62, 0x62, 0xd8, 0x24, 0x7c, 0x75, 0x24, 0x91, 0x4c, 0x97, 0xa0, + 0xe2, 0x23, 0x74, 0x09, 0xea, 0xe5, 0xcf, 0x52, 0x3a, 0x0d, 0x7f, 0x16, 0xfb, 0xeb, 0xec, 0xb2, + 0xd0, 0xcb, 0xcf, 0x80, 0x8f, 0xb8, 0x61, 0x5e, 0x2b, 0x76, 0x97, 0x95, 0x25, 0x3a, 0x95, 0xc3, + 0x4f, 0xfc, 0x92, 0x05, 0x97, 0x32, 0xbe, 0x4a, 0x63, 0x2e, 0x9e, 0x83, 0x61, 0xa7, 0xdd, 0x74, + 0xd5, 0x5e, 0xd6, 0x1e, 0xa2, 0x16, 0x44, 0x39, 0x56, 0x18, 0x68, 0x09, 0xa6, 0xc8, 0x83, 0x96, + 0xcb, 0x5f, 0x02, 0x75, 0xeb, 0xc3, 0x22, 0x0f, 0xf1, 0xba, 0x9c, 0x06, 0xe2, 0x4e, 0x7c, 0xe5, + 0x0e, 0x5d, 0xcc, 0x75, 0x87, 0xfe, 0x87, 0x16, 0x8c, 0x88, 0x6e, 0x9f, 0xc1, 0x68, 0x7f, 0x87, + 0x39, 0xda, 0x8f, 0x77, 0x19, 0xed, 0x9c, 0x61, 0xfe, 0x3b, 0x05, 0xd5, 0xdf, 0x5a, 0x10, 0xc6, + 0x7d, 0x30, 0x2d, 0xaf, 0xc2, 0x70, 0x2b, 0x0c, 0xe2, 0xa0, 0x11, 0x78, 0x82, 0x67, 0x79, 0x22, + 0xf1, 0xd6, 0xe7, 0xe5, 0xc7, 0xda, 0x6f, 0xac, 0xb0, 0xd9, 0xe8, 0x05, 0x61, 0x2c, 0xf8, 0x84, + 0x64, 0xf4, 0x82, 0x30, 0xc6, 0x0c, 0x82, 0x9a, 0x00, 0xb1, 0x13, 0x6e, 0x93, 0x98, 0x96, 0x89, + 0xc0, 0x1f, 0xf9, 0x87, 0x47, 0x3b, 0x76, 0xbd, 0x39, 0xd7, 0x8f, 0xa3, 0x38, 0x9c, 0x5b, 0xf5, + 0xe3, 0x3b, 0x21, 0x17, 0x81, 0x34, 0xf7, 0x7b, 0x45, 0x0b, 0x6b, 0x74, 0xa5, 0x8f, 0x24, 0x6b, + 0x63, 0xc0, 0x7c, 0xd2, 0x5e, 0x17, 0xe5, 0x58, 0x61, 0xd8, 0xaf, 0xb0, 0xab, 0x84, 0x0d, 0xd0, + 0xc9, 0x3c, 0xe3, 0xbf, 0x31, 0xac, 0x86, 0x96, 0xbd, 0x67, 0x55, 0x75, 0xff, 0xfb, 0xee, 0x27, + 0x37, 0x6d, 0x58, 0xf7, 0x84, 0x49, 0x9c, 0xf4, 0xd1, 0x77, 0x76, 0x58, 0x3a, 0x3c, 0xdf, 0xe3, + 0x0a, 0x38, 0x81, 0x6d, 0x03, 0x0b, 0x3b, 0xcd, 0xc2, 0xf3, 0xae, 0xd6, 0xc4, 0x22, 0xd7, 0xc2, + 0x4e, 0x0b, 0x00, 0x4e, 0x70, 0xd0, 0xbc, 0x10, 0xa0, 0x4b, 0x46, 0x76, 0x38, 0x29, 0x40, 0xcb, + 0xcf, 0xd7, 0x24, 0xe8, 0x17, 0x60, 0x44, 0x65, 0x89, 0xab, 0xf1, 0x64, 0x5b, 0x22, 0x0c, 0xca, + 0x72, 0x52, 0x8c, 0x75, 0x1c, 0xb4, 0x01, 0x13, 0x11, 0xd7, 0x9e, 0xa8, 0x68, 0x77, 0x5c, 0x0b, + 0xf5, 0x49, 0x69, 0x21, 0x51, 0x37, 0xc1, 0xc7, 0xac, 0x88, 0x1f, 0x1d, 0xd2, 0xd1, 0x31, 0x4d, + 0x02, 0xbd, 0x0e, 0xe3, 0x9e, 0x9e, 0x6b, 0xbd, 0x26, 0x94, 0x54, 0xca, 0x80, 0xd8, 0xc8, 0xc4, + 0x5e, 0xc3, 0x29, 0x6c, 0xca, 0xeb, 0xe8, 0x25, 0x22, 0x42, 0xa3, 0xe3, 0x6f, 0x93, 0x48, 0xe4, + 0xb8, 0x62, 0xbc, 0xce, 0xed, 0x1c, 0x1c, 0x9c, 0x5b, 0x1b, 0xbd, 0x0a, 0xa3, 0xf2, 0xf3, 0x35, + 0x37, 0xde, 0xc4, 0x4c, 0x5d, 0x83, 0x61, 0x03, 0x13, 0xdd, 0x87, 0x69, 0xf9, 0x7f, 0x23, 0x74, + 0xb6, 0xb6, 0xdc, 0x86, 0xf0, 0xa2, 0xe6, 0xbe, 0x3a, 0x0b, 0xd2, 0xf9, 0x67, 0x39, 0x0b, 0xe9, + 0xf8, 0xb0, 0x72, 0x45, 0x8c, 0x5a, 0x26, 0x9c, 0x4d, 0x62, 0x36, 0x7d, 0xb4, 0x06, 0xe7, 0x76, + 
0x88, 0xe3, 0xc5, 0x3b, 0x4b, 0x3b, 0xa4, 0xb1, 0x2b, 0x37, 0x11, 0x73, 0x0e, 0xd6, 0x8c, 0xbb, + 0x6f, 0x76, 0xa2, 0xe0, 0xac, 0x7a, 0xe8, 0x1d, 0x98, 0x69, 0xb5, 0x37, 0x3d, 0x37, 0xda, 0x59, + 0x0f, 0x62, 0x66, 0x94, 0xa1, 0x92, 0xac, 0x09, 0x2f, 0x62, 0xe5, 0x18, 0x5d, 0xcb, 0xc1, 0xc3, + 0xb9, 0x14, 0xd0, 0xfb, 0x30, 0x9d, 0x5a, 0x0c, 0xc2, 0xa7, 0x71, 0x3c, 0x3f, 0xde, 0x6d, 0x3d, + 0xab, 0x82, 0xf0, 0x51, 0xcc, 0x02, 0xe1, 0xec, 0x26, 0x3e, 0x98, 0xa9, 0xce, 0x7b, 0xb4, 0xb2, + 0xc6, 0x94, 0xa1, 0x2f, 0xc3, 0xa8, 0xbe, 0x8a, 0xc4, 0x05, 0x73, 0x35, 0x9b, 0x67, 0xd1, 0x56, + 0x1b, 0x67, 0xe9, 0xd4, 0x8a, 0xd2, 0x61, 0xd8, 0xa0, 0x68, 0x13, 0xc8, 0xfe, 0x3e, 0x74, 0x1b, + 0x86, 0x1b, 0x9e, 0x4b, 0xfc, 0x78, 0xb5, 0xd6, 0x2d, 0xe8, 0xc6, 0x92, 0xc0, 0x11, 0x03, 0x26, + 0x02, 0x84, 0xf2, 0x32, 0xac, 0x28, 0xd8, 0xbf, 0x51, 0x80, 0x4a, 0x8f, 0x68, 0xb3, 0x29, 0x8d, + 0xb2, 0xd5, 0x97, 0x46, 0x79, 0x41, 0xa6, 0x8c, 0x5b, 0x4f, 0x89, 0xd9, 0xa9, 0x74, 0x70, 0x89, + 0xb0, 0x9d, 0xc6, 0xef, 0xdb, 0xc2, 0x57, 0x57, 0x4a, 0x97, 0x7a, 0xda, 0x9e, 0x1b, 0x8f, 0x51, + 0x03, 0xfd, 0x0b, 0x22, 0xb9, 0x0f, 0x0b, 0xf6, 0xd7, 0x0b, 0x30, 0xad, 0x86, 0xf0, 0x2f, 0xee, + 0xc0, 0xdd, 0xed, 0x1c, 0xb8, 0x53, 0x78, 0x96, 0xb1, 0xef, 0xc0, 0x20, 0x0f, 0x5a, 0xd2, 0x07, + 0x03, 0xf4, 0x94, 0x19, 0xe1, 0x4a, 0x5d, 0xd3, 0x46, 0x94, 0xab, 0xbf, 0x62, 0xc1, 0xc4, 0xc6, + 0x52, 0xad, 0x1e, 0x34, 0x76, 0x49, 0xbc, 0xc0, 0x19, 0x56, 0x2c, 0xf8, 0x1f, 0xeb, 0x21, 0xf9, + 0x9a, 0x2c, 0x8e, 0xe9, 0x0a, 0x94, 0x76, 0x82, 0x28, 0x4e, 0xbf, 0xd9, 0xde, 0x0c, 0xa2, 0x18, + 0x33, 0x88, 0xfd, 0xbb, 0x16, 0x0c, 0xb0, 0x44, 0xa7, 0xbd, 0xb2, 0xef, 0xf6, 0xf3, 0x5d, 0xe8, + 0x65, 0x18, 0x24, 0x5b, 0x5b, 0xa4, 0x11, 0x8b, 0x59, 0x95, 0x0e, 0xa5, 0x83, 0xcb, 0xac, 0x94, + 0x5e, 0xfa, 0xac, 0x31, 0xfe, 0x17, 0x0b, 0x64, 0xf4, 0x16, 0x94, 0x63, 0x77, 0x8f, 0x2c, 0x34, + 0x9b, 0xe2, 0xd5, 0xeb, 0x21, 0xfc, 0x77, 0x37, 0x24, 0x01, 0x9c, 0xd0, 0xb2, 0xbf, 0x5a, 0x00, + 0x48, 0x5c, 0xef, 0x7b, 0x7d, 0xe2, 0x62, 0xc7, 0x7b, 0xc8, 0xd5, 0x8c, 0xf7, 0x10, 0x94, 0x10, + 0xcc, 0x78, 0x0c, 0x51, 0xc3, 0x54, 0xec, 0x6b, 0x98, 0x4a, 0x27, 0x19, 0xa6, 0x25, 0x98, 0x4a, + 0x42, 0x07, 0x98, 0x71, 0x54, 0x98, 0x90, 0xb2, 0x91, 0x06, 0xe2, 0x4e, 0x7c, 0x9b, 0xc0, 0x15, + 0x19, 0xd1, 0x52, 0xde, 0x35, 0xcc, 0xa8, 0xf2, 0x04, 0x89, 0x98, 0x93, 0x07, 0x9f, 0x42, 0xee, + 0x83, 0xcf, 0x4f, 0x59, 0x70, 0x3e, 0xdd, 0x0e, 0xf3, 0x5e, 0xfb, 0x01, 0x0b, 0xa6, 0xd9, 0xb3, + 0x17, 0x6b, 0xb5, 0xf3, 0x91, 0xed, 0xa5, 0xec, 0x90, 0x0a, 0xdd, 0x7b, 0x9c, 0x78, 0x2e, 0xaf, + 0x65, 0x91, 0xc6, 0xd9, 0x2d, 0xda, 0xdf, 0x6f, 0x81, 0x70, 0x12, 0xea, 0x63, 0x63, 0xbf, 0x2d, + 0x53, 0x95, 0x1a, 0x01, 0xbc, 0xaf, 0xe4, 0x7b, 0x4d, 0x89, 0xb0, 0xdd, 0xea, 0x22, 0x35, 0x82, + 0x75, 0x1b, 0xb4, 0xec, 0x26, 0x08, 0x68, 0x95, 0x30, 0x3d, 0x51, 0xef, 0xde, 0x5c, 0x07, 0x68, + 0x32, 0x5c, 0x2d, 0x61, 0xa1, 0x3a, 0xb6, 0xab, 0x0a, 0x82, 0x35, 0x2c, 0xfb, 0x47, 0x0a, 0x30, + 0x22, 0x03, 0x46, 0xb7, 0xfd, 0x7e, 0xa4, 0xb9, 0x13, 0xe5, 0x8d, 0x61, 0x19, 0x3e, 0x29, 0xe1, + 0x5a, 0x22, 0x04, 0x27, 0x19, 0x3e, 0x25, 0x00, 0x27, 0x38, 0xe8, 0x19, 0x18, 0x8a, 0xda, 0x9b, + 0x0c, 0x3d, 0xe5, 0xfa, 0x52, 0xe7, 0xc5, 0x58, 0xc2, 0xd1, 0xe7, 0x61, 0x92, 0xd7, 0x0b, 0x83, + 0x96, 0xb3, 0xcd, 0x95, 0x86, 0x03, 0xca, 0x17, 0x75, 0x72, 0x2d, 0x05, 0x3b, 0x3e, 0xac, 0x9c, + 0x4f, 0x97, 0x31, 0x75, 0x73, 0x07, 0x15, 0xfb, 0xcb, 0x80, 0x3a, 0x63, 0x60, 0xa3, 0x37, 0xb8, + 0x01, 0x92, 0x1b, 0x92, 0x66, 0x37, 0x3d, 0xb2, 0xee, 0x3a, 0x29, 0xcd, 0xcf, 0x79, 0x2d, 0xac, + 0xea, 0xdb, 0x7f, 0xad, 
0x08, 0x93, 0x69, 0x47, 0x3a, 0x74, 0x13, 0x06, 0xf9, 0x05, 0x23, 0xc8, + 0x77, 0x79, 0xa6, 0xd4, 0xdc, 0xef, 0xd8, 0x56, 0x13, 0x77, 0x94, 0xa8, 0x8f, 0xde, 0x81, 0x91, + 0x66, 0x70, 0xdf, 0xbf, 0xef, 0x84, 0xcd, 0x85, 0xda, 0xaa, 0x58, 0x97, 0x99, 0x7c, 0x6a, 0x35, + 0x41, 0xd3, 0x5d, 0xfa, 0x98, 0x4a, 0x3e, 0x01, 0x61, 0x9d, 0x1c, 0xda, 0x60, 0x71, 0xfd, 0xb6, + 0xdc, 0xed, 0x35, 0xa7, 0xd5, 0xcd, 0x1a, 0x75, 0x49, 0x22, 0x69, 0x94, 0xc7, 0x44, 0xf0, 0x3f, + 0x0e, 0xc0, 0x09, 0x21, 0xf4, 0xdd, 0x70, 0x2e, 0xca, 0x51, 0x6d, 0xe5, 0xa5, 0x44, 0xe8, 0xa6, + 0xed, 0x59, 0x7c, 0x8c, 0x4a, 0x10, 0x59, 0x4a, 0xb0, 0xac, 0x66, 0xec, 0xaf, 0x9c, 0x03, 0x63, + 0x37, 0x1a, 0x79, 0x71, 0xac, 0x53, 0xca, 0x8b, 0x83, 0x61, 0x98, 0xec, 0xb5, 0xe2, 0x83, 0xaa, + 0x1b, 0x76, 0x4b, 0xac, 0xb6, 0x2c, 0x70, 0x3a, 0x69, 0x4a, 0x08, 0x56, 0x74, 0xb2, 0x93, 0x17, + 0x15, 0x3f, 0xc4, 0xe4, 0x45, 0xa5, 0x33, 0x4c, 0x5e, 0xb4, 0x0e, 0x43, 0xdb, 0x6e, 0x8c, 0x49, + 0x2b, 0x10, 0xac, 0x5d, 0xe6, 0x3a, 0xbc, 0xc1, 0x51, 0x3a, 0x13, 0x66, 0x08, 0x00, 0x96, 0x44, + 0xd0, 0x1b, 0x6a, 0x07, 0x0e, 0xe6, 0x4b, 0x46, 0x9d, 0xef, 0x69, 0x99, 0x7b, 0x50, 0x24, 0x2b, + 0x1a, 0x7a, 0xd8, 0x64, 0x45, 0x2b, 0x32, 0xc5, 0xd0, 0x70, 0xbe, 0xe9, 0x38, 0xcb, 0x20, 0xd4, + 0x23, 0xb1, 0x90, 0x91, 0x8c, 0xa9, 0x7c, 0x7a, 0xc9, 0x98, 0xbe, 0xdf, 0x82, 0xe9, 0x56, 0x56, + 0x5e, 0x32, 0x91, 0x18, 0xe8, 0xe5, 0xbe, 0x13, 0xaf, 0x19, 0x0d, 0x32, 0x11, 0x39, 0x13, 0x0d, + 0x67, 0x37, 0x47, 0x07, 0x3a, 0xdc, 0x6c, 0x8a, 0x6c, 0x42, 0x4f, 0xe5, 0x64, 0x75, 0xea, 0x92, + 0xcb, 0x69, 0x23, 0x23, 0x83, 0xd0, 0xc7, 0xf3, 0x32, 0x08, 0xf5, 0x9d, 0x37, 0xe8, 0x0d, 0x95, + 0xcf, 0x69, 0x2c, 0x7f, 0x29, 0xf1, 0x6c, 0x4d, 0x3d, 0xb3, 0x38, 0xbd, 0xa1, 0xb2, 0x38, 0x75, + 0x89, 0x6f, 0xc6, 0x73, 0x34, 0xf5, 0xcc, 0xdd, 0xa4, 0xe5, 0x5f, 0x9a, 0x38, 0x9d, 0xfc, 0x4b, + 0xc6, 0x55, 0xc3, 0x53, 0x00, 0x3d, 0xdb, 0xe3, 0xaa, 0x31, 0xe8, 0x76, 0xbf, 0x6c, 0x78, 0xae, + 0xa9, 0xa9, 0x87, 0xca, 0x35, 0x75, 0x4f, 0xcf, 0xdd, 0x84, 0x7a, 0x24, 0x27, 0xa2, 0x48, 0x7d, + 0x66, 0x6c, 0xba, 0xa7, 0x5f, 0x80, 0xe7, 0xf2, 0xe9, 0xaa, 0x7b, 0xae, 0x93, 0x6e, 0xe6, 0x15, + 0xd8, 0x91, 0x09, 0xea, 0xfc, 0xd9, 0x64, 0x82, 0x9a, 0x3e, 0xf5, 0x4c, 0x50, 0x17, 0xce, 0x20, + 0x13, 0xd4, 0x63, 0x1f, 0x6a, 0x26, 0xa8, 0x99, 0x47, 0x90, 0x09, 0x6a, 0x3d, 0xc9, 0x04, 0x75, + 0x31, 0x7f, 0x4a, 0x32, 0xec, 0x59, 0x73, 0xf2, 0x3f, 0xdd, 0x83, 0x72, 0x4b, 0x46, 0x7a, 0x10, + 0x01, 0xd8, 0xb2, 0x93, 0xd1, 0x66, 0x85, 0x83, 0xe0, 0x53, 0xa2, 0x40, 0x38, 0x21, 0x45, 0xe9, + 0x26, 0xf9, 0xa0, 0x1e, 0xef, 0xa2, 0x04, 0xcd, 0x52, 0x2f, 0xe5, 0x67, 0x81, 0xb2, 0xff, 0x6a, + 0x01, 0x2e, 0x77, 0x5f, 0xd7, 0x89, 0x6e, 0xaa, 0x96, 0xbc, 0xa5, 0xa4, 0x74, 0x53, 0x5c, 0xc8, + 0x49, 0xb0, 0xfa, 0x0e, 0x87, 0x73, 0x03, 0xa6, 0x94, 0x21, 0xab, 0xe7, 0x36, 0x0e, 0xb4, 0x24, + 0xb5, 0xca, 0x61, 0xaf, 0x9e, 0x46, 0xc0, 0x9d, 0x75, 0xd0, 0x02, 0x4c, 0x18, 0x85, 0xab, 0x55, + 0x21, 0xcc, 0x28, 0x65, 0x58, 0xdd, 0x04, 0xe3, 0x34, 0xbe, 0xfd, 0x73, 0x16, 0x3c, 0x96, 0x93, + 0x24, 0xa1, 0xef, 0x68, 0x2f, 0x5b, 0x30, 0xd1, 0x32, 0xab, 0xf6, 0x08, 0x0a, 0x65, 0xa4, 0x62, + 0x50, 0x7d, 0x4d, 0x01, 0x70, 0x9a, 0xe8, 0xe2, 0xb5, 0xdf, 0xfa, 0xfd, 0xcb, 0x1f, 0xfb, 0xed, + 0xdf, 0xbf, 0xfc, 0xb1, 0xdf, 0xf9, 0xfd, 0xcb, 0x1f, 0xfb, 0xff, 0x8f, 0x2e, 0x5b, 0xbf, 0x75, + 0x74, 0xd9, 0xfa, 0xed, 0xa3, 0xcb, 0xd6, 0xef, 0x1c, 0x5d, 0xb6, 0x7e, 0xef, 0xe8, 0xb2, 0xf5, + 0xd5, 0x3f, 0xb8, 0xfc, 0xb1, 0xb7, 0x0b, 0xfb, 0x2f, 0xfc, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x67, 0xe8, 0x47, 0xf4, 0xcd, 0xe5, 0x00, 0x00, } 
diff --git a/vendor/k8s.io/client-go/pkg/api/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto similarity index 66% rename from vendor/k8s.io/client-go/pkg/api/v1/generated.proto rename to vendor/k8s.io/api/core/v1/generated.proto index 9c48148aa..b4d75069f 100644 --- a/vendor/k8s.io/client-go/pkg/api/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,14 +19,14 @@ limitations under the License. syntax = 'proto2'; -package k8s.io.kubernetes.pkg.api.v1; +package k8s.io.api.core.v1; +import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto"; import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; @@ -39,13 +39,13 @@ option go_package = "v1"; // ownership management and SELinux relabeling. message AWSElasticBlockStoreVolumeSource { // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). - // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore optional string volumeID = 1; // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional optional string fsType = 2; @@ -59,7 +59,7 @@ message AWSElasticBlockStoreVolumeSource { // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". // If omitted, the default is "false". - // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional optional bool readOnly = 4; } @@ -120,6 +120,28 @@ message AzureDiskVolumeSource { // the ReadOnly setting in VolumeMounts. // +optional optional bool readOnly = 5; + + // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + optional string kind = 6; +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +message AzureFilePersistentVolumeSource { + // the name of secret that contains Azure Storage Account Name and Key + optional string secretName = 1; + + // Share Name + optional string shareName = 2; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. 
+ // +optional + optional bool readOnly = 3; + + // the namespace of the secret that contains Azure Storage Account Name and Key + // default is the same as the Pod + // +optional + optional string secretNamespace = 4; } // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. @@ -136,11 +158,11 @@ message AzureFileVolumeSource { optional bool readOnly = 3; } -// Binding ties one object to another. -// For example, a pod is bound to a node by a scheduler. +// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. +// Deprecated in 1.7, please use the bindings subresource of pods instead. message Binding { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -148,6 +170,57 @@ message Binding { optional ObjectReference target = 2; } +// Represents storage that is managed by an external CSI volume driver (Beta feature) +message CSIPersistentVolumeSource { + // Driver is the name of the driver to use for this volume. + // Required. + optional string driver = 1; + + // VolumeHandle is the unique volume name returned by the CSI volume + // plugin’s CreateVolume to refer to the volume on all subsequent calls. + // Required. + optional string volumeHandle = 2; + + // Optional: The value to pass to ControllerPublishVolumeRequest. + // Defaults to false (read/write). + // +optional + optional bool readOnly = 3; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 4; + + // Attributes of the volume to publish. + // +optional + map volumeAttributes = 5; + + // ControllerPublishSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // ControllerPublishVolume and ControllerUnpublishVolume calls. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + optional SecretReference controllerPublishSecretRef = 6; + + // NodeStageSecretRef is a reference to the secret object containing sensitive + // information to pass to the CSI driver to complete the CSI NodeStageVolume + // and NodeStageVolume and NodeUnstageVolume calls. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + optional SecretReference nodeStageSecretRef = 7; + + // NodePublishSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // NodePublishVolume and NodeUnpublishVolume calls. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + optional SecretReference nodePublishSecretRef = 8; +} + // Adds and removes POSIX capabilities from running containers. 
message Capabilities { // Added capabilities @@ -161,9 +234,9 @@ message Capabilities { // Represents a Ceph Filesystem mount that lasts the lifetime of a pod // Cephfs volumes do not support ownership management or SELinux relabeling. -message CephFSVolumeSource { +message CephFSPersistentVolumeSource { // Required: Monitors is a collection of Ceph monitors - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it repeated string monitors = 1; // Optional: Used as the mounted root, rather than the full Ceph tree, default is / @@ -171,23 +244,56 @@ message CephFSVolumeSource { optional string path = 2; // Optional: User is the rados user name, default is admin - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional optional string user = 3; // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional optional string secretFile = 4; // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + optional SecretReference secretRef = 5; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + optional bool readOnly = 6; +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. +message CephFSVolumeSource { + // Required: Monitors is a collection of Ceph monitors + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + repeated string monitors = 1; + + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + optional string path = 2; + + // Optional: User is the rados user name, default is admin + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + optional string user = 3; + + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + optional string secretFile = 4; + + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional optional LocalObjectReference secretRef = 5; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. 
- // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional optional bool readOnly = 6; } @@ -196,23 +302,65 @@ message CephFSVolumeSource { // A Cinder volume must exist before mounting to a container. // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. -message CinderVolumeSource { +message CinderPersistentVolumeSource { // volume id used to identify the volume in cinder - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md optional string volumeID = 1; // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional optional string fsType = 2; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional optional bool readOnly = 3; + + // Optional: points to a secret object containing parameters used to connect + // to OpenStack. + // +optional + optional SecretReference secretRef = 4; +} + +// Represents a cinder volume resource in Openstack. +// A Cinder volume must exist before mounting to a container. +// The volume must also be in the same region as the kubelet. +// Cinder volumes support ownership management and SELinux relabeling. +message CinderVolumeSource { + // volume id used to identify the volume in cinder + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + optional string volumeID = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + optional string fsType = 2; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + optional bool readOnly = 3; + + // Optional: points to a secret object containing parameters used to connect + // to OpenStack. + // +optional + optional LocalObjectReference secretRef = 4; +} + +// ClientIPConfig represents the configurations of Client IP based session affinity. +message ClientIPConfig { + // timeoutSeconds specifies the seconds of ClientIP type session sticky time. + // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + // Default value is 10800(for 3 hours). + // +optional + optional int32 timeoutSeconds = 1; } // Information about the condition of a component. @@ -239,19 +387,21 @@ message ComponentCondition { // ComponentStatus (and ComponentStatusList) holds the cluster validation info. message ComponentStatus { // Standard object's metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // List of component conditions observed // +optional + // +patchMergeKey=type + // +patchStrategy=merge repeated ComponentCondition conditions = 2; } // Status of all the conditions for the component as a list of ComponentStatus objects. message ComponentStatusList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -262,14 +412,27 @@ message ComponentStatusList { // ConfigMap holds configuration data for pods to consume. message ConfigMap { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Data contains the configuration data. - // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // Values with non-UTF-8 byte sequences must use the BinaryData field. + // The keys stored in Data must not overlap with the keys in + // the BinaryData field, this is enforced during validation process. // +optional map data = 2; + + // BinaryData contains the binary data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // BinaryData can contain byte sequences that are not in the UTF-8 range. + // The keys stored in BinaryData must not overlap with the ones in + // the Data field, this is enforced during validation process. + // Using this field will require 1.10+ apiserver and + // kubelet. + // +optional + map binaryData = 3; } // ConfigMapEnvSource selects a ConfigMap to populate the environment @@ -301,7 +464,7 @@ message ConfigMapKeySelector { // ConfigMapList is a resource containing a list of ConfigMap objects. message ConfigMapList { - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -309,6 +472,31 @@ message ConfigMapList { repeated ConfigMap items = 2; } +// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. +message ConfigMapNodeConfigSource { + // Namespace is the metadata.namespace of the referenced ConfigMap. + // This field is required in all cases. + optional string namespace = 1; + + // Name is the metadata.name of the referenced ConfigMap. + // This field is required in all cases. + optional string name = 2; + + // UID is the metadata.UID of the referenced ConfigMap. + // This field is forbidden in Node.Spec, and required in Node.Status. + // +optional + optional string uid = 3; + + // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. + // This field is forbidden in Node.Spec, and required in Node.Status. 
+ // +optional + optional string resourceVersion = 4; + + // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure + // This field is required in all cases. + optional string kubeletConfigKey = 5; +} + // Adapts a ConfigMap into a projected volume. // // The contents of the target ConfigMap's Data field will be presented in a @@ -374,7 +562,9 @@ message Container { optional string name = 1; // Docker image name. - // More info: http://kubernetes.io/docs/user-guide/images + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. // +optional optional string image = 2; @@ -385,7 +575,7 @@ message Container { // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, // regardless of whether the variable exists or not. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional repeated string command = 3; @@ -396,7 +586,7 @@ message Container { // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, // regardless of whether the variable exists or not. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional repeated string args = 4; @@ -415,6 +605,8 @@ message Container { // accessible from the network. // Cannot be updated. // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge repeated ContainerPort ports = 6; // List of sources to populate environment variables in the container. @@ -429,30 +621,41 @@ message Container { // List of environment variables to set in the container. // Cannot be updated. // +optional + // +patchMergeKey=name + // +patchStrategy=merge repeated EnvVar env = 7; // Compute Resources required by this container. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional optional ResourceRequirements resources = 8; // Pod volumes to mount into the container's filesystem. // Cannot be updated. // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge repeated VolumeMount volumeMounts = 9; + // volumeDevices is the list of block devices to be used by the container. + // This is an alpha feature and may change in the future. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + repeated VolumeDevice volumeDevices = 21; + // Periodic probe of container liveness. // Container will be restarted if the probe fails. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional optional Probe livenessProbe = 10; // Periodic probe of container service readiness. // Container will be removed from service endpoints if the probe fails. // Cannot be updated. 
- // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional optional Probe readinessProbe = 11; @@ -485,12 +688,13 @@ message Container { // One of Always, Never, IfNotPresent. // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/images#updating-images + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images // +optional optional string imagePullPolicy = 14; // Security options the pod should run with. - // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md + // More info: https://kubernetes.io/docs/concepts/policy/security-context/ + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ // +optional optional SecurityContext securityContext = 15; @@ -519,7 +723,7 @@ message Container { // Describe a container image message ContainerImage { // Names by which this image is known. - // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] repeated string names = 1; // The size of the image in bytes. @@ -645,7 +849,7 @@ message ContainerStatus { optional int32 restartCount = 5; // The image the container is running. - // More info: http://kubernetes.io/docs/user-guide/images + // More info: https://kubernetes.io/docs/concepts/containers/images // TODO(dchen1107): Which image the container is running with? optional string image = 6; @@ -653,7 +857,6 @@ message ContainerStatus { optional string imageID = 7; // Container's ID in the format 'docker://'. - // More info: http://kubernetes.io/docs/user-guide/container-environment#container-information // +optional optional string containerID = 8; } @@ -664,37 +867,6 @@ message DaemonEndpoint { optional int32 Port = 1; } -// DeleteOptions may be provided when deleting an API object -// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. -// +k8s:openapi-gen=false -message DeleteOptions { - // The duration in seconds before the object should be deleted. Value must be non-negative integer. - // The value zero indicates delete immediately. If this value is nil, the default grace period for the - // specified type will be used. - // Defaults to a per object value if not specified. zero means delete immediately. - // +optional - optional int64 gracePeriodSeconds = 1; - - // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be - // returned. - // +optional - optional Preconditions preconditions = 2; - - // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. - // Should the dependent objects be orphaned. If true/false, the "orphan" - // finalizer will be added to/removed from the object's finalizers list. - // Either this field or PropagationPolicy may be set, but not both. - // +optional - optional bool orphanDependents = 3; - - // Whether and how garbage collection will be performed. - // Either this field or OrphanDependents may be set, but not both. - // The default policy is decided by the existing finalizer set in the - // metadata.finalizers and the resource-specific default policy. 
- // +optional - optional string propagationPolicy = 4; -} - // Represents downward API info for projecting into a projected volume. // Note that this is identical to a downwardAPI volume source without the default // mode. @@ -748,9 +920,18 @@ message EmptyDirVolumeSource { // What type of storage medium should back this directory. // The default is "" which means to use the node's default medium. // Must be an empty string (default) or Memory. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional optional string medium = 1; + + // Total amount of local storage required for this EmptyDir volume. + // The size limit is also applicable for memory medium. + // The maximum usage on memory medium EmptyDir would be the minimum value between + // the SizeLimit specified here and the sum of memory limits of all containers in a pod. + // The default is nil which means that the limit is undefined. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; } // EndpointAddress is a tuple that describes single IP address. @@ -835,7 +1016,7 @@ message EndpointSubset { // ] message Endpoints { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -846,13 +1027,14 @@ message Endpoints { // subsets for the different ports. No address will appear in both Addresses and // NotReadyAddresses in the same subset. // Sets of addresses and ports that comprise a service. + // +optional repeated EndpointSubset subsets = 2; } // EndpointsList is a list of endpoints. message EndpointsList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -862,7 +1044,7 @@ message EndpointsList { // EnvFromSource represents the source of a set of ConfigMaps message EnvFromSource { - // An optional identifer to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. // +optional optional string prefix = 1; @@ -899,12 +1081,12 @@ message EnvVar { // EnvVarSource represents a source for the value of an EnvVar. message EnvVarSource { // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // spec.nodeName, spec.serviceAccountName, status.podIP. + // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. // +optional optional ObjectFieldSelector fieldRef = 1; // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. // +optional optional ResourceFieldSelector resourceFieldRef = 2; @@ -918,10 +1100,9 @@ message EnvVarSource { } // Event is a report of an event somewhere in the cluster. 
-// TODO: Decide whether to store these separately or with the object they apply to. message Event { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The object that this event is about. @@ -957,12 +1138,36 @@ message Event { // Type of this event (Normal, Warning), new types could be added in the future // +optional optional string type = 9; + + // Time when this Event was first observed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10; + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + optional EventSeries series = 11; + + // What action was taken/failed regarding to the Regarding object. + // +optional + optional string action = 12; + + // Optional secondary object for more complex actions. + // +optional + optional ObjectReference related = 13; + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + optional string reportingComponent = 14; + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + optional string reportingInstance = 15; } // EventList is a list of events. message EventList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -970,6 +1175,19 @@ message EventList { repeated Event items = 2; } +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continuously for some time. +message EventSeries { + // Number of occurrences in this series up to the last heartbeat time + optional int32 count = 1; + + // Time of the last occurrence observed + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + + // State of this Series: Ongoing or Finished + optional string state = 3; +} + // EventSource contains information for an event. message EventSource { // Component from which the event is generated. @@ -996,10 +1214,12 @@ message ExecAction { // Fibre Channel volumes can only be mounted as read/write once. // Fibre Channel volumes support ownership management and SELinux relabeling. message FCVolumeSource { - // Required: FC target worldwide names (WWNs) + // Optional: FC target worldwide names (WWNs) + // +optional repeated string targetWWNs = 1; - // Required: FC target lun number + // Optional: FC target lun number + // +optional optional int32 lun = 2; // Filesystem type to mount. @@ -1013,10 +1233,45 @@ message FCVolumeSource { // the ReadOnly setting in VolumeMounts. // +optional optional bool readOnly = 4; + + // Optional: FC volume world wide identifiers (wwids) + // Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + // +optional + repeated string wwids = 5; +} + +// FlexPersistentVolumeSource represents a generic persistent volume resource that is +// provisioned/attached using an exec based plugin. +message FlexPersistentVolumeSource { + // Driver is the name of the driver to use for this volume. + optional string driver = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. 
"ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + optional string fsType = 2; + + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + optional SecretReference secretRef = 3; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 4; + + // Optional: Extra command options if any. + // +optional + map options = 5; } // FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +// provisioned/attached using an exec based plugin. message FlexVolumeSource { // Driver is the name of the driver to use for this volume. optional string driver = 1; @@ -1067,13 +1322,13 @@ message FlockerVolumeSource { // PDs support ownership management and SELinux relabeling. message GCEPersistentDiskVolumeSource { // Unique name of the PD resource in GCE. Used to identify the disk in GCE. - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk optional string pdName = 1; // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional optional string fsType = 2; @@ -1082,13 +1337,13 @@ message GCEPersistentDiskVolumeSource { // If omitted, the default is to mount by volume name. // Examples: For volume /dev/sda1, you specify the partition as "1". // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional optional int32 partition = 3; // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional optional bool readOnly = 4; } @@ -1096,6 +1351,10 @@ message GCEPersistentDiskVolumeSource { // Represents a volume that is populated with the contents of a git repository. // Git repo volumes do not support ownership management. // Git repo volumes support SELinux relabeling. +// +// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an +// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir +// into the Pod's container. message GitRepoVolumeSource { // Repository URL optional string repository = 1; @@ -1116,16 +1375,16 @@ message GitRepoVolumeSource { // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsVolumeSource { // EndpointsName is the endpoint name that details Glusterfs topology. 
- // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod optional string endpoints = 1; // Path is the Glusterfs volume path. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod optional string path = 2; // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod // +optional optional bool readOnly = 3; } @@ -1184,36 +1443,54 @@ message Handler { optional TCPSocketAction tcpSocket = 3; } +// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the +// pod's hosts file. +message HostAlias { + // IP address of the host file entry. + optional string ip = 1; + + // Hostnames for the above IP address. + repeated string hostnames = 2; +} + // Represents a host path mapped into a pod. // Host path volumes do not support ownership management or SELinux relabeling. message HostPathVolumeSource { // Path of the directory on the host. - // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // If the path is a symlink, it will follow the link to the real path. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath optional string path = 1; + + // Type for HostPath Volume + // Defaults to "" + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // +optional + optional string type = 2; } -// Represents an ISCSI disk. +// ISCSIPersistentVolumeSource represents an ISCSI disk. // ISCSI volumes can only be mounted as read/write once. // ISCSI volumes support ownership management and SELinux relabeling. -message ISCSIVolumeSource { - // iSCSI target portal. The portal is either an IP or ip_addr:port if the port +message ISCSIPersistentVolumeSource { + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). optional string targetPortal = 1; // Target iSCSI Qualified Name. optional string iqn = 2; - // iSCSI target lun number. + // iSCSI Target Lun number. optional int32 lun = 3; - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). // +optional optional string iscsiInterface = 4; // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://kubernetes.io/docs/user-guide/volumes#iscsi + // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional optional string fsType = 5; @@ -1223,10 +1500,84 @@ message ISCSIVolumeSource { // +optional optional bool readOnly = 6; - // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). 
// +optional repeated string portals = 7; + + // whether support iSCSI Discovery CHAP authentication + // +optional + optional bool chapAuthDiscovery = 8; + + // whether support iSCSI Session CHAP authentication + // +optional + optional bool chapAuthSession = 11; + + // CHAP Secret for iSCSI target and initiator authentication + // +optional + optional SecretReference secretRef = 10; + + // Custom iSCSI Initiator Name. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. + // +optional + optional string initiatorName = 12; +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +message ISCSIVolumeSource { + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + optional string targetPortal = 1; + + // Target iSCSI Qualified Name. + optional string iqn = 2; + + // iSCSI Target Lun number. + optional int32 lun = 3; + + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). + // +optional + optional string iscsiInterface = 4; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 5; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // +optional + optional bool readOnly = 6; + + // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + repeated string portals = 7; + + // whether support iSCSI Discovery CHAP authentication + // +optional + optional bool chapAuthDiscovery = 8; + + // whether support iSCSI Session CHAP authentication + // +optional + optional bool chapAuthSession = 11; + + // CHAP Secret for iSCSI target and initiator authentication + // +optional + optional LocalObjectReference secretRef = 10; + + // Custom iSCSI Initiator Name. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. + // +optional + optional string initiatorName = 12; } // Maps a string key to a path within a volume. @@ -1255,7 +1606,7 @@ message Lifecycle { // PostStart is called immediately after a container is created. If the handler fails, // the container is terminated and restarted according to its restart policy. // Other management of the container blocks until the hook completes. - // More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details + // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional optional Handler postStart = 1; @@ -1264,7 +1615,7 @@ message Lifecycle { // The reason for termination is passed to the handler. // Regardless of the outcome of the handler, the container is eventually terminated. // Other management of the container blocks until the hook completes. 
- // More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details + // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional optional Handler preStop = 2; } @@ -1272,12 +1623,12 @@ message Lifecycle { // LimitRange sets resource usage limits for each kind of resource in a Namespace. message LimitRange { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the limits enforced. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional LimitRangeSpec spec = 2; } @@ -1312,12 +1663,12 @@ message LimitRangeItem { // LimitRangeList is a list of LimitRange items. message LimitRangeList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of LimitRange objects. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ repeated LimitRange items = 2; } @@ -1330,7 +1681,7 @@ message LimitRangeSpec { // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1338,39 +1689,6 @@ message List { repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; } -// ListOptions is the query options to a standard REST list call. -// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. -// +k8s:openapi-gen=false -message ListOptions { - // A selector to restrict the list of returned objects by their labels. - // Defaults to everything. - // +optional - optional string labelSelector = 1; - - // A selector to restrict the list of returned objects by their fields. - // Defaults to everything. - // +optional - optional string fieldSelector = 2; - - // Watch for changes to the described resources and return them as a stream of - // add, update, and remove notifications. Specify resourceVersion. - // +optional - optional bool watch = 3; - - // When specified with a watch call, shows changes that occur after that particular version of a resource. - // Defaults to changes from the beginning of history. - // When specified for list: - // - if unset, then the result is returned from remote storage based on quorum-read flag; - // - if it's 0, then we simply return what we currently have in cache, no guarantee; - // - if set to non zero, then the result is at least as fresh as given rv. - // +optional - optional string resourceVersion = 4; - - // Timeout for the list/watch call. 
- // +optional - optional int64 timeoutSeconds = 5; -} - // LoadBalancerIngress represents the status of a load-balancer ingress point: // traffic intended for the service should be sent to an ingress point. message LoadBalancerIngress { @@ -1397,27 +1715,37 @@ message LoadBalancerStatus { // referenced object inside the same namespace. message LocalObjectReference { // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // TODO: Add other useful fields. apiVersion, kind, uid? // +optional optional string name = 1; } +// Local represents directly-attached storage with node affinity (Beta feature) +message LocalVolumeSource { + // The full path to the volume on the node. + // It can be either a directory or block device (disk, partition, ...). + // Directories can be represented only by PersistentVolume with VolumeMode=Filesystem. + // Block devices can be represented only by VolumeMode=Block, which also requires the + // BlockVolume alpha feature gate to be enabled. + optional string path = 1; +} + // Represents an NFS mount that lasts the lifetime of a pod. // NFS volumes do not support ownership management or SELinux relabeling. message NFSVolumeSource { // Server is the hostname or IP address of the NFS server. - // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs optional string server = 1; // Path that is exported by the NFS server. - // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs optional string path = 2; // ReadOnly here will force // the NFS export to be mounted with read-only permissions. // Defaults to false. - // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional optional bool readOnly = 3; } @@ -1426,17 +1754,17 @@ message NFSVolumeSource { // Use of multiple namespaces is optional. message Namespace { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of the Namespace. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional NamespaceSpec spec = 2; // Status describes the current status of a Namespace. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional NamespaceStatus status = 3; } @@ -1444,19 +1772,19 @@ message Namespace { // NamespaceList is a list of Namespaces. message NamespaceList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Namespace objects in the list. 
- // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ repeated Namespace items = 2; } // NamespaceSpec describes the attributes on a Namespace. message NamespaceSpec { // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. - // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers + // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional repeated string finalizers = 1; } @@ -1464,7 +1792,7 @@ message NamespaceSpec { // NamespaceStatus is information about the current status of a Namespace. message NamespaceStatus { // Phase is the current lifecycle phase of the namespace. - // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases + // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional optional string phase = 1; } @@ -1473,19 +1801,19 @@ message NamespaceStatus { // Each node will have a unique identifier in the cache (i.e. in etcd). message Node { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a node. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional NodeSpec spec = 2; // Most recently observed status of the node. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional NodeStatus status = 3; } @@ -1547,6 +1875,62 @@ message NodeCondition { optional string message = 6; } +// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. +message NodeConfigSource { + // ConfigMap is a reference to a Node's ConfigMap + optional ConfigMapNodeConfigSource configMap = 2; +} + +// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource. +message NodeConfigStatus { + // Assigned reports the checkpointed config the node will try to use. + // When Node.Spec.ConfigSource is updated, the node checkpoints the associated + // config payload to local disk, along with a record indicating intended + // config. The node refers to this record to choose its config checkpoint, and + // reports this record in Assigned. Assigned only updates in the status after + // the record has been checkpointed to disk. When the Kubelet is restarted, + // it tries to make the Assigned config the Active config by loading and + // validating the checkpointed payload identified by Assigned. + // +optional + optional NodeConfigSource assigned = 1; + + // Active reports the checkpointed config the node is actively using. + // Active will represent either the current version of the Assigned config, + // or the current LastKnownGood config, depending on whether attempting to use the + // Assigned config results in an error. 
+ // +optional + optional NodeConfigSource active = 2; + + // LastKnownGood reports the checkpointed config the node will fall back to + // when it encounters an error attempting to use the Assigned config. + // The Assigned config becomes the LastKnownGood config when the node determines + // that the Assigned config is stable and correct. + // This is currently implemented as a 10-minute soak period starting when the local + // record of Assigned config is updated. If the Assigned config is Active at the end + // of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is + // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, + // because the local default config is always assumed good. + // You should not make assumptions about the node's method of determining config stability + // and correctness, as this may change or become configurable in the future. + // +optional + optional NodeConfigSource lastKnownGood = 3; + + // Error describes any problems reconciling the Spec.ConfigSource to the Active config. + // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned + // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting + // to load or validate the Assigned config, etc. + // Errors may occur at different points while syncing config. Earlier errors (e.g. download or + // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across + // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in + // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error + // by fixing the config assigned in Spec.ConfigSource. + // You can find additional information for debugging by searching the error message in the Kubelet log. + // Error is a human-readable description of the error state; machines can check whether or not Error + // is empty, but should not rely on the stability of the Error text across Kubelet versions. + // +optional + optional string error = 4; +} + // NodeDaemonEndpoints lists ports opened by daemons running on the Node. message NodeDaemonEndpoints { // Endpoint on which Kubelet is listening. @@ -1557,7 +1941,7 @@ message NodeDaemonEndpoints { // NodeList is the whole list of all Nodes which have been registered with master. message NodeList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1606,10 +1990,17 @@ message NodeSelectorRequirement { repeated string values = 3; } -// A null or empty node selector term matches no objects. +// A null or empty node selector term matches no objects. The requirements of +// them are ANDed. +// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. message NodeSelectorTerm { - // Required. A list of node selector requirements. The requirements are ANDed. + // A list of node selector requirements by node's labels. + // +optional repeated NodeSelectorRequirement matchExpressions = 1; + + // A list of node selector requirements by node's fields. + // +optional + repeated NodeSelectorRequirement matchFields = 2; } // NodeSpec describes the attributes that a node is created with. 
@@ -1618,29 +2009,34 @@ message NodeSpec { // +optional optional string podCIDR = 1; - // External ID of the node assigned by some machine database (e.g. a cloud provider). - // Deprecated. - // +optional - optional string externalID = 2; - // ID of the node assigned by the cloud provider in the format: :// // +optional optional string providerID = 3; // Unschedulable controls node schedulability of new pods. By default, node is schedulable. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration + // More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration // +optional optional bool unschedulable = 4; // If specified, the node's taints. // +optional repeated Taint taints = 5; + + // If specified, the source to get node configuration from + // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field + // +optional + optional NodeConfigSource configSource = 6; + + // Deprecated. Not all kubelets will set this field. Remove field after 1.13. + // see: https://issues.k8s.io/61966 + // +optional + optional string externalID = 2; } // NodeStatus is information about the current status of a node. message NodeStatus { // Capacity represents the total resources of a node. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity // +optional map capacity = 1; @@ -1650,20 +2046,24 @@ message NodeStatus { map allocatable = 2; // NodePhase is the recently observed lifecycle phase of the node. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase + // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase // The field is never populated, and now is deprecated. // +optional optional string phase = 3; // Conditions is an array of current observed node conditions. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition + // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition // +optional + // +patchMergeKey=type + // +patchStrategy=merge repeated NodeCondition conditions = 4; // List of addresses reachable to the node. // Queried from cloud provider, if available. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses + // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses // +optional + // +patchMergeKey=type + // +patchStrategy=merge repeated NodeAddress addresses = 5; // Endpoints of daemons running on the Node. @@ -1671,7 +2071,7 @@ message NodeStatus { optional NodeDaemonEndpoints daemonEndpoints = 6; // Set of ids/uuids to uniquely identify the node. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info + // More info: https://kubernetes.io/docs/concepts/nodes/node/#info // +optional optional NodeSystemInfo nodeInfo = 7; @@ -1686,17 +2086,21 @@ message NodeStatus { // List of volumes that are attached to the node. // +optional repeated AttachedVolume volumesAttached = 10; + + // Status of the config assigned to the node via the dynamic Kubelet config feature. + // +optional + optional NodeConfigStatus config = 11; } // NodeSystemInfo is a set of ids/uuids to uniquely identify the node. message NodeSystemInfo { // MachineID reported by the node. For unique machine identification - // in the cluster this field is prefered. Learn more from man(5) + // in the cluster this field is preferred. 
Learn more from man(5) // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html optional string machineID = 1; // SystemUUID reported by the node. For unique machine identification - // MachineID is prefered. This field is specific to Red Hat hosts + // MachineID is preferred. This field is specific to Red Hat hosts // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html optional string systemUUID = 2; @@ -1735,175 +2139,26 @@ message ObjectFieldSelector { optional string fieldPath = 2; } -// ObjectMeta is metadata that all persisted resources must have, which includes all objects -// users must create. -// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. -// +k8s:openapi-gen=false -message ObjectMeta { - // Name must be unique within a namespace. Is required when creating resources, although - // some resources may allow a client to request the generation of an appropriate name - // automatically. Name is primarily intended for creation idempotence and configuration - // definition. - // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names - // +optional - optional string name = 1; - - // GenerateName is an optional prefix, used by the server, to generate a unique - // name ONLY IF the Name field has not been provided. - // If this field is used, the name returned to the client will be different - // than the name passed. This value will also be combined with a unique suffix. - // The provided value has the same validation rules as the Name field, - // and may be truncated by the length of the suffix required to make the value - // unique on the server. - // - // If this field is specified and the generated name exists, the server will - // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). - // - // Applied only if Name is not specified. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency - // +optional - optional string generateName = 2; - - // Namespace defines the space within each name must be unique. An empty namespace is - // equivalent to the "default" namespace, but "default" is the canonical representation. - // Not all objects are required to be scoped to a namespace - the value of this field for - // those objects will be empty. - // - // Must be a DNS_LABEL. - // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/namespaces - // +optional - optional string namespace = 3; - - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - // +optional - optional string selfLink = 4; - - // UID is the unique in time and space value for this object. It is typically generated by - // the server on successful creation of a resource and is not allowed to change on PUT - // operations. - // - // Populated by the system. - // Read-only. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids - // +optional - optional string uid = 5; - - // An opaque value that represents the internal version of this object that can - // be used by clients to determine when objects have changed. May be used for optimistic - // concurrency, change detection, and the watch operation on a resource or set of resources. 
- // Clients must treat these values as opaque and passed unmodified back to the server. - // They may only be valid for a particular resource or set of resources. - // - // Populated by the system. - // Read-only. - // Value must be treated as opaque by clients and . - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency - // +optional - optional string resourceVersion = 6; - - // A sequence number representing a specific generation of the desired state. - // Populated by the system. Read-only. - // +optional - optional int64 generation = 7; - - // CreationTimestamp is a timestamp representing the server time when this object was - // created. It is not guaranteed to be set in happens-before order across separate operations. - // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // - // Populated by the system. - // Read-only. - // Null for lists. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time creationTimestamp = 8; - - // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This - // field is set by the server when a graceful deletion is requested by the user, and is not - // directly settable by a client. The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field. Once set, - // this value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard - // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the - // API. In the presence of network partitions, this object may still exist after this - // timestamp, until an administrator or automated process can determine the resource is - // fully terminated. - // If not set, graceful deletion of the object has not been requested. - // - // Populated by the system when a graceful deletion is requested. - // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deletionTimestamp = 9; - - // Number of seconds allowed for this object to gracefully terminate before - // it will be removed from the system. Only set when deletionTimestamp is also set. - // May only be shortened. - // Read-only. - // +optional - optional int64 deletionGracePeriodSeconds = 10; - - // Map of string keys and values that can be used to organize and categorize - // (scope and select) objects. May match selectors of replication controllers - // and services. - // More info: http://kubernetes.io/docs/user-guide/labels - // +optional - map labels = 11; - - // Annotations is an unstructured key value map stored with a resource that may be - // set by external tools to store and retrieve arbitrary metadata. They are not - // queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations - // +optional - map annotations = 12; - - // List of objects depended by this object. If ALL objects in the list have - // been deleted, this object will be garbage collected. 
If this object is managed by a controller, - // then an entry in this list will point to this controller, with the controller field set to true. - // There cannot be more than one managing controller. - // +optional - repeated k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference ownerReferences = 13; - - // Must be empty before the object is deleted from the registry. Each entry - // is an identifier for the responsible component that will remove the entry - // from the list. If the deletionTimestamp of the object is non-nil, entries - // in this list can only be removed. - // +optional - repeated string finalizers = 14; - - // The name of the cluster which the object belongs to. - // This is used to distinguish resources with same name and namespace in different clusters. - // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. - // +optional - optional string clusterName = 15; -} - // ObjectReference contains enough information to let you inspect or modify the referred object. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message ObjectReference { // Kind of the referent. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional string kind = 1; // Namespace of the referent. - // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ // +optional optional string namespace = 2; // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // +optional optional string name = 3; // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids // +optional optional string uid = 4; @@ -1912,7 +2167,7 @@ message ObjectReference { optional string apiVersion = 5; // Specific resourceVersion to which this reference is made, if any. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 6; @@ -1930,23 +2185,23 @@ message ObjectReference { // PersistentVolume (PV) is a storage resource provisioned by an administrator. // It is analogous to a node. -// More info: http://kubernetes.io/docs/user-guide/persistent-volumes +// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes message PersistentVolume { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines a specification of a persistent volume owned by the cluster. // Provisioned by an administrator. 
- // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes // +optional optional PersistentVolumeSpec spec = 2; // Status represents the current information/status for the persistent volume. // Populated by the system. // Read-only. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes // +optional optional PersistentVolumeStatus status = 3; } @@ -1954,31 +2209,56 @@ message PersistentVolume { // PersistentVolumeClaim is a user's request for and claim to a persistent volume message PersistentVolumeClaim { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired characteristics of a volume requested by a pod author. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional optional PersistentVolumeClaimSpec spec = 2; // Status represents the current information/status of a persistent volume claim. // Read-only. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional optional PersistentVolumeClaimStatus status = 3; } +// PersistentVolumeClaimCondition contains details about state of pvc +message PersistentVolumeClaimCondition { + optional string type = 1; + + optional string status = 2; + + // Last time we probed the condition. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // Unique, this should be a short, machine understandable string that gives the reason + // for condition's last transition. If it reports "ResizeStarted" that means the underlying + // persistent volume is being resized. + // +optional + optional string reason = 5; + + // Human-readable message indicating details about last transition. + // +optional + optional string message = 6; +} + // PersistentVolumeClaimList is a list of PersistentVolumeClaim items. message PersistentVolumeClaimList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // A list of persistent volume claims. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims repeated PersistentVolumeClaim items = 2; } @@ -1986,7 +2266,7 @@ message PersistentVolumeClaimList { // and allows a Source for provider-specific attributes message PersistentVolumeClaimSpec { // AccessModes contains the desired access modes the volume should have. 
- // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 // +optional repeated string accessModes = 1; @@ -1995,7 +2275,7 @@ message PersistentVolumeClaimSpec { optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // Resources represents the minimum resources the volume should have. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional optional ResourceRequirements resources = 2; @@ -2004,9 +2284,15 @@ message PersistentVolumeClaimSpec { optional string volumeName = 3; // Name of the StorageClass required by the claim. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1 + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 // +optional optional string storageClassName = 5; + + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // This is an alpha feature and may change in the future. + // +optional + optional string volumeMode = 6; } // PersistentVolumeClaimStatus is the current status of a persistent volume claim. @@ -2016,13 +2302,20 @@ message PersistentVolumeClaimStatus { optional string phase = 1; // AccessModes contains the actual access modes the volume backing the PVC has. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 // +optional repeated string accessModes = 2; // Represents the actual resources of the underlying volume. // +optional map capacity = 3; + + // Current Condition of persistent volume claim. If underlying persistent volume is being + // resized then the Condition will be set to 'ResizeStarted'. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated PersistentVolumeClaimCondition conditions = 4; } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. @@ -2031,7 +2324,7 @@ message PersistentVolumeClaimStatus { // type of volume that is owned by someone else (the system). message PersistentVolumeClaimVolumeSource { // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims optional string claimName = 1; // Will force the ReadOnly setting in VolumeMounts. @@ -2043,12 +2336,12 @@ message PersistentVolumeClaimVolumeSource { // PersistentVolumeList is a list of PersistentVolume items. message PersistentVolumeList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of persistent volumes. 
- // More info: http://kubernetes.io/docs/user-guide/persistent-volumes + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes repeated PersistentVolume items = 2; } @@ -2057,13 +2350,13 @@ message PersistentVolumeList { message PersistentVolumeSource { // GCEPersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1; // AWSElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2; @@ -2071,39 +2364,39 @@ message PersistentVolumeSource { // Provisioned by a developer or tester. // This is useful for single-node development and testing only! // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. - // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath // +optional optional HostPathVolumeSource hostPath = 3; // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md // +optional optional GlusterfsVolumeSource glusterfs = 4; // NFS represents an NFS mount on the host. Provisioned by an admin. - // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional optional NFSVolumeSource nfs = 5; // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md // +optional - optional RBDVolumeSource rbd = 6; + optional RBDPersistentVolumeSource rbd = 6; // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // +optional - optional ISCSIVolumeSource iscsi = 7; + optional ISCSIPersistentVolumeSource iscsi = 7; // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional - optional CinderVolumeSource cinder = 8; + optional CinderPersistentVolumeSource cinder = 8; // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime // +optional - optional CephFSVolumeSource cephfs = 9; + optional CephFSPersistentVolumeSource cephfs = 9; // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. 
// +optional @@ -2114,14 +2407,13 @@ message PersistentVolumeSource { optional FlockerVolumeSource flocker = 11; // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. + // provisioned/attached using an exec based plugin. // +optional - optional FlexVolumeSource flexVolume = 12; + optional FlexPersistentVolumeSource flexVolume = 12; // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. // +optional - optional AzureFileVolumeSource azureFile = 13; + optional AzureFilePersistentVolumeSource azureFile = 13; // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine // +optional @@ -2144,13 +2436,26 @@ message PersistentVolumeSource { // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. // +optional - optional ScaleIOVolumeSource scaleIO = 19; + optional ScaleIOPersistentVolumeSource scaleIO = 19; + + // Local represents directly-attached storage with node affinity + // +optional + optional LocalVolumeSource local = 20; + + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // +optional + optional StorageOSPersistentVolumeSource storageos = 21; + + // CSI represents storage that is handled by an external CSI driver (Beta feature). + // +optional + optional CSIPersistentVolumeSource csi = 22; } // PersistentVolumeSpec is the specification of a persistent volume. message PersistentVolumeSpec { // A description of the persistent volume's resources and capacity. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity // +optional map capacity = 1; @@ -2158,21 +2463,22 @@ message PersistentVolumeSpec { optional PersistentVolumeSource persistentVolumeSource = 2; // AccessModes contains all ways the volume can be mounted. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes // +optional repeated string accessModes = 3; // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. // Expected to be non-nil when bound. // claim.VolumeName is the authoritative bind between PV and PVC. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding // +optional optional ObjectReference claimRef = 4; // What happens to a persistent volume when released from its claim. - // Valid options are Retain (default) and Recycle. - // Recycling must be supported by the volume plugin underlying this persistent volume. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy + // Valid options are Retain (default for manually created PersistentVolumes), Delete (default + // for dynamically provisioned PersistentVolumes), and Recycle (deprecated). + // Recycle must be supported by the volume plugin underlying this PersistentVolume. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming // +optional optional string persistentVolumeReclaimPolicy = 5; @@ -2180,12 +2486,29 @@ message PersistentVolumeSpec { // means that this volume does not belong to any StorageClass. // +optional optional string storageClassName = 6; + + // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will + // simply fail if one is invalid. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options + // +optional + repeated string mountOptions = 7; + + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is an alpha feature and may change in the future. + // +optional + optional string volumeMode = 8; + + // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. + // This field influences the scheduling of pods that use this volume. + // +optional + optional VolumeNodeAffinity nodeAffinity = 9; } // PersistentVolumeStatus is the current status of a persistent volume. message PersistentVolumeStatus { // Phase indicates if a volume is available, bound to a claim, or released by a claim. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#phase + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase // +optional optional string phase = 1; @@ -2214,12 +2537,12 @@ message PhotonPersistentDiskVolumeSource { // by clients and scheduled onto hosts. message Pod { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional PodSpec spec = 2; @@ -2227,23 +2550,13 @@ message Pod { // This data may not be up to date. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional PodStatus status = 3; } // Pod affinity is a group of inter pod affinity scheduling rules. message PodAffinity { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` // If the affinity requirements specified by this field are not met at // scheduling time, the pod will not be scheduled onto the node. 
// If the affinity requirements specified by this field cease to be met @@ -2271,7 +2584,7 @@ message PodAffinity { // relative to the given namespace(s)) that this pod should be // co-located (affinity) or not co-located (anti-affinity) with, // where co-located is defined as running on a node whose value of -// the label with key tches that of any node on which +// the label with key matches that of any node on which // a pod of the set of pods is running message PodAffinityTerm { // A label query over a set of resources, in this case pods. @@ -2280,31 +2593,19 @@ message PodAffinityTerm { // namespaces specifies which namespaces the labelSelector applies to (matches against); // null or empty list means "this pod's namespace" + // +optional repeated string namespaces = 2; // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching // the labelSelector in the specified namespaces, where co-located is defined as running on a node // whose value of the label with key topologyKey matches that of any node on which any of the // selected pods is running. - // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" - // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); - // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. - // +optional + // Empty topologyKey is not allowed. optional string topologyKey = 3; } // Pod anti affinity is a group of inter pod anti affinity scheduling rules. message PodAntiAffinity { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` // If the anti-affinity requirements specified by this field are not met at // scheduling time, the pod will not be scheduled onto the node. // If the anti-affinity requirements specified by this field cease to be met @@ -2364,13 +2665,12 @@ message PodAttachOptions { // PodCondition contains details for the current condition of this pod. message PodCondition { // Type is the type of the condition. - // Currently only Ready. - // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions optional string type = 1; // Status is the status of the condition. // Can be True, False, Unknown. - // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions optional string status = 2; // Last time we probed the condition. @@ -2390,6 +2690,38 @@ message PodCondition { optional string message = 6; } +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. 
+message PodDNSConfig { + // A list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // Duplicated nameservers will be removed. + // +optional + repeated string nameservers = 1; + + // A list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // Duplicated search paths will be removed. + // +optional + repeated string searches = 2; + + // A list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy. + // Duplicated entries will be removed. Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +optional + repeated PodDNSConfigOption options = 3; +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +message PodDNSConfigOption { + // Required. + optional string name = 1; + + // +optional + optional string value = 2; +} + // PodExecOptions is the query options to a Pod's remote exec call. // --- // TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging @@ -2427,12 +2759,12 @@ message PodExecOptions { // PodList is a list of Pods. message PodList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pods. - // More info: http://kubernetes.io/docs/user-guide/pods + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md repeated Pod items = 2; } @@ -2501,6 +2833,12 @@ message PodProxyOptions { optional string path = 1; } +// PodReadinessGate contains the reference to a pod condition +message PodReadinessGate { + // ConditionType refers to a condition in the pod's condition list with matching type. + optional string conditionType = 1; +} + // PodSecurityContext holds pod-level security attributes and common container settings. // Some fields are also present in container.securityContext. Field values of // container.securityContext take precedence over field values of PodSecurityContext. @@ -2521,6 +2859,14 @@ message PodSecurityContext { // +optional optional int64 runAsUser = 2; + // The GID to run the entrypoint of the container process. + // Uses runtime default if unset. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + optional int64 runAsGroup = 6; + // Indicates that the container must run as a non-root user. // If true, the Kubelet will validate the image at runtime to ensure that it // does not run as UID 0 (root) and fail to start the container if it does. @@ -2547,6 +2893,11 @@ message PodSecurityContext { // If unset, the Kubelet will not modify the ownership and permissions of any volume. // +optional optional int64 fsGroup = 5; + + // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + // sysctls (by the container runtime) might fail to launch. + // +optional + repeated Sysctl sysctls = 7; } // Describes the class of pods that should avoid this node. @@ -2560,8 +2911,10 @@ message PodSignature { // PodSpec is a description of a pod. message PodSpec { // List of volumes that can be mounted by containers belonging to the pod. 
- // More info: http://kubernetes.io/docs/user-guide/volumes + // More info: https://kubernetes.io/docs/concepts/storage/volumes // +optional + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys repeated Volume volumes = 1; // List of initialization containers belonging to the pod. @@ -2576,20 +2929,23 @@ message PodSpec { // in a similar fashion. // Init containers cannot currently be added or removed. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/containers + // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // +patchMergeKey=name + // +patchStrategy=merge repeated Container initContainers = 20; // List of containers belonging to the pod. // Containers cannot currently be added or removed. // There must be at least one container in a Pod. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/containers + // +patchMergeKey=name + // +patchStrategy=merge repeated Container containers = 2; // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. - // More info: http://kubernetes.io/docs/user-guide/pod-states#restartpolicy + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy // +optional optional string restartPolicy = 3; @@ -2609,21 +2965,23 @@ message PodSpec { // +optional optional int64 activeDeadlineSeconds = 5; - // Set DNS policy for containers within the pod. - // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. + // Set DNS policy for the pod. // Defaults to "ClusterFirst". - // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. // +optional optional string dnsPolicy = 6; // NodeSelector is a selector which must be true for the pod to fit on a node. // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: http://kubernetes.io/docs/user-guide/node-selection/README + // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ // +optional map nodeSelector = 7; // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ // +optional optional string serviceAccountName = 8; @@ -2662,6 +3020,16 @@ message PodSpec { // +optional optional bool hostIPC = 13; + // Share a single process namespace between all of the containers in a pod. + // When this is set containers will be able to view and signal processes from other containers + // in the same pod, and the first process in each container will not be assigned PID 1. + // HostPID and ShareProcessNamespace cannot both be set. + // Optional: Default to false. + // This field is alpha-level and is honored only by servers that enable the PodShareProcessNamespace feature. + // +k8s:conversion-gen=false + // +optional + optional bool shareProcessNamespace = 27; + // SecurityContext holds pod-level security attributes and common container settings. // Optional: Defaults to empty. 
See type description for default values of each field. // +optional @@ -2670,8 +3038,10 @@ message PodSpec { // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. For example, // in the case of docker, only DockerConfig type secrets are honored. - // More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod // +optional + // +patchMergeKey=name + // +patchStrategy=merge repeated LocalObjectReference imagePullSecrets = 15; // Specifies the hostname of the Pod @@ -2696,19 +3066,75 @@ message PodSpec { // If specified, the pod's tolerations. // +optional repeated Toleration tolerations = 22; + + // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + // file if specified. This is only valid for non-hostNetwork pods. + // +optional + // +patchMergeKey=ip + // +patchStrategy=merge + repeated HostAlias hostAliases = 23; + + // If specified, indicates the pod's priority. "system-node-critical" and + // "system-cluster-critical" are two special keywords which indicate the + // highest priorities with the former being the highest priority. Any other + // name must be defined by creating a PriorityClass object with that name. + // If not specified, the pod priority will be default or zero if there is no + // default. + // +optional + optional string priorityClassName = 24; + + // The priority value. Various system components use this field to find the + // priority of the pod. When Priority Admission Controller is enabled, it + // prevents users from setting this field. The admission controller populates + // this field from PriorityClassName. + // The higher the value, the higher the priority. + // +optional + optional int32 priority = 25; + + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // +optional + optional PodDNSConfig dnsConfig = 26; + + // If specified, all readiness gates will be evaluated for pod readiness. + // A pod is ready when all its containers are ready AND + // all conditions specified in the readiness gates have status equal to "True" + // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md + // +optional + repeated PodReadinessGate readinessGates = 28; } // PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system. +// state of a system, especially if the node that hosts the pod cannot contact the control +// plane. message PodStatus { - // Current condition of the pod. - // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-phase + // The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. + // The conditions array, the reason and message fields, and the individual container status + // arrays contain more detail about the pod's status. + // There are five possible phase values: + // + // Pending: The pod has been accepted by the Kubernetes system, but one or more of the + // container images has not been created. This includes time before being scheduled as + // well as time spent downloading images over the network, which could take a while. 
+ // Running: The pod has been bound to a node, and all of the containers have been created. + // At least one container is still running, or is in the process of starting or restarting. + // Succeeded: All containers in the pod have terminated in success, and will not be restarted. + // Failed: All containers in the pod have terminated, and at least one container has + // terminated in failure. The container either exited with non-zero status or was terminated + // by the system. + // Unknown: For some reason the state of the pod could not be obtained, typically due to an + // error in communicating with the host of the pod. + // + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase // +optional optional string phase = 1; // Current service state of pod. - // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions // +optional + // +patchMergeKey=type + // +patchStrategy=merge repeated PodCondition conditions = 2; // A human readable message indicating details about why the pod is in this condition. @@ -2716,10 +3142,20 @@ message PodStatus { optional string message = 3; // A brief CamelCase message indicating details about why the pod is in this state. - // e.g. 'OutOfDisk' + // e.g. 'Evicted' // +optional optional string reason = 4; + // nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be + // scheduled right away as preemption victims receive their graceful termination periods. + // This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide + // to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to + // give the resources on this node to a higher priority pod that is created after preemption. + // As a result, this field may be different than PodSpec.nodeName when the pod is + // scheduled. + // +optional + optional string nominatedNodeName = 11; + // IP address of the host to which the pod is assigned. Empty if not yet scheduled. // +optional optional string hostIP = 5; @@ -2737,18 +3173,18 @@ message PodStatus { // The list has one entry per init container in the manifest. The most recent successful // init container will have ready = true, the most recently started container will have // startTime set. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status repeated ContainerStatus initContainerStatuses = 10; // The list has one entry per container in the manifest. Each entry is currently the output // of `docker inspect`. 
- // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional repeated ContainerStatus containerStatuses = 8; // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes - // More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md + // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md // +optional optional string qosClass = 9; } @@ -2756,7 +3192,7 @@ message PodStatus { // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded message PodStatusResult { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -2764,7 +3200,7 @@ message PodStatusResult { // This data may not be up to date. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional PodStatus status = 2; } @@ -2772,12 +3208,12 @@ message PodStatusResult { // PodTemplate describes a template for creating copies of a predefined pod. message PodTemplate { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Template defines the pods that will be created from this pod template. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional PodTemplateSpec template = 2; } @@ -2785,7 +3221,7 @@ message PodTemplate { // PodTemplateList is a list of PodTemplates. message PodTemplateList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2796,12 +3232,12 @@ message PodTemplateList { // PodTemplateSpec describes the data a pod should have when created from a template message PodTemplateSpec { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional PodSpec spec = 2; } @@ -2865,13 +3301,13 @@ message Probe { optional Handler handler = 1; // Number of seconds after the container has started before liveness probes are initiated. 
- // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional optional int32 initialDelaySeconds = 2; // Number of seconds after which the probe times out. // Defaults to 1 second. Minimum value is 1. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional optional int32 timeoutSeconds = 3; @@ -2934,51 +3370,102 @@ message QuobyteVolumeSource { // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. -message RBDVolumeSource { +message RBDPersistentVolumeSource { // A collection of Ceph monitors. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it repeated string monitors = 1; // The rados image name. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it optional string image = 2; // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://kubernetes.io/docs/user-guide/volumes#rbd + // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional optional string fsType = 3; // The rados pool name. // Default is rbd. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional optional string pool = 4; // The rados user name. // Default is admin. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional optional string user = 5; // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional optional string keyring = 6; // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional SecretReference secretRef = 7; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional bool readOnly = 8; +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +message RBDVolumeSource { + // A collection of Ceph monitors. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + repeated string monitors = 1; + + // The rados image name. 
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + optional string image = 2; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 3; + + // The rados pool name. + // Default is rbd. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional string pool = 4; + + // The rados user name. + // Default is admin. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional string user = 5; + + // Keyring is the path to key ring for RBDUser. + // Default is /etc/ceph/keyring. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional string keyring = 6; + + // SecretRef is name of the authentication secret for RBDUser. If provided + // overrides keyring. + // Default is nil. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional optional LocalObjectReference secretRef = 7; // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional optional bool readOnly = 8; } @@ -2986,7 +3473,7 @@ message RBDVolumeSource { // RangeAllocation is not a public type. message RangeAllocation { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -3001,12 +3488,12 @@ message RangeAllocation { message ReplicationController { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the replication controller. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ReplicationControllerSpec spec = 2; @@ -3014,7 +3501,7 @@ message ReplicationController { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ReplicationControllerStatus status = 3; } @@ -3043,12 +3530,12 @@ message ReplicationControllerCondition { // ReplicationControllerList is a collection of replication controllers. 
message ReplicationControllerList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of replication controllers. - // More info: http://kubernetes.io/docs/user-guide/replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller repeated ReplicationController items = 2; } @@ -3057,7 +3544,7 @@ message ReplicationControllerSpec { // Replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. - // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller // +optional optional int32 replicas = 1; @@ -3071,13 +3558,13 @@ message ReplicationControllerSpec { // If Selector is empty, it is defaulted to the labels present on the Pod template. // Label keys and values that must match in order to be controlled by this replication // controller, if empty defaulted to labels on Pod template. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional map selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a TemplateRef. - // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional optional PodTemplateSpec template = 3; } @@ -3086,7 +3573,7 @@ message ReplicationControllerSpec { // controller. message ReplicationControllerStatus { // Replicas is the most recently oberved number of replicas. - // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller optional int32 replicas = 1; // The number of pods that have labels matching the labels of the pod template of the replication controller. @@ -3107,6 +3594,8 @@ message ReplicationControllerStatus { // Represents the latest available observations of a replication controller's current state. // +optional + // +patchMergeKey=type + // +patchStrategy=merge repeated ReplicationControllerCondition conditions = 6; } @@ -3127,17 +3616,17 @@ message ResourceFieldSelector { // ResourceQuota sets aggregate quota restrictions enforced per namespace message ResourceQuota { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired quota. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ResourceQuotaSpec spec = 2; // Status defines the actual enforced quota and its current usage. 
- // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ResourceQuotaStatus status = 3; } @@ -3145,19 +3634,19 @@ message ResourceQuota { // ResourceQuotaList is a list of ResourceQuota items. message ResourceQuotaList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ResourceQuota objects. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ repeated ResourceQuota items = 2; } // ResourceQuotaSpec defines the desired hard limits to enforce for Quota. message ResourceQuotaSpec { - // Hard is the set of desired hard limits for each named resource. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // hard is the set of desired hard limits for each named resource. + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional map hard = 1; @@ -3165,12 +3654,18 @@ message ResourceQuotaSpec { // If not specified, the quota matches all objects. // +optional repeated string scopes = 2; + + // scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota + // but expressed using ScopeSelectorOperator in combination with possible values. + // For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched. + // +optional + optional ScopeSelector scopeSelector = 3; } // ResourceQuotaStatus defines the enforced hard limits and observed use. message ResourceQuotaStatus { // Hard is the set of enforced hard limits for each named resource. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional map hard = 1; @@ -3182,14 +3677,14 @@ message ResourceQuotaStatus { // ResourceRequirements describes the compute resource requirements. message ResourceRequirements { // Limits describes the maximum amount of compute resources allowed. - // More info: http://kubernetes.io/docs/user-guide/compute-resources/ + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ // +optional map limits = 1; // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, // otherwise to an implementation-defined value. - // More info: http://kubernetes.io/docs/user-guide/compute-resources/ + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ // +optional map requests = 2; } @@ -3213,8 +3708,8 @@ message SELinuxOptions { optional string level = 4; } -// ScaleIOVolumeSource represents a persistent ScaleIO volume -message ScaleIOVolumeSource { +// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume +message ScaleIOPersistentVolumeSource { // The host address of the ScaleIO API Gateway. 
optional string gateway = 1; @@ -3223,21 +3718,21 @@ message ScaleIOVolumeSource { // SecretRef references to the secret for ScaleIO user and other // sensitive information. If this is not provided, Login operation will fail. - optional LocalObjectReference secretRef = 3; + optional SecretReference secretRef = 3; // Flag to enable/disable SSL communication with Gateway, default false // +optional optional bool sslEnabled = 4; - // The name of the Protection Domain for the configured storage (defaults to "default"). + // The name of the ScaleIO Protection Domain for the configured storage. // +optional optional string protectionDomain = 5; - // The Storage Pool associated with the protection domain (defaults to "default"). + // The ScaleIO Storage Pool associated with the protection domain. // +optional optional string storagePool = 6; - // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // +optional optional string storageMode = 7; @@ -3257,19 +3752,88 @@ message ScaleIOVolumeSource { optional bool readOnly = 10; } +// ScaleIOVolumeSource represents a persistent ScaleIO volume +message ScaleIOVolumeSource { + // The host address of the ScaleIO API Gateway. + optional string gateway = 1; + + // The name of the storage system as configured in ScaleIO. + optional string system = 2; + + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + optional LocalObjectReference secretRef = 3; + + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + optional bool sslEnabled = 4; + + // The name of the ScaleIO Protection Domain for the configured storage. + // +optional + optional string protectionDomain = 5; + + // The ScaleIO Storage Pool associated with the protection domain. + // +optional + optional string storagePool = 6; + + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // +optional + optional string storageMode = 7; + + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + optional string volumeName = 8; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 9; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 10; +} + +// A scope selector represents the AND of the selectors represented +// by the scoped-resource selector requirements. +message ScopeSelector { + // A list of scope selector requirements by scope of the resources. + // +optional + repeated ScopedResourceSelectorRequirement matchExpressions = 1; +} + +// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator +// that relates the scope name and values. +message ScopedResourceSelectorRequirement { + // The name of the scope that the selector applies to. + optional string scopeName = 1; + + // Represents a scope's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + optional string operator = 2; + + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + // the values array must be empty. + // This array is replaced during a strategic merge patch. + // +optional + repeated string values = 3; +} + // Secret holds secret data of a certain type. The total bytes of the values in // the Data field must be less than MaxSecretSize bytes. message Secret { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN - // or leading dot followed by valid DNS_SUBDOMAIN. - // The serialized form of the secret data is a base64 encoded string, - // representing the arbitrary (possibly non-string) data value here. - // Described in https://tools.ietf.org/html/rfc4648#section-4 + // Data contains the secret data. Each key must consist of alphanumeric + // characters, '-', '_' or '.'. The serialized form of the secret data is a + // base64 encoded string, representing the arbitrary (possibly non-string) + // data value here. Described in https://tools.ietf.org/html/rfc4648#section-4 // +optional map data = 2; @@ -3316,12 +3880,12 @@ message SecretKeySelector { // SecretList is a list of Secret. message SecretList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of secret objects. - // More info: http://kubernetes.io/docs/user-guide/secrets + // More info: https://kubernetes.io/docs/concepts/configuration/secret repeated Secret items = 2; } @@ -3349,6 +3913,18 @@ message SecretProjection { optional bool optional = 4; } +// SecretReference represents a Secret Reference. It has enough information to retrieve secret +// in any namespace +message SecretReference { + // Name is unique within a namespace to reference a secret resource. + // +optional + optional string name = 1; + + // Namespace defines the space within which the secret name must be unique. + // +optional + optional string namespace = 2; +} + // Adapts a Secret into a volume. // // The contents of the target Secret's Data field will be presented in a volume @@ -3356,7 +3932,7 @@ message SecretProjection { // Secret volumes support ownership management and SELinux relabeling. message SecretVolumeSource { // Name of the secret in the pod's namespace to use. - // More info: http://kubernetes.io/docs/user-guide/volumes#secrets + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret // +optional optional string secretName = 1; @@ -3412,6 +3988,13 @@ message SecurityContext { // +optional optional int64 runAsUser = 4; + // The GID to run the entrypoint of the container process. + // Uses runtime default if unset. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional int64 runAsGroup = 8; + // Indicates that the container must run as a non-root user. // If true, the Kubelet will validate the image at runtime to ensure that it // does not run as UID 0 (root) and fail to start the container if it does. 
@@ -3425,6 +4008,15 @@ message SecurityContext { // Default is false. // +optional optional bool readOnlyRootFilesystem = 6; + + // AllowPrivilegeEscalation controls whether a process can gain more + // privileges than its parent process. This bool directly controls if + // the no_new_privs flag will be set on the container process. + // AllowPrivilegeEscalation is true always when the container is: + // 1) run as Privileged + // 2) has CAP_SYS_ADMIN + // +optional + optional bool allowPrivilegeEscalation = 7; } // SerializedReference is a reference to serialized object. @@ -3439,19 +4031,19 @@ message SerializedReference { // will answer requests sent through the proxy. message Service { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a service. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ServiceSpec spec = 2; // Most recently observed status of the service. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ServiceStatus status = 3; } @@ -3462,19 +4054,21 @@ message Service { // * a set of secrets message ServiceAccount { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. - // More info: http://kubernetes.io/docs/user-guide/secrets + // More info: https://kubernetes.io/docs/concepts/configuration/secret // +optional + // +patchMergeKey=name + // +patchStrategy=merge repeated ObjectReference secrets = 2; // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret + // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod // +optional repeated LocalObjectReference imagePullSecrets = 3; @@ -3487,19 +4081,45 @@ message ServiceAccount { // ServiceAccountList is a list of ServiceAccount objects message ServiceAccountList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ServiceAccounts. 
- // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ repeated ServiceAccount items = 2; } +// ServiceAccountTokenProjection represents a projected service account token +// volume. This projection can be used to insert a service account token into +// the pods runtime filesystem for use against APIs (Kubernetes API Server or +// otherwise). +message ServiceAccountTokenProjection { + // Audience is the intended audience of the token. A recipient of a token + // must identify itself with an identifier specified in the audience of the + // token, and otherwise should reject the token. The audience defaults to the + // identifier of the apiserver. + // +optional + optional string audience = 1; + + // ExpirationSeconds is the requested duration of validity of the service + // account token. As the token approaches expiration, the kubelet volume + // plugin will proactively rotate the service account token. The kubelet will + // start trying to rotate the token if the token is older than 80 percent of + // its time to live or if the token is older than 24 hours.Defaults to 1 hour + // and must be at least 10 minutes. + // +optional + optional int64 expirationSeconds = 2; + + // Path is the path relative to the mount point of the file to project the + // token into. + optional string path = 3; +} + // ServiceList holds a list of services. message ServiceList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -3531,7 +4151,7 @@ message ServicePort { // of the 'port' field is used (an identity map). // This field is ignored for services with clusterIP=None, and should be // omitted or set equal to the 'port' field. - // More info: http://kubernetes.io/docs/user-guide/services#defining-a-service + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; @@ -3539,7 +4159,7 @@ message ServicePort { // Usually assigned by the system. If specified, it will be allocated to the service // if unused or else creation of the service will fail. // Default is to auto-allocate a port if the ServiceType of this Service requires one. - // More info: http://kubernetes.io/docs/user-guide/services#type--nodeport + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport // +optional optional int32 nodePort = 5; } @@ -3558,7 +4178,9 @@ message ServiceProxyOptions { // ServiceSpec describes the attributes that a user creates on a service. message ServiceSpec { // The list of ports that are exposed by this service. - // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +patchMergeKey=port + // +patchStrategy=merge repeated ServicePort ports = 1; // Route service traffic to pods with label keys and values matching this @@ -3566,7 +4188,7 @@ message ServiceSpec { // external process managing its endpoints, which Kubernetes will not // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. 
// Ignored if type is ExternalName. - // More info: http://kubernetes.io/docs/user-guide/services#overview + // More info: https://kubernetes.io/docs/concepts/services-networking/service/ // +optional map selector = 2; @@ -3578,7 +4200,7 @@ message ServiceSpec { // can be specified for headless services when proxying is not required. // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if // type is ExternalName. - // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +optional optional string clusterIP = 3; @@ -3595,7 +4217,7 @@ message ServiceSpec { // "LoadBalancer" builds on NodePort and creates an // external load-balancer (if supported in the current cloud) which routes // to the clusterIP. - // More info: http://kubernetes.io/docs/user-guide/services#overview + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types // +optional optional string type = 4; @@ -3603,26 +4225,15 @@ message ServiceSpec { // will also accept traffic for this service. These IPs are not managed by // Kubernetes. The user is responsible for ensuring that traffic arrives // at a node with this IP. A common example is external load-balancers - // that are not part of the Kubernetes system. A previous form of this - // functionality exists as the deprecatedPublicIPs field. When using this - // field, callers should also clear the deprecatedPublicIPs field. + // that are not part of the Kubernetes system. // +optional repeated string externalIPs = 5; - // deprecatedPublicIPs is deprecated and replaced by the externalIPs field - // with almost the exact same semantics. This field is retained in the v1 - // API for compatibility until at least 8/20/2016. It will be removed from - // any new API revisions. If both deprecatedPublicIPs *and* externalIPs are - // set, deprecatedPublicIPs is used. - // +k8s:conversion-gen=false - // +optional - repeated string deprecatedPublicIPs = 6; - // Supports "ClientIP" and "None". Used to maintain session affinity. // Enable client IP based session affinity. // Must be ClientIP or None. // Defaults to None. - // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +optional optional string sessionAffinity = 7; @@ -3637,15 +4248,46 @@ message ServiceSpec { // If specified and supported by the platform, this will restrict traffic through the cloud-provider // load-balancer will be restricted to the specified client IPs. This field will be ignored if the // cloud-provider does not support the feature." - // More info: http://kubernetes.io/docs/user-guide/services-firewalls + // More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ // +optional repeated string loadBalancerSourceRanges = 9; // externalName is the external reference that kubedns or equivalent will // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. + // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // and requires Type to be ExternalName. 
// +optional optional string externalName = 10; + + // externalTrafficPolicy denotes if this Service desires to route external + // traffic to node-local or cluster-wide endpoints. "Local" preserves the + // client source IP and avoids a second hop for LoadBalancer and Nodeport + // type services, but risks potentially imbalanced traffic spreading. + // "Cluster" obscures the client source IP and may cause a second hop to + // another node, but should have good overall load-spreading. + // +optional + optional string externalTrafficPolicy = 11; + + // healthCheckNodePort specifies the healthcheck nodePort for the service. + // If not specified, HealthCheckNodePort is created by the service api + // backend with the allocated nodePort. Will use user-specified nodePort value + // if specified by the client. Only effects when Type is set to LoadBalancer + // and ExternalTrafficPolicy is set to Local. + // +optional + optional int32 healthCheckNodePort = 12; + + // publishNotReadyAddresses, when set to true, indicates that DNS implementations + // must publish the notReadyAddresses of subsets for the Endpoints associated with + // the Service. The default value is false. + // The primary use case for setting this field is to use a StatefulSet's Headless Service + // to propagate SRV records for its Pods without respect to their readiness for purpose + // of peer discovery. + // +optional + optional bool publishNotReadyAddresses = 13; + + // sessionAffinityConfig contains the configurations of session affinity. + // +optional + optional SessionAffinityConfig sessionAffinityConfig = 14; } // ServiceStatus represents the current status of a service. @@ -3656,9 +4298,83 @@ message ServiceStatus { optional LoadBalancerStatus loadBalancer = 1; } +// SessionAffinityConfig represents the configurations of session affinity. +message SessionAffinityConfig { + // clientIP contains the configurations of Client IP based session affinity. + // +optional + optional ClientIPConfig clientIP = 1; +} + +// Represents a StorageOS persistent volume resource. +message StorageOSPersistentVolumeSource { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + optional string volumeName = 1; + + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + optional string volumeNamespace = 2; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 3; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 4; + + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + optional ObjectReference secretRef = 5; +} + +// Represents a StorageOS persistent volume resource. +message StorageOSVolumeSource { + // VolumeName is the human-readable name of the StorageOS volume. 
Volume + // names are only unique within a namespace. + optional string volumeName = 1; + + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + optional string volumeNamespace = 2; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 3; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 4; + + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + optional LocalObjectReference secretRef = 5; +} + +// Sysctl defines a kernel parameter to be set message Sysctl { + // Name of a property to set optional string name = 1; + // Value of a property to set optional string value = 2; } @@ -3668,10 +4384,14 @@ message TCPSocketAction { // Number must be in the range 1 to 65535. // Name must be an IANA_SVC_NAME. optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1; + + // Optional: Host name to connect to, defaults to the pod IP. + // +optional + optional string host = 2; } -// The node this Taint is attached to has the effect "effect" on -// any pod that that does not tolerate the Taint. +// The node this Taint is attached to has the "effect" on +// any pod that does not tolerate the Taint. message Taint { // Required. The taint key to be applied to a node. optional string key = 1; @@ -3724,11 +4444,33 @@ message Toleration { optional int64 tolerationSeconds = 5; } +// A topology selector requirement is a selector that matches given label. +// This is an alpha feature and may change in the future. +message TopologySelectorLabelRequirement { + // The label key that the selector applies to. + optional string key = 1; + + // An array of string values. One value must match the label to be selected. + // Each entry in Values is ORed. + repeated string values = 2; +} + +// A topology selector term represents the result of label queries. +// A null or empty topology selector term matches no objects. +// The requirements of them are ANDed. +// It provides a subset of functionality as NodeSelectorTerm. +// This is an alpha feature and may change in the future. +message TopologySelectorTerm { + // A list of topology selector requirements by labels. + // +optional + repeated TopologySelectorLabelRequirement matchLabelExpressions = 1; +} + // Volume represents a named volume in a pod that may be accessed by any container in the pod. message Volume { // Volume's name. // Must be a DNS_LABEL and unique within the pod. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 1; // VolumeSource represents the location and type of the mounted volume. 
@@ -3737,6 +4479,15 @@ message Volume { optional VolumeSource volumeSource = 2; } +// volumeDevice describes a mapping of a raw block device within a container. +message VolumeDevice { + // name must match the name of a persistentVolumeClaim in the pod + optional string name = 1; + + // devicePath is the path inside of the container that the device will be mapped to. + optional string devicePath = 2; +} + // VolumeMount describes a mounting of a Volume within a container. message VolumeMount { // This must match the Name of a Volume. @@ -3755,18 +4506,38 @@ message VolumeMount { // Defaults to "" (volume's root). // +optional optional string subPath = 4; + + // mountPropagation determines how mounts are propagated from the host + // to container and the other way around. + // When not set, MountPropagationHostToContainer is used. + // This field is beta in 1.10. + // +optional + optional string mountPropagation = 5; +} + +// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. +message VolumeNodeAffinity { + // Required specifies hard node constraints that must be met. + optional NodeSelector required = 1; } // Projection that may be projected along with other supported volume types message VolumeProjection { // information about the secret data to project + // +optional optional SecretProjection secret = 1; // information about the downwardAPI data to project + // +optional optional DownwardAPIProjection downwardAPI = 2; // information about the configMap data to project + // +optional optional ConfigMapProjection configMap = 3; + + // information about the serviceAccountToken data to project + // +optional + optional ServiceAccountTokenProjection serviceAccountToken = 4; } // Represents the source of a volume to mount. @@ -3776,7 +4547,7 @@ message VolumeSource { // machine that is directly exposed to the container. This is generally // used for system agents or other privileged things that are allowed // to see the host machine. Most containers will NOT need this. - // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath // --- // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not // mount host directories as read/write. @@ -3784,66 +4555,68 @@ message VolumeSource { optional HostPathVolumeSource hostPath = 1; // EmptyDir represents a temporary directory that shares a pod's lifetime. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional optional EmptyDirVolumeSource emptyDir = 2; // GCEPersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3; // AWSElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4; // GitRepo represents a git repository at a particular revision. 
+ // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + // into the Pod's container. // +optional optional GitRepoVolumeSource gitRepo = 5; // Secret represents a secret that should populate this volume. - // More info: http://kubernetes.io/docs/user-guide/volumes#secrets + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret // +optional optional SecretVolumeSource secret = 6; // NFS represents an NFS mount on the host that shares a pod's lifetime - // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional optional NFSVolumeSource nfs = 7; // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md // +optional optional ISCSIVolumeSource iscsi = 8; // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md // +optional optional GlusterfsVolumeSource glusterfs = 9; // PersistentVolumeClaimVolumeSource represents a reference to a // PersistentVolumeClaim in the same namespace. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md // +optional optional RBDVolumeSource rbd = 11; // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. + // provisioned/attached using an exec based plugin. // +optional optional FlexVolumeSource flexVolume = 12; // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional optional CinderVolumeSource cinder = 13; @@ -3896,6 +4669,10 @@ message VolumeSource { // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. // +optional optional ScaleIOVolumeSource scaleIO = 25; + + // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + // +optional + optional StorageOSVolumeSource storageos = 27; } // Represents a vSphere volume resource. @@ -3908,6 +4685,14 @@ message VsphereVirtualDiskVolumeSource { // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional optional string fsType = 2; + + // Storage Policy Based Management (SPBM) profile name. + // +optional + optional string storagePolicyName = 3; + + // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. 
+ // +optional + optional string storagePolicyID = 4; } // The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) diff --git a/vendor/k8s.io/api/core/v1/objectreference.go b/vendor/k8s.io/api/core/v1/objectreference.go new file mode 100644 index 000000000..ee5335ee8 --- /dev/null +++ b/vendor/k8s.io/api/core/v1/objectreference.go @@ -0,0 +1,33 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that +// intend only to get a reference to that object. This simplifies the event recording interface. +func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/client-go/pkg/api/v1/register.go b/vendor/k8s.io/api/core/v1/register.go similarity index 87% rename from vendor/k8s.io/client-go/pkg/api/v1/register.go rename to vendor/k8s.io/api/core/v1/register.go index 5c2dfddd1..1aac0cb41 100644 --- a/vendor/k8s.io/client-go/pkg/api/v1/register.go +++ b/vendor/k8s.io/api/core/v1/register.go @@ -34,11 +34,14 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs, addFastPathConversionFuncs) + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) AddToScheme = SchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Pod{}, diff --git a/vendor/k8s.io/api/core/v1/resource.go b/vendor/k8s.io/api/core/v1/resource.go new file mode 100644 index 000000000..bb8041254 --- /dev/null +++ b/vendor/k8s.io/api/core/v1/resource.go @@ -0,0 +1,56 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" +) + +// Returns string version of ResourceName. +func (self ResourceName) String() string { + return string(self) +} + +// Returns the CPU limit if specified. +func (self *ResourceList) Cpu() *resource.Quantity { + if val, ok := (*self)[ResourceCPU]; ok { + return &val + } + return &resource.Quantity{Format: resource.DecimalSI} +} + +// Returns the Memory limit if specified. +func (self *ResourceList) Memory() *resource.Quantity { + if val, ok := (*self)[ResourceMemory]; ok { + return &val + } + return &resource.Quantity{Format: resource.BinarySI} +} + +func (self *ResourceList) Pods() *resource.Quantity { + if val, ok := (*self)[ResourcePods]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) StorageEphemeral() *resource.Quantity { + if val, ok := (*self)[ResourceEphemeralStorage]; ok { + return &val + } + return &resource.Quantity{} +} diff --git a/vendor/k8s.io/api/core/v1/taint.go b/vendor/k8s.io/api/core/v1/taint.go new file mode 100644 index 000000000..7b606a309 --- /dev/null +++ b/vendor/k8s.io/api/core/v1/taint.go @@ -0,0 +1,33 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import "fmt" + +// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, +// if the two taints have same key:effect, regard as they match. +func (t *Taint) MatchTaint(taintToMatch *Taint) bool { + return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect +} + +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. +func (t *Taint) ToString() string { + if len(t.Value) == 0 { + return fmt.Sprintf("%v:%v", t.Key, t.Effect) + } + return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) +} diff --git a/vendor/k8s.io/api/core/v1/toleration.go b/vendor/k8s.io/api/core/v1/toleration.go new file mode 100644 index 000000000..b203d335b --- /dev/null +++ b/vendor/k8s.io/api/core/v1/toleration.go @@ -0,0 +1,56 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by , +// if the two tolerations have same combination, regard as they match. +// TODO: uniqueness check for tolerations in api validations. 
+func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { + return t.Key == tolerationToMatch.Key && + t.Effect == tolerationToMatch.Effect && + t.Operator == tolerationToMatch.Operator && + t.Value == tolerationToMatch.Value +} + +// ToleratesTaint checks if the toleration tolerates the taint. +// The matching follows the rules below: +// (1) Empty toleration.effect means to match all taint effects, +// otherwise taint effect must equal to toleration.effect. +// (2) If toleration.operator is 'Exists', it means to match all taint values. +// (3) Empty toleration.key means to match all taint keys. +// If toleration.key is empty, toleration.operator must be 'Exists'; +// this combination means to match all taint values and all taint keys. +func (t *Toleration) ToleratesTaint(taint *Taint) bool { + if len(t.Effect) > 0 && t.Effect != taint.Effect { + return false + } + + if len(t.Key) > 0 && t.Key != taint.Key { + return false + } + + // TODO: Use proper defaulting when Toleration becomes a field of PodSpec + switch t.Operator { + // empty operator means Equal + case "", TolerationOpEqual: + return t.Value == taint.Value + case TolerationOpExists: + return true + default: + return false + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/types.go b/vendor/k8s.io/api/core/v1/types.go similarity index 68% rename from vendor/k8s.io/client-go/pkg/api/v1/types.go rename to vendor/k8s.io/api/core/v1/types.go index a75a1d0f0..b941e0ac2 100644 --- a/vendor/k8s.io/client-go/pkg/api/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -19,200 +19,10 @@ package v1 import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" ) -// The comments for the structs and fields can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored and not exported to the SwaggerAPI. -// -// The aforementioned methods can be generated by hack/update-generated-swagger-docs.sh - -// Common string formats -// --------------------- -// Many fields in this API have formatting requirements. The commonly used -// formats are defined here. -// -// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier" -// in the C language. This is captured by the following regex: -// [A-Za-z_][A-Za-z0-9_]* -// This defines the format, but not the length restriction, which should be -// specified at the definition of any field of this type. -// -// DNS_LABEL: This is a string, no more than 63 characters long, that conforms -// to the definition of a "label" in RFCs 1035 and 1123. This is captured -// by the following regex: -// [a-z0-9]([-a-z0-9]*[a-z0-9])? -// -// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms -// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured -// by the following regex: -// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* -// or more simply: -// DNS_LABEL(\.DNS_LABEL)* -// -// IANA_SVC_NAME: This is a string, no more than 15 characters long, that -// conforms to the definition of IANA service name in RFC 6335. 
-// It must contains at least one letter [a-z] and it must contains only [a-z0-9-]. -// Hypens ('-') cannot be leading or trailing character of the string -// and cannot be adjacent to other hyphens. - -// ObjectMeta is metadata that all persisted resources must have, which includes all objects -// users must create. -// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. -// +k8s:openapi-gen=false -type ObjectMeta struct { - // Name must be unique within a namespace. Is required when creating resources, although - // some resources may allow a client to request the generation of an appropriate name - // automatically. Name is primarily intended for creation idempotence and configuration - // definition. - // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - - // GenerateName is an optional prefix, used by the server, to generate a unique - // name ONLY IF the Name field has not been provided. - // If this field is used, the name returned to the client will be different - // than the name passed. This value will also be combined with a unique suffix. - // The provided value has the same validation rules as the Name field, - // and may be truncated by the length of the suffix required to make the value - // unique on the server. - // - // If this field is specified and the generated name exists, the server will - // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). - // - // Applied only if Name is not specified. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency - // +optional - GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` - - // Namespace defines the space within each name must be unique. An empty namespace is - // equivalent to the "default" namespace, but "default" is the canonical representation. - // Not all objects are required to be scoped to a namespace - the value of this field for - // those objects will be empty. - // - // Must be a DNS_LABEL. - // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/namespaces - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` - - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - // +optional - SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` - - // UID is the unique in time and space value for this object. It is typically generated by - // the server on successful creation of a resource and is not allowed to change on PUT - // operations. - // - // Populated by the system. - // Read-only. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids - // +optional - UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` - - // An opaque value that represents the internal version of this object that can - // be used by clients to determine when objects have changed. May be used for optimistic - // concurrency, change detection, and the watch operation on a resource or set of resources. 
- // Clients must treat these values as opaque and passed unmodified back to the server. - // They may only be valid for a particular resource or set of resources. - // - // Populated by the system. - // Read-only. - // Value must be treated as opaque by clients and . - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency - // +optional - ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` - - // A sequence number representing a specific generation of the desired state. - // Populated by the system. Read-only. - // +optional - Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"` - - // CreationTimestamp is a timestamp representing the server time when this object was - // created. It is not guaranteed to be set in happens-before order across separate operations. - // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // - // Populated by the system. - // Read-only. - // Null for lists. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` - - // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This - // field is set by the server when a graceful deletion is requested by the user, and is not - // directly settable by a client. The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field. Once set, - // this value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard - // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the - // API. In the presence of network partitions, this object may still exist after this - // timestamp, until an administrator or automated process can determine the resource is - // fully terminated. - // If not set, graceful deletion of the object has not been requested. - // - // Populated by the system when a graceful deletion is requested. - // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - DeletionTimestamp *metav1.Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` - - // Number of seconds allowed for this object to gracefully terminate before - // it will be removed from the system. Only set when deletionTimestamp is also set. - // May only be shortened. - // Read-only. - // +optional - DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"` - - // Map of string keys and values that can be used to organize and categorize - // (scope and select) objects. May match selectors of replication controllers - // and services. 
- // More info: http://kubernetes.io/docs/user-guide/labels - // +optional - Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` - - // Annotations is an unstructured key value map stored with a resource that may be - // set by external tools to store and retrieve arbitrary metadata. They are not - // queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations - // +optional - Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` - - // List of objects depended by this object. If ALL objects in the list have - // been deleted, this object will be garbage collected. If this object is managed by a controller, - // then an entry in this list will point to this controller, with the controller field set to true. - // There cannot be more than one managing controller. - // +optional - OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` - - // Must be empty before the object is deleted from the registry. Each entry - // is an identifier for the responsible component that will remove the entry - // from the list. If the deletionTimestamp of the object is non-nil, entries - // in this list can only be removed. - // +optional - Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` - - // The name of the cluster which the object belongs to. - // This is used to distinguish resources with same name and namespace in different clusters. - // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. - // +optional - ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` -} - const ( // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients NamespaceDefault string = "default" @@ -224,7 +34,7 @@ const ( type Volume struct { // Volume's name. // Must be a DNS_LABEL and unique within the pod. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // VolumeSource represents the location and type of the mounted volume. // If not specified, the Volume is implied to be an EmptyDir. @@ -239,62 +49,64 @@ type VolumeSource struct { // machine that is directly exposed to the container. This is generally // used for system agents or other privileged things that are allowed // to see the host machine. Most containers will NOT need this. - // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath // --- // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not // mount host directories as read/write. // +optional HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"` // EmptyDir represents a temporary directory that shares a pod's lifetime. 
- // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"` // GCEPersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"` // AWSElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"` // GitRepo represents a git repository at a particular revision. + // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + // into the Pod's container. // +optional GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"` // Secret represents a secret that should populate this volume. - // More info: http://kubernetes.io/docs/user-guide/volumes#secrets + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret // +optional Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"` // NFS represents an NFS mount on the host that shares a pod's lifetime - // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md // +optional ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` // PersistentVolumeClaimVolumeSource represents a reference to a // PersistentVolumeClaim in the same namespace. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
- // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md // +optional RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. + // provisioned/attached using an exec based plugin. // +optional FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime @@ -334,6 +146,9 @@ type VolumeSource struct { // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. // +optional ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"` + // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + // +optional + StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"` } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. @@ -342,7 +157,7 @@ type VolumeSource struct { // type of volume that is owned by someone else (the system). type PersistentVolumeClaimVolumeSource struct { // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"` // Will force the ReadOnly setting in VolumeMounts. // Default false. @@ -355,45 +170,45 @@ type PersistentVolumeClaimVolumeSource struct { type PersistentVolumeSource struct { // GCEPersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"` // AWSElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"` // HostPath represents a directory on the host. // Provisioned by a developer or tester. // This is useful for single-node development and testing only! // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
- // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath // +optional HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` // NFS represents an NFS mount on the host. Provisioned by an admin. - // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md // +optional - RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` + RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // +optional - ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` + ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional - Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` + Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime // +optional - CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"` + CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"` // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. // +optional FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"` @@ -401,13 +216,12 @@ type PersistentVolumeSource struct { // +optional Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"` // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. + // provisioned/attached using an exec based plugin. // +optional - FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` + FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. 
// +optional - AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"` + AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"` // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine // +optional VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"` @@ -424,43 +238,52 @@ type PersistentVolumeSource struct { PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"` // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. // +optional - ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"` + ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"` + // Local represents directly-attached storage with node affinity + // +optional + Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"` + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // +optional + StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` + // CSI represents storage that handled by an external CSI driver (Beta feature). + // +optional + CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"` } const ( - // AlphaStorageClassAnnotation represents the previous alpha storage class - // annotation. It's currently still used and will be held for backwards - // compatibility - AlphaStorageClassAnnotation = "volume.alpha.kubernetes.io/storage-class" - // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. // It's currently still used and will be held for backwards compatibility BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" + + // MountOptionAnnotation defines mount option annotation used in PVs + MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options" ) -// +genclient=true -// +nonNamespaced=true +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PersistentVolume (PV) is a storage resource provisioned by an administrator. // It is analogous to a node. -// More info: http://kubernetes.io/docs/user-guide/persistent-volumes +// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes type PersistentVolume struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines a specification of a persistent volume owned by the cluster. // Provisioned by an administrator. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes // +optional Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status represents the current information/status for the persistent volume. // Populated by the system. // Read-only. 
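Illustrative sketch, not part of the vendored diff: BetaStorageClassAnnotation and the new MountOptionAnnotation are plain ObjectMeta annotations rather than typed Spec fields. A hedged example, assuming the vendored import paths and with placeholder NFS values:

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// annotatedPV attaches the annotation-based fallbacks to a PersistentVolume:
// a storage class via the beta annotation and mount options via the new
// MountOptionAnnotation constant.
func annotatedPV() *v1.PersistentVolume {
	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "legacy-annotated-pv",
			Annotations: map[string]string{
				v1.BetaStorageClassAnnotation: "slow",             // pre-Spec.StorageClassName fallback
				v1.MountOptionAnnotation:      "nfsvers=4.1,hard", // new in this revision
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				NFS: &v1.NFSVolumeSource{Server: "nfs.example.com", Path: "/exports/data"},
			},
		},
	}
}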
- // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes // +optional Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -468,31 +291,52 @@ type PersistentVolume struct { // PersistentVolumeSpec is the specification of a persistent volume. type PersistentVolumeSpec struct { // A description of the persistent volume's resources and capacity. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity // +optional Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` // The actual volume backing the persistent volume. PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"` // AccessModes contains all ways the volume can be mounted. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes // +optional AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. // Expected to be non-nil when bound. // claim.VolumeName is the authoritative bind between PV and PVC. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding // +optional ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"` // What happens to a persistent volume when released from its claim. - // Valid options are Retain (default) and Recycle. - // Recycling must be supported by the volume plugin underlying this persistent volume. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy + // Valid options are Retain (default for manually created PersistentVolumes), Delete (default + // for dynamically provisioned PersistentVolumes), and Recycle (deprecated). + // Recycle must be supported by the volume plugin underlying this PersistentVolume. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming // +optional PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"` // Name of StorageClass to which this persistent volume belongs. Empty value // means that this volume does not belong to any StorageClass. // +optional StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"` + // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will + // simply fail if one is invalid. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options + // +optional + MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"` + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is an alpha feature and may change in the future. 
+ // +optional + VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"` + // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. + // This field influences the scheduling of pods that use this volume. + // +optional + NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"` +} + +// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. +type VolumeNodeAffinity struct { + // Required specifies hard node constraints that must be met. + Required *NodeSelector `json:"required,omitempty" protobuf:"bytes,1,opt,name=required"` } // PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes. @@ -510,10 +354,20 @@ const ( PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" ) +// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. +type PersistentVolumeMode string + +const ( + // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device. + PersistentVolumeBlock PersistentVolumeMode = "Block" + // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem. + PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem" +) + // PersistentVolumeStatus is the current status of a persistent volume. type PersistentVolumeStatus struct { // Phase indicates if a volume is available, bound to a claim, or released by a claim. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#phase + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase // +optional Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"` // A human-readable message indicating details about why the volume is in this state. @@ -525,49 +379,54 @@ type PersistentVolumeStatus struct { Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // PersistentVolumeList is a list of PersistentVolume items. type PersistentVolumeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of persistent volumes. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PersistentVolumeClaim is a user's request for and claim to a persistent volume type PersistentVolumeClaim struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired characteristics of a volume requested by a pod author. 
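Illustrative sketch, not part of the vendored diff: a PersistentVolume exercising the new VolumeMode and NodeAffinity fields. The Local source's Path field is defined elsewhere in this file and is assumed here; the device path and hostname are placeholders:

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// localBlockPV builds a directly-attached PV that is pinned to one node via
// the new VolumeNodeAffinity type and exposed as a raw block device via the
// new PersistentVolumeMode field.
func localBlockPV(node string) *v1.PersistentVolume {
	block := v1.PersistentVolumeBlock
	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "local-" + node},
		Spec: v1.PersistentVolumeSpec{
			Capacity:    v1.ResourceList{v1.ResourceStorage: resource.MustParse("100Gi")},
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Local: &v1.LocalVolumeSource{Path: "/dev/sdb"}, // Path field assumed from elsewhere in this file
			},
			VolumeMode: &block, // raw block device, no filesystem
			NodeAffinity: &v1.VolumeNodeAffinity{
				Required: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{{
						MatchExpressions: []v1.NodeSelectorRequirement{{
							Key:      "kubernetes.io/hostname",
							Operator: v1.NodeSelectorOpIn,
							Values:   []string{node},
						}},
					}},
				},
			},
		},
	}
}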
- // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status represents the current information/status of a persistent volume claim. // Read-only. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // PersistentVolumeClaimList is a list of PersistentVolumeClaim items. type PersistentVolumeClaimList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // A list of persistent volume claims. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -575,23 +434,58 @@ type PersistentVolumeClaimList struct { // and allows a Source for provider-specific attributes type PersistentVolumeClaimSpec struct { // AccessModes contains the desired access modes the volume should have. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 // +optional AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` // A label query over volumes to consider for binding. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` // Resources represents the minimum resources the volume should have. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` // VolumeName is the binding reference to the PersistentVolume backing this claim. // +optional VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"` // Name of the StorageClass required by the claim. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1 + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 // +optional StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // This is an alpha feature and may change in the future. 
+ // +optional + VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` +} + +// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type +type PersistentVolumeClaimConditionType string + +const ( + // PersistentVolumeClaimResizing - a user trigger resize of pvc has been started + PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" + // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node + PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending" +) + +// PersistentVolumeClaimCondition contails details about state of pvc +type PersistentVolumeClaimCondition struct { + Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // Unique, this should be a short, machine understandable string that gives the reason + // for condition's last transition. If it reports "ResizeStarted" that means the underlying + // persistent volume is being resized. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } // PersistentVolumeClaimStatus is the current status of a persistent volume claim. @@ -600,12 +494,18 @@ type PersistentVolumeClaimStatus struct { // +optional Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"` // AccessModes contains the actual access modes the volume backing the PVC has. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 // +optional AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` // Represents the actual resources of the underlying volume. // +optional Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` + // Current Condition of persistent volume claim. If underlying persistent volume is being + // resized then the Condition will be set to 'ResizeStarted'. 
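Illustrative sketch, not part of the vendored diff: a claim that requests a raw block volume via the new volumeMode field, plus a helper that reads the new resize-related conditions:

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// rawBlockClaim requests a raw block volume using the new volumeMode field.
func rawBlockClaim() *v1.PersistentVolumeClaim {
	block := v1.PersistentVolumeBlock
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "raw-claim", Namespace: "default"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			VolumeMode:  &block,
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("10Gi")},
			},
		},
	}
}

// fsResizePending reports whether the claim is waiting for an on-node
// filesystem resize, using the new condition type added above.
func fsResizePending(pvc *v1.PersistentVolumeClaim) bool {
	for _, c := range pvc.Status.Conditions {
		if c.Type == v1.PersistentVolumeClaimFileSystemResizePending && c.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}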
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` } type PersistentVolumeAccessMode string @@ -650,12 +550,41 @@ const ( ClaimLost PersistentVolumeClaimPhase = "Lost" ) +type HostPathType string + +const ( + // For backwards compatible, leave it empty if unset + HostPathUnset HostPathType = "" + // If nothing exists at the given path, an empty directory will be created there + // as needed with file mode 0755, having the same group and ownership with Kubelet. + HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate" + // A directory must exist at the given path + HostPathDirectory HostPathType = "Directory" + // If nothing exists at the given path, an empty file will be created there + // as needed with file mode 0644, having the same group and ownership with Kubelet. + HostPathFileOrCreate HostPathType = "FileOrCreate" + // A file must exist at the given path + HostPathFile HostPathType = "File" + // A UNIX socket must exist at the given path + HostPathSocket HostPathType = "Socket" + // A character device must exist at the given path + HostPathCharDev HostPathType = "CharDevice" + // A block device must exist at the given path + HostPathBlockDev HostPathType = "BlockDevice" +) + // Represents a host path mapped into a pod. // Host path volumes do not support ownership management or SELinux relabeling. type HostPathVolumeSource struct { // Path of the directory on the host. - // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // If the path is a symlink, it will follow the link to the real path. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath Path string `json:"path" protobuf:"bytes,1,opt,name=path"` + // Type for HostPath Volume + // Defaults to "" + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // +optional + Type *HostPathType `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"` } // Represents an empty directory for a pod. @@ -664,25 +593,33 @@ type EmptyDirVolumeSource struct { // What type of storage medium should back this directory. // The default is "" which means to use the node's default medium. // Must be an empty string (default) or Memory. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"` + // Total amount of local storage required for this EmptyDir volume. + // The size limit is also applicable for memory medium. + // The maximum usage on memory medium EmptyDir would be the minimum value between + // the SizeLimit specified here and the sum of memory limits of all containers in a pod. + // The default is nil which means that the limit is undefined. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"` } // Represents a Glusterfs mount that lasts the lifetime of a pod. // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsVolumeSource struct { // EndpointsName is the endpoint name that details Glusterfs topology. 
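Illustrative sketch, not part of the vendored diff: the new HostPathType and EmptyDirVolumeSource.SizeLimit fields are both pointers, so omitting them keeps the previous behaviour. With placeholder paths:

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// typedScratchVolumes shows the new HostPathVolumeSource.Type and
// EmptyDirVolumeSource.SizeLimit fields.
func typedScratchVolumes() []v1.Volume {
	hostPathType := v1.HostPathDirectoryOrCreate
	sizeLimit := resource.MustParse("1Gi")
	return []v1.Volume{
		{
			Name: "socket-dir",
			VolumeSource: v1.VolumeSource{
				HostPath: &v1.HostPathVolumeSource{
					Path: "/var/run/example", // placeholder path
					Type: &hostPathType,      // created with mode 0755 if missing
				},
			},
		},
		{
			Name: "scratch",
			VolumeSource: v1.VolumeSource{
				EmptyDir: &v1.EmptyDirVolumeSource{SizeLimit: &sizeLimit},
			},
		},
	}
}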
- // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` // Path is the Glusterfs volume path. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod Path string `json:"path" protobuf:"bytes,2,opt,name=path"` // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } @@ -691,42 +628,86 @@ type GlusterfsVolumeSource struct { // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { // A collection of Ceph monitors. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // The rados image name. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://kubernetes.io/docs/user-guide/volumes#rbd + // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` // The rados pool name. // Default is rbd. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // The rados user name. // Default is admin. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. 
- // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDPersistentVolumeSource struct { + // A collection of Ceph monitors. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` + // The rados image name. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // The rados pool name. + // Default is rbd. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` + // The rados user name. + // Default is admin. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` + // Keyring is the path to key ring for RBDUser. + // Default is /etc/ceph/keyring. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` + // SecretRef is name of the authentication secret for RBDUser. If provided + // overrides keyring. + // Default is nil. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` } @@ -737,45 +718,113 @@ type RBDVolumeSource struct { // Cinder volumes support ownership management and SELinux relabeling. type CinderVolumeSource struct { // volume id used to identify the volume in cinder - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type to mount. 
// Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + // Optional: points to a secret object containing parameters used to connect + // to OpenStack. + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"` +} + +// Represents a cinder volume resource in Openstack. +// A Cinder volume must exist before mounting to a container. +// The volume must also be in the same region as the kubelet. +// Cinder volumes support ownership management and SELinux relabeling. +type CinderPersistentVolumeSource struct { + // volume id used to identify the volume in cinder + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + // Optional: points to a secret object containing parameters used to connect + // to OpenStack. + // +optional + SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"` } // Represents a Ceph Filesystem mount that lasts the lifetime of a pod // Cephfs volumes do not support ownership management or SELinux relabeling. 
type CephFSVolumeSource struct { // Required: Monitors is a collection of Ceph monitors - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // Optional: User is the rados user name, default is admin - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` +} + +// SecretReference represents a Secret Reference. It has enough information to retrieve secret +// in any namespace +type SecretReference struct { + // Name is unique within a namespace to reference a secret resource. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // Namespace defines the space within which the secret name must be unique. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. 
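Illustrative sketch, not part of the vendored diff: the PV-level sources introduced in this revision take the new namespaced SecretReference, while the in-pod variants keep LocalObjectReference resolved in the pod's own namespace. Shown here with RBDPersistentVolumeSource and placeholder monitor, image and secret names:

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// rbdPV builds a PersistentVolume whose RBD source reads its Ceph secret
// from an explicit namespace via the new SecretReference type.
func rbdPV() *v1.PersistentVolume {
	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "rbd-pv"},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				RBD: &v1.RBDPersistentVolumeSource{
					CephMonitors: []string{"10.0.0.1:6789"}, // placeholder monitor
					RBDImage:     "example-image",
					RBDPool:      "rbd",
					RadosUser:    "admin",
					SecretRef: &v1.SecretReference{
						Name:      "ceph-secret",
						Namespace: "ceph", // cross-namespace lookup, new with SecretReference
					},
				},
			},
		},
	}
}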
+type CephFSPersistentVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` + // Optional: User is the rados user name, default is admin + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` } @@ -797,8 +846,9 @@ type FlockerVolumeSource struct { type StorageMedium string const ( - StorageMediumDefault StorageMedium = "" // use whatever the default is for the node - StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) + StorageMediumDefault StorageMedium = "" // use whatever the default is for the node, assume anything we don't explicitly handle is this + StorageMediumMemory StorageMedium = "Memory" // use memory (e.g. tmpfs on linux) + StorageMediumHugePages StorageMedium = "HugePages" // use hugepages ) // Protocol defines network protocols supported for things like container ports. @@ -819,12 +869,12 @@ const ( // PDs support ownership management and SELinux relabeling. type GCEPersistentDiskVolumeSource struct { // Unique name of the PD resource in GCE. Used to identify the disk in GCE. - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` @@ -832,12 +882,12 @@ type GCEPersistentDiskVolumeSource struct { // If omitted, the default is to mount by volume name. // Examples: For volume /dev/sda1, you specify the partition as "1". // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). 
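Illustrative sketch, not part of the vendored diff: an emptyDir backed by the new StorageMediumHugePages medium (the pod would still need to request hugepages resources for the kubelet to back it):

package example

import v1 "k8s.io/api/core/v1"

// hugePagesVolume uses the new StorageMediumHugePages medium for an emptyDir.
func hugePagesVolume() v1.Volume {
	return v1.Volume{
		Name: "hugepages",
		VolumeSource: v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumHugePages},
		},
	}
}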
- // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` } @@ -869,8 +919,34 @@ type QuobyteVolumeSource struct { Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"` } +// FlexPersistentVolumeSource represents a generic persistent volume resource that is +// provisioned/attached using an exec based plugin. +type FlexPersistentVolumeSource struct { + // Driver is the name of the driver to use for this volume. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` + // Optional: Extra command options if any. + // +optional + Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"` +} + // FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +// provisioned/attached using an exec based plugin. type FlexVolumeSource struct { // Driver is the name of the driver to use for this volume. Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` @@ -903,12 +979,12 @@ type FlexVolumeSource struct { // ownership management and SELinux relabeling. type AWSElasticBlockStoreVolumeSource struct { // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). - // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
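Illustrative sketch, not part of the vendored diff: the new FlexPersistentVolumeSource passes its Options map straight through to the exec-based plugin and takes a namespaced SecretReference; driver, secret and option values below are placeholders:

package example

import v1 "k8s.io/api/core/v1"

// flexPVSource sketches the PV-level FlexVolume variant added above.
func flexPVSource() *v1.FlexPersistentVolumeSource {
	return &v1.FlexPersistentVolumeSource{
		Driver: "example.com/nfs", // placeholder driver name
		FSType: "ext4",
		SecretRef: &v1.SecretReference{
			Name:      "flex-credentials",
			Namespace: "kube-system",
		},
		Options: map[string]string{"server": "nfs.example.com", "share": "/exports"},
	}
}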
- // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` @@ -920,7 +996,7 @@ type AWSElasticBlockStoreVolumeSource struct { Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". // If omitted, the default is "false". - // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` } @@ -928,6 +1004,10 @@ type AWSElasticBlockStoreVolumeSource struct { // Represents a volume that is populated with the contents of a git repository. // Git repo volumes do not support ownership management. // Git repo volumes support SELinux relabeling. +// +// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an +// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir +// into the Pod's container. type GitRepoVolumeSource struct { // Repository URL Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"` @@ -949,7 +1029,7 @@ type GitRepoVolumeSource struct { // Secret volumes support ownership management and SELinux relabeling. type SecretVolumeSource struct { // Name of the secret in the pod's namespace to use. - // More info: http://kubernetes.io/docs/user-guide/volumes#secrets + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret // +optional SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"` // If unspecified, each key-value pair in the Data field of the referenced @@ -1003,17 +1083,17 @@ type SecretProjection struct { // NFS volumes do not support ownership management or SELinux relabeling. type NFSVolumeSource struct { // Server is the hostname or IP address of the NFS server. - // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs Server string `json:"server" protobuf:"bytes,1,opt,name=server"` // Path that is exported by the NFS server. - // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs Path string `json:"path" protobuf:"bytes,2,opt,name=path"` // ReadOnly here will force // the NFS export to be mounted with read-only permissions. // Defaults to false. - // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } @@ -1022,20 +1102,21 @@ type NFSVolumeSource struct { // ISCSI volumes can only be mounted as read/write once. // ISCSI volumes support ownership management and SELinux relabeling. type ISCSIVolumeSource struct { - // iSCSI target portal. The portal is either an IP or ip_addr:port if the port + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). 
TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` // Target iSCSI Qualified Name. IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` - // iSCSI target lun number. + // iSCSI Target Lun number. Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). // +optional ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://kubernetes.io/docs/user-guide/volumes#iscsi + // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` @@ -1043,20 +1124,82 @@ type ISCSIVolumeSource struct { // Defaults to false. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` - // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). // +optional Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` + // whether support iSCSI Discovery CHAP authentication + // +optional + DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"` + // whether support iSCSI Session CHAP authentication + // +optional + SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"` + // CHAP Secret for iSCSI target and initiator authentication + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"` + // Custom iSCSI Initiator Name. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. + // +optional + InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"` +} + +// ISCSIPersistentVolumeSource represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIPersistentVolumeSource struct { + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` + // Target iSCSI Qualified Name. + IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` + // iSCSI Target Lun number. + Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). + // +optional + ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
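Illustrative sketch, not part of the vendored diff: the pod-level ISCSI source with the new CHAP and initiator fields; portal, IQN and secret names are placeholders:

package example

import v1 "k8s.io/api/core/v1"

// iscsiWithCHAP enables both discovery and session CHAP authentication and
// overrides the initiator name, using the fields added in this revision.
func iscsiWithCHAP() *v1.ISCSIVolumeSource {
	initiator := "iqn.1996-04.de.suse:01:example-initiator"
	return &v1.ISCSIVolumeSource{
		TargetPortal:      "10.0.0.2:3260",
		IQN:               "iqn.2001-04.com.example:storage.disk1",
		Lun:               0,
		DiscoveryCHAPAuth: true,
		SessionCHAPAuth:   true,
		SecretRef:         &v1.LocalObjectReference{Name: "chap-secret"},
		InitiatorName:     &initiator,
	}
}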
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` + // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` + // whether support iSCSI Discovery CHAP authentication + // +optional + DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"` + // whether support iSCSI Session CHAP authentication + // +optional + SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"` + // CHAP Secret for iSCSI target and initiator authentication + // +optional + SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"` + // Custom iSCSI Initiator Name. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. + // +optional + InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"` } // Represents a Fibre Channel volume. // Fibre Channel volumes can only be mounted as read/write once. // Fibre Channel volumes support ownership management and SELinux relabeling. type FCVolumeSource struct { - // Required: FC target worldwide names (WWNs) - TargetWWNs []string `json:"targetWWNs" protobuf:"bytes,1,rep,name=targetWWNs"` - // Required: FC target lun number - Lun *int32 `json:"lun" protobuf:"varint,2,opt,name=lun"` + // Optional: FC target worldwide names (WWNs) + // +optional + TargetWWNs []string `json:"targetWWNs,omitempty" protobuf:"bytes,1,rep,name=targetWWNs"` + // Optional: FC target lun number + // +optional + Lun *int32 `json:"lun,omitempty" protobuf:"varint,2,opt,name=lun"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. @@ -1067,6 +1210,10 @@ type FCVolumeSource struct { // the ReadOnly setting in VolumeMounts. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` + // Optional: FC volume world wide identifiers (wwids) + // Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + // +optional + WWIDs []string `json:"wwids,omitempty" protobuf:"bytes,5,rep,name=wwids"` } // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. @@ -1081,6 +1228,22 @@ type AzureFileVolumeSource struct { ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +type AzureFilePersistentVolumeSource struct { + // the name of secret that contains Azure Storage Account Name and Key + SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"` + // Share Name + ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. 
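Illustrative sketch, not part of the vendored diff: a Fibre Channel volume addressed by the new wwids field instead of the targetWWNs/lun pair (the comment above says the two must not be combined); the WWID is a placeholder:

package example

import v1 "k8s.io/api/core/v1"

// fcByWWID identifies the FC volume by world wide identifier only.
func fcByWWID() *v1.FCVolumeSource {
	return &v1.FCVolumeSource{
		WWIDs:  []string{"3600508b400105e210000900000490000"}, // placeholder WWID
		FSType: "xfs",
	}
}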
+ // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + // the namespace of the secret that contains Azure Storage Account Name and Key + // default is the same as the Pod + // +optional + SecretNamespace *string `json:"secretNamespace" protobuf:"bytes,4,opt,name=secretNamespace"` +} + // Represents a vSphere volume resource. type VsphereVirtualDiskVolumeSource struct { // Path that identifies vSphere volume vmdk @@ -1090,6 +1253,12 @@ type VsphereVirtualDiskVolumeSource struct { // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Storage Policy Based Management (SPBM) profile name. + // +optional + StoragePolicyName string `json:"storagePolicyName,omitempty" protobuf:"bytes,3,opt,name=storagePolicyName"` + // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + // +optional + StoragePolicyID string `json:"storagePolicyID,omitempty" protobuf:"bytes,4,opt,name=storagePolicyID"` } // Represents a Photon Controller persistent disk resource. @@ -1103,11 +1272,16 @@ type PhotonPersistentDiskVolumeSource struct { } type AzureDataDiskCachingMode string +type AzureDataDiskKind string const ( AzureDataDiskCachingNone AzureDataDiskCachingMode = "None" AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly" AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite" + + AzureSharedBlobDisk AzureDataDiskKind = "Shared" + AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated" + AzureManagedDisk AzureDataDiskKind = "Managed" ) // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. @@ -1128,6 +1302,8 @@ type AzureDiskVolumeSource struct { // the ReadOnly setting in VolumeMounts. // +optional ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"` + // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"` } // PortworxVolumeSource represents a Portworx volume resource. @@ -1156,13 +1332,13 @@ type ScaleIOVolumeSource struct { // Flag to enable/disable SSL communication with Gateway, default false // +optional SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"` - // The name of the Protection Domain for the configured storage (defaults to "default"). + // The name of the ScaleIO Protection Domain for the configured storage. // +optional ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"` - // The Storage Pool associated with the protection domain (defaults to "default"). + // The ScaleIO Storage Pool associated with the protection domain. // +optional StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` - // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
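Illustrative sketch, not part of the vendored diff: attaching a Storage Policy Based Management profile to a vSphere volume via the new StoragePolicyName field. VolumePath is defined earlier in this struct but outside the visible hunk, so it is assumed here; the vmdk path and policy name are placeholders:

package example

import v1 "k8s.io/api/core/v1"

// vsphereWithPolicy requests a named SPBM profile for the disk.
func vsphereWithPolicy() *v1.VsphereVirtualDiskVolumeSource {
	return &v1.VsphereVirtualDiskVolumeSource{
		VolumePath:        "[datastore1] volumes/example.vmdk", // assumed field name
		FSType:            "ext4",
		StoragePolicyName: "gold",
	}
}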
// +optional StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` // The name of a volume already created in the ScaleIO system @@ -1179,6 +1355,97 @@ type ScaleIOVolumeSource struct { ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"` } +// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume +type ScaleIOPersistentVolumeSource struct { + // The host address of the ScaleIO API Gateway. + Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"` + // The name of the storage system as configured in ScaleIO. + System string `json:"system" protobuf:"bytes,2,opt,name=system"` + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + SecretRef *SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"` + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"` + // The name of the ScaleIO Protection Domain for the configured storage. + // +optional + ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"` + // The ScaleIO Storage Pool associated with the protection domain. + // +optional + StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // +optional + StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"` +} + +// Represents a StorageOS persistent volume resource. +type StorageOSVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"` + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // Defaults to false (read/write). 
ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` +} + +// Represents a StorageOS persistent volume resource. +type StorageOSPersistentVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"` + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` +} + // Adapts a ConfigMap into a volume. // // The contents of the target ConfigMap's Data field will be presented in a @@ -1235,6 +1502,30 @@ type ConfigMapProjection struct { Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } +// ServiceAccountTokenProjection represents a projected service account token +// volume. This projection can be used to insert a service account token into +// the pods runtime filesystem for use against APIs (Kubernetes API Server or +// otherwise). +type ServiceAccountTokenProjection struct { + // Audience is the intended audience of the token. A recipient of a token + // must identify itself with an identifier specified in the audience of the + // token, and otherwise should reject the token. The audience defaults to the + // identifier of the apiserver. + //+optional + Audience string `json:"audience,omitempty" protobuf:"bytes,1,rep,name=audience"` + // ExpirationSeconds is the requested duration of validity of the service + // account token. As the token approaches expiration, the kubelet volume + // plugin will proactively rotate the service account token. The kubelet will + // start trying to rotate the token if the token is older than 80 percent of + // its time to live or if the token is older than 24 hours.Defaults to 1 hour + // and must be at least 10 minutes. 
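
The persistent-volume variants added above (ScaleIOPersistentVolumeSource, StorageOSPersistentVolumeSource) differ from their pod-level counterparts mainly in the secret reference: they take a namespaced reference instead of one local to the pod. An illustrative sketch for ScaleIO, assuming k8s.io/api/core/v1 and a SecretReference carrying Name and Namespace; all values are hypothetical:

    package main

    import v1 "k8s.io/api/core/v1"

    func main() {
        pvSource := v1.ScaleIOPersistentVolumeSource{
            Gateway: "https://scaleio-gateway.example:443/api", // hypothetical gateway address
            System:  "scaleio-system",
            // Unlike the pod-level ScaleIOVolumeSource, the persistent variant references
            // a secret by name and namespace rather than relative to the pod.
            SecretRef: &v1.SecretReference{
                Name:      "scaleio-secret",
                Namespace: "default",
            },
            ProtectionDomain: "pd-1",
            StoragePool:      "sp-1",
            StorageMode:      "ThinProvisioned",
            VolumeName:       "vol-1",
            FSType:           "xfs",
        }
        _ = pvSource
    }
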
+ //+optional + ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"` + // Path is the path relative to the mount point of the file to project the + // token into. + Path string `json:"path" protobuf:"bytes,3,opt,name=path"` +} + // Represents a projected volume source type ProjectedVolumeSource struct { // list of volume projections @@ -1253,11 +1544,17 @@ type VolumeProjection struct { // all types below are the supported types for projection into the same volume // information about the secret data to project + // +optional Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"` // information about the downwardAPI data to project + // +optional DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"` // information about the configMap data to project + // +optional ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"` + // information about the serviceAccountToken data to project + // +optional + ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty" protobuf:"bytes,4,opt,name=serviceAccountToken"` } const ( @@ -1282,6 +1579,67 @@ type KeyToPath struct { Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"` } +// Local represents directly-attached storage with node affinity (Beta feature) +type LocalVolumeSource struct { + // The full path to the volume on the node. + // It can be either a directory or block device (disk, partition, ...). + // Directories can be represented only by PersistentVolume with VolumeMode=Filesystem. + // Block devices can be represented only by VolumeMode=Block, which also requires the + // BlockVolume alpha feature gate to be enabled. + Path string `json:"path" protobuf:"bytes,1,opt,name=path"` +} + +// Represents storage that is managed by an external CSI volume driver (Beta feature) +type CSIPersistentVolumeSource struct { + // Driver is the name of the driver to use for this volume. + // Required. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` + + // VolumeHandle is the unique volume name returned by the CSI volume + // plugin’s CreateVolume to refer to the volume on all subsequent calls. + // Required. + VolumeHandle string `json:"volumeHandle" protobuf:"bytes,2,opt,name=volumeHandle"` + + // Optional: The value to pass to ControllerPublishVolumeRequest. + // Defaults to false (read/write). + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"` + + // Attributes of the volume to publish. + // +optional + VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,5,rep,name=volumeAttributes"` + + // ControllerPublishSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // ControllerPublishVolume and ControllerUnpublishVolume calls. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. 
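
The serviceAccountToken projection added to VolumeProjection above lets a pod request a rotated, audience-scoped token. A sketch of wiring it into a projected volume, assuming the Sources field of ProjectedVolumeSource and the k8s.io/api/core/v1 import path; the audience value is hypothetical:

    package main

    import v1 "k8s.io/api/core/v1"

    func main() {
        expiry := int64(3600) // the documented default is 1 hour; the minimum is 10 minutes

        projected := v1.ProjectedVolumeSource{
            // Sources is the "list of volume projections" field of ProjectedVolumeSource.
            Sources: []v1.VolumeProjection{
                {
                    ServiceAccountToken: &v1.ServiceAccountTokenProjection{
                        Audience:          "https://kubernetes.default.svc", // hypothetical audience
                        ExpirationSeconds: &expiry,
                        Path:              "token",
                    },
                },
            },
        }
        _ = projected
    }
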
+ // +optional + ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"` + + // NodeStageSecretRef is a reference to the secret object containing sensitive + // information to pass to the CSI driver to complete the CSI NodeStageVolume + // and NodeStageVolume and NodeUnstageVolume calls. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"` + + // NodePublishSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // NodePublishVolume and NodeUnpublishVolume calls. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"` +} + // ContainerPort represents a network port in a single container. type ContainerPort struct { // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each @@ -1322,6 +1680,45 @@ type VolumeMount struct { // Defaults to "" (volume's root). // +optional SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"` + // mountPropagation determines how mounts are propagated from the host + // to container and the other way around. + // When not set, MountPropagationHostToContainer is used. + // This field is beta in 1.10. + // +optional + MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"` +} + +// MountPropagationMode describes mount propagation. +type MountPropagationMode string + +const ( + // MountPropagationNone means that the volume in a container will + // not receive new mounts from the host or other containers, and filesystems + // mounted inside the container won't be propagated to the host or other + // containers. + // Note that this mode corresponds to "private" in Linux terminology. + MountPropagationNone MountPropagationMode = "None" + // MountPropagationHostToContainer means that the volume in a container will + // receive new mounts from the host or other containers, but filesystems + // mounted inside the container won't be propagated to the host or other + // containers. + // Note that this mode is recursively applied to all mounts in the volume + // ("rslave" in Linux terminology). + MountPropagationHostToContainer MountPropagationMode = "HostToContainer" + // MountPropagationBidirectional means that the volume in a container will + // receive new mounts from the host or other containers, and its own mounts + // will be propagated from the container to the host or other containers. + // Note that this mode is recursively applied to all mounts in the volume + // ("rshared" in Linux terminology). + MountPropagationBidirectional MountPropagationMode = "Bidirectional" +) + +// volumeDevice describes a mapping of a raw block device within a container. +type VolumeDevice struct { + // name must match the name of a persistentVolumeClaim in the pod + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // devicePath is the path inside of the container that the device will be mapped to. 
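
CSIPersistentVolumeSource above is how a PersistentVolume points at an external CSI driver. A hedged sketch of populating it, assuming k8s.io/api/core/v1; the driver name, volume handle, attributes and secret names are invented for illustration:

    package main

    import v1 "k8s.io/api/core/v1"

    func main() {
        csi := v1.CSIPersistentVolumeSource{
            Driver:       "csi.example.com", // hypothetical CSI driver name
            VolumeHandle: "vol-0123456789",  // hypothetical handle returned by the driver's CreateVolume
            FSType:       "ext4",
            VolumeAttributes: map[string]string{
                "type": "fast", // opaque, driver-specific attributes
            },
            // Secrets for the individual CSI calls; each reference is optional.
            ControllerPublishSecretRef: &v1.SecretReference{Name: "csi-controller-secret", Namespace: "kube-system"},
            NodeStageSecretRef:         &v1.SecretReference{Name: "csi-node-secret", Namespace: "kube-system"},
            NodePublishSecretRef:       &v1.SecretReference{Name: "csi-node-secret", Namespace: "kube-system"},
        }
        _ = csi
    }
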
+ DevicePath string `json:"devicePath" protobuf:"bytes,2,opt,name=devicePath"` } // EnvVar represents an environment variable present in a Container. @@ -1349,11 +1746,11 @@ type EnvVar struct { // EnvVarSource represents a source for the value of an EnvVar. type EnvVarSource struct { // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // spec.nodeName, spec.serviceAccountName, status.podIP. + // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. // +optional FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"` // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. // +optional ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"` // Selects a key of a ConfigMap. @@ -1409,7 +1806,7 @@ type SecretKeySelector struct { // EnvFromSource represents the source of a set of ConfigMaps type EnvFromSource struct { - // An optional identifer to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. // +optional Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"` // The ConfigMap to select from @@ -1492,6 +1889,9 @@ type TCPSocketAction struct { // Number must be in the range 1 to 65535. // Name must be an IANA_SVC_NAME. Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"` + // Optional: Host name to connect to, defaults to the pod IP. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"` } // ExecAction describes a "run in container" action. @@ -1511,12 +1911,12 @@ type Probe struct { // The action taken to determine the health of a container Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"` // Number of seconds after the container has started before liveness probes are initiated. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"` // Number of seconds after which the probe times out. // Defaults to 1 second. Minimum value is 1. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"` // How often (in seconds) to perform the probe. @@ -1574,13 +1974,13 @@ type Capabilities struct { // ResourceRequirements describes the compute resource requirements. type ResourceRequirements struct { // Limits describes the maximum amount of compute resources allowed. 
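
Two container-level additions appear above: mountPropagation on VolumeMount and the alpha volumeDevices list backed by VolumeDevice. A small sketch, assuming k8s.io/api/core/v1; the Name and MountPath fields of VolumeMount come from the unchanged part of that struct, and all names are hypothetical:

    package main

    import v1 "k8s.io/api/core/v1"

    func main() {
        propagation := v1.MountPropagationHostToContainer // also the default when mountPropagation is unset

        mount := v1.VolumeMount{
            Name:             "host-data",
            MountPath:        "/data",
            MountPropagation: &propagation,
        }

        // volumeDevices maps a raw block PersistentVolumeClaim into the container (alpha).
        device := v1.VolumeDevice{
            Name:       "raw-block-pvc", // must match a persistentVolumeClaim name in the pod
            DevicePath: "/dev/xvda",
        }
        _, _ = mount, device
    }
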
- // More info: http://kubernetes.io/docs/user-guide/compute-resources/ + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ // +optional Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, // otherwise to an implementation-defined value. - // More info: http://kubernetes.io/docs/user-guide/compute-resources/ + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ // +optional Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` } @@ -1597,7 +1997,9 @@ type Container struct { // Cannot be updated. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Docker image name. - // More info: http://kubernetes.io/docs/user-guide/images + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. // +optional Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` // Entrypoint array. Not executed within a shell. @@ -1607,7 +2009,7 @@ type Container struct { // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, // regardless of whether the variable exists or not. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` // Arguments to the entrypoint. @@ -1617,7 +2019,7 @@ type Container struct { // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, // regardless of whether the variable exists or not. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` // Container's working directory. @@ -1634,6 +2036,8 @@ type Container struct { // accessible from the network. // Cannot be updated. // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` // List of sources to populate environment variables in the container. // The keys defined within a source must be a C_IDENTIFIER. All invalid keys @@ -1646,26 +2050,36 @@ type Container struct { // List of environment variables to set in the container. // Cannot be updated. // +optional + // +patchMergeKey=name + // +patchStrategy=merge Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` // Compute Resources required by this container. // Cannot be updated. 
- // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` // Pod volumes to mount into the container's filesystem. // Cannot be updated. // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // This is an alpha feature and may change in the future. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` // Periodic probe of container liveness. // Container will be restarted if the probe fails. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` // Periodic probe of container service readiness. // Container will be removed from service endpoints if the probe fails. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` // Actions that the management system should take in response to container lifecycle events. @@ -1694,11 +2108,12 @@ type Container struct { // One of Always, Never, IfNotPresent. // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/images#updating-images + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images // +optional ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` // Security options the pod should run with. - // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md + // More info: https://kubernetes.io/docs/concepts/policy/security-context/ + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ // +optional SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` @@ -1749,7 +2164,7 @@ type Lifecycle struct { // PostStart is called immediately after a container is created. If the handler fails, // the container is terminated and restarted according to its restart policy. // Other management of the container blocks until the hook completes. - // More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details + // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` // PreStop is called immediately before a container is terminated. @@ -1757,7 +2172,7 @@ type Lifecycle struct { // The reason for termination is passed to the handler. 
// Regardless of the outcome of the handler, the container is eventually terminated. // Other management of the container blocks until the hook completes. - // More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details + // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"` } @@ -1849,13 +2264,12 @@ type ContainerStatus struct { // garbage collection. This value will get capped at 5 by GC. RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"` // The image the container is running. - // More info: http://kubernetes.io/docs/user-guide/images + // More info: https://kubernetes.io/docs/concepts/containers/images // TODO(dchen1107): Which image the container is running with? Image string `json:"image" protobuf:"bytes,6,opt,name=image"` // ImageID of the container's image. ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"` // Container's ID in the format 'docker://'. - // More info: http://kubernetes.io/docs/user-guide/container-environment#container-information // +optional ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` } @@ -1898,17 +2312,18 @@ const ( // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler // can't schedule the pod right now, for example due to insufficient resources in the cluster. PodReasonUnschedulable = "Unschedulable" + // ContainersReady indicates whether all containers in the pod are ready. + ContainersReady PodConditionType = "ContainersReady" ) // PodCondition contains details for the current condition of this pod. type PodCondition struct { // Type is the type of the condition. - // Currently only Ready. - // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"` // Status is the status of the condition. // Can be True, False, Unknown. - // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` // Last time we probed the condition. // +optional @@ -1954,6 +2369,15 @@ const ( // determined by kubelet) DNS settings. DNSDefault DNSPolicy = "Default" + // DNSNone indicates that the pod should use empty DNS settings. DNS + // parameters such as nameservers and search paths should be defined via + // DNSConfig. + DNSNone DNSPolicy = "None" +) + +const ( + // DefaultTerminationGracePeriodSeconds indicates the default duration in + // seconds a pod needs to terminate gracefully. DefaultTerminationGracePeriodSeconds = 30 ) @@ -1965,17 +2389,23 @@ type NodeSelector struct { NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"` } -// A null or empty node selector term matches no objects. +// A null or empty node selector term matches no objects. The requirements of +// them are ANDed. +// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. type NodeSelectorTerm struct { - //Required. A list of node selector requirements. The requirements are ANDed. 
- MatchExpressions []NodeSelectorRequirement `json:"matchExpressions" protobuf:"bytes,1,rep,name=matchExpressions"` + // A list of node selector requirements by node's labels. + // +optional + MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"` + // A list of node selector requirements by node's fields. + // +optional + MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty" protobuf:"bytes,2,rep,name=matchFields"` } // A node selector requirement is a selector that contains values, a key, and an operator // that relates the key and values. type NodeSelectorRequirement struct { // The label key that the selector applies to. - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` // Represents a key's relationship to a set of values. // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"` @@ -2001,6 +2431,27 @@ const ( NodeSelectorOpLt NodeSelectorOperator = "Lt" ) +// A topology selector term represents the result of label queries. +// A null or empty topology selector term matches no objects. +// The requirements of them are ANDed. +// It provides a subset of functionality as NodeSelectorTerm. +// This is an alpha feature and may change in the future. +type TopologySelectorTerm struct { + // A list of topology selector requirements by labels. + // +optional + MatchLabelExpressions []TopologySelectorLabelRequirement `json:"matchLabelExpressions,omitempty" protobuf:"bytes,1,rep,name=matchLabelExpressions"` +} + +// A topology selector requirement is a selector that matches given label. +// This is an alpha feature and may change in the future. +type TopologySelectorLabelRequirement struct { + // The label key that the selector applies to. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + // An array of string values. One value must match the label to be selected. + // Each entry in Values is ORed. + Values []string `json:"values" protobuf:"bytes,2,rep,name=values"` +} + // Affinity is a group of affinity scheduling rules. type Affinity struct { // Describes node affinity scheduling rules for the pod. @@ -2026,6 +2477,7 @@ type PodAffinity struct { // podAffinityTerm are intersected, i.e. all terms must be satisfied. // +optional // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the affinity requirements specified by this field are not met at // scheduling time, the pod will not be scheduled onto the node. // If the affinity requirements specified by this field cease to be met @@ -2060,6 +2512,7 @@ type PodAntiAffinity struct { // podAffinityTerm are intersected, i.e. all terms must be satisfied. // +optional // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the anti-affinity requirements specified by this field are not met at // scheduling time, the pod will not be scheduled onto the node. 
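
NodeSelectorTerm above gains MatchFields next to MatchExpressions, selecting nodes by field rather than by label. A sketch, assuming k8s.io/api/core/v1 plus the Values field and In operator of NodeSelectorRequirement from the unchanged part of the file; the node name is hypothetical, and metadata.name is the field key this is intended for:

    package main

    import v1 "k8s.io/api/core/v1"

    func main() {
        term := v1.NodeSelectorTerm{
            // MatchFields matches on node fields (metadata.name) instead of node labels.
            MatchFields: []v1.NodeSelectorRequirement{
                {
                    Key:      "metadata.name",
                    Operator: v1.NodeSelectorOpIn,
                    Values:   []string{"node-1"}, // hypothetical node name
                },
            },
        }
        _ = term
    }
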
// If the anti-affinity requirements specified by this field cease to be met @@ -2095,7 +2548,7 @@ type WeightedPodAffinityTerm struct { // relative to the given namespace(s)) that this pod should be // co-located (affinity) or not co-located (anti-affinity) with, // where co-located is defined as running on a node whose value of -// the label with key tches that of any node on which +// the label with key matches that of any node on which // a pod of the set of pods is running type PodAffinityTerm struct { // A label query over a set of resources, in this case pods. @@ -2103,16 +2556,14 @@ type PodAffinityTerm struct { LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` // namespaces specifies which namespaces the labelSelector applies to (matches against); // null or empty list means "this pod's namespace" + // +optional Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"` // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching // the labelSelector in the specified namespaces, where co-located is defined as running on a node // whose value of the label with key topologyKey matches that of any node on which any of the // selected pods is running. - // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" - // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); - // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. - // +optional - TopologyKey string `json:"topologyKey,omitempty" protobuf:"bytes,3,opt,name=topologyKey"` + // Empty topologyKey is not allowed. + TopologyKey string `json:"topologyKey" protobuf:"bytes,3,opt,name=topologyKey"` } // Node affinity is a group of node affinity scheduling rules. @@ -2155,11 +2606,11 @@ type PreferredSchedulingTerm struct { Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"` } -// The node this Taint is attached to has the effect "effect" on -// any pod that that does not tolerate the Taint. +// The node this Taint is attached to has the "effect" on +// any pod that does not tolerate the Taint. type Taint struct { // Required. The taint key to be applied to a node. - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` // Required. The taint value corresponding to the taint key. // +optional Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` @@ -2170,7 +2621,7 @@ type Taint struct { // TimeAdded represents the time at which the taint was added. // It is only written for NoExecute taints. // +optional - TimeAdded metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"` + TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"` } type TaintEffect string @@ -2190,6 +2641,7 @@ const ( // Kubelet without going through the scheduler to start. // Enforced by Kubelet and the scheduler. // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" + // Evict any already-running pods that do not tolerate the taint. // Currently enforced by NodeController. TaintEffectNoExecute TaintEffect = "NoExecute" @@ -2201,7 +2653,7 @@ type Toleration struct { // Key is the taint key that the toleration applies to. Empty means match all taint keys. 
// If the key is empty, operator must be Exists; this combination means to match all values and all keys. // +optional - Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"` // Operator represents a key's relationship to the value. // Valid operators are Exists and Equal. Defaults to Equal. // Exists is equivalent to wildcard for value, so that a pod can @@ -2232,33 +2684,20 @@ const ( TolerationOpEqual TolerationOperator = "Equal" ) -const ( - // This annotation key will be used to contain an array of v1 JSON encoded Containers - // for init containers. The annotation will be placed into the internal type and cleared. - // This key is only recognized by version >= 1.4. - PodInitContainersBetaAnnotationKey = "pod.beta.kubernetes.io/init-containers" - // This annotation key will be used to contain an array of v1 JSON encoded Containers - // for init containers. The annotation will be placed into the internal type and cleared. - // This key is recognized by version >= 1.3. For version 1.4 code, this key - // will have its value copied to the beta key. - PodInitContainersAnnotationKey = "pod.alpha.kubernetes.io/init-containers" - // This annotation key will be used to contain an array of v1 JSON encoded - // ContainerStatuses for init containers. The annotation will be placed into the internal - // type and cleared. This key is only recognized by version >= 1.4. - PodInitContainerStatusesBetaAnnotationKey = "pod.beta.kubernetes.io/init-container-statuses" - // This annotation key will be used to contain an array of v1 JSON encoded - // ContainerStatuses for init containers. The annotation will be placed into the internal - // type and cleared. This key is recognized by version >= 1.3. For version 1.4 code, - // this key will have its value copied to the beta key. - PodInitContainerStatusesAnnotationKey = "pod.alpha.kubernetes.io/init-container-statuses" -) +// PodReadinessGate contains the reference to a pod condition +type PodReadinessGate struct { + // ConditionType refers to a condition in the pod's condition list with matching type. + ConditionType PodConditionType `json:"conditionType" protobuf:"bytes,1,opt,name=conditionType,casttype=PodConditionType"` +} // PodSpec is a description of a pod. type PodSpec struct { // List of volumes that can be mounted by containers belonging to the pod. - // More info: http://kubernetes.io/docs/user-guide/volumes + // More info: https://kubernetes.io/docs/concepts/storage/volumes // +optional - Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` // List of initialization containers belonging to the pod. // Init containers are executed in order prior to containers being started. If any // init container fails, the pod is considered to have failed and is handled according @@ -2271,18 +2710,21 @@ type PodSpec struct { // in a similar fashion. // Init containers cannot currently be added or removed. // Cannot be updated. 
- // More info: http://kubernetes.io/docs/user-guide/containers + // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // +patchMergeKey=name + // +patchStrategy=merge InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"` // List of containers belonging to the pod. // Containers cannot currently be added or removed. // There must be at least one container in a Pod. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/containers + // +patchMergeKey=name + // +patchStrategy=merge Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. - // More info: http://kubernetes.io/docs/user-guide/pod-states#restartpolicy + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy // +optional RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"` // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. @@ -2299,20 +2741,22 @@ type PodSpec struct { // Value must be a positive integer. // +optional ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"` - // Set DNS policy for containers within the pod. - // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. + // Set DNS policy for the pod. // Defaults to "ClusterFirst". - // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. // +optional DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"` // NodeSelector is a selector which must be true for the pod to fit on a node. // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: http://kubernetes.io/docs/user-guide/node-selection/README + // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ // +optional NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ // +optional ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. @@ -2345,6 +2789,15 @@ type PodSpec struct { // +k8s:conversion-gen=false // +optional HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"` + // Share a single process namespace between all of the containers in a pod. 
+ // When this is set containers will be able to view and signal processes from other containers + // in the same pod, and the first process in each container will not be assigned PID 1. + // HostPID and ShareProcessNamespace cannot both be set. + // Optional: Default to false. + // This field is alpha-level and is honored only by servers that enable the PodShareProcessNamespace feature. + // +k8s:conversion-gen=false + // +optional + ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"` // SecurityContext holds pod-level security attributes and common container settings. // Optional: Defaults to empty. See type description for default values of each field. // +optional @@ -2352,8 +2805,10 @@ type PodSpec struct { // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. For example, // in the case of docker, only DockerConfig type secrets are honored. - // More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod // +optional + // +patchMergeKey=name + // +patchStrategy=merge ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"` // Specifies the hostname of the Pod // If not specified, the pod's hostname will be set to a system-defined value. @@ -2373,6 +2828,48 @@ type PodSpec struct { // If specified, the pod's tolerations. // +optional Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"` + // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + // file if specified. This is only valid for non-hostNetwork pods. + // +optional + // +patchMergeKey=ip + // +patchStrategy=merge + HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"` + // If specified, indicates the pod's priority. "system-node-critical" and + // "system-cluster-critical" are two special keywords which indicate the + // highest priorities with the former being the highest priority. Any other + // name must be defined by creating a PriorityClass object with that name. + // If not specified, the pod priority will be default or zero if there is no + // default. + // +optional + PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"` + // The priority value. Various system components use this field to find the + // priority of the pod. When Priority Admission Controller is enabled, it + // prevents users from setting this field. The admission controller populates + // this field from PriorityClassName. + // The higher the value, the higher the priority. + // +optional + Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"` + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // +optional + DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"` + + // If specified, all readiness gates will be evaluated for pod readiness. 
+ // A pod is ready when all its containers are ready AND + // all conditions specified in the readiness gates have status equal to "True" + // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md + // +optional + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"` +} + +// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the +// pod's hosts file. +type HostAlias struct { + // IP address of the host file entry. + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + // Hostnames for the above IP address. + Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"` } // PodSecurityContext holds pod-level security attributes and common container settings. @@ -2393,6 +2890,13 @@ type PodSecurityContext struct { // for that container. // +optional RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"` + // The GID to run the entrypoint of the container process. + // Uses runtime default if unset. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,6,opt,name=runAsGroup"` // Indicates that the container must run as a non-root user. // If true, the Kubelet will validate the image at runtime to ensure that it // does not run as UID 0 (root) and fail to start the container if it does. @@ -2417,6 +2921,10 @@ type PodSecurityContext struct { // If unset, the Kubelet will not modify the ownership and permissions of any volume. // +optional FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"` + // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + // sysctls (by the container runtime) might fail to launch. + // +optional + Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"` } // PodQOSClass defines the supported qos classes of Pods. @@ -2431,24 +2939,81 @@ const ( PodQOSBestEffort PodQOSClass = "BestEffort" ) +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +type PodDNSConfig struct { + // A list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // Duplicated nameservers will be removed. + // +optional + Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"` + // A list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // Duplicated search paths will be removed. + // +optional + Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"` + // A list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy. + // Duplicated entries will be removed. Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +optional + Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"` +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +type PodDNSConfigOption struct { + // Required. 
+ Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // +optional + Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` +} + // PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system. +// state of a system, especially if the node that hosts the pod cannot contact the control +// plane. type PodStatus struct { - // Current condition of the pod. - // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-phase + // The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. + // The conditions array, the reason and message fields, and the individual container status + // arrays contain more detail about the pod's status. + // There are five possible phase values: + // + // Pending: The pod has been accepted by the Kubernetes system, but one or more of the + // container images has not been created. This includes time before being scheduled as + // well as time spent downloading images over the network, which could take a while. + // Running: The pod has been bound to a node, and all of the containers have been created. + // At least one container is still running, or is in the process of starting or restarting. + // Succeeded: All containers in the pod have terminated in success, and will not be restarted. + // Failed: All containers in the pod have terminated, and at least one container has + // terminated in failure. The container either exited with non-zero status or was terminated + // by the system. + // Unknown: For some reason the state of the pod could not be obtained, typically due to an + // error in communicating with the host of the pod. + // + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase // +optional Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"` // Current service state of pod. - // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions // +optional + // +patchMergeKey=type + // +patchStrategy=merge Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` // A human readable message indicating details about why the pod is in this condition. // +optional Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` // A brief CamelCase message indicating details about why the pod is in this state. - // e.g. 'OutOfDisk' + // e.g. 'Evicted' // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be + // scheduled right away as preemption victims receive their graceful termination periods. + // This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide + // to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to + // give the resources on this node to a higher priority pod that is created after preemption. + // As a result, this field may be different than PodSpec.nodeName when the pod is + // scheduled. + // +optional + NominatedNodeName string `json:"nominatedNodeName,omitempty" protobuf:"bytes,11,opt,name=nominatedNodeName"` // IP address of the host to which the pod is assigned. Empty if not yet scheduled. 
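
Several of the PodSpec and DNS additions above compose naturally in one spec: shareProcessNamespace, dnsPolicy None with dnsConfig, hostAliases, priorityClassName and readinessGates. An illustrative sketch, assuming k8s.io/api/core/v1; every image, address and class name below is made up:

    package main

    import v1 "k8s.io/api/core/v1"

    func main() {
        share := true
        ndots := "2"

        spec := v1.PodSpec{
            Containers: []v1.Container{{Name: "app", Image: "example/app:1.0"}}, // hypothetical image

            // Alpha: one PID namespace shared by all containers in the pod.
            ShareProcessNamespace: &share,

            // DNSPolicy "None" plus DNSConfig gives the pod fully custom resolver settings.
            DNSPolicy: v1.DNSNone,
            DNSConfig: &v1.PodDNSConfig{
                Nameservers: []string{"10.0.0.10"}, // hypothetical resolver
                Searches:    []string{"example.svc.cluster.local"},
                Options:     []v1.PodDNSConfigOption{{Name: "ndots", Value: &ndots}},
            },

            // Extra hosts-file entries for non-hostNetwork pods.
            HostAliases: []v1.HostAlias{{IP: "203.0.113.10", Hostnames: []string{"legacy.example.internal"}}},

            PriorityClassName: "high-priority", // must name an existing PriorityClass

            // The pod only reports Ready once this extra condition is True as well.
            ReadinessGates: []v1.PodReadinessGate{{ConditionType: "example.com/feature-ready"}},
        }
        _ = spec
    }
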
// +optional @@ -2466,50 +3031,53 @@ type PodStatus struct { // The list has one entry per init container in the manifest. The most recent successful // init container will have ready = true, the most recently started container will have // startTime set. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"` // The list has one entry per container in the manifest. Each entry is currently the output // of `docker inspect`. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes - // More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md + // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded type PodStatusResult struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Most recently observed status of the pod. // This data may not be up to date. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Pod is a collection of containers that can run on a host. This resource is created // by clients and scheduled onto hosts. type Pod struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -2517,58 +3085,63 @@ type Pod struct { // This data may not be up to date. // Populated by the system. // Read-only. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // PodList is a list of Pods. type PodList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of pods. - // More info: http://kubernetes.io/docs/user-guide/pods + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"` } // PodTemplateSpec describes the data a pod should have when created from a template type PodTemplateSpec struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodTemplate describes a template for creating copies of a predefined pod. type PodTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Template defines the pods that will be created from this pod template. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // PodTemplateList is a list of PodTemplates. type PodTemplateList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -2581,7 +3154,7 @@ type ReplicationControllerSpec struct { // Replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. 
- // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller // +optional Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` @@ -2595,7 +3168,7 @@ type ReplicationControllerSpec struct { // If Selector is empty, it is defaulted to the labels present on the Pod template. // Label keys and values that must match in order to be controlled by this replication // controller, if empty defaulted to labels on Pod template. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` @@ -2607,7 +3180,7 @@ type ReplicationControllerSpec struct { // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a TemplateRef. - // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` } @@ -2616,7 +3189,7 @@ type ReplicationControllerSpec struct { // controller. type ReplicationControllerStatus struct { // Replicas is the most recently oberved number of replicas. - // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` // The number of pods that have labels matching the labels of the pod template of the replication controller. @@ -2637,6 +3210,8 @@ type ReplicationControllerStatus struct { // Represents the latest available observations of a replication controller's current state. // +optional + // +patchMergeKey=type + // +patchStrategy=merge Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` } @@ -2667,7 +3242,10 @@ type ReplicationControllerCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } -// +genclient=true +// +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicationController represents the configuration of a replication controller. type ReplicationController struct { @@ -2675,12 +3253,12 @@ type ReplicationController struct { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the replication controller. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -2688,21 +3266,23 @@ type ReplicationController struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ReplicationControllerList is a collection of replication controllers. type ReplicationControllerList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of replication controllers. - // More info: http://kubernetes.io/docs/user-guide/replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -2717,6 +3297,24 @@ const ( ServiceAffinityNone ServiceAffinity = "None" ) +const DefaultClientIPServiceAffinitySeconds int32 = 10800 + +// SessionAffinityConfig represents the configurations of session affinity. +type SessionAffinityConfig struct { + // clientIP contains the configurations of Client IP based session affinity. + // +optional + ClientIP *ClientIPConfig `json:"clientIP,omitempty" protobuf:"bytes,1,opt,name=clientIP"` +} + +// ClientIPConfig represents the configurations of Client IP based session affinity. +type ClientIPConfig struct { + // timeoutSeconds specifies the seconds of ClientIP type session sticky time. + // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + // Default value is 10800(for 3 hours). + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"` +} + // Service Type string describes ingress methods for a service type ServiceType string @@ -2740,6 +3338,16 @@ const ( ServiceTypeExternalName ServiceType = "ExternalName" ) +// Service External Traffic Policy Type string +type ServiceExternalTrafficPolicyType string + +const ( + // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. + ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" + // ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior. + ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" +) + // ServiceStatus represents the current status of a service. 
type ServiceStatus struct { // LoadBalancer contains the current status of the load-balancer, @@ -2773,7 +3381,9 @@ type LoadBalancerIngress struct { // ServiceSpec describes the attributes that a user creates on a service. type ServiceSpec struct { // The list of ports that are exposed by this service. - // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +patchMergeKey=port + // +patchStrategy=merge Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"` // Route service traffic to pods with label keys and values matching this @@ -2781,7 +3391,7 @@ type ServiceSpec struct { // external process managing its endpoints, which Kubernetes will not // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. // Ignored if type is ExternalName. - // More info: http://kubernetes.io/docs/user-guide/services#overview + // More info: https://kubernetes.io/docs/concepts/services-networking/service/ // +optional Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` @@ -2793,7 +3403,7 @@ type ServiceSpec struct { // can be specified for headless services when proxying is not required. // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if // type is ExternalName. - // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +optional ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"` @@ -2810,7 +3420,7 @@ type ServiceSpec struct { // "LoadBalancer" builds on NodePort and creates an // external load-balancer (if supported in the current cloud) which routes // to the clusterIP. - // More info: http://kubernetes.io/docs/user-guide/services#overview + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types // +optional Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` @@ -2818,26 +3428,15 @@ type ServiceSpec struct { // will also accept traffic for this service. These IPs are not managed by // Kubernetes. The user is responsible for ensuring that traffic arrives // at a node with this IP. A common example is external load-balancers - // that are not part of the Kubernetes system. A previous form of this - // functionality exists as the deprecatedPublicIPs field. When using this - // field, callers should also clear the deprecatedPublicIPs field. + // that are not part of the Kubernetes system. // +optional ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"` - // deprecatedPublicIPs is deprecated and replaced by the externalIPs field - // with almost the exact same semantics. This field is retained in the v1 - // API for compatibility until at least 8/20/2016. It will be removed from - // any new API revisions. If both deprecatedPublicIPs *and* externalIPs are - // set, deprecatedPublicIPs is used. - // +k8s:conversion-gen=false - // +optional - DeprecatedPublicIPs []string `json:"deprecatedPublicIPs,omitempty" protobuf:"bytes,6,rep,name=deprecatedPublicIPs"` - // Supports "ClientIP" and "None". Used to maintain session affinity. // Enable client IP based session affinity. 
// Must be ClientIP or None. // Defaults to None. - // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +optional SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"` @@ -2852,15 +3451,45 @@ type ServiceSpec struct { // If specified and supported by the platform, this will restrict traffic through the cloud-provider // load-balancer will be restricted to the specified client IPs. This field will be ignored if the // cloud-provider does not support the feature." - // More info: http://kubernetes.io/docs/user-guide/services-firewalls + // More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ // +optional LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` // externalName is the external reference that kubedns or equivalent will // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. + // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // and requires Type to be ExternalName. // +optional ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"` + + // externalTrafficPolicy denotes if this Service desires to route external + // traffic to node-local or cluster-wide endpoints. "Local" preserves the + // client source IP and avoids a second hop for LoadBalancer and Nodeport + // type services, but risks potentially imbalanced traffic spreading. + // "Cluster" obscures the client source IP and may cause a second hop to + // another node, but should have good overall load-spreading. + // +optional + ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` + + // healthCheckNodePort specifies the healthcheck nodePort for the service. + // If not specified, HealthCheckNodePort is created by the service api + // backend with the allocated nodePort. Will use user-specified nodePort value + // if specified by the client. Only effects when Type is set to LoadBalancer + // and ExternalTrafficPolicy is set to Local. + // +optional + HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"` + + // publishNotReadyAddresses, when set to true, indicates that DNS implementations + // must publish the notReadyAddresses of subsets for the Endpoints associated with + // the Service. The default value is false. + // The primary use case for setting this field is to use a StatefulSet's Headless Service + // to propagate SRV records for its Pods without respect to their readiness for purpose + // of peer discovery. + // +optional + PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"` + // sessionAffinityConfig contains the configurations of session affinity. + // +optional + SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` } // ServicePort contains information on service's port. @@ -2887,7 +3516,7 @@ type ServicePort struct { // of the 'port' field is used (an identity map). 
// This field is ignored for services with clusterIP=None, and should be // omitted or set equal to the 'port' field. - // More info: http://kubernetes.io/docs/user-guide/services#defining-a-service + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service // +optional TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"` @@ -2895,12 +3524,14 @@ type ServicePort struct { // Usually assigned by the system. If specified, it will be allocated to the service // if unused or else creation of the service will fail. // Default is to auto-allocate a port if the ServiceType of this Service requires one. - // More info: http://kubernetes.io/docs/user-guide/services#type--nodeport + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport // +optional NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"` } -// +genclient=true +// +genclient +// +genclient:skipVerbs=deleteCollection +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Service is a named abstraction of software service (for example, mysql) consisting of local port // (for example 3306) that the proxy listens on, and the selector that determines which pods @@ -2908,19 +3539,19 @@ type ServicePort struct { type Service struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a service. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the service. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -2931,11 +3562,13 @@ const ( ClusterIPNone = "None" ) +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ServiceList holds a list of services. type ServiceList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -2943,7 +3576,8 @@ type ServiceList struct { Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ServiceAccount binds together: // * a name, understood by users, and perhaps by peripheral systems, for an identity @@ -2952,19 +3586,21 @@ type ServiceList struct { type ServiceAccount struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. - // More info: http://kubernetes.io/docs/user-guide/secrets + // More info: https://kubernetes.io/docs/concepts/configuration/secret // +optional + // +patchMergeKey=name + // +patchStrategy=merge Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"` // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret + // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod // +optional ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"` @@ -2974,20 +3610,23 @@ type ServiceAccount struct { AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ServiceAccountList is a list of ServiceAccount objects type ServiceAccountList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ServiceAccounts. - // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Endpoints is a collection of endpoints that implement the actual service. Example: // Name: "mysvc", @@ -3004,7 +3643,7 @@ type ServiceAccountList struct { type Endpoints struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3015,7 +3654,8 @@ type Endpoints struct { // subsets for the different ports. No address will appear in both Addresses and // NotReadyAddresses in the same subset. // Sets of addresses and ports that comprise a service. - Subsets []EndpointSubset `json:"subsets" protobuf:"bytes,2,rep,name=subsets"` + // +optional + Subsets []EndpointSubset `json:"subsets,omitempty" protobuf:"bytes,2,rep,name=subsets"` } // EndpointSubset is a group of addresses with a common set of ports. 
The @@ -3081,11 +3721,13 @@ type EndpointPort struct { Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // EndpointsList is a list of endpoints. type EndpointsList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3098,20 +3740,67 @@ type NodeSpec struct { // PodCIDR represents the pod IP range assigned to the node. // +optional PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"` - // External ID of the node assigned by some machine database (e.g. a cloud provider). - // Deprecated. - // +optional - ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"` // ID of the node assigned by the cloud provider in the format: :// // +optional ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` // Unschedulable controls node schedulability of new pods. By default, node is schedulable. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration + // More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration // +optional Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"` // If specified, the node's taints. // +optional Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"` + // If specified, the source to get node configuration from + // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field + // +optional + ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"` + + // Deprecated. Not all kubelets will set this field. Remove field after 1.13. + // see: https://issues.k8s.io/61966 + // +optional + DoNotUse_ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"` +} + +// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. +type NodeConfigSource struct { + // For historical context, regarding the below kind, apiVersion, and configMapRef deprecation tags: + // 1. kind/apiVersion were used by the kubelet to persist this struct to disk (they had no protobuf tags) + // 2. configMapRef and proto tag 1 were used by the API to refer to a configmap, + // but used a generic ObjectReference type that didn't really have the fields we needed + // All uses/persistence of the NodeConfigSource struct prior to 1.11 were gated by alpha feature flags, + // so there was no persisted data for these fields that needed to be migrated/handled. + + // +k8s:deprecated=kind + // +k8s:deprecated=apiVersion + // +k8s:deprecated=configMapRef,protobuf=1 + + // ConfigMap is a reference to a Node's ConfigMap + ConfigMap *ConfigMapNodeConfigSource `json:"configMap,omitempty" protobuf:"bytes,2,opt,name=configMap"` +} + +// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. +type ConfigMapNodeConfigSource struct { + // Namespace is the metadata.namespace of the referenced ConfigMap. + // This field is required in all cases. 
+ Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + + // Name is the metadata.name of the referenced ConfigMap. + // This field is required in all cases. + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // UID is the metadata.UID of the referenced ConfigMap. + // This field is forbidden in Node.Spec, and required in Node.Status. + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"` + + // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. + // This field is forbidden in Node.Spec, and required in Node.Status. + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` + + // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure + // This field is required in all cases. + KubeletConfigKey string `json:"kubeletConfigKey" protobuf:"bytes,5,opt,name=kubeletConfigKey"` } // DaemonEndpoint contains information about a single Daemon endpoint. @@ -3136,11 +3825,11 @@ type NodeDaemonEndpoints struct { // NodeSystemInfo is a set of ids/uuids to uniquely identify the node. type NodeSystemInfo struct { // MachineID reported by the node. For unique machine identification - // in the cluster this field is prefered. Learn more from man(5) + // in the cluster this field is preferred. Learn more from man(5) // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"` // SystemUUID reported by the node. For unique machine identification - // MachineID is prefered. This field is specific to Red Hat hosts + // MachineID is preferred. This field is specific to Red Hat hosts // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"` // Boot ID reported by the node. @@ -3161,10 +3850,57 @@ type NodeSystemInfo struct { Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"` } +// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource. +type NodeConfigStatus struct { + // Assigned reports the checkpointed config the node will try to use. + // When Node.Spec.ConfigSource is updated, the node checkpoints the associated + // config payload to local disk, along with a record indicating intended + // config. The node refers to this record to choose its config checkpoint, and + // reports this record in Assigned. Assigned only updates in the status after + // the record has been checkpointed to disk. When the Kubelet is restarted, + // it tries to make the Assigned config the Active config by loading and + // validating the checkpointed payload identified by Assigned. + // +optional + Assigned *NodeConfigSource `json:"assigned,omitempty" protobuf:"bytes,1,opt,name=assigned"` + // Active reports the checkpointed config the node is actively using. + // Active will represent either the current version of the Assigned config, + // or the current LastKnownGood config, depending on whether attempting to use the + // Assigned config results in an error. + // +optional + Active *NodeConfigSource `json:"active,omitempty" protobuf:"bytes,2,opt,name=active"` + // LastKnownGood reports the checkpointed config the node will fall back to + // when it encounters an error attempting to use the Assigned config. 
+ // The Assigned config becomes the LastKnownGood config when the node determines + // that the Assigned config is stable and correct. + // This is currently implemented as a 10-minute soak period starting when the local + // record of Assigned config is updated. If the Assigned config is Active at the end + // of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is + // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, + // because the local default config is always assumed good. + // You should not make assumptions about the node's method of determining config stability + // and correctness, as this may change or become configurable in the future. + // +optional + LastKnownGood *NodeConfigSource `json:"lastKnownGood,omitempty" protobuf:"bytes,3,opt,name=lastKnownGood"` + // Error describes any problems reconciling the Spec.ConfigSource to the Active config. + // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned + // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting + // to load or validate the Assigned config, etc. + // Errors may occur at different points while syncing config. Earlier errors (e.g. download or + // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across + // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in + // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error + // by fixing the config assigned in Spec.ConfigSource. + // You can find additional information for debugging by searching the error message in the Kubelet log. + // Error is a human-readable description of the error state; machines can check whether or not Error + // is empty, but should not rely on the stability of the Error text across Kubelet versions. + // +optional + Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"` +} + // NodeStatus is information about the current status of a node. type NodeStatus struct { // Capacity represents the total resources of a node. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity // +optional Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` // Allocatable represents the resources of a node that are available for scheduling. @@ -3172,24 +3908,28 @@ type NodeStatus struct { // +optional Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"` // NodePhase is the recently observed lifecycle phase of the node. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase + // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase // The field is never populated, and now is deprecated. // +optional Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"` // Conditions is an array of current observed node conditions. 
- // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition + // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition // +optional + // +patchMergeKey=type + // +patchStrategy=merge Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` // List of addresses reachable to the node. // Queried from cloud provider, if available. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses + // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses // +optional + // +patchMergeKey=type + // +patchStrategy=merge Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"` // Endpoints of daemons running on the Node. // +optional DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"` // Set of ids/uuids to uniquely identify the node. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info + // More info: https://kubernetes.io/docs/concepts/nodes/node/#info // +optional NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"` // List of container images on this node @@ -3201,6 +3941,9 @@ type NodeStatus struct { // List of volumes that are attached to the node. // +optional VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"` + // Status of the config assigned to the node via the dynamic Kubelet config feature. + // +optional + Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"` } type UniqueVolumeName string @@ -3250,7 +3993,7 @@ type PodSignature struct { // Describe a container image type ContainerImage struct { // Names by which this image is known. - // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] Names []string `json:"names" protobuf:"bytes,1,rep,name=names"` // The size of the image in bytes. // +optional @@ -3284,10 +4027,10 @@ const ( NodeMemoryPressure NodeConditionType = "MemoryPressure" // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. NodeDiskPressure NodeConditionType = "DiskPressure" + // NodePIDPressure means the kubelet is under pressure due to insufficient available PID. + NodePIDPressure NodeConditionType = "PIDPressure" // NodeNetworkUnavailable means that network for the node is not correctly configured. NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" - // NodeInodePressure means the kubelet is under pressure due to insufficient available inodes. - NodeInodePressure NodeConditionType = "InodePressure" ) // NodeCondition contains condition information for a node. @@ -3314,13 +4057,11 @@ type NodeAddressType string // These are valid address type of node. const ( - // Deprecated: NodeLegacyHostIP will be removed in 1.7. 
- NodeLegacyHostIP NodeAddressType = "LegacyHostIP" - NodeHostName NodeAddressType = "Hostname" - NodeExternalIP NodeAddressType = "ExternalIP" - NodeInternalIP NodeAddressType = "InternalIP" - NodeExternalDNS NodeAddressType = "ExternalDNS" - NodeInternalDNS NodeAddressType = "InternalDNS" + NodeHostName NodeAddressType = "Hostname" + NodeExternalIP NodeAddressType = "ExternalIP" + NodeInternalIP NodeAddressType = "InternalIP" + NodeExternalDNS NodeAddressType = "ExternalDNS" + NodeInternalDNS NodeAddressType = "InternalDNS" ) // NodeAddress contains information for the node's address. @@ -3346,49 +4087,56 @@ const ( ResourceMemory ResourceName = "memory" // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) ResourceStorage ResourceName = "storage" - // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. - ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" - // Number of Pods that may be running on this Node: see ResourcePods + // Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // The resource name for ResourceEphemeralStorage is alpha and it can change across releases. + ResourceEphemeralStorage ResourceName = "ephemeral-storage" ) const ( - // Namespace prefix for opaque counted resources (alpha). - ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" + // Default namespace prefix. + ResourceDefaultNamespacePrefix = "kubernetes.io/" + // Name prefix for huge page resources (alpha). + ResourceHugePagesPrefix = "hugepages-" + // Name prefix for storage resource limits + ResourceAttachableVolumesPrefix = "attachable-volumes-" ) // ResourceList is a set of (resource name, quantity) pairs. type ResourceList map[ResourceName]resource.Quantity -// +genclient=true -// +nonNamespaced=true +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Node is a worker node in Kubernetes. // Each node will have a unique identifier in the cache (i.e. in etcd). type Node struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a node. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the node. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // NodeList is the whole list of all Nodes which have been registered with master. type NodeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3408,7 +4156,7 @@ const ( // NamespaceSpec describes the attributes on a Namespace. type NamespaceSpec struct { // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. - // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers + // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"` } @@ -3416,7 +4164,7 @@ type NamespaceSpec struct { // NamespaceStatus is information about the current status of a Namespace. type NamespaceStatus struct { // Phase is the current lifecycle phase of the namespace. - // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases + // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` } @@ -3431,48 +4179,54 @@ const ( NamespaceTerminating NamespacePhase = "Terminating" ) -// +genclient=true -// +nonNamespaced=true +// +genclient +// +genclient:nonNamespaced +// +genclient:skipVerbs=deleteCollection +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Namespace provides a scope for Names. // Use of multiple namespaces is optional. type Namespace struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of the Namespace. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status describes the current status of a Namespace. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // NamespaceList is a list of Namespaces. type NamespaceList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of Namespace objects in the list. - // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"` } -// Binding ties one object to another. -// For example, a pod is bound to a node by a scheduler. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. +// Deprecated in 1.7, please use the bindings subresource of pods instead. type Binding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3488,83 +4242,7 @@ type Preconditions struct { UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` } -// DeletionPropagation decides if a deletion will propagate to the dependents of the object, and how the garbage collector will handle the propagation. -type DeletionPropagation string - -const ( - // Orphans the dependents. - DeletePropagationOrphan DeletionPropagation = "Orphan" - // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background. - DeletePropagationBackground DeletionPropagation = "Background" - // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store. - // API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp. - // This policy is cascading, i.e., the dependents will be deleted with Foreground. - DeletePropagationForeground DeletionPropagation = "Foreground" -) - -// DeleteOptions may be provided when deleting an API object -// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. -// +k8s:openapi-gen=false -type DeleteOptions struct { - metav1.TypeMeta `json:",inline"` - - // The duration in seconds before the object should be deleted. Value must be non-negative integer. - // The value zero indicates delete immediately. If this value is nil, the default grace period for the - // specified type will be used. - // Defaults to a per object value if not specified. zero means delete immediately. - // +optional - GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"` - - // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be - // returned. - // +optional - Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` - - // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. - // Should the dependent objects be orphaned. If true/false, the "orphan" - // finalizer will be added to/removed from the object's finalizers list. - // Either this field or PropagationPolicy may be set, but not both. - // +optional - OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"` - - // Whether and how garbage collection will be performed. - // Either this field or OrphanDependents may be set, but not both. - // The default policy is decided by the existing finalizer set in the - // metadata.finalizers and the resource-specific default policy. - // +optional - PropagationPolicy *DeletionPropagation `protobuf:"bytes,4,opt,name=propagationPolicy,casttype=DeletionPropagation"` -} - -// ListOptions is the query options to a standard REST list call. 
-// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. -// +k8s:openapi-gen=false -type ListOptions struct { - metav1.TypeMeta `json:",inline"` - - // A selector to restrict the list of returned objects by their labels. - // Defaults to everything. - // +optional - LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` - // A selector to restrict the list of returned objects by their fields. - // Defaults to everything. - // +optional - FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` - // Watch for changes to the described resources and return them as a stream of - // add, update, and remove notifications. Specify resourceVersion. - // +optional - Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` - // When specified with a watch call, shows changes that occur after that particular version of a resource. - // Defaults to changes from the beginning of history. - // When specified for list: - // - if unset, then the result is returned from remote storage based on quorum-read flag; - // - if it's 0, then we simply return what we currently have in cache, no guarantee; - // - if set to non zero, then the result is at least as fresh as given rv. - // +optional - ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` - // Timeout for the list/watch call. - // +optional - TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"` -} +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodLogOptions is the query options for a Pod's logs REST call. type PodLogOptions struct { @@ -3606,6 +4284,8 @@ type PodLogOptions struct { LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // PodAttachOptions is the query options to a Pod's remote attach call. // --- // TODO: merge w/ PodExecOptions below for stdin, stdout, etc @@ -3641,6 +4321,8 @@ type PodAttachOptions struct { Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // PodExecOptions is the query options to a Pod's remote exec call. // --- // TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging @@ -3677,6 +4359,8 @@ type PodExecOptions struct { Command []string `json:"command" protobuf:"bytes,6,rep,name=command"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // PodPortForwardOptions is the query options to a Pod's port forward call // when using WebSockets. // The `port` query parameter must specify the port or @@ -3692,6 +4376,8 @@ type PodPortForwardOptions struct { Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // PodProxyOptions is the query options to a Pod's proxy call. type PodProxyOptions struct { metav1.TypeMeta `json:",inline"` @@ -3701,6 +4387,8 @@ type PodProxyOptions struct { Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // NodeProxyOptions is the query options to a Node's proxy call. 
type NodeProxyOptions struct { metav1.TypeMeta `json:",inline"` @@ -3710,6 +4398,8 @@ type NodeProxyOptions struct { Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ServiceProxyOptions is the query options to a Service's proxy call. type ServiceProxyOptions struct { metav1.TypeMeta `json:",inline"` @@ -3724,28 +4414,29 @@ type ServiceProxyOptions struct { } // ObjectReference contains enough information to let you inspect or modify the referred object. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type ObjectReference struct { // Kind of the referent. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // Namespace of the referent. - // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ // +optional Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // +optional Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids // +optional UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // API version of the referent. // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` // Specific resourceVersion to which this reference is made, if any. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` @@ -3765,12 +4456,14 @@ type ObjectReference struct { // referenced object inside the same namespace. type LocalObjectReference struct { // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // TODO: Add other useful fields. apiVersion, kind, uid? // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // SerializedReference is a reference to serialized object. type SerializedReference struct { metav1.TypeMeta `json:",inline"` @@ -3797,14 +4490,14 @@ const ( EventTypeWarning string = "Warning" ) -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Event is a report of an event somewhere in the cluster. -// TODO: Decide whether to store these separately or with the object they apply to. type Event struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` // The object that this event is about. @@ -3840,13 +4533,58 @@ type Event struct { // Type of this event (Normal, Warning), new types could be added in the future // +optional Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"` + + // Time when this Event was first observed. + // +optional + EventTime metav1.MicroTime `json:"eventTime,omitempty" protobuf:"bytes,10,opt,name=eventTime"` + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + Series *EventSeries `json:"series,omitempty" protobuf:"bytes,11,opt,name=series"` + + // What action was taken/failed regarding to the Regarding object. + // +optional + Action string `json:"action,omitempty" protobuf:"bytes,12,opt,name=action"` + + // Optional secondary object for more complex actions. + // +optional + Related *ObjectReference `json:"related,omitempty" protobuf:"bytes,13,opt,name=related"` + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingController string `json:"reportingComponent" protobuf:"bytes,14,opt,name=reportingComponent"` + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + ReportingInstance string `json:"reportingInstance" protobuf:"bytes,15,opt,name=reportingInstance"` } +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continuously for some time. +type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 `json:"count,omitempty" protobuf:"varint,1,name=count"` + // Time of the last occurrence observed + LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"` + // State of this Series: Ongoing or Finished + State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"` +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // EventList is a list of events. type EventList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3854,17 +4592,10 @@ type EventList struct { Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` } -// List holds a list of objects, which may not be known by the server. -type List struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - // List of objects - Items []runtime.RawExtension `json:"items" protobuf:"bytes,2,rep,name=items"` -} +// List holds a list of objects, which may not be known by the server. 
+type List metav1.List // LimitType is a type of object that is limited type LimitType string @@ -3906,32 +4637,35 @@ type LimitRangeSpec struct { Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // LimitRange sets resource usage limits for each kind of resource in a Namespace. type LimitRange struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the limits enforced. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // LimitRangeList is a list of LimitRange items. type LimitRangeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of LimitRange objects. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -3961,10 +4695,23 @@ const ( ResourceRequestsMemory ResourceName = "requests.memory" // Storage request, in bytes ResourceRequestsStorage ResourceName = "requests.storage" + // Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage" // CPU limit, in cores. (500m = .5 cores) ResourceLimitsCPU ResourceName = "limits.cpu" // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) ResourceLimitsMemory ResourceName = "limits.memory" + // Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" +) + +// The following identify resource prefix for Kubernetes object types +const ( + // HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // As burst is not supported for HugePages, we would only quota its request, and ignore the limit. + ResourceRequestsHugePagesPrefix = "requests.hugepages-" + // Default resource requests prefix + DefaultResourceRequestsPrefix = "requests." ) // A ResourceQuotaScope defines a filter that must match each object tracked by a quota @@ -3979,24 +4726,66 @@ const ( ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" // Match all pod objects that do not have best effort quality of service ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" + // Match all pod objects that have priority class mentioned + ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass" ) // ResourceQuotaSpec defines the desired hard limits to enforce for Quota. 
type ResourceQuotaSpec struct { - // Hard is the set of desired hard limits for each named resource. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // hard is the set of desired hard limits for each named resource. + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` // A collection of filters that must match each object tracked by a quota. // If not specified, the quota matches all objects. // +optional Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"` + // scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota + // but expressed using ScopeSelectorOperator in combination with possible values. + // For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched. + // +optional + ScopeSelector *ScopeSelector `json:"scopeSelector,omitempty" protobuf:"bytes,3,opt,name=scopeSelector"` } +// A scope selector represents the AND of the selectors represented +// by the scoped-resource selector requirements. +type ScopeSelector struct { + // A list of scope selector requirements by scope of the resources. + // +optional + MatchExpressions []ScopedResourceSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"` +} + +// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator +// that relates the scope name and values. +type ScopedResourceSelectorRequirement struct { + // The name of the scope that the selector applies to. + ScopeName ResourceQuotaScope `json:"scopeName" protobuf:"bytes,1,opt,name=scopeName"` + // Represents a scope's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + Operator ScopeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=ScopedResourceSelectorOperator"` + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. + // This array is replaced during a strategic merge patch. + // +optional + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A scope selector operator is the set of operators that can be used in +// a scope selector requirement. +type ScopeSelectorOperator string + +const ( + ScopeSelectorOpIn ScopeSelectorOperator = "In" + ScopeSelectorOpNotIn ScopeSelectorOperator = "NotIn" + ScopeSelectorOpExists ScopeSelectorOperator = "Exists" + ScopeSelectorOpDoesNotExist ScopeSelectorOperator = "DoesNotExist" +) + // ResourceQuotaStatus defines the enforced hard limits and observed use. type ResourceQuotaStatus struct { // Hard is the set of enforced hard limits for each named resource. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` // Used is the current observed total usage of the resource in the namespace. 
@@ -4004,56 +4793,59 @@ type ResourceQuotaStatus struct { Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ResourceQuota sets aggregate quota restrictions enforced per namespace type ResourceQuota struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired quota. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status defines the actual enforced quota and its current usage. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ResourceQuotaList is a list of ResourceQuota items. type ResourceQuotaList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of ResourceQuota objects. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Secret holds secret data of a certain type. The total bytes of the values in // the Data field must be less than MaxSecretSize bytes. type Secret struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN - // or leading dot followed by valid DNS_SUBDOMAIN. - // The serialized form of the secret data is a base64 encoded string, - // representing the arbitrary (possibly non-string) data value here. - // Described in https://tools.ietf.org/html/rfc4648#section-4 + // Data contains the secret data. Each key must consist of alphanumeric + // characters, '-', '_' or '.'. The serialized form of the secret data is a + // base64 encoded string, representing the arbitrary (possibly non-string) + // data value here. 
Described in https://tools.ietf.org/html/rfc4648#section-4 // +optional Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` @@ -4153,40 +4945,58 @@ const ( TLSPrivateKeyKey = "tls.key" ) +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // SecretList is a list of Secret. type SecretList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of secret objects. - // More info: http://kubernetes.io/docs/user-guide/secrets + // More info: https://kubernetes.io/docs/concepts/configuration/secret Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ConfigMap holds configuration data for pods to consume. type ConfigMap struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Data contains the configuration data. - // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // Values with non-UTF-8 byte sequences must use the BinaryData field. + // The keys stored in Data must not overlap with the keys in + // the BinaryData field, this is enforced during validation process. // +optional Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` + + // BinaryData contains the binary data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // BinaryData can contain byte sequences that are not in the UTF-8 range. + // The keys stored in BinaryData must not overlap with the ones in + // the Data field, this is enforced during validation process. + // Using this field will require 1.10+ apiserver and + // kubelet. + // +optional + BinaryData map[string][]byte `json:"binaryData,omitempty" protobuf:"bytes,3,rep,name=binaryData"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ConfigMapList is a resource containing a list of ConfigMap objects. type ConfigMapList struct { metav1.TypeMeta `json:",inline"` - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4220,27 +5030,32 @@ type ComponentCondition struct { Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"` } -// +genclient=true -// +nonNamespaced=true +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ComponentStatus (and ComponentStatusList) holds the cluster validation info. type ComponentStatus struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of component conditions observed // +optional + // +patchMergeKey=type + // +patchStrategy=merge Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // Status of all the conditions for the component as a list of ComponentStatus objects. type ComponentStatusList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4320,6 +5135,12 @@ type SecurityContext struct { // PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"` + // The GID to run the entrypoint of the container process. + // Uses runtime default if unset. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,8,opt,name=runAsGroup"` // Indicates that the container must run as a non-root user. // If true, the Kubelet will validate the image at runtime to ensure that it // does not run as UID 0 (root) and fail to start the container if it does. @@ -4332,6 +5153,14 @@ type SecurityContext struct { // Default is false. // +optional ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"` + // AllowPrivilegeEscalation controls whether a process can gain more + // privileges than its parent process. This bool directly controls if + // the no_new_privs flag will be set on the container process. + // AllowPrivilegeEscalation is true always when the container is: + // 1) run as Privileged + // 2) has CAP_SYS_ADMIN + // +optional + AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"` } // SELinuxOptions are the labels to be applied to the container @@ -4350,11 +5179,13 @@ type SELinuxOptions struct { Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // RangeAllocation is not a public type. type RangeAllocation struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4372,9 +5203,54 @@ const ( // corresponding to every RequiredDuringScheduling affinity rule. // When the --hard-pod-affinity-weight scheduler flag is not specified, // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. 
- DefaultHardPodAffinitySymmetricWeight int = 1 - - // When the --failure-domains scheduler flag is not specified, - // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity. - DefaultFailureDomains string = metav1.LabelHostname + "," + metav1.LabelZoneFailureDomain + "," + metav1.LabelZoneRegion + DefaultHardPodAffinitySymmetricWeight int32 = 1 +) + +// Sysctl defines a kernel parameter to be set +type Sysctl struct { + // Name of a property to set + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Value of a property to set + Value string `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +// NodeResources is an object for conveying resource information about a node. +// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. +type NodeResources struct { + // Capacity represents the available resources of a node + Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` +} + +const ( + // Enable stdin for remote command execution + ExecStdinParam = "input" + // Enable stdout for remote command execution + ExecStdoutParam = "output" + // Enable stderr for remote command execution + ExecStderrParam = "error" + // Enable TTY for remote command execution + ExecTTYParam = "tty" + // Command to run for remote command execution + ExecCommandParam = "command" + + // Name of header that specifies stream type + StreamType = "streamType" + // Value for streamType header for stdin stream + StreamTypeStdin = "stdin" + // Value for streamType header for stdout stream + StreamTypeStdout = "stdout" + // Value for streamType header for stderr stream + StreamTypeStderr = "stderr" + // Value for streamType header for data stream + StreamTypeData = "data" + // Value for streamType header for error stream + StreamTypeError = "error" + // Value for streamType header for terminal resize stream + StreamTypeResize = "resize" + + // Name of header that specifies the port being forwarded + PortHeader = "port" + // Name of header that specifies a request ID used to associate the error + // and data streams for a single forwarded connection + PortForwardRequestIDHeader = "requestID" ) diff --git a/vendor/k8s.io/client-go/pkg/api/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go similarity index 60% rename from vendor/k8s.io/client-go/pkg/api/v1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 75416d59a..5b4e173db 100644 --- a/vendor/k8s.io/client-go/pkg/api/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,13 +26,13 @@ package v1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AWSElasticBlockStoreVolumeSource = map[string]string{ "": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. 
AWS EBS volumes support ownership management and SELinux relabeling.", - "volumeID": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", + "volumeID": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", - "readOnly": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", + "readOnly": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", } func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string { @@ -76,12 +76,25 @@ var map_AzureDiskVolumeSource = map[string]string{ "cachingMode": "Host Caching mode: None, Read Only, Read Write.", "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "kind": "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", } func (AzureDiskVolumeSource) SwaggerDoc() map[string]string { return map_AzureDiskVolumeSource } +var map_AzureFilePersistentVolumeSource = map[string]string{ + "": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "secretName": "the name of secret that contains Azure Storage Account Name and Key", + "shareName": "Share Name", + "readOnly": "Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "secretNamespace": "the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod", +} + +func (AzureFilePersistentVolumeSource) SwaggerDoc() map[string]string { + return map_AzureFilePersistentVolumeSource +} + var map_AzureFileVolumeSource = map[string]string{ "": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", "secretName": "the name of secret that contains Azure Storage Account Name and Key", @@ -94,8 +107,8 @@ func (AzureFileVolumeSource) SwaggerDoc() map[string]string { } var map_Binding = map[string]string{ - "": "Binding ties one object to another. For example, a pod is bound to a node by a scheduler.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "target": "The target object that you want to bind to the standard object.", } @@ -103,6 +116,22 @@ func (Binding) SwaggerDoc() map[string]string { return map_Binding } +var map_CSIPersistentVolumeSource = map[string]string{ + "": "Represents storage that is managed by an external CSI volume driver (Beta feature)", + "driver": "Driver is the name of the driver to use for this volume. Required.", + "volumeHandle": "VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.", + "readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "volumeAttributes": "Attributes of the volume to publish.", + "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. 
If the secret object contains more than one secret, all secrets are passed.", +} + +func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_CSIPersistentVolumeSource +} + var map_Capabilities = map[string]string{ "": "Adds and removes POSIX capabilities from running containers.", "add": "Added capabilities", @@ -113,31 +142,67 @@ func (Capabilities) SwaggerDoc() map[string]string { return map_Capabilities } +var map_CephFSPersistentVolumeSource = map[string]string{ + "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + "user": "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", +} + +func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_CephFSPersistentVolumeSource +} + var map_CephFSVolumeSource = map[string]string{ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "user": "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", } func (CephFSVolumeSource) SwaggerDoc() map[string]string { return map_CephFSVolumeSource } +var map_CinderPersistentVolumeSource = map[string]string{ + "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", + "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", +} + +func (CinderPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_CinderPersistentVolumeSource +} + var map_CinderVolumeSource = map[string]string{ - "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", + "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", } func (CinderVolumeSource) SwaggerDoc() map[string]string { return map_CinderVolumeSource } +var map_ClientIPConfig = map[string]string{ + "": "ClientIPConfig represents the configurations of Client IP based session affinity.", + "timeoutSeconds": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).", +} + +func (ClientIPConfig) SwaggerDoc() map[string]string { + return map_ClientIPConfig +} + var map_ComponentCondition = map[string]string{ "": "Information about the condition of a component.", "type": "Type of condition for a component. Valid value: \"Healthy\"", @@ -152,7 +217,7 @@ func (ComponentCondition) SwaggerDoc() map[string]string { var map_ComponentStatus = map[string]string{ "": "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "conditions": "List of component conditions observed", } @@ -162,7 +227,7 @@ func (ComponentStatus) SwaggerDoc() map[string]string { var map_ComponentStatusList = map[string]string{ "": "Status of all the conditions for the component as a list of ComponentStatus objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of ComponentStatus objects.", } @@ -171,9 +236,10 @@ func (ComponentStatusList) SwaggerDoc() map[string]string { } var map_ConfigMap = map[string]string{ - "": "ConfigMap holds configuration data for pods to consume.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "data": "Data contains the configuration data. Each key must be a valid DNS_SUBDOMAIN with an optional leading dot.", + "": "ConfigMap holds configuration data for pods to consume.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "data": "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.", + "binaryData": "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. 
Using this field will require 1.10+ apiserver and kubelet.", } func (ConfigMap) SwaggerDoc() map[string]string { @@ -201,7 +267,7 @@ func (ConfigMapKeySelector) SwaggerDoc() map[string]string { var map_ConfigMapList = map[string]string{ "": "ConfigMapList is a resource containing a list of ConfigMap objects.", - "metadata": "More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is the list of ConfigMaps.", } @@ -209,6 +275,19 @@ func (ConfigMapList) SwaggerDoc() map[string]string { return map_ConfigMapList } +var map_ConfigMapNodeConfigSource = map[string]string{ + "": "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.", + "namespace": "Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.", + "name": "Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.", + "uid": "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", + "resourceVersion": "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", + "kubeletConfigKey": "KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases.", +} + +func (ConfigMapNodeConfigSource) SwaggerDoc() map[string]string { + return map_ConfigMapNodeConfigSource +} + var map_ConfigMapProjection = map[string]string{ "": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", @@ -233,22 +312,23 @@ func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { var map_Container = map[string]string{ "": "A single application container that you want to run within a pod.", "name": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", - "image": "Docker image name. More info: http://kubernetes.io/docs/user-guide/images", - "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands", - "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands", + "image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", "ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", - "resources": "Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources", + "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeMounts": "Pod volumes to mount into the container's filesystem. 
Cannot be updated.", - "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", - "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future.", + "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", - "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/images#updating-images", - "securityContext": "Security options the pod should run with. More info: http://releases.k8s.io/HEAD/docs/design/security_context.md", + "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "securityContext": "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", @@ -260,7 +340,7 @@ func (Container) SwaggerDoc() map[string]string { var map_ContainerImage = map[string]string{ "": "Describe a container image", - "names": "Names by which this image is known. e.g. [\"gcr.io/google_containers/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", + "names": "Names by which this image is known. e.g. [\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", "sizeBytes": "The size of the image in bytes.", } @@ -333,9 +413,9 @@ var map_ContainerStatus = map[string]string{ "lastState": "Details about the container's last termination condition.", "ready": "Specifies whether the container has passed its readiness probe.", "restartCount": "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.", - "image": "The image the container is running. More info: http://kubernetes.io/docs/user-guide/images", + "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images", "imageID": "ImageID of the container's image.", - "containerID": "Container's ID in the format 'docker://'. More info: http://kubernetes.io/docs/user-guide/container-environment#container-information", + "containerID": "Container's ID in the format 'docker://'.", } func (ContainerStatus) SwaggerDoc() map[string]string { @@ -351,18 +431,6 @@ func (DaemonEndpoint) SwaggerDoc() map[string]string { return map_DaemonEndpoint } -var map_DeleteOptions = map[string]string{ - "": "DeleteOptions may be provided when deleting an API object DEPRECATED: This type has been moved to meta/v1 and will be removed soon.", - "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", - "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", - "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "PropagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. 
The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", -} - -func (DeleteOptions) SwaggerDoc() map[string]string { - return map_DeleteOptions -} - var map_DownwardAPIProjection = map[string]string{ "": "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.", "items": "Items is a list of DownwardAPIVolume file", @@ -395,8 +463,9 @@ func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string { } var map_EmptyDirVolumeSource = map[string]string{ - "": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", - "medium": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", + "": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", + "medium": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + "sizeLimit": "Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", } func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { @@ -439,7 +508,7 @@ func (EndpointSubset) SwaggerDoc() map[string]string { var map_Endpoints = map[string]string{ "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.", } @@ -449,7 +518,7 @@ func (Endpoints) SwaggerDoc() map[string]string { var map_EndpointsList = map[string]string{ "": "EndpointsList is a list of endpoints.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of endpoints.", } @@ -459,7 +528,7 @@ func (EndpointsList) SwaggerDoc() map[string]string { var map_EnvFromSource = map[string]string{ "": "EnvFromSource represents the source of a set of ConfigMaps", - "prefix": "An optional identifer to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.", + "prefix": "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.", "configMapRef": "The ConfigMap to select from", "secretRef": "The Secret to select from", } @@ -481,8 +550,8 @@ func (EnvVar) SwaggerDoc() map[string]string { var map_EnvVarSource = map[string]string{ "": "EnvVarSource represents a source for the value of an EnvVar.", - "fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.podIP.", - "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", + "fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.", + "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", "configMapKeyRef": "Selects a key of a ConfigMap.", "secretKeyRef": "Selects a key of a secret in the pod's namespace", } @@ -492,16 +561,22 @@ func (EnvVarSource) SwaggerDoc() map[string]string { } var map_Event = map[string]string{ - "": "Event is a report of an event somewhere in the cluster.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "involvedObject": "The object that this event is about.", - "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", - "message": "A human-readable description of the status of this operation.", - "source": "The component reporting this event. Should be a short machine understandable string.", - "firstTimestamp": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)", - "lastTimestamp": "The time at which the most recent occurrence of this event was recorded.", - "count": "The number of times this event has occurred.", - "type": "Type of this event (Normal, Warning), new types could be added in the future", + "": "Event is a report of an event somewhere in the cluster.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "involvedObject": "The object that this event is about.", + "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", + "message": "A human-readable description of the status of this operation.", + "source": "The component reporting this event. Should be a short machine understandable string.", + "firstTimestamp": "The time at which the event was first recorded. 
(Time of server receipt is in TypeMeta.)", + "lastTimestamp": "The time at which the most recent occurrence of this event was recorded.", + "count": "The number of times this event has occurred.", + "type": "Type of this event (Normal, Warning), new types could be added in the future", + "eventTime": "Time when this Event was first observed.", + "series": "Data about the Event series this event represents or nil if it's a singleton Event.", + "action": "What action was taken/failed regarding to the Regarding object.", + "related": "Optional secondary object for more complex actions.", + "reportingComponent": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", + "reportingInstance": "ID of the controller instance, e.g. `kubelet-xyzf`.", } func (Event) SwaggerDoc() map[string]string { @@ -510,7 +585,7 @@ func (Event) SwaggerDoc() map[string]string { var map_EventList = map[string]string{ "": "EventList is a list of events.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of events", } @@ -518,6 +593,17 @@ func (EventList) SwaggerDoc() map[string]string { return map_EventList } +var map_EventSeries = map[string]string{ + "": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.", + "count": "Number of occurrences in this series up to the last heartbeat time", + "lastObservedTime": "Time of the last occurrence observed", + "state": "State of this Series: Ongoing or Finished", +} + +func (EventSeries) SwaggerDoc() map[string]string { + return map_EventSeries +} + var map_EventSource = map[string]string{ "": "EventSource contains information for an event.", "component": "Component from which the event is generated.", @@ -539,18 +625,32 @@ func (ExecAction) SwaggerDoc() map[string]string { var map_FCVolumeSource = map[string]string{ "": "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.", - "targetWWNs": "Required: FC target worldwide names (WWNs)", - "lun": "Required: FC target lun number", + "targetWWNs": "Optional: FC target worldwide names (WWNs)", + "lun": "Optional: FC target lun number", "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "wwids": "Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", } func (FCVolumeSource) SwaggerDoc() map[string]string { return map_FCVolumeSource } +var map_FlexPersistentVolumeSource = map[string]string{ + "": "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.", + "driver": "Driver is the name of the driver to use for this volume.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
The default filesystem depends on FlexVolume script.", + "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "options": "Optional: Extra command options if any.", +} + +func (FlexPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_FlexPersistentVolumeSource +} + var map_FlexVolumeSource = map[string]string{ - "": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "driver": "Driver is the name of the driver to use for this volume.", "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", @@ -574,10 +674,10 @@ func (FlockerVolumeSource) SwaggerDoc() map[string]string { var map_GCEPersistentDiskVolumeSource = map[string]string{ "": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.", - "pdName": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", - "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + "pdName": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "partition": "The partition in the volume that you want to mount. 
If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", } func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string { @@ -585,7 +685,7 @@ func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string { } var map_GitRepoVolumeSource = map[string]string{ - "": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", + "": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "repository": "Repository URL", "revision": "Commit hash for the specified revision.", "directory": "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", @@ -597,9 +697,9 @@ func (GitRepoVolumeSource) SwaggerDoc() map[string]string { var map_GlusterfsVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", } func (GlusterfsVolumeSource) SwaggerDoc() map[string]string { @@ -640,24 +740,58 @@ func (Handler) SwaggerDoc() map[string]string { return map_Handler } +var map_HostAlias = map[string]string{ + "": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.", + "ip": "IP address of the host file entry.", + "hostnames": "Hostnames for the above IP address.", +} + +func (HostAlias) SwaggerDoc() map[string]string { + return map_HostAlias +} + var map_HostPathVolumeSource = map[string]string{ "": "Represents a host path mapped into a pod. 
Host path volumes do not support ownership management or SELinux relabeling.", - "path": "Path of the directory on the host. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath", + "path": "Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "type": "Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", } func (HostPathVolumeSource) SwaggerDoc() map[string]string { return map_HostPathVolumeSource } +var map_ISCSIPersistentVolumeSource = map[string]string{ + "": "ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + "targetPortal": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "iqn": "Target iSCSI Qualified Name.", + "lun": "iSCSI Target Lun number.", + "iscsiInterface": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "portals": "iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication", + "chapAuthSession": "whether support iSCSI Session CHAP authentication", + "secretRef": "CHAP Secret for iSCSI target and initiator authentication", + "initiatorName": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", +} + +func (ISCSIPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_ISCSIPersistentVolumeSource +} + var map_ISCSIVolumeSource = map[string]string{ - "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", - "targetPortal": "iSCSI target portal. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", - "iqn": "Target iSCSI Qualified Name.", - "lun": "iSCSI target lun number.", - "iscsiInterface": "Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#iscsi", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", - "portals": "iSCSI target portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. 
ISCSI volumes support ownership management and SELinux relabeling.", + "targetPortal": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "iqn": "Target iSCSI Qualified Name.", + "lun": "iSCSI Target Lun number.", + "iscsiInterface": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "portals": "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication", + "chapAuthSession": "whether support iSCSI Session CHAP authentication", + "secretRef": "CHAP Secret for iSCSI target and initiator authentication", + "initiatorName": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", } func (ISCSIVolumeSource) SwaggerDoc() map[string]string { @@ -677,8 +811,8 @@ func (KeyToPath) SwaggerDoc() map[string]string { var map_Lifecycle = map[string]string{ "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", - "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details", - "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details", + "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", } func (Lifecycle) SwaggerDoc() map[string]string { @@ -687,8 +821,8 @@ func (Lifecycle) SwaggerDoc() map[string]string { var map_LimitRange = map[string]string{ "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the limits enforced. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (LimitRange) SwaggerDoc() map[string]string { @@ -711,8 +845,8 @@ func (LimitRangeItem) SwaggerDoc() map[string]string { var map_LimitRangeList = map[string]string{ "": "LimitRangeList is a list of LimitRange items.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "Items is a list of LimitRange objects. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", } func (LimitRangeList) SwaggerDoc() map[string]string { @@ -728,29 +862,6 @@ func (LimitRangeSpec) SwaggerDoc() map[string]string { return map_LimitRangeSpec } -var map_List = map[string]string{ - "": "List holds a list of objects, which may not be known by the server.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "List of objects", -} - -func (List) SwaggerDoc() map[string]string { - return map_List -} - -var map_ListOptions = map[string]string{ - "": "ListOptions is the query options to a standard REST list call. DEPRECATED: This type has been moved to meta/v1 and will be removed soon.", - "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "timeoutSeconds": "Timeout for the list/watch call.", -} - -func (ListOptions) SwaggerDoc() map[string]string { - return map_ListOptions -} - var map_LoadBalancerIngress = map[string]string{ "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", @@ -772,18 +883,27 @@ func (LoadBalancerStatus) SwaggerDoc() map[string]string { var map_LocalObjectReference = map[string]string{ "": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", } func (LocalObjectReference) SwaggerDoc() map[string]string { return map_LocalObjectReference } +var map_LocalVolumeSource = map[string]string{ + "": "Local represents directly-attached storage with node affinity (Beta feature)", + "path": "The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...). Directories can be represented only by PersistentVolume with VolumeMode=Filesystem. Block devices can be represented only by VolumeMode=Block, which also requires the BlockVolume alpha feature gate to be enabled.", +} + +func (LocalVolumeSource) SwaggerDoc() map[string]string { + return map_LocalVolumeSource +} + var map_NFSVolumeSource = map[string]string{ "": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", - "server": "Server is the hostname or IP address of the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", - "path": "Path that is exported by the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", - "readOnly": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", + "server": "Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "path": "Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "readOnly": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", } func (NFSVolumeSource) SwaggerDoc() map[string]string { @@ -792,9 +912,9 @@ func (NFSVolumeSource) SwaggerDoc() map[string]string { var map_Namespace = map[string]string{ "": "Namespace provides a scope for Names. Use of multiple namespaces is optional.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of the Namespace. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status describes the current status of a Namespace. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (Namespace) SwaggerDoc() map[string]string { @@ -803,8 +923,8 @@ func (Namespace) SwaggerDoc() map[string]string { var map_NamespaceList = map[string]string{ "": "NamespaceList is a list of Namespaces.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "Items is the list of Namespace objects in the list. More info: http://kubernetes.io/docs/user-guide/namespaces", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", } func (NamespaceList) SwaggerDoc() map[string]string { @@ -813,7 +933,7 @@ func (NamespaceList) SwaggerDoc() map[string]string { var map_NamespaceSpec = map[string]string{ "": "NamespaceSpec describes the attributes on a Namespace.", - "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers", + "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", } func (NamespaceSpec) SwaggerDoc() map[string]string { @@ -822,7 +942,7 @@ func (NamespaceSpec) SwaggerDoc() map[string]string { var map_NamespaceStatus = map[string]string{ "": "NamespaceStatus is information about the current status of a Namespace.", - "phase": "Phase is the current lifecycle phase of the namespace. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases", + "phase": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", } func (NamespaceStatus) SwaggerDoc() map[string]string { @@ -831,9 +951,9 @@ func (NamespaceStatus) SwaggerDoc() map[string]string { var map_Node = map[string]string{ "": "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a node. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the node. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (Node) SwaggerDoc() map[string]string { @@ -874,6 +994,27 @@ func (NodeCondition) SwaggerDoc() map[string]string { return map_NodeCondition } +var map_NodeConfigSource = map[string]string{ + "": "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.", + "configMap": "ConfigMap is a reference to a Node's ConfigMap", +} + +func (NodeConfigSource) SwaggerDoc() map[string]string { + return map_NodeConfigSource +} + +var map_NodeConfigStatus = map[string]string{ + "": "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.", + "assigned": "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.", + "active": "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.", + "lastKnownGood": "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future.", + "error": "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. 
Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.", +} + +func (NodeConfigStatus) SwaggerDoc() map[string]string { + return map_NodeConfigStatus +} + var map_NodeDaemonEndpoints = map[string]string{ "": "NodeDaemonEndpoints lists ports opened by daemons running on the Node.", "kubeletEndpoint": "Endpoint on which Kubelet is listening.", @@ -885,7 +1026,7 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { var map_NodeList = map[string]string{ "": "NodeList is the whole list of all Nodes which have been registered with master.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of nodes", } @@ -902,6 +1043,15 @@ func (NodeProxyOptions) SwaggerDoc() map[string]string { return map_NodeProxyOptions } +var map_NodeResources = map[string]string{ + "": "NodeResources is an object for conveying resource information about a node. see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.", + "Capacity": "Capacity represents the available resources of a node", +} + +func (NodeResources) SwaggerDoc() map[string]string { + return map_NodeResources +} + var map_NodeSelector = map[string]string{ "": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", "nodeSelectorTerms": "Required. A list of node selector terms. The terms are ORed.", @@ -923,8 +1073,9 @@ func (NodeSelectorRequirement) SwaggerDoc() map[string]string { } var map_NodeSelectorTerm = map[string]string{ - "": "A null or empty node selector term matches no objects.", - "matchExpressions": "Required. A list of node selector requirements. The requirements are ANDed.", + "": "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.", + "matchExpressions": "A list of node selector requirements by node's labels.", + "matchFields": "A list of node selector requirements by node's fields.", } func (NodeSelectorTerm) SwaggerDoc() map[string]string { @@ -934,10 +1085,11 @@ func (NodeSelectorTerm) SwaggerDoc() map[string]string { var map_NodeSpec = map[string]string{ "": "NodeSpec describes the attributes that a node is created with.", "podCIDR": "PodCIDR represents the pod IP range assigned to the node.", - "externalID": "External ID of the node assigned by some machine database (e.g. a cloud provider). Deprecated.", "providerID": "ID of the node assigned by the cloud provider in the format: ://", - "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration", + "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", "taints": "If specified, the node's taints.", + "configSource": "If specified, the source to get node configuration from The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field", + "externalID": "Deprecated. 
Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966", } func (NodeSpec) SwaggerDoc() map[string]string { @@ -946,16 +1098,17 @@ func (NodeSpec) SwaggerDoc() map[string]string { var map_NodeStatus = map[string]string{ "": "NodeStatus is information about the current status of a node.", - "capacity": "Capacity represents the total resources of a node. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details.", + "capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", - "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase The field is never populated, and now is deprecated.", - "conditions": "Conditions is an array of current observed node conditions. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses", + "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", + "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses", "daemonEndpoints": "Endpoints of daemons running on the Node.", - "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info", + "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info", "images": "List of container images on this node", "volumesInUse": "List of attachable volumes in use (mounted) by the node.", "volumesAttached": "List of volumes that are attached to the node.", + "config": "Status of the config assigned to the node via the dynamic Kubelet config feature.", } func (NodeStatus) SwaggerDoc() map[string]string { @@ -964,8 +1117,8 @@ func (NodeStatus) SwaggerDoc() map[string]string { var map_NodeSystemInfo = map[string]string{ "": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.", - "machineID": "MachineID reported by the node. For unique machine identification in the cluster this field is prefered. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html", - "systemUUID": "SystemUUID reported by the node. For unique machine identification MachineID is prefered. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html", + "machineID": "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html", + "systemUUID": "SystemUUID reported by the node. For unique machine identification MachineID is preferred. 
This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html", "bootID": "Boot ID reported by the node.", "kernelVersion": "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).", "osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", @@ -990,37 +1143,14 @@ func (ObjectFieldSelector) SwaggerDoc() map[string]string { return map_ObjectFieldSelector } -var map_ObjectMeta = map[string]string{ - "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon.", - "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency", - "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", - "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", - "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", - "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", - "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", - "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", - "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", - "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", - "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", - "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", -} - -func (ObjectMeta) SwaggerDoc() map[string]string { - return map_ObjectMeta -} - var map_ObjectReference = map[string]string{ "": "ObjectReference contains enough information to let you inspect or modify the referred object.", - "kind": "Kind of the referent. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "namespace": "Namespace of the referent. More info: http://kubernetes.io/docs/user-guide/namespaces", - "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "namespace": "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", + "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "uid": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids", "apiVersion": "API version of the referent.", - "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", "fieldPath": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", } @@ -1029,10 +1159,10 @@ func (ObjectReference) SwaggerDoc() map[string]string { } var map_PersistentVolume = map[string]string{ - "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://kubernetes.io/docs/user-guide/persistent-volumes", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes", - "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes", + "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", + "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", } func (PersistentVolume) SwaggerDoc() map[string]string { @@ -1041,19 +1171,31 @@ func (PersistentVolume) SwaggerDoc() map[string]string { var map_PersistentVolumeClaim = map[string]string{ "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims", - "status": "Status represents the current information/status of a persistent volume claim. Read-only. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "status": "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", } func (PersistentVolumeClaim) SwaggerDoc() map[string]string { return map_PersistentVolumeClaim } +var map_PersistentVolumeClaimCondition = map[string]string{ + "": "PersistentVolumeClaimCondition contails details about state of pvc", + "lastProbeTime": "Last time we probed the condition.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", + "message": "Human-readable message indicating details about last transition.", +} + +func (PersistentVolumeClaimCondition) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimCondition +} + var map_PersistentVolumeClaimList = map[string]string{ "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "A list of persistent volume claims. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "A list of persistent volume claims. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", } func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { @@ -1062,11 +1204,12 @@ func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimSpec = map[string]string{ "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", - "accessModes": "AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1", + "accessModes": "AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", "selector": "A label query over volumes to consider for binding.", - "resources": "Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources", + "resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", - "storageClassName": "Name of the StorageClass required by the claim. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1", + "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is an alpha feature and may change in the future.", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1076,8 +1219,9 @@ func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimStatus = map[string]string{ "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", "phase": "Phase represents the current phase of PersistentVolumeClaim.", - "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1", + "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", "capacity": "Represents the actual resources of the underlying volume.", + "conditions": "Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", } func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { @@ -1086,7 +1230,7 @@ func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimVolumeSource = map[string]string{ "": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", - "claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. 
More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims",
+ "claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
"readOnly": "Will force the ReadOnly setting in VolumeMounts. Default false.",
}
@@ -1096,8 +1240,8 @@ func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string {
var map_PersistentVolumeList = map[string]string{
"": "PersistentVolumeList is a list of PersistentVolume items.",
- "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
- "items": "List of persistent volumes. More info: http://kubernetes.io/docs/user-guide/persistent-volumes",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+ "items": "List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes",
}
func (PersistentVolumeList) SwaggerDoc() map[string]string {
@@ -1106,18 +1250,18 @@ func (PersistentVolumeList) SwaggerDoc() map[string]string {
var map_PersistentVolumeSource = map[string]string{
"": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.",
- "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk",
- "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore",
- "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath",
- "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md",
- "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: http://kubernetes.io/docs/user-guide/volumes#nfs",
- "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md",
+ "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+ "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+ "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+ "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md",
+ "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+ "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md",
"iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.",
- "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
"cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
"fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
"flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running",
- "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.",
+ "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
"azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
"vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
"quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
@@ -1125,6 +1269,9 @@ var map_PersistentVolumeSource = map[string]string{
"photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
"portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
"scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
+ "local": "Local represents directly-attached storage with node affinity",
+ "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md",
+ "csi": "CSI represents storage that handled by an external CSI driver (Beta feature).",
}
func (PersistentVolumeSource) SwaggerDoc() map[string]string {
@@ -1133,11 +1280,14 @@ func (PersistentVolumeSource) SwaggerDoc() map[string]string {
var map_PersistentVolumeSpec = map[string]string{
"": "PersistentVolumeSpec is the specification of a persistent volume.",
- "capacity": "A description of the persistent volume's resources and capacity. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity",
- "accessModes": "AccessModes contains all ways the volume can be mounted. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes",
- "claimRef": "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding",
- "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy",
+ "capacity": "A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity",
+ "accessModes": "AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes",
+ "claimRef": "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding",
+ "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming",
"storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.",
+ "mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
+ "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is an alpha feature and may change in the future.",
+ "nodeAffinity": "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
}
func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
@@ -1146,7 +1296,7 @@ func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
var map_PersistentVolumeStatus = map[string]string{
"": "PersistentVolumeStatus is the current status of a persistent volume.",
- "phase": "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#phase",
+ "phase": "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase",
"message": "A human-readable message indicating details about why the volume is in this state.",
"reason": "Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.",
}
@@ -1167,9 +1317,9 @@ func (PhotonPersistentDiskVolumeSource) SwaggerDoc() map[string]string {
var map_Pod = map[string]string{
"": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.",
- "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
- "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
- "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+ "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
}
func (Pod) SwaggerDoc() map[string]string {
@@ -1178,7 +1328,7 @@ func (Pod) SwaggerDoc() map[string]string {
var map_PodAffinity = map[string]string{
"": "Pod affinity is a group of inter pod affinity scheduling rules.",
- "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
+ "requiredDuringSchedulingIgnoredDuringExecution": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
}
@@ -1187,10 +1337,10 @@ func (PodAffinity) SwaggerDoc() map[string]string {
}
var map_PodAffinityTerm = map[string]string{
- "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key tches that of any node on which a pod of the set of pods is running",
+ "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running",
"labelSelector": "A label query over a set of resources, in this case pods.",
"namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"",
- "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.",
+ "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.",
}
func (PodAffinityTerm) SwaggerDoc() map[string]string {
@@ -1199,7 +1349,7 @@ func (PodAffinityTerm) SwaggerDoc() map[string]string {
var map_PodAntiAffinity = map[string]string{
"": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.",
- "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
+ "requiredDuringSchedulingIgnoredDuringExecution": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
}
@@ -1222,8 +1372,8 @@ func (PodAttachOptions) SwaggerDoc() map[string]string {
var map_PodCondition = map[string]string{
"": "PodCondition contains details for the current condition of this pod.",
- "type": "Type is the type of the condition. Currently only Ready. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions",
- "status": "Status is the status of the condition. Can be True, False, Unknown. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions",
+ "type": "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
+ "status": "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
"lastProbeTime": "Last time we probed the condition.",
"lastTransitionTime": "Last time the condition transitioned from one status to another.",
"reason": "Unique, one-word, CamelCase reason for the condition's last transition.",
@@ -1234,6 +1384,26 @@ func (PodCondition) SwaggerDoc() map[string]string {
return map_PodCondition
}
+var map_PodDNSConfig = map[string]string{
+ "": "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.",
+ "nameservers": "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.",
+ "searches": "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.",
+ "options": "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.",
+}
+
+func (PodDNSConfig) SwaggerDoc() map[string]string {
+ return map_PodDNSConfig
+}
+
+var map_PodDNSConfigOption = map[string]string{
+ "": "PodDNSConfigOption defines DNS resolver options of a pod.",
+ "name": "Required.",
+}
+
+func (PodDNSConfigOption) SwaggerDoc() map[string]string {
+ return map_PodDNSConfigOption
+}
+
var map_PodExecOptions = map[string]string{
"": "PodExecOptions is the query options to a Pod's remote exec call.",
"stdin": "Redirect the standard input stream of the pod for this call. Defaults to false.",
@@ -1250,8 +1420,8 @@ func (PodExecOptions) SwaggerDoc() map[string]string {
var map_PodList = map[string]string{
"": "PodList is a list of Pods.",
- "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
- "items": "List of pods. More info: http://kubernetes.io/docs/user-guide/pods",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+ "items": "List of pods. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md",
}
func (PodList) SwaggerDoc() map[string]string {
@@ -1292,13 +1462,24 @@ func (PodProxyOptions) SwaggerDoc() map[string]string {
return map_PodProxyOptions
}
+var map_PodReadinessGate = map[string]string{
+ "": "PodReadinessGate contains the reference to a pod condition",
+ "conditionType": "ConditionType refers to a condition in the pod's condition list with matching type.",
+}
+
+func (PodReadinessGate) SwaggerDoc() map[string]string {
+ return map_PodReadinessGate
+}
+
var map_PodSecurityContext = map[string]string{
"": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.",
"seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
"runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
+ "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
"runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
"supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.",
"fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ",
+ "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.",
}
func (PodSecurityContext) SwaggerDoc() map[string]string {
@@ -1316,28 +1497,34 @@ func (PodSignature) SwaggerDoc() map[string]string {
var map_PodSpec = map[string]string{
"": "PodSpec is a description of a pod.",
- "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: http://kubernetes.io/docs/user-guide/volumes",
- "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers",
- "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers",
- "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: http://kubernetes.io/docs/user-guide/pod-states#restartpolicy",
+ "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes",
+ "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/",
+ "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.",
+ "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy",
"terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.",
"activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.",
- "dnsPolicy": "Set DNS policy for containers within the pod. One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.",
- "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: http://kubernetes.io/docs/user-guide/node-selection/README",
- "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md",
+ "dnsPolicy": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.",
+ "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
+ "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
"serviceAccount": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
"automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
"nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.",
"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
"hostPID": "Use the host's pid namespace. Optional: Default to false.",
"hostIPC": "Use the host's ipc namespace. Optional: Default to false.",
+ "shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. This field is alpha-level and is honored only by servers that enable the PodShareProcessNamespace feature.",
"securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.",
- "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod",
+ "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
"hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.",
"subdomain": "If specified, the fully qualified Pod hostname will be \"...svc.\". If not specified, the pod will not have a domainname at all.",
"affinity": "If specified, the pod's scheduling constraints",
"schedulerName": "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.",
"tolerations": "If specified, the pod's tolerations.",
+ "hostAliases": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.",
+ "priorityClassName": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
+ "priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.",
+ "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.",
+ "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md",
}
func (PodSpec) SwaggerDoc() map[string]string {
@@ -1345,17 +1532,18 @@ func (PodSpec) SwaggerDoc() map[string]string {
}
var map_PodStatus = map[string]string{
- "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system.",
- "phase": "Current condition of the pod. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-phase",
- "conditions": "Current service state of pod. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions",
+ "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
+ "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
+ "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
"message": "A human readable message indicating details about why the pod is in this condition.",
- "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'",
+ "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'",
+ "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.",
"hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.",
"podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
"startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
- "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses",
- "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses",
- "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md",
+ "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
+ "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
+ "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md",
}
func (PodStatus) SwaggerDoc() map[string]string {
@@ -1364,8 +1552,8 @@ func (PodStatus) SwaggerDoc() map[string]string {
var map_PodStatusResult = map[string]string{
"": "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded",
- "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
- "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
}
func (PodStatusResult) SwaggerDoc() map[string]string {
@@ -1374,8 +1562,8 @@ func (PodStatusResult) SwaggerDoc() map[string]string {
var map_PodTemplate = map[string]string{
"": "PodTemplate describes a template for creating copies of a predefined pod.",
- "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
- "template": "Template defines the pods that will be created from this pod template. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ "template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
}
func (PodTemplate) SwaggerDoc() map[string]string {
@@ -1384,7 +1572,7 @@ func (PodTemplate) SwaggerDoc() map[string]string {
var map_PodTemplateList = map[string]string{
"": "PodTemplateList is a list of PodTemplates.",
- "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
"items": "List of pod templates",
}
@@ -1394,8 +1582,8 @@ func (PodTemplateList) SwaggerDoc() map[string]string {
var map_PodTemplateSpec = map[string]string{
"": "PodTemplateSpec describes the data a pod should have when created from a template",
- "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
- "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
}
func (PodTemplateSpec) SwaggerDoc() map[string]string {
@@ -1446,8 +1634,8 @@ func (PreferredSchedulingTerm) SwaggerDoc() map[string]string {
var map_Probe = map[string]string{
"": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.",
- "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes",
- "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes",
+ "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
"periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.",
"successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.",
"failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.",
@@ -1480,16 +1668,32 @@ func (QuobyteVolumeSource) SwaggerDoc() map[string]string {
return map_QuobyteVolumeSource
}
+var map_RBDPersistentVolumeSource = map[string]string{
+ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.",
+ "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd",
+ "pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+}
+
+func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string {
+ return map_RBDPersistentVolumeSource
+}
+
var map_RBDVolumeSource = map[string]string{
"": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.",
- "monitors": "A collection of Ceph monitors. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
- "image": "The rados image name. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
- "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#rbd",
- "pool": "The rados pool name. Default is rbd. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it.",
- "user": "The rados user name. Default is admin. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
- "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
- "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
- "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd",
+ "pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
+ "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
}
func (RBDVolumeSource) SwaggerDoc() map[string]string {
@@ -1498,7 +1702,7 @@ func (RBDVolumeSource) SwaggerDoc() map[string]string {
var map_RangeAllocation = map[string]string{
"": "RangeAllocation is not a public type.",
- "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
"range": "Range is string that identifies the range represented by 'data'.",
"data": "Data is a bit array containing all allocated addresses in the previous segment.",
}
@@ -1509,9 +1713,9 @@ func (RangeAllocation) SwaggerDoc() map[string]string {
var map_ReplicationController = map[string]string{
"": "ReplicationController represents the configuration of a replication controller.",
- "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
- "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
- "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+ "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
}
func (ReplicationController) SwaggerDoc() map[string]string {
@@ -1533,8 +1737,8 @@ func (ReplicationControllerCondition) SwaggerDoc() map[string]string {
var map_ReplicationControllerList = map[string]string{
"": "ReplicationControllerList is a collection of replication controllers.",
- "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
- "items": "List of replication controllers. More info: http://kubernetes.io/docs/user-guide/replication-controller",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+ "items": "List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
}
func (ReplicationControllerList) SwaggerDoc() map[string]string {
@@ -1543,10 +1747,10 @@ func (ReplicationControllerList) SwaggerDoc() map[string]string {
var map_ReplicationControllerSpec = map[string]string{
"": "ReplicationControllerSpec is the specification of a replication controller.",
- "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller",
+ "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller",
"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
- "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors",
- "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template",
+ "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+ "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
}
func (ReplicationControllerSpec) SwaggerDoc() map[string]string {
@@ -1555,7 +1759,7 @@ func (ReplicationControllerSpec) SwaggerDoc() map[string]string {
var map_ReplicationControllerStatus = map[string]string{
"": "ReplicationControllerStatus represents the current status of a replication controller.",
- "replicas": "Replicas is the most recently oberved number of replicas. More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller",
+ "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller",
"fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replication controller.",
"readyReplicas": "The number of ready replicas for this replication controller.",
"availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replication controller.",
@@ -1580,9 +1784,9 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string {
var map_ResourceQuota = map[string]string{
"": "ResourceQuota sets aggregate quota restrictions enforced per namespace",
- "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
- "spec": "Spec defines the desired quota. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
- "status": "Status defines the actual enforced quota and its current usage. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ "spec": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+ "status": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
}
func (ResourceQuota) SwaggerDoc() map[string]string {
@@ -1591,8 +1795,8 @@ func (ResourceQuota) SwaggerDoc() map[string]string {
var map_ResourceQuotaList = map[string]string{
"": "ResourceQuotaList is a list of ResourceQuota items.",
- "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
- "items": "Items is a list of ResourceQuota objects. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+ "items": "Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
}
func (ResourceQuotaList) SwaggerDoc() map[string]string {
@@ -1600,9 +1804,10 @@ func (ResourceQuotaList) SwaggerDoc() map[string]string {
}
var map_ResourceQuotaSpec = map[string]string{
- "": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.",
- "hard": "Hard is the set of desired hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota",
- "scopes": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.",
+ "": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.",
+ "hard": "hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
+ "scopes": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.",
+ "scopeSelector": "scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.",
}
func (ResourceQuotaSpec) SwaggerDoc() map[string]string {
@@ -1611,7 +1816,7 @@ func (ResourceQuotaSpec) SwaggerDoc() map[string]string {
var map_ResourceQuotaStatus = map[string]string{
"": "ResourceQuotaStatus defines the enforced hard limits and observed use.",
- "hard": "Hard is the set of enforced hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota",
+ "hard": "Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
"used": "Used is the current observed total usage of the resource in the namespace.",
}
@@ -1621,8 +1826,8 @@ func (ResourceQuotaStatus) SwaggerDoc() map[string]string {
var map_ResourceRequirements = map[string]string{
"": "ResourceRequirements describes the compute resource requirements.",
- "limits": "Limits describes the maximum amount of compute resources allowed. More info: http://kubernetes.io/docs/user-guide/compute-resources/",
- "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: http://kubernetes.io/docs/user-guide/compute-resources/",
+ "limits": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+ "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
}
func (ResourceRequirements) SwaggerDoc() map[string]string {
@@ -1641,15 +1846,33 @@ func (SELinuxOptions) SwaggerDoc() map[string]string {
return map_SELinuxOptions
}
+var map_ScaleIOPersistentVolumeSource = map[string]string{
+ "": "ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume",
+ "gateway": "The host address of the ScaleIO API Gateway.",
+ "system": "The name of the storage system as configured in ScaleIO.",
+ "secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.",
+ "sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false",
+ "protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.",
+ "storagePool": "The ScaleIO Storage Pool associated with the protection domain.",
+ "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.",
+ "volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.",
+ "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+ "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+}
+
+func (ScaleIOPersistentVolumeSource) SwaggerDoc() map[string]string {
+ return map_ScaleIOPersistentVolumeSource
+}
+
var map_ScaleIOVolumeSource = map[string]string{
"": "ScaleIOVolumeSource represents a persistent ScaleIO volume",
"gateway": "The host address of the ScaleIO API Gateway.",
"system": "The name of the storage system as configured in ScaleIO.",
"secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.",
"sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false",
- "protectionDomain": "The name of the Protection Domain for the configured storage (defaults to \"default\").",
- "storagePool": "The Storage Pool associated with the protection domain (defaults to \"default\").",
- "storageMode": "Indicates whether the storage for a volume should be thick or thin (defaults to \"thin\").",
+ "protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.",
+ "storagePool": "The ScaleIO Storage Pool associated with the protection domain.",
+ "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.",
"volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.",
"fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
"readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
@@ -1659,10 +1882,30 @@ func (ScaleIOVolumeSource) SwaggerDoc() map[string]string {
return map_ScaleIOVolumeSource
}
+var map_ScopeSelector = map[string]string{
+ "": "A scope selector represents the AND of the selectors represented by the scoped-resource selector requirements.",
+ "matchExpressions": "A list of scope selector requirements by scope of the resources.",
+}
+
+func (ScopeSelector) SwaggerDoc() map[string]string {
+ return map_ScopeSelector
+}
+
+var map_ScopedResourceSelectorRequirement = map[string]string{
+ "": "A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values.",
+ "scopeName": "The name of the scope that the selector applies to.",
+ "operator": "Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist.",
+ "values": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
+}
+
+func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string {
+ return map_ScopedResourceSelectorRequirement
+}
+
var map_Secret = map[string]string{
"": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.",
- "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
- "data": "Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN or leading dot followed by valid DNS_SUBDOMAIN. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ "data": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4",
"stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.",
"type": "Used to facilitate programmatic handling of secret data.",
}
@@ -1692,8 +1935,8 @@ func (SecretKeySelector) SwaggerDoc() map[string]string {
var map_SecretList = map[string]string{
"": "SecretList is a list of Secret.",
- "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
- "items": "Items is a list of secret objects. More info: http://kubernetes.io/docs/user-guide/secrets",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+ "items": "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret",
}
func (SecretList) SwaggerDoc() map[string]string {
@@ -1710,9 +1953,19 @@ func (SecretProjection) SwaggerDoc() map[string]string {
return map_SecretProjection
}
+var map_SecretReference = map[string]string{
+ "": "SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace",
+ "name": "Name is unique within a namespace to reference a secret resource.",
+ "namespace": "Namespace defines the space within which the secret name must be unique.",
+}
+
+func (SecretReference) SwaggerDoc() map[string]string {
+ return map_SecretReference
+}
+
var map_SecretVolumeSource = map[string]string{
"": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.",
- "secretName": "Name of the secret in the pod's namespace to use. More info: http://kubernetes.io/docs/user-guide/volumes#secrets",
+ "secretName": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
"items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
"defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
"optional": "Specify whether the Secret or it's keys must be defined",
@@ -1723,13 +1976,15 @@ func (SecretVolumeSource) SwaggerDoc() map[string]string {
}
var map_SecurityContext = map[string]string{
- "": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.",
- "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.",
- "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.",
- "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
- "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
- "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
- "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false.",
+ "": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.",
+ "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.",
+ "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.",
+ "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "runAsNonRoot": "Indicates that the container must run as a non-root user. 
If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false.", + "allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN", } func (SecurityContext) SwaggerDoc() map[string]string { @@ -1747,9 +2002,9 @@ func (SerializedReference) SwaggerDoc() map[string]string { var map_Service = map[string]string{ "": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a service. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (Service) SwaggerDoc() map[string]string { @@ -1758,9 +2013,9 @@ func (Service) SwaggerDoc() map[string]string { var map_ServiceAccount = map[string]string{ "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://kubernetes.io/docs/user-guide/secrets", - "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. 
More info: https://kubernetes.io/docs/concepts/configuration/secret", + "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", } @@ -1770,17 +2025,28 @@ func (ServiceAccount) SwaggerDoc() map[string]string { var map_ServiceAccountList = map[string]string{ "": "ServiceAccountList is a list of ServiceAccount objects", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "List of ServiceAccounts. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", } func (ServiceAccountList) SwaggerDoc() map[string]string { return map_ServiceAccountList } +var map_ServiceAccountTokenProjection = map[string]string{ + "": "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).", + "audience": "Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.", + "expirationSeconds": "ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.", + "path": "Path is the path relative to the mount point of the file to project the token into.", +} + +func (ServiceAccountTokenProjection) SwaggerDoc() map[string]string { + return map_ServiceAccountTokenProjection +} + var map_ServiceList = map[string]string{ "": "ServiceList holds a list of services.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of services", } @@ -1793,8 +2059,8 @@ var map_ServicePort = map[string]string{ "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.", "protocol": "The IP protocol for this port. Supports \"TCP\" and \"UDP\". 
Default is TCP.", "port": "The port that will be exposed by this service.", - "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: http://kubernetes.io/docs/user-guide/services#defining-a-service", - "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: http://kubernetes.io/docs/user-guide/services#type--nodeport", + "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", + "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", } func (ServicePort) SwaggerDoc() map[string]string { @@ -1812,16 +2078,19 @@ func (ServiceProxyOptions) SwaggerDoc() map[string]string { var map_ServiceSpec = map[string]string{ "": "ServiceSpec describes the attributes that a user creates on a service.", - "ports": "The list of ports that are exposed by this service. More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies", - "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: http://kubernetes.io/docs/user-guide/services#overview", - "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies", - "type": "type determines how the Service is exposed. Defaults to ClusterIP. 
Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: http://kubernetes.io/docs/user-guide/services#overview", - "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system. A previous form of this functionality exists as the deprecatedPublicIPs field. When using this field, callers should also clear the deprecatedPublicIPs field.", - "deprecatedPublicIPs": "deprecatedPublicIPs is deprecated and replaced by the externalIPs field with almost the exact same semantics. This field is retained in the v1 API for compatibility until at least 8/20/2016. It will be removed from any new API revisions. If both deprecatedPublicIPs *and* externalIPs are set, deprecatedPublicIPs is used.", - "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies", + "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", + "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. 
If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services ", + "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", + "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", - "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: http://kubernetes.io/docs/user-guide/services-firewalls", - "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid DNS name and requires Type to be ExternalName.", + "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", + "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.", + "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", + "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. 
Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", + "publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.", + "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -1837,9 +2106,55 @@ func (ServiceStatus) SwaggerDoc() map[string]string { return map_ServiceStatus } +var map_SessionAffinityConfig = map[string]string{ + "": "SessionAffinityConfig represents the configurations of session affinity.", + "clientIP": "clientIP contains the configurations of Client IP based session affinity.", +} + +func (SessionAffinityConfig) SwaggerDoc() map[string]string { + return map_SessionAffinityConfig +} + +var map_StorageOSPersistentVolumeSource = map[string]string{ + "": "Represents a StorageOS persistent volume resource.", + "volumeName": "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + "volumeNamespace": "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "secretRef": "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.", +} + +func (StorageOSPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_StorageOSPersistentVolumeSource +} + +var map_StorageOSVolumeSource = map[string]string{ + "": "Represents a StorageOS persistent volume resource.", + "volumeName": "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + "volumeNamespace": "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "secretRef": "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. 
If not specified, default values will be attempted.", +} + +func (StorageOSVolumeSource) SwaggerDoc() map[string]string { + return map_StorageOSVolumeSource +} + +var map_Sysctl = map[string]string{ + "": "Sysctl defines a kernel parameter to be set", + "name": "Name of a property to set", + "value": "Value of a property to set", +} + +func (Sysctl) SwaggerDoc() map[string]string { + return map_Sysctl +} + var map_TCPSocketAction = map[string]string{ "": "TCPSocketAction describes an action based on opening a socket", "port": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", + "host": "Optional: Host name to connect to, defaults to the pod IP.", } func (TCPSocketAction) SwaggerDoc() map[string]string { @@ -1847,7 +2162,7 @@ func (TCPSocketAction) SwaggerDoc() map[string]string { } var map_Taint = map[string]string{ - "": "The node this Taint is attached to has the effect \"effect\" on any pod that that does not tolerate the Taint.", + "": "The node this Taint is attached to has the \"effect\" on any pod that does not tolerate the Taint.", "key": "Required. The taint key to be applied to a node.", "value": "Required. The taint value corresponding to the taint key.", "effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.", @@ -1871,32 +2186,72 @@ func (Toleration) SwaggerDoc() map[string]string { return map_Toleration } +var map_TopologySelectorLabelRequirement = map[string]string{ + "": "A topology selector requirement is a selector that matches given label. This is an alpha feature and may change in the future.", + "key": "The label key that the selector applies to.", + "values": "An array of string values. One value must match the label to be selected. Each entry in Values is ORed.", +} + +func (TopologySelectorLabelRequirement) SwaggerDoc() map[string]string { + return map_TopologySelectorLabelRequirement +} + +var map_TopologySelectorTerm = map[string]string{ + "": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.", + "matchLabelExpressions": "A list of topology selector requirements by labels.", +} + +func (TopologySelectorTerm) SwaggerDoc() map[string]string { + return map_TopologySelectorTerm +} + var map_Volume = map[string]string{ "": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", - "name": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "name": "Volume's name. Must be a DNS_LABEL and unique within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", } func (Volume) SwaggerDoc() map[string]string { return map_Volume } +var map_VolumeDevice = map[string]string{ + "": "volumeDevice describes a mapping of a raw block device within a container.", + "name": "name must match the name of a persistentVolumeClaim in the pod", + "devicePath": "devicePath is the path inside of the container that the device will be mapped to.", +} + +func (VolumeDevice) SwaggerDoc() map[string]string { + return map_VolumeDevice +} + var map_VolumeMount = map[string]string{ - "": "VolumeMount describes a mounting of a Volume within a container.", - "name": "This must match the Name of a Volume.", - "readOnly": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", - "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", - "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", + "": "VolumeMount describes a mounting of a Volume within a container.", + "name": "This must match the Name of a Volume.", + "readOnly": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", + "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", + "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", + "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationHostToContainer is used. This field is beta in 1.10.", } func (VolumeMount) SwaggerDoc() map[string]string { return map_VolumeMount } +var map_VolumeNodeAffinity = map[string]string{ + "": "VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.", + "required": "Required specifies hard node constraints that must be met.", +} + +func (VolumeNodeAffinity) SwaggerDoc() map[string]string { + return map_VolumeNodeAffinity +} + var map_VolumeProjection = map[string]string{ - "": "Projection that may be projected along with other supported volume types", - "secret": "information about the secret data to project", - "downwardAPI": "information about the downwardAPI data to project", - "configMap": "information about the configMap data to project", + "": "Projection that may be projected along with other supported volume types", + "secret": "information about the secret data to project", + "downwardAPI": "information about the downwardAPI data to project", + "configMap": "information about the configMap data to project", + "serviceAccountToken": "information about the serviceAccountToken data to project", } func (VolumeProjection) SwaggerDoc() map[string]string { @@ -1905,19 +2260,19 @@ func (VolumeProjection) SwaggerDoc() map[string]string { var map_VolumeSource = map[string]string{ "": "Represents the source of a volume to mount. Only one of its members may be specified.", - "hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath", - "emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. 
More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", - "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", - "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", - "gitRepo": "GitRepo represents a git repository at a particular revision.", - "secret": "Secret represents a secret that should populate this volume. More info: http://kubernetes.io/docs/user-guide/volumes#secrets", - "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: http://kubernetes.io/docs/user-guide/volumes#nfs", - "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", - "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", - "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "gitRepo": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + "secret": "Secret represents a secret that should populate this volume. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", + "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", @@ -1931,6 +2286,7 @@ var map_VolumeSource = map[string]string{ "projected": "Items for all in one resources secrets, configmaps, and downward API", "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", } func (VolumeSource) SwaggerDoc() map[string]string { @@ -1938,9 +2294,11 @@ func (VolumeSource) SwaggerDoc() map[string]string { } var map_VsphereVirtualDiskVolumeSource = map[string]string{ - "": "Represents a vSphere volume resource.", - "volumePath": "Path that identifies vSphere volume vmdk", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "": "Represents a vSphere volume resource.", + "volumePath": "Path that identifies vSphere volume vmdk", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "storagePolicyName": "Storage Policy Based Management (SPBM) profile name.", + "storagePolicyID": "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", } func (VsphereVirtualDiskVolumeSource) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..0501bbcb5 --- /dev/null +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -0,0 +1,6065 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource. +func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource { + if in == nil { + return nil + } + out := new(AWSElasticBlockStoreVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Affinity) DeepCopyInto(out *Affinity) { + *out = *in + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + if *in == nil { + *out = nil + } else { + *out = new(NodeAffinity) + (*in).DeepCopyInto(*out) + } + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + if *in == nil { + *out = nil + } else { + *out = new(PodAffinity) + (*in).DeepCopyInto(*out) + } + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + if *in == nil { + *out = nil + } else { + *out = new(PodAntiAffinity) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity. +func (in *Affinity) DeepCopy() *Affinity { + if in == nil { + return nil + } + out := new(Affinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume. +func (in *AttachedVolume) DeepCopy() *AttachedVolume { + if in == nil { + return nil + } + out := new(AttachedVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvoidPods) DeepCopyInto(out *AvoidPods) { + *out = *in + if in.PreferAvoidPods != nil { + in, out := &in.PreferAvoidPods, &out.PreferAvoidPods + *out = make([]PreferAvoidPodsEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods. +func (in *AvoidPods) DeepCopy() *AvoidPods { + if in == nil { + return nil + } + out := new(AvoidPods) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) { + *out = *in + if in.CachingMode != nil { + in, out := &in.CachingMode, &out.CachingMode + if *in == nil { + *out = nil + } else { + *out = new(AzureDataDiskCachingMode) + **out = **in + } + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + if *in == nil { + *out = nil + } else { + *out = new(AzureDataDiskKind) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource. +func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource { + if in == nil { + return nil + } + out := new(AzureDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistentVolumeSource) { + *out = *in + if in.SecretNamespace != nil { + in, out := &in.SecretNamespace, &out.SecretNamespace + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilePersistentVolumeSource. +func (in *AzureFilePersistentVolumeSource) DeepCopy() *AzureFilePersistentVolumeSource { + if in == nil { + return nil + } + out := new(AzureFilePersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFileVolumeSource) DeepCopyInto(out *AzureFileVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFileVolumeSource. +func (in *AzureFileVolumeSource) DeepCopy() *AzureFileVolumeSource { + if in == nil { + return nil + } + out := new(AzureFileVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Binding) DeepCopyInto(out *Binding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Target = in.Target + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding. +func (in *Binding) DeepCopy() *Binding { + if in == nil { + return nil + } + out := new(Binding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Binding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) { + *out = *in + if in.VolumeAttributes != nil { + in, out := &in.VolumeAttributes, &out.VolumeAttributes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ControllerPublishSecretRef != nil { + in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.NodeStageSecretRef != nil { + in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.NodePublishSecretRef != nil { + in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource. +func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Capabilities) DeepCopyInto(out *Capabilities) { + *out = *in + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + if in.Drop != nil { + in, out := &in.Drop, &out.Drop + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities. +func (in *Capabilities) DeepCopy() *Capabilities { + if in == nil { + return nil + } + out := new(Capabilities) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolumeSource) { + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSPersistentVolumeSource. +func (in *CephFSPersistentVolumeSource) DeepCopy() *CephFSPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CephFSPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) { + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSVolumeSource. 
+func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource { + if in == nil { + return nil + } + out := new(CephFSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CinderPersistentVolumeSource) DeepCopyInto(out *CinderPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderPersistentVolumeSource. +func (in *CinderPersistentVolumeSource) DeepCopy() *CinderPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CinderPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderVolumeSource. +func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource { + if in == nil { + return nil + } + out := new(CinderVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig. +func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { + if in == nil { + return nil + } + out := new(ClientIPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentCondition. +func (in *ComponentCondition) DeepCopy() *ComponentCondition { + if in == nil { + return nil + } + out := new(ComponentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ComponentCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus. +func (in *ComponentStatus) DeepCopy() *ComponentStatus { + if in == nil { + return nil + } + out := new(ComponentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ComponentStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComponentStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatusList. +func (in *ComponentStatusList) DeepCopy() *ComponentStatusList { + if in == nil { + return nil + } + out := new(ComponentStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComponentStatusList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMap) DeepCopyInto(out *ConfigMap) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.BinaryData != nil { + in, out := &in.BinaryData, &out.BinaryData + *out = make(map[string][]byte, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = make([]byte, len(val)) + copy((*out)[key], val) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMap. +func (in *ConfigMap) DeepCopy() *ConfigMap { + if in == nil { + return nil + } + out := new(ConfigMap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigMap) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource. +func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource { + if in == nil { + return nil + } + out := new(ConfigMapEnvSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeySelector. 
+func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { + if in == nil { + return nil + } + out := new(ConfigMapKeySelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigMap, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapList. +func (in *ConfigMapList) DeepCopy() *ConfigMapList { + if in == nil { + return nil + } + out := new(ConfigMapList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigMapList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapNodeConfigSource) DeepCopyInto(out *ConfigMapNodeConfigSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNodeConfigSource. +func (in *ConfigMapNodeConfigSource) DeepCopy() *ConfigMapNodeConfigSource { + if in == nil { + return nil + } + out := new(ConfigMapNodeConfigSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapProjection. +func (in *ConfigMapProjection) DeepCopy() *ConfigMapProjection { + if in == nil { + return nil + } + out := new(ConfigMapProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource. 
+func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource { + if in == nil { + return nil + } + out := new(ConfigMapVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Container) DeepCopyInto(out *Container) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + if *in == nil { + *out = nil + } else { + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + if *in == nil { + *out = nil + } else { + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + if *in == nil { + *out = nil + } else { + *out = new(Lifecycle) + (*in).DeepCopyInto(*out) + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + if *in == nil { + *out = nil + } else { + *out = new(SecurityContext) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container. +func (in *Container) DeepCopy() *Container { + if in == nil { + return nil + } + out := new(Container) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerImage) DeepCopyInto(out *ContainerImage) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerImage. +func (in *ContainerImage) DeepCopy() *ContainerImage { + if in == nil { + return nil + } + out := new(ContainerImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerPort) DeepCopyInto(out *ContainerPort) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPort. +func (in *ContainerPort) DeepCopy() *ContainerPort { + if in == nil { + return nil + } + out := new(ContainerPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ContainerState) DeepCopyInto(out *ContainerState) { + *out = *in + if in.Waiting != nil { + in, out := &in.Waiting, &out.Waiting + if *in == nil { + *out = nil + } else { + *out = new(ContainerStateWaiting) + **out = **in + } + } + if in.Running != nil { + in, out := &in.Running, &out.Running + if *in == nil { + *out = nil + } else { + *out = new(ContainerStateRunning) + (*in).DeepCopyInto(*out) + } + } + if in.Terminated != nil { + in, out := &in.Terminated, &out.Terminated + if *in == nil { + *out = nil + } else { + *out = new(ContainerStateTerminated) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerState. +func (in *ContainerState) DeepCopy() *ContainerState { + if in == nil { + return nil + } + out := new(ContainerState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateRunning) DeepCopyInto(out *ContainerStateRunning) { + *out = *in + in.StartedAt.DeepCopyInto(&out.StartedAt) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateRunning. +func (in *ContainerStateRunning) DeepCopy() *ContainerStateRunning { + if in == nil { + return nil + } + out := new(ContainerStateRunning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateTerminated) DeepCopyInto(out *ContainerStateTerminated) { + *out = *in + in.StartedAt.DeepCopyInto(&out.StartedAt) + in.FinishedAt.DeepCopyInto(&out.FinishedAt) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateTerminated. +func (in *ContainerStateTerminated) DeepCopy() *ContainerStateTerminated { + if in == nil { + return nil + } + out := new(ContainerStateTerminated) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateWaiting) DeepCopyInto(out *ContainerStateWaiting) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateWaiting. +func (in *ContainerStateWaiting) DeepCopy() *ContainerStateWaiting { + if in == nil { + return nil + } + out := new(ContainerStateWaiting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { + *out = *in + in.State.DeepCopyInto(&out.State) + in.LastTerminationState.DeepCopyInto(&out.LastTerminationState) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus. +func (in *ContainerStatus) DeepCopy() *ContainerStatus { + if in == nil { + return nil + } + out := new(ContainerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonEndpoint. 
+func (in *DaemonEndpoint) DeepCopy() *DaemonEndpoint { + if in == nil { + return nil + } + out := new(DaemonEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIProjection) DeepCopyInto(out *DownwardAPIProjection) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIProjection. +func (in *DownwardAPIProjection) DeepCopy() *DownwardAPIProjection { + if in == nil { + return nil + } + out := new(DownwardAPIProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) { + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectFieldSelector) + **out = **in + } + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + if *in == nil { + *out = nil + } else { + *out = new(ResourceFieldSelector) + (*in).DeepCopyInto(*out) + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeFile. +func (in *DownwardAPIVolumeFile) DeepCopy() *DownwardAPIVolumeFile { + if in == nil { + return nil + } + out := new(DownwardAPIVolumeFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeSource. +func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource { + if in == nil { + return nil + } + out := new(DownwardAPIVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) { + *out = *in + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + if *in == nil { + *out = nil + } else { + x := (*in).DeepCopy() + *out = &x + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirVolumeSource. +func (in *EmptyDirVolumeSource) DeepCopy() *EmptyDirVolumeSource { + if in == nil { + return nil + } + out := new(EmptyDirVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) { + *out = *in + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress. +func (in *EndpointAddress) DeepCopy() *EndpointAddress { + if in == nil { + return nil + } + out := new(EndpointAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. +func (in *EndpointPort) DeepCopy() *EndpointPort { + if in == nil { + return nil + } + out := new(EndpointPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotReadyAddresses != nil { + in, out := &in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSubset. +func (in *EndpointSubset) DeepCopy() *EndpointSubset { + if in == nil { + return nil + } + out := new(EndpointSubset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoints) DeepCopyInto(out *Endpoints) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]EndpointSubset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints. +func (in *Endpoints) DeepCopy() *Endpoints { + if in == nil { + return nil + } + out := new(Endpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Endpoints) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointsList) DeepCopyInto(out *EndpointsList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoints, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsList. +func (in *EndpointsList) DeepCopy() *EndpointsList { + if in == nil { + return nil + } + out := new(EndpointsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) { + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + if *in == nil { + *out = nil + } else { + *out = new(ConfigMapEnvSource) + (*in).DeepCopyInto(*out) + } + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretEnvSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvFromSource. +func (in *EnvFromSource) DeepCopy() *EnvFromSource { + if in == nil { + return nil + } + out := new(EnvFromSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVar) DeepCopyInto(out *EnvVar) { + *out = *in + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + if *in == nil { + *out = nil + } else { + *out = new(EnvVarSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. +func (in *EnvVar) DeepCopy() *EnvVar { + if in == nil { + return nil + } + out := new(EnvVar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) { + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectFieldSelector) + **out = **in + } + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + if *in == nil { + *out = nil + } else { + *out = new(ResourceFieldSelector) + (*in).DeepCopyInto(*out) + } + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + if *in == nil { + *out = nil + } else { + *out = new(ConfigMapKeySelector) + (*in).DeepCopyInto(*out) + } + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + if *in == nil { + *out = nil + } else { + *out = new(SecretKeySelector) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSource. +func (in *EnvVarSource) DeepCopy() *EnvVarSource { + if in == nil { + return nil + } + out := new(EnvVarSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.InvolvedObject = in.InvolvedObject + out.Source = in.Source + in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp) + in.LastTimestamp.DeepCopyInto(&out.LastTimestamp) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + if *in == nil { + *out = nil + } else { + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + } + if in.Related != nil { + in, out := &in.Related, &out.Related + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSource) DeepCopyInto(out *EventSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSource. +func (in *EventSource) DeepCopy() *EventSource { + if in == nil { + return nil + } + out := new(EventSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecAction) DeepCopyInto(out *ExecAction) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecAction. 
+func (in *ExecAction) DeepCopy() *ExecAction { + if in == nil { + return nil + } + out := new(ExecAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) { + *out = *in + if in.TargetWWNs != nil { + in, out := &in.TargetWWNs, &out.TargetWWNs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.WWIDs != nil { + in, out := &in.WWIDs, &out.WWIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FCVolumeSource. +func (in *FCVolumeSource) DeepCopy() *FCVolumeSource { + if in == nil { + return nil + } + out := new(FCVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexPersistentVolumeSource. +func (in *FlexPersistentVolumeSource) DeepCopy() *FlexPersistentVolumeSource { + if in == nil { + return nil + } + out := new(FlexPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexVolumeSource. +func (in *FlexVolumeSource) DeepCopy() *FlexVolumeSource { + if in == nil { + return nil + } + out := new(FlexVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlockerVolumeSource) DeepCopyInto(out *FlockerVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlockerVolumeSource. +func (in *FlockerVolumeSource) DeepCopy() *FlockerVolumeSource { + if in == nil { + return nil + } + out := new(FlockerVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCEPersistentDiskVolumeSource) DeepCopyInto(out *GCEPersistentDiskVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEPersistentDiskVolumeSource. 
+func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSource { + if in == nil { + return nil + } + out := new(GCEPersistentDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoVolumeSource. +func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { + if in == nil { + return nil + } + out := new(GitRepoVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsVolumeSource. +func (in *GlusterfsVolumeSource) DeepCopy() *GlusterfsVolumeSource { + if in == nil { + return nil + } + out := new(GlusterfsVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPGetAction) DeepCopyInto(out *HTTPGetAction) { + *out = *in + out.Port = in.Port + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPGetAction. +func (in *HTTPGetAction) DeepCopy() *HTTPGetAction { + if in == nil { + return nil + } + out := new(HTTPGetAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. +func (in *HTTPHeader) DeepCopy() *HTTPHeader { + if in == nil { + return nil + } + out := new(HTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Handler) DeepCopyInto(out *Handler) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + if *in == nil { + *out = nil + } else { + *out = new(ExecAction) + (*in).DeepCopyInto(*out) + } + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + if *in == nil { + *out = nil + } else { + *out = new(HTTPGetAction) + (*in).DeepCopyInto(*out) + } + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + if *in == nil { + *out = nil + } else { + *out = new(TCPSocketAction) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler. +func (in *Handler) DeepCopy() *Handler { + if in == nil { + return nil + } + out := new(Handler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostAlias) DeepCopyInto(out *HostAlias) { + *out = *in + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias. +func (in *HostAlias) DeepCopy() *HostAlias { + if in == nil { + return nil + } + out := new(HostAlias) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + if *in == nil { + *out = nil + } else { + *out = new(HostPathType) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathVolumeSource. +func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource { + if in == nil { + return nil + } + out := new(HostPathVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource. +func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIVolumeSource. +func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyToPath) DeepCopyInto(out *KeyToPath) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyToPath. 
+func (in *KeyToPath) DeepCopy() *KeyToPath { + if in == nil { + return nil + } + out := new(KeyToPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Lifecycle) DeepCopyInto(out *Lifecycle) { + *out = *in + if in.PostStart != nil { + in, out := &in.PostStart, &out.PostStart + if *in == nil { + *out = nil + } else { + *out = new(Handler) + (*in).DeepCopyInto(*out) + } + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + if *in == nil { + *out = nil + } else { + *out = new(Handler) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lifecycle. +func (in *Lifecycle) DeepCopy() *Lifecycle { + if in == nil { + return nil + } + out := new(Lifecycle) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRange) DeepCopyInto(out *LimitRange) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRange. +func (in *LimitRange) DeepCopy() *LimitRange { + if in == nil { + return nil + } + out := new(LimitRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LimitRange) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRangeItem) DeepCopyInto(out *LimitRangeItem) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.DefaultRequest != nil { + in, out := &in.DefaultRequest, &out.DefaultRequest + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxLimitRequestRatio != nil { + in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeItem. +func (in *LimitRangeItem) DeepCopy() *LimitRangeItem { + if in == nil { + return nil + } + out := new(LimitRangeItem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LimitRange, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeList. +func (in *LimitRangeList) DeepCopy() *LimitRangeList { + if in == nil { + return nil + } + out := new(LimitRangeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LimitRangeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRangeSpec) DeepCopyInto(out *LimitRangeSpec) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make([]LimitRangeItem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeSpec. +func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { + if in == nil { + return nil + } + out := new(LimitRangeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *List) DeepCopyInto(out *List) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. +func (in *List) DeepCopy() *List { + if in == nil { + return nil + } + out := new(List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerIngress. +func (in *LoadBalancerIngress) DeepCopy() *LoadBalancerIngress { + if in == nil { + return nil + } + out := new(LoadBalancerIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]LoadBalancerIngress, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus. 
+func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus { + if in == nil { + return nil + } + out := new(LoadBalancerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. +func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { + if in == nil { + return nil + } + out := new(LocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalVolumeSource) DeepCopyInto(out *LocalVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalVolumeSource. +func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource { + if in == nil { + return nil + } + out := new(LocalVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSVolumeSource. +func (in *NFSVolumeSource) DeepCopy() *NFSVolumeSource { + if in == nil { + return nil + } + out := new(NFSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Namespace) DeepCopyInto(out *Namespace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace. +func (in *Namespace) DeepCopy() *Namespace { + if in == nil { + return nil + } + out := new(Namespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Namespace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceList) DeepCopyInto(out *NamespaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Namespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList. +func (in *NamespaceList) DeepCopy() *NamespaceList { + if in == nil { + return nil + } + out := new(NamespaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NamespaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NamespaceSpec) DeepCopyInto(out *NamespaceSpec) { + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]FinalizerName, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSpec. +func (in *NamespaceSpec) DeepCopy() *NamespaceSpec { + if in == nil { + return nil + } + out := new(NamespaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceStatus. +func (in *NamespaceStatus) DeepCopy() *NamespaceStatus { + if in == nil { + return nil + } + out := new(NamespaceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Node) DeepCopyInto(out *Node) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Node) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeAddress) DeepCopyInto(out *NodeAddress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress. +func (in *NodeAddress) DeepCopy() *NodeAddress { + if in == nil { + return nil + } + out := new(NodeAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) { + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + if *in == nil { + *out = nil + } else { + *out = new(NodeSelector) + (*in).DeepCopyInto(*out) + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]PreferredSchedulingTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinity. +func (in *NodeAffinity) DeepCopy() *NodeAffinity { + if in == nil { + return nil + } + out := new(NodeAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeCondition) DeepCopyInto(out *NodeCondition) { + *out = *in + in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCondition. +func (in *NodeCondition) DeepCopy() *NodeCondition { + if in == nil { + return nil + } + out := new(NodeCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeConfigSource) DeepCopyInto(out *NodeConfigSource) { + *out = *in + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + if *in == nil { + *out = nil + } else { + *out = new(ConfigMapNodeConfigSource) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigSource. +func (in *NodeConfigSource) DeepCopy() *NodeConfigSource { + if in == nil { + return nil + } + out := new(NodeConfigSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) { + *out = *in + if in.Assigned != nil { + in, out := &in.Assigned, &out.Assigned + if *in == nil { + *out = nil + } else { + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) + } + } + if in.Active != nil { + in, out := &in.Active, &out.Active + if *in == nil { + *out = nil + } else { + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) + } + } + if in.LastKnownGood != nil { + in, out := &in.LastKnownGood, &out.LastKnownGood + if *in == nil { + *out = nil + } else { + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigStatus. +func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus { + if in == nil { + return nil + } + out := new(NodeConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) { + *out = *in + out.KubeletEndpoint = in.KubeletEndpoint + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDaemonEndpoints. +func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { + if in == nil { + return nil + } + out := new(NodeDaemonEndpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeList) DeepCopyInto(out *NodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Node, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList. +func (in *NodeList) DeepCopy() *NodeList { + if in == nil { + return nil + } + out := new(NodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeProxyOptions) DeepCopyInto(out *NodeProxyOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProxyOptions. +func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions { + if in == nil { + return nil + } + out := new(NodeProxyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeResources) DeepCopyInto(out *NodeResources) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResources. +func (in *NodeResources) DeepCopy() *NodeResources { + if in == nil { + return nil + } + out := new(NodeResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelector) DeepCopyInto(out *NodeSelector) { + *out = *in + if in.NodeSelectorTerms != nil { + in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]NodeSelectorTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelector. +func (in *NodeSelector) DeepCopy() *NodeSelector { + if in == nil { + return nil + } + out := new(NodeSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelectorRequirement) DeepCopyInto(out *NodeSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorRequirement. +func (in *NodeSelectorRequirement) DeepCopy() *NodeSelectorRequirement { + if in == nil { + return nil + } + out := new(NodeSelectorRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelectorTerm) DeepCopyInto(out *NodeSelectorTerm) { + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MatchFields != nil { + in, out := &in.MatchFields, &out.MatchFields + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorTerm. 
+func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm { + if in == nil { + return nil + } + out := new(NodeSelectorTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSpec) DeepCopyInto(out *NodeSpec) { + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConfigSource != nil { + in, out := &in.ConfigSource, &out.ConfigSource + if *in == nil { + *out = nil + } else { + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec. +func (in *NodeSpec) DeepCopy() *NodeSpec { + if in == nil { + return nil + } + out := new(NodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NodeCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]NodeAddress, len(*in)) + copy(*out, *in) + } + out.DaemonEndpoints = in.DaemonEndpoints + out.NodeInfo = in.NodeInfo + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ContainerImage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumesInUse != nil { + in, out := &in.VolumesInUse, &out.VolumesInUse + *out = make([]UniqueVolumeName, len(*in)) + copy(*out, *in) + } + if in.VolumesAttached != nil { + in, out := &in.VolumesAttached, &out.VolumesAttached + *out = make([]AttachedVolume, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + if *in == nil { + *out = nil + } else { + *out = new(NodeConfigStatus) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. +func (in *NodeStatus) DeepCopy() *NodeStatus { + if in == nil { + return nil + } + out := new(NodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSystemInfo. +func (in *NodeSystemInfo) DeepCopy() *NodeSystemInfo { + if in == nil { + return nil + } + out := new(NodeSystemInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectFieldSelector) DeepCopyInto(out *ObjectFieldSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectFieldSelector. +func (in *ObjectFieldSelector) DeepCopy() *ObjectFieldSelector { + if in == nil { + return nil + } + out := new(ObjectFieldSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObjectReference) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolume. +func (in *PersistentVolume) DeepCopy() *PersistentVolume { + if in == nil { + return nil + } + out := new(PersistentVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentVolume) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaim) DeepCopyInto(out *PersistentVolumeClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaim. +func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim { + if in == nil { + return nil + } + out := new(PersistentVolumeClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimCondition) DeepCopyInto(out *PersistentVolumeClaimCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimCondition. 
+func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondition { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimList. +func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeMode) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSpec. +func (in *PersistentVolumeClaimSpec) DeepCopy() *PersistentVolumeClaimSpec { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimStatus) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PersistentVolumeClaimCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimStatus. 
+func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimVolumeSource.
+func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PersistentVolume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeList.
+func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
+	*out = *in
+	if in.GCEPersistentDisk != nil {
+		in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(GCEPersistentDiskVolumeSource)
+			**out = **in
+		}
+	}
+	if in.AWSElasticBlockStore != nil {
+		in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(AWSElasticBlockStoreVolumeSource)
+			**out = **in
+		}
+	}
+	if in.HostPath != nil {
+		in, out := &in.HostPath, &out.HostPath
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(HostPathVolumeSource)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.Glusterfs != nil {
+		in, out := &in.Glusterfs, &out.Glusterfs
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(GlusterfsVolumeSource)
+			**out = **in
+		}
+	}
+	if in.NFS != nil {
+		in, out := &in.NFS, &out.NFS
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(NFSVolumeSource)
+			**out = **in
+		}
+	}
+	if in.RBD != nil {
+		in, out := &in.RBD, &out.RBD
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(RBDPersistentVolumeSource)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.ISCSI != nil {
+		in, out := &in.ISCSI, &out.ISCSI
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(ISCSIPersistentVolumeSource)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.Cinder != nil {
+		in, out := &in.Cinder, &out.Cinder
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(CinderPersistentVolumeSource)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.CephFS != nil {
+		in, out := &in.CephFS, &out.CephFS
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(CephFSPersistentVolumeSource)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.FC != nil {
+		in, out := &in.FC, &out.FC
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(FCVolumeSource)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.Flocker != nil {
+		in, out := &in.Flocker, &out.Flocker
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(FlockerVolumeSource)
+			**out = **in
+		}
+	}
+	if in.FlexVolume != nil {
+		in, out := &in.FlexVolume, &out.FlexVolume
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(FlexPersistentVolumeSource)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.AzureFile != nil {
+		in, out := &in.AzureFile, &out.AzureFile
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(AzureFilePersistentVolumeSource)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.VsphereVolume != nil {
+		in, out := &in.VsphereVolume, &out.VsphereVolume
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(VsphereVirtualDiskVolumeSource)
+			**out = **in
+		}
+	}
+	if in.Quobyte != nil {
+		in, out := &in.Quobyte, &out.Quobyte
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(QuobyteVolumeSource)
+			**out = **in
+		}
+	}
+	if in.AzureDisk != nil {
+		in, out := &in.AzureDisk, &out.AzureDisk
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(AzureDiskVolumeSource)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.PhotonPersistentDisk != nil {
+		in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(PhotonPersistentDiskVolumeSource)
+			**out = **in
+		}
+	}
+	if in.PortworxVolume != nil {
+		in, out := &in.PortworxVolume, &out.PortworxVolume
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(PortworxVolumeSource)
+			**out = **in
+		}
+	}
+	if in.ScaleIO != nil {
+		in, out := &in.ScaleIO, &out.ScaleIO
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(ScaleIOPersistentVolumeSource)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.Local != nil {
+		in, out := &in.Local, &out.Local
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(LocalVolumeSource)
+			**out = **in
+		}
+	}
+	if in.StorageOS != nil {
+		in, out := &in.StorageOS, &out.StorageOS
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(StorageOSPersistentVolumeSource)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.CSI != nil {
+		in, out := &in.CSI, &out.CSI
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(CSIPersistentVolumeSource)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSource.
+func (in *PersistentVolumeSource) DeepCopy() *PersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
+	*out = *in
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	in.PersistentVolumeSource.DeepCopyInto(&out.PersistentVolumeSource)
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.ClaimRef != nil {
+		in, out := &in.ClaimRef, &out.ClaimRef
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(ObjectReference)
+			**out = **in
+		}
+	}
+	if in.MountOptions != nil {
+		in, out := &in.MountOptions, &out.MountOptions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.VolumeMode != nil {
+		in, out := &in.VolumeMode, &out.VolumeMode
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(PersistentVolumeMode)
+			**out = **in
+		}
+	}
+	if in.NodeAffinity != nil {
+		in, out := &in.NodeAffinity, &out.NodeAffinity
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(VolumeNodeAffinity)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSpec.
+func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeStatus.
+func (in *PersistentVolumeStatus) DeepCopy() *PersistentVolumeStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PhotonPersistentDiskVolumeSource) DeepCopyInto(out *PhotonPersistentDiskVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhotonPersistentDiskVolumeSource.
+func (in *PhotonPersistentDiskVolumeSource) DeepCopy() *PhotonPersistentDiskVolumeSource { + if in == nil { + return nil + } + out := new(PhotonPersistentDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Pod) DeepCopyInto(out *Pod) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod. +func (in *Pod) DeepCopy() *Pod { + if in == nil { + return nil + } + out := new(Pod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Pod) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodAffinity) DeepCopyInto(out *PodAffinity) { + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinity. +func (in *PodAffinity) DeepCopy() *PodAffinity { + if in == nil { + return nil + } + out := new(PodAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinityTerm. +func (in *PodAffinityTerm) DeepCopy() *PodAffinityTerm { + if in == nil { + return nil + } + out := new(PodAffinityTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodAntiAffinity) DeepCopyInto(out *PodAntiAffinity) { + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAntiAffinity. +func (in *PodAntiAffinity) DeepCopy() *PodAntiAffinity { + if in == nil { + return nil + } + out := new(PodAntiAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodAttachOptions) DeepCopyInto(out *PodAttachOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttachOptions. +func (in *PodAttachOptions) DeepCopy() *PodAttachOptions { + if in == nil { + return nil + } + out := new(PodAttachOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodAttachOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodCondition) DeepCopyInto(out *PodCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition. +func (in *PodCondition) DeepCopy() *PodCondition { + if in == nil { + return nil + } + out := new(PodCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Searches != nil { + in, out := &in.Searches, &out.Searches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]PodDNSConfigOption, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig. +func (in *PodDNSConfig) DeepCopy() *PodDNSConfig { + if in == nil { + return nil + } + out := new(PodDNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption. +func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption { + if in == nil { + return nil + } + out := new(PodDNSConfigOption) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExecOptions. +func (in *PodExecOptions) DeepCopy() *PodExecOptions { + if in == nil { + return nil + } + out := new(PodExecOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodExecOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodList) DeepCopyInto(out *PodList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pod, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList. +func (in *PodList) DeepCopy() *PodList { + if in == nil { + return nil + } + out := new(PodList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() + } + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogOptions. +func (in *PodLogOptions) DeepCopy() *PodLogOptions { + if in == nil { + return nil + } + out := new(PodLogOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PodLogOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]int32, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPortForwardOptions. +func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions { + if in == nil { + return nil + } + out := new(PodPortForwardOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodProxyOptions) DeepCopyInto(out *PodProxyOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProxyOptions. +func (in *PodProxyOptions) DeepCopy() *PodProxyOptions { + if in == nil { + return nil + } + out := new(PodProxyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodReadinessGate) DeepCopyInto(out *PodReadinessGate) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodReadinessGate. +func (in *PodReadinessGate) DeepCopy() *PodReadinessGate { + if in == nil { + return nil + } + out := new(PodReadinessGate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + if *in == nil { + *out = nil + } else { + *out = new(SELinuxOptions) + **out = **in + } + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]int64, len(*in)) + copy(*out, *in) + } + if in.FSGroup != nil { + in, out := &in.FSGroup, &out.FSGroup + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.Sysctls != nil { + in, out := &in.Sysctls, &out.Sysctls + *out = make([]Sysctl, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityContext. +func (in *PodSecurityContext) DeepCopy() *PodSecurityContext { + if in == nil { + return nil + } + out := new(PodSecurityContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSignature) DeepCopyInto(out *PodSignature) { + *out = *in + if in.PodController != nil { + in, out := &in.PodController, &out.PodController + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.OwnerReference) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSignature. +func (in *PodSignature) DeepCopy() *PodSignature { + if in == nil { + return nil + } + out := new(PodSignature) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodSpec) DeepCopyInto(out *PodSpec) { + *out = *in + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.ShareProcessNamespace != nil { + in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + if *in == nil { + *out = nil + } else { + *out = new(PodSecurityContext) + (*in).DeepCopyInto(*out) + } + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + if *in == nil { + *out = nil + } else { + *out = new(Affinity) + (*in).DeepCopyInto(*out) + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostAliases != nil { + in, out := &in.HostAliases, &out.HostAliases + *out = make([]HostAlias, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + if *in == nil { + *out = nil + } else { + *out = new(PodDNSConfig) + (*in).DeepCopyInto(*out) + } + } + if in.ReadinessGates != nil { + in, out := &in.ReadinessGates, &out.ReadinessGates + *out = make([]PodReadinessGate, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec. +func (in *PodSpec) DeepCopy() *PodSpec { + if in == nil { + return nil + } + out := new(PodSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodStatus) DeepCopyInto(out *PodStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PodCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() + } + } + if in.InitContainerStatuses != nil { + in, out := &in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerStatuses != nil { + in, out := &in.ContainerStatuses, &out.ContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus. +func (in *PodStatus) DeepCopy() *PodStatus { + if in == nil { + return nil + } + out := new(PodStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodStatusResult) DeepCopyInto(out *PodStatusResult) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusResult. +func (in *PodStatusResult) DeepCopy() *PodStatusResult { + if in == nil { + return nil + } + out := new(PodStatusResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodStatusResult) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTemplate) DeepCopyInto(out *PodTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate. +func (in *PodTemplate) DeepCopy() *PodTemplate { + if in == nil { + return nil + } + out := new(PodTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateList. +func (in *PodTemplateList) DeepCopy() *PodTemplateList { + if in == nil { + return nil + } + out := new(PodTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PodTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec. +func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec { + if in == nil { + return nil + } + out := new(PodTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxVolumeSource. +func (in *PortworxVolumeSource) DeepCopy() *PortworxVolumeSource { + if in == nil { + return nil + } + out := new(PortworxVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Preconditions) DeepCopyInto(out *Preconditions) { + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + if *in == nil { + *out = nil + } else { + *out = new(types.UID) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions. +func (in *Preconditions) DeepCopy() *Preconditions { + if in == nil { + return nil + } + out := new(Preconditions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreferAvoidPodsEntry) DeepCopyInto(out *PreferAvoidPodsEntry) { + *out = *in + in.PodSignature.DeepCopyInto(&out.PodSignature) + in.EvictionTime.DeepCopyInto(&out.EvictionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferAvoidPodsEntry. +func (in *PreferAvoidPodsEntry) DeepCopy() *PreferAvoidPodsEntry { + if in == nil { + return nil + } + out := new(PreferAvoidPodsEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreferredSchedulingTerm) DeepCopyInto(out *PreferredSchedulingTerm) { + *out = *in + in.Preference.DeepCopyInto(&out.Preference) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredSchedulingTerm. +func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm { + if in == nil { + return nil + } + out := new(PreferredSchedulingTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Probe) DeepCopyInto(out *Probe) { + *out = *in + in.Handler.DeepCopyInto(&out.Handler) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. +func (in *Probe) DeepCopy() *Probe { + if in == nil { + return nil + } + out := new(Probe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]VolumeProjection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectedVolumeSource. +func (in *ProjectedVolumeSource) DeepCopy() *ProjectedVolumeSource { + if in == nil { + return nil + } + out := new(ProjectedVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuobyteVolumeSource) DeepCopyInto(out *QuobyteVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuobyteVolumeSource. +func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource { + if in == nil { + return nil + } + out := new(QuobyteVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) { + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource. +func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource { + if in == nil { + return nil + } + out := new(RBDPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) { + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDVolumeSource. +func (in *RBDVolumeSource) DeepCopy() *RBDVolumeSource { + if in == nil { + return nil + } + out := new(RBDVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation. 
+func (in *RangeAllocation) DeepCopy() *RangeAllocation { + if in == nil { + return nil + } + out := new(RangeAllocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RangeAllocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationController) DeepCopyInto(out *ReplicationController) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationController. +func (in *ReplicationController) DeepCopy() *ReplicationController { + if in == nil { + return nil + } + out := new(ReplicationController) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicationController) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerCondition) DeepCopyInto(out *ReplicationControllerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerCondition. +func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondition { + if in == nil { + return nil + } + out := new(ReplicationControllerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerList. +func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList { + if in == nil { + return nil + } + out := new(ReplicationControllerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicationControllerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + if *in == nil { + *out = nil + } else { + *out = new(PodTemplateSpec) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerSpec. +func (in *ReplicationControllerSpec) DeepCopy() *ReplicationControllerSpec { + if in == nil { + return nil + } + out := new(ReplicationControllerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerStatus) DeepCopyInto(out *ReplicationControllerStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicationControllerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerStatus. +func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus { + if in == nil { + return nil + } + out := new(ReplicationControllerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) { + *out = *in + out.Divisor = in.Divisor.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFieldSelector. +func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector { + if in == nil { + return nil + } + out := new(ResourceFieldSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ResourceList) DeepCopyInto(out *ResourceList) { + { + in := &in + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList. +func (in ResourceList) DeepCopy() ResourceList { + if in == nil { + return nil + } + out := new(ResourceList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota. +func (in *ResourceQuota) DeepCopy() *ResourceQuota { + if in == nil { + return nil + } + out := new(ResourceQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ResourceQuota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceQuota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList. +func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList { + if in == nil { + return nil + } + out := new(ResourceQuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceQuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) { + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ResourceQuotaScope, len(*in)) + copy(*out, *in) + } + if in.ScopeSelector != nil { + in, out := &in.ScopeSelector, &out.ScopeSelector + if *in == nil { + *out = nil + } else { + *out = new(ScopeSelector) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec. +func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec { + if in == nil { + return nil + } + out := new(ResourceQuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) { + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus. +func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus { + if in == nil { + return nil + } + out := new(ResourceQuotaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements. 
+func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { + if in == nil { + return nil + } + out := new(ResourceRequirements) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxOptions. +func (in *SELinuxOptions) DeepCopy() *SELinuxOptions { + if in == nil { + return nil + } + out := new(SELinuxOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource. +func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ScaleIOPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOVolumeSource. +func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource { + if in == nil { + return nil + } + out := new(ScaleIOVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopeSelector) DeepCopyInto(out *ScopeSelector) { + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]ScopedResourceSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeSelector. +func (in *ScopeSelector) DeepCopy() *ScopeSelector { + if in == nil { + return nil + } + out := new(ScopeSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopedResourceSelectorRequirement) DeepCopyInto(out *ScopedResourceSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopedResourceSelectorRequirement. +func (in *ScopedResourceSelectorRequirement) DeepCopy() *ScopedResourceSelectorRequirement { + if in == nil { + return nil + } + out := new(ScopedResourceSelectorRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Secret) DeepCopyInto(out *Secret) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string][]byte, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = make([]byte, len(val)) + copy((*out)[key], val) + } + } + } + if in.StringData != nil { + in, out := &in.StringData, &out.StringData + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret. +func (in *Secret) DeepCopy() *Secret { + if in == nil { + return nil + } + out := new(Secret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Secret) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource. +func (in *SecretEnvSource) DeepCopy() *SecretEnvSource { + if in == nil { + return nil + } + out := new(SecretEnvSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector. +func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { + if in == nil { + return nil + } + out := new(SecretKeySelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretList) DeepCopyInto(out *SecretList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList. +func (in *SecretList) DeepCopy() *SecretList { + if in == nil { + return nil + } + out := new(SecretList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretProjection) DeepCopyInto(out *SecretProjection) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection. +func (in *SecretProjection) DeepCopy() *SecretProjection { + if in == nil { + return nil + } + out := new(SecretProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretReference) DeepCopyInto(out *SecretReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. +func (in *SecretReference) DeepCopy() *SecretReference { + if in == nil { + return nil + } + out := new(SecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource. +func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource { + if in == nil { + return nil + } + out := new(SecretVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + if *in == nil { + *out = nil + } else { + *out = new(Capabilities) + (*in).DeepCopyInto(*out) + } + } + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + if *in == nil { + *out = nil + } else { + *out = new(SELinuxOptions) + **out = **in + } + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.ReadOnlyRootFilesystem != nil { + in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.AllowPrivilegeEscalation != nil { + in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext. +func (in *SecurityContext) DeepCopy() *SecurityContext { + if in == nil { + return nil + } + out := new(SecurityContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SerializedReference) DeepCopyInto(out *SerializedReference) { + *out = *in + out.TypeMeta = in.TypeMeta + out.Reference = in.Reference + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference. +func (in *SerializedReference) DeepCopy() *SerializedReference { + if in == nil { + return nil + } + out := new(SerializedReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SerializedReference) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Service) DeepCopyInto(out *Service) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { + if in == nil { + return nil + } + out := new(Service) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Service) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount. +func (in *ServiceAccount) DeepCopy() *ServiceAccount { + if in == nil { + return nil + } + out := new(ServiceAccount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccount) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList. +func (in *ServiceAccountList) DeepCopy() *ServiceAccountList { + if in == nil { + return nil + } + out := new(ServiceAccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountTokenProjection) DeepCopyInto(out *ServiceAccountTokenProjection) { + *out = *in + if in.ExpirationSeconds != nil { + in, out := &in.ExpirationSeconds, &out.ExpirationSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenProjection. +func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjection { + if in == nil { + return nil + } + out := new(ServiceAccountTokenProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceList) DeepCopyInto(out *ServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. 
+func (in *ServiceList) DeepCopy() *ServiceList { + if in == nil { + return nil + } + out := new(ServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicePort) DeepCopyInto(out *ServicePort) { + *out = *in + out.TargetPort = in.TargetPort + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort. +func (in *ServicePort) DeepCopy() *ServicePort { + if in == nil { + return nil + } + out := new(ServicePort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions. +func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions { + if in == nil { + return nil + } + out := new(ServiceProxyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ServicePort, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalIPs != nil { + in, out := &in.ExternalIPs, &out.ExternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SessionAffinityConfig != nil { + in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig + if *in == nil { + *out = nil + } else { + *out = new(SessionAffinityConfig) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. +func (in *ServiceSpec) DeepCopy() *ServiceSpec { + if in == nil { + return nil + } + out := new(ServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { + *out = *in + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. +func (in *ServiceStatus) DeepCopy() *ServiceStatus { + if in == nil { + return nil + } + out := new(ServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) { + *out = *in + if in.ClientIP != nil { + in, out := &in.ClientIP, &out.ClientIP + if *in == nil { + *out = nil + } else { + *out = new(ClientIPConfig) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig. +func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { + if in == nil { + return nil + } + out := new(SessionAffinityConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource. +func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource { + if in == nil { + return nil + } + out := new(StorageOSPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource. +func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource { + if in == nil { + return nil + } + out := new(StorageOSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Sysctl) DeepCopyInto(out *Sysctl) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl. +func (in *Sysctl) DeepCopy() *Sysctl { + if in == nil { + return nil + } + out := new(Sysctl) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) { + *out = *in + out.Port = in.Port + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction. +func (in *TCPSocketAction) DeepCopy() *TCPSocketAction { + if in == nil { + return nil + } + out := new(TCPSocketAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Taint) DeepCopyInto(out *Taint) { + *out = *in + if in.TimeAdded != nil { + in, out := &in.TimeAdded, &out.TimeAdded + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint. 
+func (in *Taint) DeepCopy() *Taint { + if in == nil { + return nil + } + out := new(Taint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Toleration) DeepCopyInto(out *Toleration) { + *out = *in + if in.TolerationSeconds != nil { + in, out := &in.TolerationSeconds, &out.TolerationSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. +func (in *Toleration) DeepCopy() *Toleration { + if in == nil { + return nil + } + out := new(Toleration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySelectorLabelRequirement) DeepCopyInto(out *TopologySelectorLabelRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorLabelRequirement. +func (in *TopologySelectorLabelRequirement) DeepCopy() *TopologySelectorLabelRequirement { + if in == nil { + return nil + } + out := new(TopologySelectorLabelRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySelectorTerm) DeepCopyInto(out *TopologySelectorTerm) { + *out = *in + if in.MatchLabelExpressions != nil { + in, out := &in.MatchLabelExpressions, &out.MatchLabelExpressions + *out = make([]TopologySelectorLabelRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorTerm. +func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm { + if in == nil { + return nil + } + out := new(TopologySelectorTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + in.VolumeSource.DeepCopyInto(&out.VolumeSource) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice. +func (in *VolumeDevice) DeepCopy() *VolumeDevice { + if in == nil { + return nil + } + out := new(VolumeDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { + *out = *in + if in.MountPropagation != nil { + in, out := &in.MountPropagation, &out.MountPropagation + if *in == nil { + *out = nil + } else { + *out = new(MountPropagationMode) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount. +func (in *VolumeMount) DeepCopy() *VolumeMount { + if in == nil { + return nil + } + out := new(VolumeMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) { + *out = *in + if in.Required != nil { + in, out := &in.Required, &out.Required + if *in == nil { + *out = nil + } else { + *out = new(NodeSelector) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeAffinity. +func (in *VolumeNodeAffinity) DeepCopy() *VolumeNodeAffinity { + if in == nil { + return nil + } + out := new(VolumeNodeAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + if *in == nil { + *out = nil + } else { + *out = new(SecretProjection) + (*in).DeepCopyInto(*out) + } + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + if *in == nil { + *out = nil + } else { + *out = new(DownwardAPIProjection) + (*in).DeepCopyInto(*out) + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + if *in == nil { + *out = nil + } else { + *out = new(ConfigMapProjection) + (*in).DeepCopyInto(*out) + } + } + if in.ServiceAccountToken != nil { + in, out := &in.ServiceAccountToken, &out.ServiceAccountToken + if *in == nil { + *out = nil + } else { + *out = new(ServiceAccountTokenProjection) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection. +func (in *VolumeProjection) DeepCopy() *VolumeProjection { + if in == nil { + return nil + } + out := new(VolumeProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { + *out = *in + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + if *in == nil { + *out = nil + } else { + *out = new(HostPathVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + if *in == nil { + *out = nil + } else { + *out = new(EmptyDirVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + if *in == nil { + *out = nil + } else { + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + if *in == nil { + *out = nil + } else { + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + } + if in.GitRepo != nil { + in, out := &in.GitRepo, &out.GitRepo + if *in == nil { + *out = nil + } else { + *out = new(GitRepoVolumeSource) + **out = **in + } + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + if *in == nil { + *out = nil + } else { + *out = new(SecretVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + if *in == nil { + *out = nil + } else { + *out = new(NFSVolumeSource) + **out = **in + } + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + if *in == nil { + *out = nil + } else { + *out = new(ISCSIVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + if *in == nil { + *out = nil + } else { + *out = new(GlusterfsVolumeSource) + **out = **in + } + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in + } + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + if *in == nil { + *out = nil + } else { + *out = new(RBDVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + if *in == nil { + *out = nil + } else { + *out = new(FlexVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + if *in == nil { + *out = nil + } else { + *out = new(CinderVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + if *in == nil { + *out = nil + } else { + *out = new(CephFSVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + if *in == nil { + *out = nil + } else { + *out = new(FlockerVolumeSource) + **out = **in + } + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + if *in == nil { + *out = nil + } else { + *out = new(DownwardAPIVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + if *in == nil { + *out = nil + } else { + *out = new(FCVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + if *in == nil { + *out = nil + } else { + *out = new(AzureFileVolumeSource) + **out = **in + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + if *in == nil { + *out = nil + } else { + *out = new(ConfigMapVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + if *in == nil { + *out = nil + } else { 
+ *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + if *in == nil { + *out = nil + } else { + *out = new(QuobyteVolumeSource) + **out = **in + } + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + if *in == nil { + *out = nil + } else { + *out = new(AzureDiskVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + if *in == nil { + *out = nil + } else { + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + if *in == nil { + *out = nil + } else { + *out = new(ProjectedVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + if *in == nil { + *out = nil + } else { + *out = new(PortworxVolumeSource) + **out = **in + } + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + if *in == nil { + *out = nil + } else { + *out = new(ScaleIOVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.StorageOS != nil { + in, out := &in.StorageOS, &out.StorageOS + if *in == nil { + *out = nil + } else { + *out = new(StorageOSVolumeSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource. +func (in *VolumeSource) DeepCopy() *VolumeSource { + if in == nil { + return nil + } + out := new(VolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource. +func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource { + if in == nil { + return nil + } + out := new(VsphereVirtualDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) { + *out = *in + in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm. +func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { + if in == nil { + return nil + } + out := new(WeightedPodAffinityTerm) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/events/v1beta1/doc.go b/vendor/k8s.io/api/events/v1beta1/doc.go new file mode 100644 index 000000000..8b1a3e312 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// +groupName=events.k8s.io +package v1beta1 // import "k8s.io/api/events/v1beta1" diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go new file mode 100644 index 000000000..b0e313c4b --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -0,0 +1,1306 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto + + It has these top-level messages: + Event + EventList + EventSeries +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*Event)(nil), "k8s.io.api.events.v1beta1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.api.events.v1beta1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.events.v1beta1.EventSeries") +} +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) + n2, err := m.EventTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.Series != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) + n3, err := m.Series.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i += copy(dAtA[i:], m.ReportingController) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i += copy(dAtA[i:], m.ReportingInstance) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i += copy(dAtA[i:], m.Action) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Regarding.Size())) + n4, err := m.Regarding.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if m.Related != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) + n5, err := m.Related.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) + i += copy(dAtA[i:], m.Note) + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedSource.Size())) + n6, err := m.DeprecatedSource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedFirstTimestamp.Size())) + n7, err := m.DeprecatedFirstTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedLastTimestamp.Size())) + n8, err := m.DeprecatedLastTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + dAtA[i] = 0x78 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) + 
return i, nil +} + +func (m *EventList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EventSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) + n10, err := m.LastObservedTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i += copy(dAtA[i:], m.State) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Event) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.EventTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ReportingController) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ReportingInstance) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Regarding.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Related != nil { + l = m.Related.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Note) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.DeprecatedSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.DeprecatedFirstTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.DeprecatedLastTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.DeprecatedCount)) + return n +} + +func (m *EventList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EventSeries) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Count)) + l = m.LastObservedTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, + `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, + `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Regarding:` + strings.Replace(strings.Replace(this.Regarding.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1) + `,`, + `Note:` + fmt.Sprintf("%v", this.Note) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `DeprecatedSource:` + strings.Replace(strings.Replace(this.DeprecatedSource.String(), "EventSource", "k8s_io_api_core_v1.EventSource", 1), `&`, ``, 1) + `,`, + `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedFirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedLastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedCount:` + fmt.Sprintf("%v", this.DeprecatedCount) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EventSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSeries{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Series == nil { + m.Series = &EventSeries{} + } + if err := m.Series.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingController", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingController = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingInstance", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingInstance = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Regarding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Regarding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Related", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Related == nil { + m.Related = &k8s_io_api_core_v1.ObjectReference{} + } + if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Note", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Note = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeprecatedSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedFirstTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeprecatedFirstTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedLastTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeprecatedLastTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedCount", wireType) + } + m.DeprecatedCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DeprecatedCount |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) 
+ wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastObservedTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastObservedTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = EventSeriesState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 814 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x16, 0x13, 0x4b, 0xb6, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x3a, 0x40, + 0x60, 0x14, 0x08, 0x59, 0xa7, 0x45, 0xdb, 0x6b, 0x18, 0xbb, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, + 0x15, 0x3d, 0x64, 0x45, 0x4d, 0xe8, 0xad, 0xa5, 0x5d, 0x62, 0x77, 0x29, 0xc0, 0xb7, 0x5e, 0x0a, + 
0xf4, 0xd8, 0x67, 0xe8, 0x13, 0xf4, 0x31, 0x7c, 0xcc, 0x31, 0x27, 0xa1, 0x66, 0xdf, 0xa2, 0xa7, + 0x82, 0xcb, 0x95, 0x28, 0x8b, 0x16, 0xec, 0x22, 0x37, 0x72, 0xe6, 0xfb, 0x99, 0x19, 0x0e, 0x07, + 0x45, 0xe7, 0xdf, 0xe9, 0x80, 0xcb, 0xf0, 0x3c, 0x1b, 0x80, 0x12, 0x60, 0x40, 0x87, 0x13, 0x10, + 0x43, 0xa9, 0x42, 0x97, 0x60, 0x29, 0x0f, 0x61, 0x02, 0xc2, 0xe8, 0x70, 0xb2, 0x3f, 0x00, 0xc3, + 0xf6, 0xc3, 0x04, 0x04, 0x28, 0x66, 0x60, 0x18, 0xa4, 0x4a, 0x1a, 0x89, 0x9f, 0x94, 0xd0, 0x80, + 0xa5, 0x3c, 0x28, 0xa1, 0x81, 0x83, 0xee, 0x3c, 0x4f, 0xb8, 0x39, 0xcb, 0x06, 0x41, 0x2c, 0xc7, + 0x61, 0x22, 0x13, 0x19, 0x5a, 0xc6, 0x20, 0x7b, 0x6f, 0xdf, 0xec, 0x8b, 0x7d, 0x2a, 0x95, 0x76, + 0x76, 0x17, 0x4c, 0x63, 0xa9, 0x20, 0x9c, 0xd4, 0xdc, 0x76, 0xbe, 0xae, 0x30, 0x63, 0x16, 0x9f, + 0x71, 0x01, 0xea, 0x22, 0x4c, 0xcf, 0x93, 0x22, 0xa0, 0xc3, 0x31, 0x18, 0x76, 0x13, 0x2b, 0x5c, + 0xc5, 0x52, 0x99, 0x30, 0x7c, 0x0c, 0x35, 0xc2, 0x37, 0xb7, 0x11, 0x74, 0x7c, 0x06, 0x63, 0x56, + 0xe3, 0x7d, 0xb5, 0x8a, 0x97, 0x19, 0x3e, 0x0a, 0xb9, 0x30, 0xda, 0xa8, 0x65, 0xd2, 0xee, 0x9f, + 0x6d, 0xd4, 0x3c, 0x2c, 0x26, 0x87, 0xdf, 0xa1, 0x8d, 0xa2, 0x85, 0x21, 0x33, 0x8c, 0x78, 0x7d, + 0x6f, 0xaf, 0xf3, 0xe2, 0xcb, 0xa0, 0x1a, 0xef, 0x5c, 0x31, 0x48, 0xcf, 0x93, 0x22, 0xa0, 0x83, + 0x02, 0x1d, 0x4c, 0xf6, 0x83, 0xb7, 0x83, 0x5f, 0x20, 0x36, 0xc7, 0x60, 0x58, 0x84, 0x2f, 0xa7, + 0xbd, 0x46, 0x3e, 0xed, 0xa1, 0x2a, 0x46, 0xe7, 0xaa, 0xf8, 0x1d, 0x6a, 0xdb, 0x8f, 0x74, 0xca, + 0xc7, 0x40, 0xee, 0x59, 0x8b, 0xf0, 0x6e, 0x16, 0xc7, 0x3c, 0x56, 0xb2, 0xa0, 0x45, 0x5b, 0xce, + 0xa1, 0x7d, 0x38, 0x53, 0xa2, 0x95, 0x28, 0x7e, 0x83, 0x5a, 0x1a, 0x14, 0x07, 0x4d, 0xee, 0x5b, + 0xf9, 0x67, 0xc1, 0xca, 0x05, 0x09, 0xac, 0xc0, 0x89, 0x45, 0x47, 0x28, 0x9f, 0xf6, 0x5a, 0xe5, + 0x33, 0x75, 0x0a, 0xf8, 0x18, 0x3d, 0x56, 0x90, 0x4a, 0x65, 0xb8, 0x48, 0x5e, 0x49, 0x61, 0x94, + 0x1c, 0x8d, 0x40, 0x91, 0xb5, 0xbe, 0xb7, 0xd7, 0x8e, 0x3e, 0x73, 0x65, 0x3c, 0xa6, 0x75, 0x08, + 0xbd, 0x89, 0x87, 0x7f, 0x40, 0x5b, 0xf3, 0xf0, 0x6b, 0xa1, 0x0d, 0x13, 0x31, 0x90, 0xa6, 0x15, + 0x7b, 0xe2, 0xc4, 0xb6, 0xe8, 0x32, 0x80, 0xd6, 0x39, 0xf8, 0x19, 0x6a, 0xb1, 0xd8, 0x70, 0x29, + 0x48, 0xcb, 0xb2, 0x1f, 0x39, 0x76, 0xeb, 0xa5, 0x8d, 0x52, 0x97, 0x2d, 0x70, 0x0a, 0x98, 0x96, + 0x82, 0xac, 0x5f, 0xc7, 0x51, 0x1b, 0xa5, 0x2e, 0x8b, 0x4f, 0x51, 0x5b, 0x41, 0xc2, 0xd4, 0x90, + 0x8b, 0x84, 0x6c, 0xd8, 0xb1, 0x3d, 0x5d, 0x1c, 0x5b, 0xf1, 0x37, 0x54, 0x9f, 0x99, 0xc2, 0x7b, + 0x50, 0x20, 0xe2, 0x85, 0x2f, 0x41, 0x67, 0x6c, 0x5a, 0x09, 0xe1, 0x37, 0x68, 0x5d, 0xc1, 0xa8, + 0x58, 0x34, 0xd2, 0xbe, 0xbb, 0x66, 0x27, 0x9f, 0xf6, 0xd6, 0x69, 0xc9, 0xa3, 0x33, 0x01, 0xdc, + 0x47, 0x6b, 0x42, 0x1a, 0x20, 0xc8, 0xf6, 0xf1, 0xc0, 0xf9, 0xae, 0xfd, 0x28, 0x0d, 0x50, 0x9b, + 0x29, 0x10, 0xe6, 0x22, 0x05, 0xd2, 0xb9, 0x8e, 0x38, 0xbd, 0x48, 0x81, 0xda, 0x0c, 0x06, 0xd4, + 0x1d, 0x42, 0xaa, 0x20, 0x2e, 0x14, 0x4f, 0x64, 0xa6, 0x62, 0x20, 0x0f, 0x6c, 0x61, 0xbd, 0x9b, + 0x0a, 0x2b, 0x97, 0xc3, 0xc2, 0x22, 0xe2, 0xe4, 0xba, 0x07, 0x4b, 0x02, 0xb4, 0x26, 0x89, 0x7f, + 0xf7, 0x10, 0xa9, 0x82, 0xdf, 0x73, 0xa5, 0xed, 0x62, 0x6a, 0xc3, 0xc6, 0x29, 0x79, 0x68, 0xfd, + 0xbe, 0xb8, 0xdb, 0xca, 0xdb, 0x6d, 0xef, 0x3b, 0x6b, 0x72, 0xb0, 0x42, 0x93, 0xae, 0x74, 0xc3, + 0xbf, 0x79, 0x68, 0xbb, 0x4a, 0x1e, 0xb1, 0xc5, 0x4a, 0x1e, 0xfd, 0xef, 0x4a, 0x7a, 0xae, 0x92, + 0xed, 0x83, 0x9b, 0x25, 0xe9, 0x2a, 0x2f, 0xfc, 0x12, 0x6d, 0x56, 0xa9, 0x57, 0x32, 0x13, 0x86, + 0x6c, 0xf6, 0xbd, 0xbd, 0x66, 0xb4, 0xed, 0x24, 0x37, 0x0f, 0xae, 0xa7, 0xe9, 0x32, 0x7e, 0xf7, + 0x2f, 0x0f, 0x95, 0xff, 
0xfb, 0x11, 0xd7, 0x06, 0xff, 0x5c, 0x3b, 0x54, 0xc1, 0xdd, 0x1a, 0x29, + 0xd8, 0xf6, 0x4c, 0x75, 0x9d, 0xf3, 0xc6, 0x2c, 0xb2, 0x70, 0xa4, 0x0e, 0x51, 0x93, 0x1b, 0x18, + 0x6b, 0x72, 0xaf, 0x7f, 0x7f, 0xaf, 0xf3, 0xa2, 0x7f, 0xdb, 0x05, 0x89, 0x1e, 0x3a, 0xb1, 0xe6, + 0xeb, 0x82, 0x46, 0x4b, 0xf6, 0x6e, 0xee, 0xa1, 0xce, 0xc2, 0x85, 0xc1, 0x4f, 0x51, 0x33, 0xb6, + 0xbd, 0x7b, 0xb6, 0xf7, 0x39, 0xa9, 0xec, 0xb8, 0xcc, 0xe1, 0x0c, 0x75, 0x47, 0x4c, 0x9b, 0xb7, + 0x03, 0x0d, 0x6a, 0x02, 0xc3, 0x4f, 0xb9, 0x93, 0xf3, 0xa5, 0x3d, 0x5a, 0x12, 0xa4, 0x35, 0x0b, + 0xfc, 0x2d, 0x6a, 0x6a, 0xc3, 0x0c, 0xd8, 0xa3, 0xd9, 0x8e, 0x3e, 0x9f, 0xd5, 0x76, 0x52, 0x04, + 0xff, 0x9d, 0xf6, 0xba, 0x0b, 0x8d, 0xd8, 0x18, 0x2d, 0xf1, 0xd1, 0xf3, 0xcb, 0x2b, 0xbf, 0xf1, + 0xe1, 0xca, 0x6f, 0x7c, 0xbc, 0xf2, 0x1b, 0xbf, 0xe6, 0xbe, 0x77, 0x99, 0xfb, 0xde, 0x87, 0xdc, + 0xf7, 0x3e, 0xe6, 0xbe, 0xf7, 0x77, 0xee, 0x7b, 0x7f, 0xfc, 0xe3, 0x37, 0x7e, 0x5a, 0x77, 0xf3, + 0xfa, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x69, 0xa9, 0x7b, 0x6e, 0xf2, 0x07, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto new file mode 100644 index 000000000..60ee899ba --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -0,0 +1,122 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.events.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +message Event { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Required. Time when this Event was first observed. + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + optional EventSeries series = 3; + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + optional string reportingController = 4; + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + optional string reportingInstance = 5; + + // What action was taken/failed regarding to the regarding object. + // +optional + optional string action = 6; + + // Why the action was taken. + optional string reason = 7; + + // The object this Event is about. In most cases it's an Object reporting controller implements. + // E.g. 
ReplicaSetController implements ReplicaSets and this event is emitted because + // it acts on some changes in a ReplicaSet object. + // +optional + optional k8s.io.api.core.v1.ObjectReference regarding = 8; + + // Optional secondary object for more complex actions. E.g. when regarding object triggers + // a creation or deletion of related object. + // +optional + optional k8s.io.api.core.v1.ObjectReference related = 9; + + // Optional. A human-readable description of the status of this operation. + // Maximal length of the note is 1kB, but libraries should be prepared to + // handle values up to 64kB. + // +optional + optional string note = 10; + + // Type of this event (Normal, Warning), new types could be added in the + // future. + // +optional + optional string type = 11; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional k8s.io.api.core.v1.EventSource deprecatedSource = 12; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional int32 deprecatedCount = 15; +} + +// EventList is a list of Event objects. +message EventList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated Event items = 2; +} + +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continuously for some time. +message EventSeries { + // Number of occurrences in this series up to the last heartbeat time + optional int32 count = 1; + + // Time when last Event from the series was seen before last heartbeat. + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + + // Information whether this series is ongoing or finished. + optional string state = 3; +} + diff --git a/vendor/k8s.io/api/events/v1beta1/register.go b/vendor/k8s.io/api/events/v1beta1/register.go new file mode 100644 index 000000000..450678291 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "events.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Event{}, + &EventList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go new file mode 100644 index 000000000..dc48ddb06 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/types.go @@ -0,0 +1,122 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +type Event struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Required. Time when this Event was first observed. + EventTime metav1.MicroTime `json:"eventTime" protobuf:"bytes,2,opt,name=eventTime"` + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + Series *EventSeries `json:"series,omitempty" protobuf:"bytes,3,opt,name=series"` + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingController string `json:"reportingController,omitempty" protobuf:"bytes,4,opt,name=reportingController"` + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + ReportingInstance string `json:"reportingInstance,omitemtpy" protobuf:"bytes,5,opt,name=reportingInstance"` + + // What action was taken/failed regarding to the regarding object. + // +optional + Action string `json:"action,omitemtpy" protobuf:"bytes,6,name=action"` + + // Why the action was taken. + Reason string `json:"reason,omitempty" protobuf:"bytes,7,name=reason"` + + // The object this Event is about. 
In most cases it's an Object reporting controller implements. + // E.g. ReplicaSetController implements ReplicaSets and this event is emitted because + // it acts on some changes in a ReplicaSet object. + // +optional + Regarding corev1.ObjectReference `json:"regarding,omitempty" protobuf:"bytes,8,opt,name=regarding"` + + // Optional secondary object for more complex actions. E.g. when regarding object triggers + // a creation or deletion of related object. + // +optional + Related *corev1.ObjectReference `json:"related,omitempty" protobuf:"bytes,9,opt,name=related"` + + // Optional. A human-readable description of the status of this operation. + // Maximal length of the note is 1kB, but libraries should be prepared to + // handle values up to 64kB. + // +optional + Note string `json:"note,omitempty" protobuf:"bytes,10,opt,name=note"` + + // Type of this event (Normal, Warning), new types could be added in the + // future. + // +optional + Type string `json:"type,omitempty" protobuf:"bytes,11,opt,name=type"` + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedSource corev1.EventSource `json:"deprecatedSource,omitempty" protobuf:"bytes,12,opt,name=deprecatedSource"` + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedFirstTimestamp metav1.Time `json:"deprecatedFirstTimestamp,omitempty" protobuf:"bytes,13,opt,name=deprecatedFirstTimestamp"` + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedLastTimestamp metav1.Time `json:"deprecatedLastTimestamp,omitempty" protobuf:"bytes,14,opt,name=deprecatedLastTimestamp"` + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedCount int32 `json:"deprecatedCount,omitempty" protobuf:"varint,15,opt,name=deprecatedCount"` +} + +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continuously for some time. +type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 `json:"count" protobuf:"varint,1,opt,name=count"` + // Time when last Event from the series was seen before last heartbeat. + LastObservedTime metav1.MicroTime `json:"lastObservedTime" protobuf:"bytes,2,opt,name=lastObservedTime"` + // Information whether this series is ongoing or finished. + State EventSeriesState `json:"state" protobuf:"bytes,3,opt,name=state"` +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EventList is a list of Event objects. +type EventList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..a15672c19 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Event = map[string]string{ + "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.", + "eventTime": "Required. Time when this Event was first observed.", + "series": "Data about the Event series this event represents or nil if it's a singleton Event.", + "reportingController": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", + "reportingInstance": "ID of the controller instance, e.g. `kubelet-xyzf`.", + "action": "What action was taken/failed regarding to the regarding object.", + "reason": "Why the action was taken.", + "regarding": "The object this Event is about. In most cases it's an Object reporting controller implements. E.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.", + "related": "Optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.", + "note": "Optional. A human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.", + "type": "Type of this event (Normal, Warning), new types could be added in the future.", + "deprecatedSource": "Deprecated field assuring backward compatibility with core.v1 Event type", + "deprecatedFirstTimestamp": "Deprecated field assuring backward compatibility with core.v1 Event type", + "deprecatedLastTimestamp": "Deprecated field assuring backward compatibility with core.v1 Event type", + "deprecatedCount": "Deprecated field assuring backward compatibility with core.v1 Event type", +} + +func (Event) SwaggerDoc() map[string]string { + return map_Event +} + +var map_EventList = map[string]string{ + "": "EventList is a list of Event objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (EventList) SwaggerDoc() map[string]string { + return map_EventList +} + +var map_EventSeries = map[string]string{ + "": "EventSeries contain information on series of events, i.e. 
thing that was/is happening continuously for some time.", + "count": "Number of occurrences in this series up to the last heartbeat time", + "lastObservedTime": "Time when last Event from the series was seen before last heartbeat.", + "state": "Information whether this series is ongoing or finished.", +} + +func (EventSeries) SwaggerDoc() map[string]string { + return map_EventSeries +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..9652044b3 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,125 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + if *in == nil { + *out = nil + } else { + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + } + out.Regarding = in.Regarding + if in.Related != nil { + in, out := &in.Related, &out.Related + if *in == nil { + *out = nil + } else { + *out = new(v1.ObjectReference) + **out = **in + } + } + out.DeprecatedSource = in.DeprecatedSource + in.DeprecatedFirstTimestamp.DeepCopyInto(&out.DeprecatedFirstTimestamp) + in.DeprecatedLastTimestamp.DeepCopyInto(&out.DeprecatedLastTimestamp) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. 
+func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go new file mode 100644 index 000000000..8ce18304b --- /dev/null +++ b/vendor/k8s.io/api/extensions/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +package v1beta1 // import "k8s.io/api/extensions/v1beta1" diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go similarity index 68% rename from vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.pb.go rename to vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index 1c72f7311..0604fb957 100644 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,22 +15,24 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto // DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto + k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto It has these top-level messages: - APIVersion + AllowedFlexVolume + AllowedHostPath CustomMetricCurrentStatus CustomMetricCurrentStatusList CustomMetricTarget CustomMetricTargetList DaemonSet + DaemonSetCondition DaemonSetList DaemonSetSpec DaemonSetStatus @@ -47,6 +49,7 @@ limitations under the License. HTTPIngressRuleValue HostPortRange IDRange + IPBlock Ingress IngressBackend IngressList @@ -56,6 +59,7 @@ limitations under the License. 
IngressStatus IngressTLS NetworkPolicy + NetworkPolicyEgressRule NetworkPolicyIngressRule NetworkPolicyList NetworkPolicyPeer @@ -79,10 +83,6 @@ limitations under the License. ScaleSpec ScaleStatus SupplementalGroupsStrategyOptions - ThirdPartyResource - ThirdPartyResourceData - ThirdPartyResourceDataList - ThirdPartyResourceList */ package v1beta1 @@ -90,15 +90,16 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" import strings "strings" import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" import io "io" @@ -109,361 +110,395 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *APIVersion) Reset() { *m = APIVersion{} } -func (*APIVersion) ProtoMessage() {} -func (*APIVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (*AllowedHostPath) ProtoMessage() {} +func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *CustomMetricCurrentStatus) Reset() { *m = CustomMetricCurrentStatus{} } func (*CustomMetricCurrentStatus) ProtoMessage() {} func (*CustomMetricCurrentStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptorGenerated, []int{2} } func (m *CustomMetricCurrentStatusList) Reset() { *m = CustomMetricCurrentStatusList{} } func (*CustomMetricCurrentStatusList) ProtoMessage() {} func (*CustomMetricCurrentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} + return fileDescriptorGenerated, []int{3} } func (m *CustomMetricTarget) Reset() { *m = CustomMetricTarget{} } func (*CustomMetricTarget) ProtoMessage() {} -func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *CustomMetricTargetList) Reset() { *m = CustomMetricTargetList{} } func (*CustomMetricTargetList) ProtoMessage() {} -func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } 
+func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{11} +} func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func 
(*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *HostPortRange) Reset() { *m = HostPortRange{} } func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *IDRange) Reset() { *m = IDRange{} } func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + +func (m *IPBlock) Reset() { *m = IPBlock{} } +func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *IngressStatus) Reset() { *m = 
IngressStatus{} } func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } + +func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } +func (*NetworkPolicyEgressRule) ProtoMessage() {} +func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{34} +} func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{31} + return fileDescriptorGenerated, []int{35} } func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{42} } func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } func (*ReplicationControllerDummy) ProtoMessage() {} func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{44} + return fileDescriptorGenerated, []int{48} } func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{47} + return fileDescriptorGenerated, []int{51} } func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{48} + return fileDescriptorGenerated, []int{52} } func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +func (*Scale) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{53} + return fileDescriptorGenerated, []int{57} } -func (m *ThirdPartyResource) Reset() { *m = ThirdPartyResource{} } -func (*ThirdPartyResource) ProtoMessage() {} -func (*ThirdPartyResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } - -func (m *ThirdPartyResourceData) Reset() { *m = ThirdPartyResourceData{} } -func (*ThirdPartyResourceData) ProtoMessage() {} -func (*ThirdPartyResourceData) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } - -func (m *ThirdPartyResourceDataList) Reset() { *m = ThirdPartyResourceDataList{} } -func (*ThirdPartyResourceDataList) ProtoMessage() {} -func (*ThirdPartyResourceDataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{56} -} - -func (m *ThirdPartyResourceList) Reset() { *m = ThirdPartyResourceList{} } -func (*ThirdPartyResourceList) ProtoMessage() {} -func (*ThirdPartyResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } - func init() { - proto.RegisterType((*APIVersion)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.APIVersion") - proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.CustomMetricCurrentStatus") - proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.CustomMetricCurrentStatusList") - proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.CustomMetricTarget") - proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.CustomMetricTargetList") - proto.RegisterType((*DaemonSet)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DaemonSet") - proto.RegisterType((*DaemonSetList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DaemonSetList") - proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DaemonSetSpec") - proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DaemonSetStatus") - proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DaemonSetUpdateStrategy") - proto.RegisterType((*Deployment)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.Deployment") - proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentCondition") - proto.RegisterType((*DeploymentList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentList") - proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentRollback") - proto.RegisterType((*DeploymentSpec)(nil), 
"k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentSpec") - proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentStatus") - proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentStrategy") - proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.FSGroupStrategyOptions") - proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.HTTPIngressPath") - proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.HTTPIngressRuleValue") - proto.RegisterType((*HostPortRange)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.HostPortRange") - proto.RegisterType((*IDRange)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IDRange") - proto.RegisterType((*Ingress)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.Ingress") - proto.RegisterType((*IngressBackend)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressBackend") - proto.RegisterType((*IngressList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressList") - proto.RegisterType((*IngressRule)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressRule") - proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressRuleValue") - proto.RegisterType((*IngressSpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressSpec") - proto.RegisterType((*IngressStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressStatus") - proto.RegisterType((*IngressTLS)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressTLS") - proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicy") - proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicyIngressRule") - proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicyList") - proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicyPeer") - proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicyPort") - proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicySpec") - proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.PodSecurityPolicy") - proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.PodSecurityPolicyList") - proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.PodSecurityPolicySpec") - proto.RegisterType((*ReplicaSet)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicaSet") - proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicaSetCondition") - proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicaSetList") - proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicaSetSpec") - proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicaSetStatus") - proto.RegisterType((*ReplicationControllerDummy)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicationControllerDummy") - proto.RegisterType((*RollbackConfig)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.RollbackConfig") - 
proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.RollingUpdateDaemonSet") - proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.RollingUpdateDeployment") - proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.SELinuxStrategyOptions") - proto.RegisterType((*Scale)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ScaleStatus") - proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.SupplementalGroupsStrategyOptions") - proto.RegisterType((*ThirdPartyResource)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ThirdPartyResource") - proto.RegisterType((*ThirdPartyResourceData)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ThirdPartyResourceData") - proto.RegisterType((*ThirdPartyResourceDataList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ThirdPartyResourceDataList") - proto.RegisterType((*ThirdPartyResourceList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ThirdPartyResourceList") + proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") + proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") + proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatus") + proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatusList") + proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTarget") + proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTargetList") + proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") + proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") + proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetSpec") + proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetStatus") + proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetUpdateStrategy") + proto.RegisterType((*Deployment)(nil), "k8s.io.api.extensions.v1beta1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.extensions.v1beta1.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.extensions.v1beta1.DeploymentList") + proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy") + proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.FSGroupStrategyOptions") + proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressPath") + 
proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressRuleValue") + proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.extensions.v1beta1.HostPortRange") + proto.RegisterType((*IDRange)(nil), "k8s.io.api.extensions.v1beta1.IDRange") + proto.RegisterType((*IPBlock)(nil), "k8s.io.api.extensions.v1beta1.IPBlock") + proto.RegisterType((*Ingress)(nil), "k8s.io.api.extensions.v1beta1.Ingress") + proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.extensions.v1beta1.IngressBackend") + proto.RegisterType((*IngressList)(nil), "k8s.io.api.extensions.v1beta1.IngressList") + proto.RegisterType((*IngressRule)(nil), "k8s.io.api.extensions.v1beta1.IngressRule") + proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.IngressRuleValue") + proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.extensions.v1beta1.IngressSpec") + proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.extensions.v1beta1.IngressStatus") + proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.extensions.v1beta1.IngressTLS") + proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicy") + proto.RegisterType((*NetworkPolicyEgressRule)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyEgressRule") + proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyIngressRule") + proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyList") + proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPeer") + proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPort") + proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicySpec") + proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicy") + proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicyList") + proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicySpec") + proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSet") + proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetCondition") + proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList") + proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetSpec") + proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetStatus") + proto.RegisterType((*ReplicationControllerDummy)(nil), "k8s.io.api.extensions.v1beta1.ReplicationControllerDummy") + proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") + proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") + proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") + proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") + proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") + proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), 
"k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") } -func (m *APIVersion) Marshal() (data []byte, err error) { +func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *APIVersion) MarshalTo(data []byte) (int, error) { +func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) return i, nil } -func (m *CustomMetricCurrentStatus) Marshal() (data []byte, err error) { +func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CustomMetricCurrentStatus) MarshalTo(data []byte) (int, error) { +func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i += copy(dAtA[i:], m.PathPrefix) + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentValue.Size())) - n1, err := m.CurrentValue.MarshalTo(data[i:]) + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *CustomMetricCurrentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomMetricCurrentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) + n1, err := m.CurrentValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -471,27 +506,27 @@ func (m *CustomMetricCurrentStatus) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *CustomMetricCurrentStatusList) Marshal() (data []byte, err error) { +func (m *CustomMetricCurrentStatusList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CustomMetricCurrentStatusList) MarshalTo(data []byte) (int, error) { +func (m *CustomMetricCurrentStatusList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -501,29 +536,29 @@ func (m *CustomMetricCurrentStatusList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *CustomMetricTarget) Marshal() (data []byte, 
err error) { +func (m *CustomMetricTarget) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CustomMetricTarget) MarshalTo(data []byte) (int, error) { +func (m *CustomMetricTarget) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) + n2, err := m.TargetValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -531,27 +566,27 @@ func (m *CustomMetricTarget) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *CustomMetricTargetList) Marshal() (data []byte, err error) { +func (m *CustomMetricTargetList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CustomMetricTargetList) MarshalTo(data []byte) (int, error) { +func (m *CustomMetricTargetList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -561,41 +596,41 @@ func (m *CustomMetricTargetList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *DaemonSet) Marshal() (data []byte, err error) { +func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *DaemonSet) MarshalTo(data []byte) (int, error) { +func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n3 - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n4 - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n5, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -603,35 +638,77 @@ func (m *DaemonSet) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *DaemonSetList) Marshal() (data []byte, err error) { +func (m *DaemonSetCondition) 
Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *DaemonSetList) MarshalTo(data []byte) (int, error) { +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n6, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n6 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n7, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -641,251 +718,111 @@ func (m *DaemonSetList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *DaemonSetSpec) Marshal() (data []byte, err error) { +func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *DaemonSetSpec) MarshalTo(data []byte) (int, error) { +func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Selector != nil { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n7, err := m.Selector.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n8, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 } - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n8 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.UpdateStrategy.Size())) - n9, err := m.UpdateStrategy.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n9, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n9 - data[i] = 0x20 + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) - data[i] = 0x28 - i++ - i = 
encodeVarintGenerated(data, i, uint64(m.TemplateGeneration)) - return i, nil -} - -func (m *DaemonSetStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n10, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return data[:n], nil -} - -func (m *DaemonSetStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 + i += n10 + dAtA[i] = 0x20 i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentNumberScheduled)) - data[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + dAtA[i] = 0x28 i++ - i = encodeVarintGenerated(data, i, uint64(m.NumberMisscheduled)) - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.DesiredNumberScheduled)) - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.NumberReady)) - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) - data[i] = 0x30 - i++ - i = encodeVarintGenerated(data, i, uint64(m.UpdatedNumberScheduled)) - data[i] = 0x38 - i++ - i = encodeVarintGenerated(data, i, uint64(m.NumberAvailable)) - data[i] = 0x40 - i++ - i = encodeVarintGenerated(data, i, uint64(m.NumberUnavailable)) - return i, nil -} - -func (m *DaemonSetUpdateStrategy) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DaemonSetUpdateStrategy) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - if m.RollingUpdate != nil { - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateGeneration)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 i++ - i = encodeVarintGenerated(data, i, uint64(m.RollingUpdate.Size())) - n10, err := m.RollingUpdate.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n10 + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) } return i, nil } -func (m *Deployment) Marshal() (data []byte, err error) { +func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Deployment) MarshalTo(data []byte) (int, error) { +func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n11, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + dAtA[i] = 0x40 + i++ + 
i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } - i += n11 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n12, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n12 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n13, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n13 - return i, nil -} - -func (m *DeploymentCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DeploymentCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastUpdateTime.Size())) - n14, err := m.LastUpdateTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n14 - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n15, err := m.LastTransitionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n15 - return i, nil -} - -func (m *DeploymentList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DeploymentList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n16, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n16 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -895,238 +832,410 @@ func (m *DeploymentList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *DeploymentRollback) Marshal() (data []byte, err error) { +func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *DeploymentRollback) MarshalTo(data []byte) (int, error) { +func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - if len(m.UpdatedAnnotations) > 0 { - for k := range m.UpdatedAnnotations { - data[i] = 0x12 - i++ - v := m.UpdatedAnnotations[k] - mapSize := 1 + len(k) + 
sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n11, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n11 } - data[i] = 0x1a + return i, nil +} + +func (m *Deployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) - n17, err := m.RollbackTo.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n13, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n14, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + return i, nil +} + +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n15, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n16, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + return i, nil +} + +func (m *DeploymentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n17, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n17 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } 
return i, nil } -func (m *DeploymentSpec) Marshal() (data []byte, err error) { +func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *DeploymentSpec) MarshalTo(data []byte) (int, error) { +func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if len(m.UpdatedAnnotations) > 0 { + keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) + for k := range m.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + for _, k := range keysForUpdatedAnnotations { + dAtA[i] = 0x12 + i++ + v := m.UpdatedAnnotations[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) + n18, err := m.RollbackTo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + return i, nil +} + +func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Replicas != nil { - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) } if m.Selector != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n18, err := m.Selector.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n19, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n19 } - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n19, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n19 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Strategy.Size())) - n20, err := m.Strategy.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n20, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n20 - data[i] = 0x28 + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - data[i] = 0x30 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.RevisionHistoryLimit)) + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - data[i] = 0x38 + i += n21 + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + dAtA[i] 
= 0x38 i++ if m.Paused { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ if m.RollbackTo != nil { - data[i] = 0x42 + dAtA[i] = 0x42 i++ - i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) - n21, err := m.RollbackTo.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n21 - } - if m.ProgressDeadlineSeconds != nil { - data[i] = 0x48 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.ProgressDeadlineSeconds)) - } - return i, nil -} - -func (m *DeploymentStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DeploymentStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.UpdatedReplicas)) - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.UnavailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x38 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) - return i, nil -} - -func (m *DeploymentStrategy) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DeploymentStrategy) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - if m.RollingUpdate != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.RollingUpdate.Size())) - n22, err := m.RollingUpdate.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) + n22, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n22 } + if m.ProgressDeadlineSeconds != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + } return i, nil } -func (m *FSGroupStrategyOptions) Marshal() (data []byte, err error) { +func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *FSGroupStrategyOptions) MarshalTo(data []byte) (int, error) { +func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) - i += copy(data[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) 
+ if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + return i, nil +} + +func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n23, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + return i, nil +} + +func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { for _, msg := range m.Ranges { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1136,57 +1245,57 @@ func (m *FSGroupStrategyOptions) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *HTTPIngressPath) Marshal() (data []byte, err error) { +func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *HTTPIngressPath) MarshalTo(data []byte) (int, error) { +func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Backend.Size())) - n23, err := m.Backend.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) + n24, err := m.Backend.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n24 return i, nil } -func (m *HTTPIngressRuleValue) Marshal() (data []byte, err error) { +func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *HTTPIngressRuleValue) MarshalTo(data []byte) (int, error) { +func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l 
int _ = l if len(m.Paths) > 0 { for _, msg := range m.Paths { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1196,119 +1305,126 @@ func (m *HTTPIngressRuleValue) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *HostPortRange) Marshal() (data []byte, err error) { +func (m *HostPortRange) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *HostPortRange) MarshalTo(data []byte) (int, error) { +func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(m.Min)) - data[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(m.Max)) + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) return i, nil } -func (m *IDRange) Marshal() (data []byte, err error) { +func (m *IDRange) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *IDRange) MarshalTo(data []byte) (int, error) { +func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(m.Min)) - data[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(m.Max)) + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) return i, nil } -func (m *Ingress) Marshal() (data []byte, err error) { +func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Ingress) MarshalTo(data []byte) (int, error) { +func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n24, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i += copy(dAtA[i:], m.CIDR) + if len(m.Except) > 0 { + for _, s := range m.Except { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } - i += n24 - data[i] = 0x12 + return i, nil +} + +func (m *Ingress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n25, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n25, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err 
!= nil { return 0, err } i += n25 - data[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n26, err := m.Status.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n26, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n26 - return i, nil -} - -func (m *IngressBackend) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *IngressBackend) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName))) - i += copy(data[i:], m.ServiceName) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ServicePort.Size())) - n27, err := m.ServicePort.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n27, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1316,35 +1432,65 @@ func (m *IngressBackend) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *IngressList) Marshal() (data []byte, err error) { +func (m *IngressBackend) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *IngressList) MarshalTo(data []byte) (int, error) { +func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n28, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i += copy(dAtA[i:], m.ServiceName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) + n28, err := m.ServicePort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n28 + return i, nil +} + +func (m *IngressList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n29, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1354,95 +1500,95 @@ func (m *IngressList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *IngressRule) Marshal() (data []byte, err error) { +func (m *IngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *IngressRule) MarshalTo(data []byte) (int, error) { +func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = 
encodeVarintGenerated(data, i, uint64(len(m.Host))) - i += copy(data[i:], m.Host) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i += copy(dAtA[i:], m.Host) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.IngressRuleValue.Size())) - n29, err := m.IngressRuleValue.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) + n30, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n30 return i, nil } -func (m *IngressRuleValue) Marshal() (data []byte, err error) { +func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *IngressRuleValue) MarshalTo(data []byte) (int, error) { +func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.HTTP != nil { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.HTTP.Size())) - n30, err := m.HTTP.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n30 - } - return i, nil -} - -func (m *IngressSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *IngressSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Backend != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Backend.Size())) - n31, err := m.Backend.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) + n31, err := m.HTTP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n31 } + return i, nil +} + +func (m *IngressSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Backend != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) + n32, err := m.Backend.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } if len(m.TLS) > 0 { for _, msg := range m.TLS { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1451,10 +1597,10 @@ func (m *IngressSpec) MarshalTo(data []byte) (int, error) { } if len(m.Rules) > 0 { for _, msg := range m.Rules { - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1464,124 +1610,166 @@ func (m *IngressSpec) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *IngressStatus) Marshal() (data []byte, err error) { +func (m *IngressStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m 
*IngressStatus) MarshalTo(data []byte) (int, error) { +func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size())) - n32, err := m.LoadBalancer.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) + n33, err := m.LoadBalancer.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n33 return i, nil } -func (m *IngressTLS) Marshal() (data []byte, err error) { +func (m *IngressTLS) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *IngressTLS) MarshalTo(data []byte) (int, error) { +func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Hosts) > 0 { for _, s := range m.Hosts { - data[i] = 0xa + dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) - i += copy(data[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i += copy(dAtA[i:], m.SecretName) return i, nil } -func (m *NetworkPolicy) Marshal() (data []byte, err error) { +func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *NetworkPolicy) MarshalTo(data []byte) (int, error) { +func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n33, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n33 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n34, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n34, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n34 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n35, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 return i, nil } -func (m *NetworkPolicyIngressRule) Marshal() (data []byte, err error) { +func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *NetworkPolicyIngressRule) MarshalTo(data []byte) (int, error) { +func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Ports) > 0 { for _, msg := range m.Ports { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i 
+= n + } + } + if len(m.To) > 0 { + for _, msg := range m.To { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1590,10 +1778,10 @@ func (m *NetworkPolicyIngressRule) MarshalTo(data []byte) (int, error) { } if len(m.From) > 0 { for _, msg := range m.From { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1603,35 +1791,35 @@ func (m *NetworkPolicyIngressRule) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *NetworkPolicyList) Marshal() (data []byte, err error) { +func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *NetworkPolicyList) MarshalTo(data []byte) (int, error) { +func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n35, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n36, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n36 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1641,463 +1829,216 @@ func (m *NetworkPolicyList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *NetworkPolicyPeer) Marshal() (data []byte, err error) { +func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *NetworkPolicyPeer) MarshalTo(data []byte) (int, error) { +func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.PodSelector != nil { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size())) - n36, err := m.PodSelector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n36 - } - if m.NamespaceSelector != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.NamespaceSelector.Size())) - n37, err := m.NamespaceSelector.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) + n37, err := 
m.PodSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n37 } - return i, nil -} - -func (m *NetworkPolicyPort) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NetworkPolicyPort) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Protocol != nil { - data[i] = 0xa + if m.NamespaceSelector != nil { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(*m.Protocol))) - i += copy(data[i:], *m.Protocol) - } - if m.Port != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) - n38, err := m.Port.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) + n38, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n38 } - return i, nil -} - -func (m *NetworkPolicySpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NetworkPolicySpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size())) - n39, err := m.PodSelector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n39 - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n + if m.IPBlock != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) + n39, err := m.IPBlock.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n39 } return i, nil } -func (m *PodSecurityPolicy) Marshal() (data []byte, err error) { +func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodSecurityPolicy) MarshalTo(data []byte) (int, error) { +func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n40, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err + if m.Protocol != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i += copy(dAtA[i:], *m.Protocol) } - i += n40 - data[i] = 0x12 + if m.Port != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) + n40, err := m.Port.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + } + return i, nil +} + +func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n41, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) + n41, err := m.PodSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, 
err } i += n41 + if len(m.Ingress) > 0 { + for _, msg := range m.Ingress { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Egress) > 0 { + for _, msg := range m.Egress { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } -func (m *PodSecurityPolicyList) Marshal() (data []byte, err error) { +func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodSecurityPolicyList) MarshalTo(data []byte) (int, error) { +func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n42, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n42, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n42 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodSecurityPolicySpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodSecurityPolicySpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 + dAtA[i] = 0x12 i++ - if m.Privileged { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - data[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - data[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - data[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - data[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x30 - i++ - if m.HostNetwork { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - if len(m.HostPorts) > 0 { - for _, msg := range m.HostPorts { - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, 
err - } - i += n - } - } - data[i] = 0x40 - i++ - if m.HostPID { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x48 - i++ - if m.HostIPC { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x52 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SELinux.Size())) - n43, err := m.SELinux.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n43, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n43 - data[i] = 0x5a + return i, nil +} + +func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.RunAsUser.Size())) - n44, err := m.RunAsUser.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n44, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n44 - data[i] = 0x62 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SupplementalGroups.Size())) - n45, err := m.SupplementalGroups.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n45 - data[i] = 0x6a - i++ - i = encodeVarintGenerated(data, i, uint64(m.FSGroup.Size())) - n46, err := m.FSGroup.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n46 - data[i] = 0x70 - i++ - if m.ReadOnlyRootFilesystem { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *ReplicaSet) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicaSet) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n47, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n47 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n48, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n48 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n49, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n49 - return i, nil -} - -func (m *ReplicaSetCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicaSetCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n50, err := m.LastTransitionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n50 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - return i, nil -} - -func (m *ReplicaSetList) Marshal() (data []byte, err error) { - size 
:= m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicaSetList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n51, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n51 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -2107,86 +2048,454 @@ func (m *ReplicaSetList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *ReplicaSetSpec) Marshal() (data []byte, err error) { +func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ReplicaSetSpec) MarshalTo(data []byte) (int, error) { +func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x30 + i++ + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.HostPorts) > 0 { + for _, msg := range m.HostPorts { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x40 + i++ + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x48 + i++ + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) + n45, err := m.SELinux.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) + n46, err := m.RunAsUser.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) + n47, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + dAtA[i] = 0x6a 
+ i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) + n48, err := m.FSGroup.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + dAtA[i] = 0x70 + i++ + if m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.DefaultAllowPrivilegeEscalation != nil { + dAtA[i] = 0x78 + i++ + if *m.DefaultAllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.AllowPrivilegeEscalation != nil { + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.AllowedHostPaths) > 0 { + for _, msg := range m.AllowedHostPaths { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.AllowedFlexVolumes) > 0 { + for _, msg := range m.AllowedFlexVolumes { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.AllowedUnsafeSysctls) > 0 { + for _, s := range m.AllowedUnsafeSysctls { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ForbiddenSysctls) > 0 { + for _, s := range m.ForbiddenSysctls { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n49, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n50, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n51, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 + return i, nil +} + +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n52, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], 
m.Message) + return i, nil +} + +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n53, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Replicas != nil { - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) } if m.Selector != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n52, err := m.Selector.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n54, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n54 } - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n53, err := m.Template.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n55, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 - data[i] = 0x20 + i += n55 + dAtA[i] = 0x20 i++ - i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) return i, nil } -func (m *ReplicaSetStatus) Marshal() (data []byte, err error) { +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ReplicaSetStatus) MarshalTo(data []byte) (int, error) { +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - data[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(m.FullyLabeledReplicas)) - data[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + dAtA[i] = 0x18 i++ - i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) - data[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x20 i++ - i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) - data[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x28 i++ - i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) if len(m.Conditions) > 0 { for _, msg := range m.Conditions { - data[i] = 0x32 + dAtA[i] = 0x32 i++ - i = encodeVarintGenerated(data, 
i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -2196,17 +2505,17 @@ func (m *ReplicaSetStatus) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *ReplicationControllerDummy) Marshal() (data []byte, err error) { +func (m *ReplicationControllerDummy) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ReplicationControllerDummy) MarshalTo(data []byte) (int, error) { +func (m *ReplicationControllerDummy) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -2214,85 +2523,47 @@ func (m *ReplicationControllerDummy) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *RollbackConfig) Marshal() (data []byte, err error) { +func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *RollbackConfig) MarshalTo(data []byte) (int, error) { +func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(m.Revision)) + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) return i, nil } -func (m *RollingUpdateDaemonSet) Marshal() (data []byte, err error) { +func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *RollingUpdateDaemonSet) MarshalTo(data []byte) (int, error) { +func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.MaxUnavailable != nil { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) - n54, err := m.MaxUnavailable.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n54 - } - return i, nil -} - -func (m *RollingUpdateDeployment) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *RollingUpdateDeployment) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.MaxUnavailable != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) - n55, err := m.MaxUnavailable.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n55 - } - if m.MaxSurge != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.MaxSurge.Size())) - n56, err := m.MaxSurge.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -2301,314 +2572,245 @@ func (m *RollingUpdateDeployment) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *RunAsUserStrategyOptions) Marshal() (data []byte, err error) { +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() - 
data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *RunAsUserStrategyOptions) MarshalTo(data []byte) (int, error) { +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) - i += copy(data[i:], m.Rule) - if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *SELinuxStrategyOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SELinuxStrategyOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) - i += copy(data[i:], m.Rule) - if m.SELinuxOptions != nil { - data[i] = 0x12 + if m.MaxUnavailable != nil { + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) - n57, err := m.SELinuxOptions.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n57, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n57 } + if m.MaxSurge != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) + n58, err := m.MaxSurge.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 + } return i, nil } -func (m *Scale) Marshal() (data []byte, err error) { +func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Scale) MarshalTo(data []byte) (int, error) { +func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n58, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - i += n58 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n59, err := m.Spec.MarshalTo(data[i:]) + return i, nil +} + +func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { - return 0, err + return nil, err } - i += n59 - data[i] = 0x1a + return dAtA[:n], nil +} + +func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n60, err := m.Status.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) + if m.SELinuxOptions != nil { + 
dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) + n59, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n59 + } + return i, nil +} + +func (m *Scale) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n60, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n60 - return i, nil -} - -func (m *ScaleSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - return i, nil -} - -func (m *ScaleStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - if len(m.Selector) > 0 { - for k := range m.Selector { - data[i] = 0x12 - i++ - v := m.Selector[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.TargetSelector))) - i += copy(data[i:], m.TargetSelector) - return i, nil -} - -func (m *SupplementalGroupsStrategyOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SupplementalGroupsStrategyOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) - i += copy(data[i:], m.Rule) - if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ThirdPartyResource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ThirdPartyResource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n61, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n61, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n61 - data[i] = 0x12 + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Description))) - i += 
copy(data[i:], m.Description) - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ThirdPartyResourceData) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ThirdPartyResourceData) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n62, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n62, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n62 - if m.Data != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Data))) - i += copy(data[i:], m.Data) - } return i, nil } -func (m *ThirdPartyResourceDataList) Marshal() (data []byte, err error) { +func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ThirdPartyResourceDataList) MarshalTo(data []byte) (int, error) { +func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n63, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + return i, nil +} + +func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { - return 0, err + return nil, err } - i += n63 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 + return dAtA[:n], nil +} + +func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for _, k := range keysForSelector { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + v := m.Selector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i += copy(dAtA[i:], m.TargetSelector) + return i, nil +} + +func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 
0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -2618,79 +2820,50 @@ func (m *ThirdPartyResourceDataList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *ThirdPartyResourceList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ThirdPartyResourceList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n64, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n64 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintGenerated(data []byte, offset int, v uint64) int { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } -func (m *APIVersion) Size() (n int) { +func (m *AllowedFlexVolume) Size() (n int) { var l int _ = l - l = len(m.Name) + l = len(m.Driver) n += 1 + l + sovGenerated(uint64(l)) return n } +func (m *AllowedHostPath) Size() (n int) { + var l int + _ = l + l = len(m.PathPrefix) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + func (m *CustomMetricCurrentStatus) Size() (n int) { var l int _ = l @@ -2747,6 +2920,22 @@ func (m *DaemonSet) Size() (n int) { return n } +func (m *DaemonSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *DaemonSetList) Size() (n int) { var l int _ = l @@ -2774,6 +2963,9 @@ func (m 
*DaemonSetSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.MinReadySeconds)) n += 1 + sovGenerated(uint64(m.TemplateGeneration)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } return n } @@ -2788,6 +2980,15 @@ func (m *DaemonSetStatus) Size() (n int) { n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) n += 1 + sovGenerated(uint64(m.NumberAvailable)) n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -2909,6 +3110,9 @@ func (m *DeploymentStatus) Size() (n int) { } } n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } return n } @@ -2976,6 +3180,20 @@ func (m *IDRange) Size() (n int) { return n } +func (m *IPBlock) Size() (n int) { + var l int + _ = l + l = len(m.CIDR) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Except) > 0 { + for _, s := range m.Except { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *Ingress) Size() (n int) { var l int _ = l @@ -3086,6 +3304,24 @@ func (m *NetworkPolicy) Size() (n int) { return n } +func (m *NetworkPolicyEgressRule) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.To) > 0 { + for _, e := range m.To { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *NetworkPolicyIngressRule) Size() (n int) { var l int _ = l @@ -3129,6 +3365,10 @@ func (m *NetworkPolicyPeer) Size() (n int) { l = m.NamespaceSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.IPBlock != nil { + l = m.IPBlock.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -3157,6 +3397,18 @@ func (m *NetworkPolicySpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.Egress) > 0 { + for _, e := range m.Egress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -3230,6 +3482,36 @@ func (m *PodSecurityPolicySpec) Size() (n int) { l = m.FSGroup.Size() n += 1 + l + sovGenerated(uint64(l)) n += 2 + if m.DefaultAllowPrivilegeEscalation != nil { + n += 2 + } + if m.AllowPrivilegeEscalation != nil { + n += 3 + } + if len(m.AllowedHostPaths) > 0 { + for _, e := range m.AllowedHostPaths { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedFlexVolumes) > 0 { + for _, e := range m.AllowedFlexVolumes { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedUnsafeSysctls) > 0 { + for _, s := range m.AllowedUnsafeSysctls { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.ForbiddenSysctls) > 0 { + for _, s := range m.ForbiddenSysctls { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -3421,62 +3703,6 @@ func (m *SupplementalGroupsStrategyOptions) Size() (n int) { return n } -func (m *ThirdPartyResource) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Description) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Versions) > 0 { - for _, e := range m.Versions { - l = e.Size() - 
n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ThirdPartyResourceData) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ThirdPartyResourceDataList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ThirdPartyResourceList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func sovGenerated(x uint64) (n int) { for { n++ @@ -3490,12 +3716,23 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *APIVersion) String() string { +func (this *AllowedFlexVolume) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&APIVersion{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + s := strings.Join([]string{`&AllowedFlexVolume{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `}`, + }, "") + return s +} +func (this *AllowedHostPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedHostPath{`, + `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, `}`, }, "") return s @@ -3554,6 +3791,20 @@ func (this *DaemonSet) String() string { }, "") return s } +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *DaemonSetList) String() string { if this == nil { return "nil" @@ -3571,10 +3822,11 @@ func (this *DaemonSetSpec) String() string { } s := strings.Join([]string{`&DaemonSetSpec{`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `}`, }, "") return s @@ -3592,6 +3844,8 @@ func (this *DaemonSetStatus) String() string { `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, `NumberAvailable:` + fmt.Sprintf("%v", 
this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3674,7 +3928,7 @@ func (this *DeploymentSpec) String() string { s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -3697,6 +3951,7 @@ func (this *DeploymentStatus) String() string { `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, }, "") return s @@ -3766,6 +4021,17 @@ func (this *IDRange) String() string { }, "") return s } +func (this *IPBlock) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPBlock{`, + `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, + `Except:` + fmt.Sprintf("%v", this.Except) + `,`, + `}`, + }, "") + return s +} func (this *Ingress) String() string { if this == nil { return "nil" @@ -3838,7 +4104,7 @@ func (this *IngressStatus) String() string { return "nil" } s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_kubernetes_pkg_api_v1.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_api_core_v1.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3865,6 +4131,17 @@ func (this *NetworkPolicy) String() string { }, "") return s } +func (this *NetworkPolicyEgressRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyEgressRule{`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, + `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *NetworkPolicyIngressRule) String() string { if this == nil { return "nil" @@ -3894,6 +4171,7 @@ func (this *NetworkPolicyPeer) String() string { s := strings.Join([]string{`&NetworkPolicyPeer{`, `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `NamespaceSelector:` + 
strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(fmt.Sprintf("%v", this.IPBlock), "IPBlock", "IPBlock", 1) + `,`, `}`, }, "") return s @@ -3916,6 +4194,8 @@ func (this *NetworkPolicySpec) String() string { s := strings.Join([]string{`&NetworkPolicySpec{`, `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, + `Egress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Egress), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + `,`, + `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, `}`, }, "") return s @@ -3961,6 +4241,12 @@ func (this *PodSecurityPolicySpec) String() string { `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, + `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, + `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, + `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, + `AllowedFlexVolumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, + `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, + `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `}`, }, "") return s @@ -4009,7 +4295,7 @@ func (this *ReplicaSetSpec) String() string { s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -4087,7 +4373,7 @@ func (this *SELinuxStrategyOptions) String() string { } s := strings.Join([]string{`&SELinuxStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_kubernetes_pkg_api_v1.SELinuxOptions", 1) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_api_core_v1.SELinuxOptions", 1) + `,`, `}`, }, "") return s @@ -4147,51 +4433,6 @@ func (this *SupplementalGroupsStrategyOptions) String() string { }, "") return s } -func (this *ThirdPartyResource) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&ThirdPartyResource{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "APIVersion", "APIVersion", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResourceData) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResourceData{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + valueToStringGenerated(this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResourceDataList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResourceDataList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResourceData", "ThirdPartyResourceData", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResourceList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResourceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResource", "ThirdPartyResource", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -4200,8 +4441,8 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *APIVersion) Unmarshal(data []byte) error { - l := len(data) +func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -4213,7 +4454,7 @@ func (m *APIVersion) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4223,15 +4464,15 @@ func (m *APIVersion) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: APIVersion: wiretype end group for non-group") + return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: APIVersion: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4241,7 +4482,7 @@ func (m *APIVersion) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4256,11 +4497,11 @@ func (m *APIVersion) Unmarshal(data []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Driver = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -4279,8 +4520,8 @@ func (m *APIVersion) Unmarshal(data []byte) error { } return nil } -func (m *CustomMetricCurrentStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -4292,7 +4533,106 @@ func (m *CustomMetricCurrentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PathPrefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricCurrentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4320,7 +4660,7 @@ func (m *CustomMetricCurrentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4335,7 +4675,7 @@ func (m *CustomMetricCurrentStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -4349,7 +4689,7 @@ func (m *CustomMetricCurrentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 
0x7F) << shift if b < 0x80 { @@ -4363,13 +4703,13 @@ func (m *CustomMetricCurrentStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -4388,8 +4728,8 @@ func (m *CustomMetricCurrentStatus) Unmarshal(data []byte) error { } return nil } -func (m *CustomMetricCurrentStatusList) Unmarshal(data []byte) error { - l := len(data) +func (m *CustomMetricCurrentStatusList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -4401,7 +4741,7 @@ func (m *CustomMetricCurrentStatusList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4429,7 +4769,7 @@ func (m *CustomMetricCurrentStatusList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -4444,13 +4784,13 @@ func (m *CustomMetricCurrentStatusList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, CustomMetricCurrentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -4469,8 +4809,8 @@ func (m *CustomMetricCurrentStatusList) Unmarshal(data []byte) error { } return nil } -func (m *CustomMetricTarget) Unmarshal(data []byte) error { - l := len(data) +func (m *CustomMetricTarget) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -4482,7 +4822,7 @@ func (m *CustomMetricTarget) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4510,7 +4850,7 @@ func (m *CustomMetricTarget) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4525,7 +4865,7 @@ func (m *CustomMetricTarget) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -4539,7 +4879,7 @@ func (m *CustomMetricTarget) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -4553,13 +4893,13 @@ func (m *CustomMetricTarget) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -4578,8 +4918,8 @@ func (m *CustomMetricTarget) 
Unmarshal(data []byte) error { } return nil } -func (m *CustomMetricTargetList) Unmarshal(data []byte) error { - l := len(data) +func (m *CustomMetricTargetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -4591,7 +4931,7 @@ func (m *CustomMetricTargetList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4619,7 +4959,7 @@ func (m *CustomMetricTargetList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -4634,13 +4974,13 @@ func (m *CustomMetricTargetList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, CustomMetricTarget{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -4659,8 +4999,8 @@ func (m *CustomMetricTargetList) Unmarshal(data []byte) error { } return nil } -func (m *DaemonSet) Unmarshal(data []byte) error { - l := len(data) +func (m *DaemonSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -4672,7 +5012,7 @@ func (m *DaemonSet) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4700,7 +5040,7 @@ func (m *DaemonSet) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -4714,7 +5054,7 @@ func (m *DaemonSet) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4730,7 +5070,7 @@ func (m *DaemonSet) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -4744,7 +5084,7 @@ func (m *DaemonSet) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4760,7 +5100,7 @@ func (m *DaemonSet) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -4774,13 +5114,13 @@ func (m *DaemonSet) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -4799,8 +5139,8 @@ func (m *DaemonSet) Unmarshal(data []byte) error { } return nil } -func (m *DaemonSetList) Unmarshal(data []byte) error { - l := len(data) +func (m 
*DaemonSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -4812,7 +5152,203 @@ func (m *DaemonSetList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4840,7 +5376,7 @@ func (m *DaemonSetList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -4854,7 +5390,7 @@ func (m *DaemonSetList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4870,7 +5406,7 @@ func (m *DaemonSetList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -4885,13 +5421,13 @@ func (m *DaemonSetList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, DaemonSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -4910,8 +5446,8 @@ func (m *DaemonSetList) Unmarshal(data []byte) error { } return nil } -func (m *DaemonSetSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -4923,7 +5459,7 @@ func (m *DaemonSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4951,7 +5487,7 @@ func (m *DaemonSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -4968,7 +5504,7 @@ func (m *DaemonSetSpec) Unmarshal(data []byte) error { if m.Selector == nil { m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4984,7 +5520,7 @@ func (m *DaemonSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -4998,7 +5534,7 @@ func (m 
*DaemonSetSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5014,7 +5550,7 @@ func (m *DaemonSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5028,7 +5564,7 @@ func (m *DaemonSetSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.UpdateStrategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5044,7 +5580,7 @@ func (m *DaemonSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -5063,16 +5599,36 @@ func (m *DaemonSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.TemplateGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -5091,8 +5647,8 @@ func (m *DaemonSetSpec) Unmarshal(data []byte) error { } return nil } -func (m *DaemonSetStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -5104,7 +5660,7 @@ func (m *DaemonSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5132,7 +5688,7 @@ func (m *DaemonSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -5151,7 +5707,7 @@ func (m *DaemonSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.NumberMisscheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -5170,7 +5726,7 @@ func (m *DaemonSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -5189,7 +5745,7 @@ func (m *DaemonSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.NumberReady |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -5208,7 +5764,7 @@ func (m *DaemonSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -5227,7 +5783,7 @@ func (m *DaemonSetStatus) 
Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -5246,7 +5802,7 @@ func (m *DaemonSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.NumberAvailable |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -5265,16 +5821,67 @@ func (m *DaemonSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.NumberUnavailable |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -5293,8 +5900,8 @@ func (m *DaemonSetStatus) Unmarshal(data []byte) error { } return nil } -func (m *DaemonSetUpdateStrategy) Unmarshal(data []byte) error { - l := len(data) +func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -5306,7 +5913,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5334,7 +5941,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5349,7 +5956,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = DaemonSetUpdateStrategyType(data[iNdEx:postIndex]) + m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -5363,7 +5970,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5380,13 +5987,13 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(data []byte) error { if m.RollingUpdate == nil { m.RollingUpdate = &RollingUpdateDaemonSet{} } - if err := m.RollingUpdate.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = 
postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -5405,8 +6012,8 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(data []byte) error { } return nil } -func (m *Deployment) Unmarshal(data []byte) error { - l := len(data) +func (m *Deployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -5418,7 +6025,7 @@ func (m *Deployment) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5446,7 +6053,7 @@ func (m *Deployment) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5460,7 +6067,7 @@ func (m *Deployment) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5476,7 +6083,7 @@ func (m *Deployment) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5490,7 +6097,7 @@ func (m *Deployment) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5506,7 +6113,7 @@ func (m *Deployment) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5520,13 +6127,13 @@ func (m *Deployment) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -5545,8 +6152,8 @@ func (m *Deployment) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentCondition) Unmarshal(data []byte) error { - l := len(data) +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -5558,7 +6165,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5586,7 +6193,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5601,7 +6208,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = DeploymentConditionType(data[iNdEx:postIndex]) + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -5615,7 +6222,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 
0x7F) << shift if b < 0x80 { @@ -5630,7 +6237,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -5644,7 +6251,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5659,7 +6266,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -5673,7 +6280,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5688,7 +6295,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { @@ -5702,7 +6309,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5716,7 +6323,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastUpdateTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5732,7 +6339,7 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5746,13 +6353,13 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -5771,8 +6378,8 @@ func (m *DeploymentCondition) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentList) Unmarshal(data []byte) error { - l := len(data) +func (m *DeploymentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -5784,7 +6391,7 @@ func (m *DeploymentList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5812,7 +6419,7 @@ func (m *DeploymentList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5826,7 +6433,7 @@ func (m *DeploymentList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { return err } iNdEx = postIndex @@ -5842,7 +6449,7 @@ func (m *DeploymentList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5857,13 +6464,13 @@ func (m *DeploymentList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, Deployment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -5882,8 +6489,8 @@ func (m *DeploymentList) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentRollback) Unmarshal(data []byte) error { - l := len(data) +func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -5895,7 +6502,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5923,7 +6530,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5938,7 +6545,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -5952,7 +6559,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5974,7 +6581,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5989,7 +6596,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6004,52 +6611,57 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := 
string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue if m.UpdatedAnnotations == nil { m.UpdatedAnnotations = make(map[string]string) } - m.UpdatedAnnotations[mapkey] = mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.UpdatedAnnotations[mapkey] = mapvalue + } else { + var mapvalue string + m.UpdatedAnnotations[mapkey] = mapvalue + } iNdEx = postIndex case 3: if wireType != 2 { @@ -6063,7 +6675,7 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -6077,13 +6689,13 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -6102,8 +6714,8 @@ func (m *DeploymentRollback) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -6115,7 +6727,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6143,7 +6755,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -6163,7 +6775,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -6180,7 +6792,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if m.Selector == nil { m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6196,7 +6808,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -6210,7 +6822,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if 
postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6226,7 +6838,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -6240,7 +6852,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Strategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6256,7 +6868,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -6275,7 +6887,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -6295,7 +6907,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -6315,7 +6927,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -6332,7 +6944,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if m.RollbackTo == nil { m.RollbackTo = &RollbackConfig{} } - if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6348,7 +6960,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -6358,7 +6970,7 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { m.ProgressDeadlineSeconds = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -6377,8 +6989,8 @@ func (m *DeploymentSpec) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -6390,7 +7002,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6418,7 +7030,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -6437,7 +7049,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -6456,7 +7068,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.UpdatedReplicas |= 
(int32(b) & 0x7F) << shift if b < 0x80 { @@ -6475,7 +7087,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.AvailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -6494,7 +7106,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.UnavailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -6513,7 +7125,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -6528,7 +7140,7 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Conditions = append(m.Conditions, DeploymentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6544,16 +7156,36 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -6572,8 +7204,8 @@ func (m *DeploymentStatus) Unmarshal(data []byte) error { } return nil } -func (m *DeploymentStrategy) Unmarshal(data []byte) error { - l := len(data) +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -6585,7 +7217,7 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6613,7 +7245,7 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6628,7 +7260,7 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = DeploymentStrategyType(data[iNdEx:postIndex]) + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -6642,7 +7274,7 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -6659,13 +7291,13 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { if m.RollingUpdate == nil { m.RollingUpdate = &RollingUpdateDeployment{} } - if err := m.RollingUpdate.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := 
skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -6684,8 +7316,8 @@ func (m *DeploymentStrategy) Unmarshal(data []byte) error { } return nil } -func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -6697,7 +7329,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6725,7 +7357,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6740,7 +7372,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = FSGroupStrategyType(data[iNdEx:postIndex]) + m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -6754,7 +7386,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -6769,13 +7401,13 @@ func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -6794,8 +7426,8 @@ func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { } return nil } -func (m *HTTPIngressPath) Unmarshal(data []byte) error { - l := len(data) +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -6807,7 +7439,7 @@ func (m *HTTPIngressPath) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6835,7 +7467,7 @@ func (m *HTTPIngressPath) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6850,7 +7482,7 @@ func (m *HTTPIngressPath) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(data[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -6864,7 +7496,7 @@ func (m *HTTPIngressPath) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -6878,13 +7510,13 @@ func (m *HTTPIngressPath) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -6903,8 
+7535,8 @@ func (m *HTTPIngressPath) Unmarshal(data []byte) error { } return nil } -func (m *HTTPIngressRuleValue) Unmarshal(data []byte) error { - l := len(data) +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -6916,7 +7548,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6944,7 +7576,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -6959,13 +7591,13 @@ func (m *HTTPIngressRuleValue) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -6984,8 +7616,8 @@ func (m *HTTPIngressRuleValue) Unmarshal(data []byte) error { } return nil } -func (m *HostPortRange) Unmarshal(data []byte) error { - l := len(data) +func (m *HostPortRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -6997,7 +7629,7 @@ func (m *HostPortRange) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -7025,7 +7657,7 @@ func (m *HostPortRange) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Min |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -7044,7 +7676,7 @@ func (m *HostPortRange) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Max |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -7053,7 +7685,7 @@ func (m *HostPortRange) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -7072,8 +7704,8 @@ func (m *HostPortRange) Unmarshal(data []byte) error { } return nil } -func (m *IDRange) Unmarshal(data []byte) error { - l := len(data) +func (m *IDRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -7085,7 +7717,7 @@ func (m *IDRange) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -7113,7 +7745,7 @@ func (m *IDRange) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Min |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -7132,7 +7764,7 @@ func (m *IDRange) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Max |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -7141,7 +7773,7 @@ func (m *IDRange) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -7160,8 +7792,8 @@ 
func (m *IDRange) Unmarshal(data []byte) error { } return nil } -func (m *Ingress) Unmarshal(data []byte) error { - l := len(data) +func (m *IPBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -7173,7 +7805,115 @@ func (m *Ingress) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CIDR = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ingress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -7201,7 +7941,7 @@ func (m *Ingress) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -7215,7 +7955,7 @@ func (m *Ingress) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7231,7 +7971,7 @@ func (m *Ingress) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -7245,7 +7985,7 @@ func (m *Ingress) Unmarshal(data []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7261,7 +8001,7 @@ func (m *Ingress) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -7275,13 +8015,13 @@ func (m *Ingress) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -7300,8 +8040,8 @@ func (m *Ingress) Unmarshal(data []byte) error { } return nil } -func (m *IngressBackend) Unmarshal(data []byte) error { - l := len(data) +func (m *IngressBackend) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -7313,7 +8053,7 @@ func (m *IngressBackend) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -7341,7 +8081,7 @@ func (m *IngressBackend) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -7356,7 +8096,7 @@ func (m *IngressBackend) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ServiceName = string(data[iNdEx:postIndex]) + m.ServiceName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -7370,7 +8110,7 @@ func (m *IngressBackend) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -7384,13 +8124,13 @@ func (m *IngressBackend) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ServicePort.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -7409,8 +8149,8 @@ func (m *IngressBackend) Unmarshal(data []byte) error { } return nil } -func (m *IngressList) Unmarshal(data []byte) error { - l := len(data) +func (m *IngressList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -7422,7 +8162,7 @@ func (m *IngressList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -7450,7 +8190,7 @@ func (m *IngressList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -7464,7 +8204,7 @@ func (m *IngressList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7480,7 +8220,7 @@ func (m *IngressList) Unmarshal(data 
[]byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -7495,13 +8235,13 @@ func (m *IngressList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, Ingress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -7520,8 +8260,8 @@ func (m *IngressList) Unmarshal(data []byte) error { } return nil } -func (m *IngressRule) Unmarshal(data []byte) error { - l := len(data) +func (m *IngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -7533,7 +8273,7 @@ func (m *IngressRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -7561,7 +8301,7 @@ func (m *IngressRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -7576,7 +8316,7 @@ func (m *IngressRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(data[iNdEx:postIndex]) + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -7590,7 +8330,7 @@ func (m *IngressRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -7604,13 +8344,13 @@ func (m *IngressRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.IngressRuleValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -7629,8 +8369,8 @@ func (m *IngressRule) Unmarshal(data []byte) error { } return nil } -func (m *IngressRuleValue) Unmarshal(data []byte) error { - l := len(data) +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -7642,7 +8382,7 @@ func (m *IngressRuleValue) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -7670,7 +8410,7 @@ func (m *IngressRuleValue) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -7687,13 +8427,13 @@ func (m *IngressRuleValue) Unmarshal(data []byte) error { if m.HTTP == nil { m.HTTP = &HTTPIngressRuleValue{} } - if err := m.HTTP.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -7712,8 +8452,8 @@ func (m *IngressRuleValue) Unmarshal(data []byte) error { } return nil } -func (m 
*IngressSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *IngressSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -7725,7 +8465,7 @@ func (m *IngressSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -7753,7 +8493,7 @@ func (m *IngressSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -7770,7 +8510,7 @@ func (m *IngressSpec) Unmarshal(data []byte) error { if m.Backend == nil { m.Backend = &IngressBackend{} } - if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7786,7 +8526,7 @@ func (m *IngressSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -7801,7 +8541,7 @@ func (m *IngressSpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7817,7 +8557,7 @@ func (m *IngressSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -7832,13 +8572,13 @@ func (m *IngressSpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -7857,8 +8597,8 @@ func (m *IngressSpec) Unmarshal(data []byte) error { } return nil } -func (m *IngressStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -7870,7 +8610,7 @@ func (m *IngressStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -7898,7 +8638,7 @@ func (m *IngressStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -7912,13 +8652,13 @@ func (m *IngressStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -7937,8 +8677,8 @@ func (m *IngressStatus) Unmarshal(data []byte) error { } return nil } -func (m *IngressTLS) Unmarshal(data []byte) error { - l := len(data) +func (m *IngressTLS) Unmarshal(dAtA []byte) error { + l := len(dAtA) 
iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -7950,7 +8690,7 @@ func (m *IngressTLS) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -7978,7 +8718,7 @@ func (m *IngressTLS) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -7993,7 +8733,7 @@ func (m *IngressTLS) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hosts = append(m.Hosts, string(data[iNdEx:postIndex])) + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { @@ -8007,7 +8747,7 @@ func (m *IngressTLS) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -8022,11 +8762,11 @@ func (m *IngressTLS) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(data[iNdEx:postIndex]) + m.SecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -8045,8 +8785,8 @@ func (m *IngressTLS) Unmarshal(data []byte) error { } return nil } -func (m *NetworkPolicy) Unmarshal(data []byte) error { - l := len(data) +func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -8058,7 +8798,7 @@ func (m *NetworkPolicy) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -8086,7 +8826,7 @@ func (m *NetworkPolicy) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -8100,7 +8840,7 @@ func (m *NetworkPolicy) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8116,7 +8856,7 @@ func (m *NetworkPolicy) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -8130,13 +8870,13 @@ func (m *NetworkPolicy) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -8155,8 +8895,8 @@ func (m *NetworkPolicy) Unmarshal(data []byte) error { } return nil } -func (m *NetworkPolicyIngressRule) Unmarshal(data []byte) error { - l := len(data) +func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -8168,7 +8908,119 @@ func (m *NetworkPolicyIngressRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.To = append(m.To, NetworkPolicyPeer{}) + if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -8196,7 +9048,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -8211,7 +9063,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8227,7 +9079,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -8242,13 +9094,13 @@ func (m *NetworkPolicyIngressRule) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.From = append(m.From, NetworkPolicyPeer{}) - if err := m.From[len(m.From)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return 
err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -8267,8 +9119,8 @@ func (m *NetworkPolicyIngressRule) Unmarshal(data []byte) error { } return nil } -func (m *NetworkPolicyList) Unmarshal(data []byte) error { - l := len(data) +func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -8280,7 +9132,7 @@ func (m *NetworkPolicyList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -8308,7 +9160,7 @@ func (m *NetworkPolicyList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -8322,7 +9174,7 @@ func (m *NetworkPolicyList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8338,7 +9190,7 @@ func (m *NetworkPolicyList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -8353,13 +9205,13 @@ func (m *NetworkPolicyList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, NetworkPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -8378,8 +9230,8 @@ func (m *NetworkPolicyList) Unmarshal(data []byte) error { } return nil } -func (m *NetworkPolicyPeer) Unmarshal(data []byte) error { - l := len(data) +func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -8391,7 +9243,7 @@ func (m *NetworkPolicyPeer) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -8419,7 +9271,7 @@ func (m *NetworkPolicyPeer) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -8436,7 +9288,7 @@ func (m *NetworkPolicyPeer) Unmarshal(data []byte) error { if m.PodSelector == nil { m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } - if err := m.PodSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8452,7 +9304,7 @@ func (m *NetworkPolicyPeer) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -8469,13 +9321,46 @@ func (m *NetworkPolicyPeer) Unmarshal(data []byte) error { if m.NamespaceSelector == nil { m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } - if err := m.NamespaceSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := 
m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IPBlock == nil { + m.IPBlock = &IPBlock{} + } + if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -8494,8 +9379,8 @@ func (m *NetworkPolicyPeer) Unmarshal(data []byte) error { } return nil } -func (m *NetworkPolicyPort) Unmarshal(data []byte) error { - l := len(data) +func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -8507,7 +9392,7 @@ func (m *NetworkPolicyPort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -8535,7 +9420,7 @@ func (m *NetworkPolicyPort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -8550,7 +9435,7 @@ func (m *NetworkPolicyPort) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := k8s_io_kubernetes_pkg_api_v1.Protocol(data[iNdEx:postIndex]) + s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) m.Protocol = &s iNdEx = postIndex case 2: @@ -8565,7 +9450,7 @@ func (m *NetworkPolicyPort) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -8582,13 +9467,13 @@ func (m *NetworkPolicyPort) Unmarshal(data []byte) error { if m.Port == nil { m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } - if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -8607,8 +9492,8 @@ func (m *NetworkPolicyPort) Unmarshal(data []byte) error { } return nil } -func (m *NetworkPolicySpec) Unmarshal(data []byte) error { - l := len(data) +func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -8620,7 +9505,7 @@ func (m *NetworkPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -8648,7 +9533,7 @@ func (m *NetworkPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -8662,7 +9547,7 @@ func (m *NetworkPolicySpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.PodSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8678,7 +9563,7 @@ func (m *NetworkPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -8693,13 +9578,73 @@ func (m *NetworkPolicySpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -8718,8 +9663,8 @@ func (m *NetworkPolicySpec) Unmarshal(data []byte) error { } return nil } -func (m *PodSecurityPolicy) Unmarshal(data []byte) error { - l := len(data) +func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -8731,7 +9676,7 @@ func (m *PodSecurityPolicy) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -8759,7 +9704,7 @@ func (m *PodSecurityPolicy) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -8773,7 +9718,7 @@ func (m *PodSecurityPolicy) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8789,7 +9734,7 @@ func (m *PodSecurityPolicy) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -8803,13 +9748,13 
@@ func (m *PodSecurityPolicy) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -8828,8 +9773,8 @@ func (m *PodSecurityPolicy) Unmarshal(data []byte) error { } return nil } -func (m *PodSecurityPolicyList) Unmarshal(data []byte) error { - l := len(data) +func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -8841,7 +9786,7 @@ func (m *PodSecurityPolicyList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -8869,7 +9814,7 @@ func (m *PodSecurityPolicyList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -8883,7 +9828,7 @@ func (m *PodSecurityPolicyList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8899,7 +9844,7 @@ func (m *PodSecurityPolicyList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -8914,13 +9859,13 @@ func (m *PodSecurityPolicyList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, PodSecurityPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -8939,8 +9884,8 @@ func (m *PodSecurityPolicyList) Unmarshal(data []byte) error { } return nil } -func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { - l := len(data) +func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -8952,7 +9897,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -8980,7 +9925,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9000,7 +9945,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -9015,7 +9960,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex])) + m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, 
k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { @@ -9029,7 +9974,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -9044,7 +9989,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex])) + m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 4: if wireType != 2 { @@ -9058,7 +10003,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -9073,7 +10018,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex])) + m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 5: if wireType != 2 { @@ -9087,7 +10032,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -9102,7 +10047,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, FSType(data[iNdEx:postIndex])) + m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 6: if wireType != 0 { @@ -9116,7 +10061,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9136,7 +10081,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9151,7 +10096,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.HostPorts = append(m.HostPorts, HostPortRange{}) - if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9167,7 +10112,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9187,7 +10132,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9207,7 +10152,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9221,7 +10166,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - if err := m.SELinux.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.SELinux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9237,7 +10182,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9251,7 +10196,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.RunAsUser.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9267,7 +10212,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9281,7 +10226,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.SupplementalGroups.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9297,7 +10242,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9311,7 +10256,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.FSGroup.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9327,7 +10272,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9335,9 +10280,171 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { } } m.ReadOnlyRootFilesystem = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DefaultAllowPrivilegeEscalation = &b + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowPrivilegeEscalation = &b + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) + if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) + if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -9356,8 +10463,8 @@ func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { } return nil } -func (m *ReplicaSet) Unmarshal(data []byte) error { - l := len(data) +func (m *ReplicaSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -9369,7 +10476,7 @@ func (m *ReplicaSet) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -9397,7 +10504,7 @@ func (m *ReplicaSet) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9411,7 +10518,7 @@ func (m *ReplicaSet) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9427,7 +10534,7 @@ func (m 
*ReplicaSet) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9441,7 +10548,7 @@ func (m *ReplicaSet) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9457,7 +10564,7 @@ func (m *ReplicaSet) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9471,13 +10578,13 @@ func (m *ReplicaSet) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -9496,8 +10603,8 @@ func (m *ReplicaSet) Unmarshal(data []byte) error { } return nil } -func (m *ReplicaSetCondition) Unmarshal(data []byte) error { - l := len(data) +func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -9509,7 +10616,7 @@ func (m *ReplicaSetCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -9537,7 +10644,7 @@ func (m *ReplicaSetCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -9552,7 +10659,7 @@ func (m *ReplicaSetCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ReplicaSetConditionType(data[iNdEx:postIndex]) + m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -9566,7 +10673,7 @@ func (m *ReplicaSetCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -9581,7 +10688,7 @@ func (m *ReplicaSetCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -9595,7 +10702,7 @@ func (m *ReplicaSetCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9609,7 +10716,7 @@ func (m *ReplicaSetCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9625,7 +10732,7 @@ func (m *ReplicaSetCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -9640,7 +10747,7 @@ func (m *ReplicaSetCondition) 
Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -9654,7 +10761,7 @@ func (m *ReplicaSetCondition) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -9669,11 +10776,11 @@ func (m *ReplicaSetCondition) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -9692,8 +10799,8 @@ func (m *ReplicaSetCondition) Unmarshal(data []byte) error { } return nil } -func (m *ReplicaSetList) Unmarshal(data []byte) error { - l := len(data) +func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -9705,7 +10812,7 @@ func (m *ReplicaSetList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -9733,7 +10840,7 @@ func (m *ReplicaSetList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9747,7 +10854,7 @@ func (m *ReplicaSetList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9763,7 +10870,7 @@ func (m *ReplicaSetList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9778,13 +10885,13 @@ func (m *ReplicaSetList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, ReplicaSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -9803,8 +10910,8 @@ func (m *ReplicaSetList) Unmarshal(data []byte) error { } return nil } -func (m *ReplicaSetSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -9816,7 +10923,7 @@ func (m *ReplicaSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -9844,7 +10951,7 @@ func (m *ReplicaSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -9864,7 +10971,7 @@ func (m *ReplicaSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9881,7 +10988,7 @@ func (m *ReplicaSetSpec) 
Unmarshal(data []byte) error { if m.Selector == nil { m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9897,7 +11004,7 @@ func (m *ReplicaSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -9911,7 +11018,7 @@ func (m *ReplicaSetSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9927,7 +11034,7 @@ func (m *ReplicaSetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -9936,7 +11043,7 @@ func (m *ReplicaSetSpec) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -9955,8 +11062,8 @@ func (m *ReplicaSetSpec) Unmarshal(data []byte) error { } return nil } -func (m *ReplicaSetStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -9968,7 +11075,7 @@ func (m *ReplicaSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -9996,7 +11103,7 @@ func (m *ReplicaSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -10015,7 +11122,7 @@ func (m *ReplicaSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -10034,7 +11141,7 @@ func (m *ReplicaSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -10053,7 +11160,7 @@ func (m *ReplicaSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -10072,7 +11179,7 @@ func (m *ReplicaSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.AvailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -10091,7 +11198,7 @@ func (m *ReplicaSetStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -10106,13 +11213,13 @@ func (m *ReplicaSetStatus) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Conditions = append(m.Conditions, ReplicaSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } 
iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -10131,8 +11238,8 @@ func (m *ReplicaSetStatus) Unmarshal(data []byte) error { } return nil } -func (m *ReplicationControllerDummy) Unmarshal(data []byte) error { - l := len(data) +func (m *ReplicationControllerDummy) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -10144,7 +11251,7 @@ func (m *ReplicationControllerDummy) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -10162,7 +11269,7 @@ func (m *ReplicationControllerDummy) Unmarshal(data []byte) error { switch fieldNum { default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -10181,8 +11288,8 @@ func (m *ReplicationControllerDummy) Unmarshal(data []byte) error { } return nil } -func (m *RollbackConfig) Unmarshal(data []byte) error { - l := len(data) +func (m *RollbackConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -10194,7 +11301,7 @@ func (m *RollbackConfig) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -10222,7 +11329,7 @@ func (m *RollbackConfig) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -10231,7 +11338,7 @@ func (m *RollbackConfig) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -10250,8 +11357,8 @@ func (m *RollbackConfig) Unmarshal(data []byte) error { } return nil } -func (m *RollingUpdateDaemonSet) Unmarshal(data []byte) error { - l := len(data) +func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -10263,7 +11370,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -10291,7 +11398,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -10308,13 +11415,13 @@ func (m *RollingUpdateDaemonSet) Unmarshal(data []byte) error { if m.MaxUnavailable == nil { m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } - if err := m.MaxUnavailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -10333,8 +11440,8 @@ func (m *RollingUpdateDaemonSet) Unmarshal(data []byte) error { } return nil } -func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { - l := len(data) +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -10346,7 +11453,7 
@@ func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -10374,7 +11481,7 @@ func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -10391,7 +11498,7 @@ func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { if m.MaxUnavailable == nil { m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } - if err := m.MaxUnavailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10407,7 +11514,7 @@ func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -10424,13 +11531,13 @@ func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { if m.MaxSurge == nil { m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } - if err := m.MaxSurge.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -10449,8 +11556,8 @@ func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { } return nil } -func (m *RunAsUserStrategyOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -10462,7 +11569,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -10490,7 +11597,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -10505,7 +11612,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = RunAsUserStrategy(data[iNdEx:postIndex]) + m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -10519,7 +11626,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -10534,13 +11641,13 @@ func (m *RunAsUserStrategyOptions) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -10559,8 +11666,8 @@ func (m *RunAsUserStrategyOptions) Unmarshal(data []byte) error { } return nil } -func (m *SELinuxStrategyOptions) Unmarshal(data []byte) error { - l := len(data) +func (m 
*SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -10572,7 +11679,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -10600,7 +11707,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -10615,7 +11722,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = SELinuxStrategy(data[iNdEx:postIndex]) + m.Rule = SELinuxStrategy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -10629,7 +11736,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -10644,15 +11751,15 @@ func (m *SELinuxStrategyOptions) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } if m.SELinuxOptions == nil { - m.SELinuxOptions = &k8s_io_kubernetes_pkg_api_v1.SELinuxOptions{} + m.SELinuxOptions = &k8s_io_api_core_v1.SELinuxOptions{} } - if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -10671,8 +11778,8 @@ func (m *SELinuxStrategyOptions) Unmarshal(data []byte) error { } return nil } -func (m *Scale) Unmarshal(data []byte) error { - l := len(data) +func (m *Scale) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -10684,7 +11791,7 @@ func (m *Scale) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -10712,7 +11819,7 @@ func (m *Scale) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -10726,7 +11833,7 @@ func (m *Scale) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10742,7 +11849,7 @@ func (m *Scale) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -10756,7 +11863,7 @@ func (m *Scale) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10772,7 +11879,7 @@ func (m *Scale) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -10786,13 +11893,13 @@ func (m *Scale) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -10811,8 +11918,8 @@ func (m *Scale) Unmarshal(data []byte) error { } return nil } -func (m *ScaleSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *ScaleSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -10824,7 +11931,7 @@ func (m *ScaleSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -10852,7 +11959,7 @@ func (m *ScaleSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -10861,7 +11968,7 @@ func (m *ScaleSpec) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -10880,8 +11987,8 @@ func (m *ScaleSpec) Unmarshal(data []byte) error { } return nil } -func (m *ScaleStatus) Unmarshal(data []byte) error { - l := len(data) +func (m *ScaleStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -10893,7 +12000,7 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -10921,7 +12028,7 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -10940,7 +12047,7 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -10962,7 +12069,7 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -10977,7 +12084,7 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -10992,52 +12099,57 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return 
ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue if m.Selector == nil { m.Selector = make(map[string]string) } - m.Selector[mapkey] = mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue + } iNdEx = postIndex case 3: if wireType != 2 { @@ -11051,7 +12163,7 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -11066,11 +12178,11 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TargetSelector = string(data[iNdEx:postIndex]) + m.TargetSelector = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -11089,8 +12201,8 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { } return nil } -func (m *SupplementalGroupsStrategyOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -11102,7 +12214,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -11130,7 +12242,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -11145,7 +12257,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = SupplementalGroupsStrategyType(data[iNdEx:postIndex]) + m.Rule = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -11159,7 +12271,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -11174,13 +12286,13 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Ranges = append(m.Ranges, 
IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -11199,481 +12311,8 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(data []byte) error { } return nil } -func (m *ThirdPartyResource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Versions = append(m.Versions, APIVersion{}) - if err := m.Versions[len(m.Versions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceData) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - 
for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceDataList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceDataList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceDataList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ThirdPartyResourceData{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ThirdPartyResource{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -11684,7 +12323,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -11702,7 +12341,7 @@ func skipGenerated(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -11719,7 +12358,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -11742,7 +12381,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -11753,7 +12392,7 @@ func skipGenerated(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipGenerated(data[start:]) + next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } @@ -11777,217 +12416,238 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -var fileDescriptorGenerated = []byte{ - // 3369 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe4, 0x5b, 0xdb, 0x6f, 0x1b, 0xc7, - 0xd5, 0xf7, 0x8a, 0xa4, 0x45, 0x1d, 0x59, 0x92, 0x35, 0x72, 0x64, 0x46, 0x49, 0x44, 0x67, 0x3f, - 0x7c, 0x89, 0xf3, 0x7d, 0x09, 0xf5, 0xc5, 0xf9, 0x9c, 0x26, 0x4e, 0xe2, 0x44, 0x94, 0x7c, 0x51, - 0x21, 0xc9, 0xcc, 0x90, 0x32, 0x1a, 0xe7, 0xd6, 0x15, 0x39, 0xa2, 0xd6, 0xde, 0x5b, 0x76, 0x67, - 0x15, 0x11, 0x41, 0xdb, 0x00, 0x45, 0xf3, 0x58, 0xb4, 0x2f, 0x45, 0x0a, 0xa4, 0x8f, 0x7d, 0xe8, - 0x4b, 0x9b, 0x3c, 0xb4, 0x69, 0xff, 0x82, 0xfa, 0xa1, 0x28, 0x52, 0xa0, 
0x05, 0x8a, 0x22, 0x15, - 0x6a, 0x05, 0xcd, 0x3f, 0xd0, 0xe6, 0xc5, 0x4f, 0xc5, 0xcc, 0xce, 0xde, 0x77, 0x65, 0x93, 0xb2, - 0x89, 0x02, 0x7d, 0xe3, 0xce, 0x9c, 0xf3, 0x3b, 0x97, 0x39, 0x73, 0xe6, 0xcc, 0x85, 0xf0, 0xd2, - 0x8d, 0xe7, 0x9c, 0x9a, 0x6a, 0x2e, 0xdc, 0x70, 0x37, 0x89, 0x6d, 0x10, 0x4a, 0x9c, 0x05, 0xeb, - 0x46, 0x77, 0x41, 0xb1, 0x54, 0x67, 0x81, 0xec, 0x52, 0x62, 0x38, 0xaa, 0x69, 0x38, 0x0b, 0x3b, - 0x4f, 0x6f, 0x12, 0xaa, 0x3c, 0xbd, 0xd0, 0x25, 0x06, 0xb1, 0x15, 0x4a, 0x3a, 0x35, 0xcb, 0x36, - 0xa9, 0x89, 0x9e, 0xf2, 0xd8, 0x6b, 0x21, 0x7b, 0xcd, 0xba, 0xd1, 0xad, 0x31, 0xf6, 0x5a, 0xc8, - 0x5e, 0x13, 0xec, 0x73, 0x4f, 0x75, 0x55, 0xba, 0xed, 0x6e, 0xd6, 0xda, 0xa6, 0xbe, 0xd0, 0x35, - 0xbb, 0xe6, 0x02, 0x47, 0xd9, 0x74, 0xb7, 0xf8, 0x17, 0xff, 0xe0, 0xbf, 0x3c, 0xf4, 0xb9, 0xff, - 0x17, 0xca, 0x29, 0x96, 0xaa, 0x2b, 0xed, 0x6d, 0xd5, 0x20, 0x76, 0xcf, 0x57, 0x6f, 0xc1, 0x26, - 0x8e, 0xe9, 0xda, 0x6d, 0x92, 0xd4, 0xe9, 0x40, 0x2e, 0x67, 0x41, 0x27, 0x54, 0x59, 0xd8, 0x49, - 0x59, 0x32, 0xb7, 0x90, 0xc7, 0x65, 0xbb, 0x06, 0x55, 0xf5, 0xb4, 0x98, 0x67, 0xef, 0xc4, 0xe0, - 0xb4, 0xb7, 0x89, 0xae, 0xa4, 0xf8, 0x9e, 0xc9, 0xe3, 0x73, 0xa9, 0xaa, 0x2d, 0xa8, 0x06, 0x75, - 0xa8, 0x7d, 0x90, 0x4d, 0x0e, 0xb1, 0x77, 0x88, 0x1d, 0x1d, 0x25, 0x45, 0xb7, 0x34, 0x92, 0x65, - 0xd3, 0x93, 0xb9, 0x83, 0x9b, 0x41, 0x2d, 0xd7, 0x00, 0x16, 0x1b, 0x2b, 0x57, 0x89, 0xcd, 0xc6, - 0x0c, 0x9d, 0x82, 0xa2, 0xa1, 0xe8, 0xa4, 0x22, 0x9d, 0x92, 0x4e, 0x8f, 0xd5, 0x8f, 0xdd, 0xdc, - 0xab, 0x1e, 0xd9, 0xdf, 0xab, 0x16, 0xd7, 0x15, 0x9d, 0x60, 0xde, 0x23, 0xff, 0x58, 0x82, 0x07, - 0x97, 0x5c, 0x87, 0x9a, 0xfa, 0x1a, 0xa1, 0xb6, 0xda, 0x5e, 0x72, 0x6d, 0x9b, 0x18, 0xb4, 0x49, - 0x15, 0xea, 0x3a, 0x77, 0xe6, 0x47, 0xd7, 0xa0, 0xb4, 0xa3, 0x68, 0x2e, 0xa9, 0x8c, 0x9c, 0x92, - 0x4e, 0x8f, 0x9f, 0xa9, 0xd5, 0x44, 0x2c, 0x45, 0x1d, 0xe3, 0x47, 0x53, 0xcd, 0x1f, 0xed, 0xda, - 0xab, 0xae, 0x62, 0x50, 0x95, 0xf6, 0xea, 0x27, 0x04, 0xe4, 0x31, 0x21, 0xf7, 0x2a, 0xc3, 0xc2, - 0x1e, 0xa4, 0xfc, 0x7d, 0x09, 0x1e, 0xc9, 0xd5, 0x6d, 0x55, 0x75, 0x28, 0xd2, 0xa1, 0xa4, 0x52, - 0xa2, 0x3b, 0x15, 0xe9, 0x54, 0xe1, 0xf4, 0xf8, 0x99, 0xcb, 0xb5, 0xbe, 0x22, 0xb9, 0x96, 0x0b, - 0x5e, 0x9f, 0x10, 0x7a, 0x95, 0x56, 0x18, 0x3c, 0xf6, 0xa4, 0xc8, 0x3f, 0x94, 0x00, 0x45, 0x79, - 0x5a, 0x8a, 0xdd, 0x25, 0xf4, 0x2e, 0xbc, 0xf4, 0xda, 0xe1, 0xbc, 0x34, 0x23, 0x20, 0xc7, 0x3d, - 0x81, 0x31, 0x27, 0xbd, 0x2f, 0xc1, 0x6c, 0x5a, 0x27, 0xee, 0x9d, 0xad, 0xb8, 0x77, 0x16, 0x0f, - 0xe1, 0x1d, 0x0f, 0x35, 0xc7, 0x2d, 0xbf, 0x1c, 0x81, 0xb1, 0x65, 0x85, 0xe8, 0xa6, 0xd1, 0x24, - 0x14, 0x7d, 0x13, 0xca, 0x6c, 0x7a, 0x76, 0x14, 0xaa, 0x70, 0x8f, 0x8c, 0x9f, 0xf9, 0xbf, 0x83, - 0xcc, 0x75, 0x6a, 0x8c, 0xba, 0xb6, 0xf3, 0x74, 0xed, 0xca, 0xe6, 0x75, 0xd2, 0xa6, 0x6b, 0x84, - 0x2a, 0x75, 0x24, 0xe4, 0x40, 0xd8, 0x86, 0x03, 0x54, 0xf4, 0x16, 0x14, 0x1d, 0x8b, 0xb4, 0x85, - 0x33, 0x5f, 0xec, 0xd3, 0xac, 0x40, 0xd3, 0xa6, 0x45, 0xda, 0xe1, 0x68, 0xb1, 0x2f, 0xcc, 0x71, - 0xd1, 0x16, 0x1c, 0x75, 0x78, 0x18, 0x54, 0x0a, 0x5c, 0xc2, 0xf9, 0x81, 0x25, 0x78, 0xc1, 0x34, - 0x29, 0x64, 0x1c, 0xf5, 0xbe, 0xb1, 0x40, 0x97, 0x7f, 0x27, 0xc1, 0x44, 0x40, 0xcb, 0x47, 0xec, - 0x8d, 0x94, 0xef, 0x6a, 0x77, 0xe7, 0x3b, 0xc6, 0xcd, 0x3d, 0x77, 0x5c, 0xc8, 0x2a, 0xfb, 0x2d, - 0x11, 0xbf, 0xbd, 0xe9, 0xc7, 0xc3, 0x08, 0x8f, 0x87, 0xe7, 0x06, 0x35, 0x2b, 0x27, 0x0c, 0xbe, - 0x28, 0x44, 0xcc, 0x61, 0xee, 0x44, 0x6f, 0x42, 0xd9, 0x21, 0x1a, 0x69, 0x53, 0xd3, 0x16, 0xe6, - 0x3c, 0x73, 0x97, 0xe6, 0x28, 0x9b, 0x44, 0x6b, 0x0a, 0xd6, 0xfa, 0x31, 0x66, 0x8f, 0xff, 0x85, - 
0x03, 0x48, 0xf4, 0x3a, 0x94, 0x29, 0xd1, 0x2d, 0x4d, 0xa1, 0xfe, 0xc4, 0x7a, 0x2a, 0xdf, 0x24, - 0x06, 0xdb, 0x30, 0x3b, 0x2d, 0xc1, 0xc0, 0x07, 0x3f, 0x70, 0x96, 0xdf, 0x8a, 0x03, 0x40, 0xf4, - 0x81, 0x04, 0x93, 0xae, 0xd5, 0x61, 0xa4, 0x94, 0x25, 0xd8, 0x6e, 0x4f, 0x44, 0xc3, 0xc5, 0x41, - 0xdd, 0xb6, 0x11, 0x43, 0xab, 0xcf, 0x0a, 0xe1, 0x93, 0xf1, 0x76, 0x9c, 0x90, 0x8a, 0x16, 0x61, - 0x4a, 0x57, 0x0d, 0x4c, 0x94, 0x4e, 0xaf, 0x49, 0xda, 0xa6, 0xd1, 0x71, 0x2a, 0xc5, 0x53, 0xd2, - 0xe9, 0x52, 0xfd, 0xa4, 0x00, 0x98, 0x5a, 0x8b, 0x77, 0xe3, 0x24, 0x3d, 0xfa, 0x3a, 0x20, 0xdf, - 0xae, 0x4b, 0xde, 0x7a, 0xa1, 0x9a, 0x46, 0xa5, 0x74, 0x4a, 0x3a, 0x5d, 0xa8, 0xcf, 0x09, 0x14, - 0xd4, 0x4a, 0x51, 0xe0, 0x0c, 0x2e, 0xf9, 0x9f, 0x45, 0x98, 0x4a, 0x04, 0x38, 0xba, 0x0a, 0xb3, - 0x6d, 0x2f, 0x7d, 0xae, 0xbb, 0xfa, 0x26, 0xb1, 0x9b, 0xed, 0x6d, 0xd2, 0x71, 0x35, 0xd2, 0xe1, - 0xa3, 0x5e, 0xaa, 0xcf, 0x0b, 0x19, 0xb3, 0x4b, 0x99, 0x54, 0x38, 0x87, 0x9b, 0xe9, 0x6d, 0xf0, - 0xa6, 0x35, 0xd5, 0x71, 0x02, 0xcc, 0x11, 0x8e, 0x19, 0xe8, 0xbd, 0x9e, 0xa2, 0xc0, 0x19, 0x5c, - 0x4c, 0xc7, 0x0e, 0x71, 0x54, 0x9b, 0x74, 0x92, 0x3a, 0x16, 0xe2, 0x3a, 0x2e, 0x67, 0x52, 0xe1, - 0x1c, 0x6e, 0x74, 0x16, 0xc6, 0x3d, 0x69, 0xdc, 0xe3, 0x62, 0x68, 0x82, 0x84, 0xbd, 0x1e, 0x76, - 0xe1, 0x28, 0x1d, 0x33, 0xcd, 0xdc, 0xe4, 0x55, 0x40, 0x27, 0x7f, 0x48, 0xae, 0xa4, 0x28, 0x70, - 0x06, 0x17, 0x33, 0xcd, 0x8b, 0x99, 0x94, 0x69, 0x47, 0xe3, 0xa6, 0x6d, 0x64, 0x52, 0xe1, 0x1c, - 0x6e, 0x16, 0x79, 0x9e, 0xca, 0x8b, 0x3b, 0x8a, 0xaa, 0x29, 0x9b, 0x1a, 0xa9, 0x8c, 0xc6, 0x23, - 0x6f, 0x3d, 0xde, 0x8d, 0x93, 0xf4, 0xe8, 0x12, 0x4c, 0x7b, 0x4d, 0x1b, 0x86, 0x12, 0x80, 0x94, - 0x39, 0xc8, 0x83, 0x02, 0x64, 0x7a, 0x3d, 0x49, 0x80, 0xd3, 0x3c, 0xf2, 0x5f, 0x24, 0x38, 0x99, - 0x33, 0x93, 0xd0, 0xcb, 0x50, 0xa4, 0x3d, 0xcb, 0x5f, 0x7f, 0xff, 0xd7, 0xcf, 0xe8, 0xad, 0x9e, - 0x45, 0x6e, 0xef, 0x55, 0x1f, 0xca, 0x61, 0x63, 0xdd, 0x98, 0x33, 0xa2, 0x6f, 0xc3, 0x84, 0x6d, - 0x6a, 0x9a, 0x6a, 0x74, 0x3d, 0x12, 0x91, 0x4d, 0x2e, 0xf4, 0x39, 0xd3, 0x71, 0x14, 0x23, 0xcc, - 0x96, 0xd3, 0xfb, 0x7b, 0xd5, 0x89, 0x58, 0x1f, 0x8e, 0x8b, 0x93, 0x7f, 0x3d, 0x02, 0xb0, 0x4c, - 0x2c, 0xcd, 0xec, 0xe9, 0xc4, 0x18, 0xc6, 0x0a, 0xfa, 0x76, 0x6c, 0x05, 0x7d, 0xa9, 0xdf, 0x8c, - 0x16, 0xa8, 0x9a, 0xbb, 0x84, 0x76, 0x13, 0x4b, 0xe8, 0xcb, 0x83, 0x8b, 0x38, 0x78, 0x0d, 0xbd, - 0x55, 0x80, 0x99, 0x90, 0x78, 0xc9, 0x34, 0x3a, 0x2a, 0x9f, 0x13, 0x2f, 0xc4, 0x62, 0xe2, 0xf1, - 0x44, 0x4c, 0x9c, 0xcc, 0x60, 0x89, 0xc4, 0xc3, 0xd5, 0x40, 0xfb, 0x11, 0xce, 0x7e, 0x3e, 0x2e, - 0xfc, 0xf6, 0x5e, 0xf5, 0xc0, 0xa2, 0xbc, 0x16, 0x60, 0xc6, 0x95, 0x45, 0x8f, 0xc1, 0x51, 0x9b, - 0x28, 0x8e, 0x69, 0xf0, 0x34, 0x31, 0x16, 0x1a, 0x85, 0x79, 0x2b, 0x16, 0xbd, 0xe8, 0x09, 0x18, - 0xd5, 0x89, 0xe3, 0x28, 0x5d, 0xc2, 0x33, 0xc2, 0x58, 0x7d, 0x4a, 0x10, 0x8e, 0xae, 0x79, 0xcd, - 0xd8, 0xef, 0x47, 0xd7, 0x61, 0x52, 0x53, 0x1c, 0x11, 0xda, 0x2d, 0x55, 0x27, 0x7c, 0xce, 0x8f, - 0x9f, 0xf9, 0x9f, 0xbb, 0x8b, 0x18, 0xc6, 0x11, 0xae, 0x44, 0xab, 0x31, 0x24, 0x9c, 0x40, 0x46, - 0x3b, 0x80, 0x58, 0x4b, 0xcb, 0x56, 0x0c, 0xc7, 0x73, 0x19, 0x93, 0x37, 0xda, 0xb7, 0xbc, 0x20, - 0xbf, 0xad, 0xa6, 0xd0, 0x70, 0x86, 0x04, 0xf9, 0xf7, 0x12, 0x4c, 0x86, 0x03, 0x36, 0x84, 0x42, - 0xe9, 0xad, 0x78, 0xa1, 0xf4, 0xfc, 0xc0, 0xc1, 0x9b, 0x53, 0x29, 0x7d, 0x58, 0x00, 0x14, 0x12, - 0xb1, 0xd4, 0xb0, 0xa9, 0xb4, 0x6f, 0xdc, 0xc5, 0x3e, 0xe2, 0xa7, 0x12, 0x20, 0x91, 0xac, 0x17, - 0x0d, 0xc3, 0xa4, 0x3c, 0xff, 0xfb, 0x6a, 0xbe, 0x36, 0xb0, 0x9a, 0xbe, 0x06, 0xb5, 0x8d, 0x14, - 0xf6, 0x05, 0x83, 0xda, 
0xbd, 0x70, 0xc4, 0xd2, 0x04, 0x38, 0x43, 0x21, 0xf4, 0x0e, 0x80, 0x2d, - 0x30, 0x5b, 0xa6, 0x48, 0x01, 0x2f, 0x0d, 0x90, 0x4d, 0x19, 0xc0, 0x92, 0x69, 0x6c, 0xa9, 0xdd, - 0x30, 0xa1, 0xe1, 0x00, 0x18, 0x47, 0x84, 0xcc, 0x5d, 0x80, 0x93, 0x39, 0xda, 0xa3, 0xe3, 0x50, - 0xb8, 0x41, 0x7a, 0x9e, 0x5b, 0x31, 0xfb, 0x89, 0x4e, 0x44, 0xf7, 0x63, 0x63, 0x62, 0x2b, 0x75, - 0x6e, 0xe4, 0x39, 0x49, 0xfe, 0xb2, 0x14, 0x8d, 0x35, 0x5e, 0xc5, 0x9e, 0x86, 0xb2, 0x4d, 0x2c, - 0x4d, 0x6d, 0x2b, 0x8e, 0xa8, 0x67, 0x78, 0x41, 0x8a, 0x45, 0x1b, 0x0e, 0x7a, 0x63, 0xf5, 0xee, - 0xc8, 0xfd, 0xad, 0x77, 0x0b, 0xf7, 0xba, 0xde, 0x35, 0xa1, 0xec, 0xf8, 0x85, 0x6e, 0x91, 0x83, - 0x2f, 0x1e, 0x22, 0x67, 0x8b, 0x1a, 0x37, 0x10, 0x18, 0x54, 0xb7, 0x81, 0x90, 0xac, 0xba, 0xb6, - 0xd4, 0x67, 0x5d, 0xbb, 0x0a, 0x27, 0x6c, 0xb2, 0xa3, 0x32, 0x35, 0x2e, 0xab, 0x0e, 0x35, 0xed, - 0xde, 0xaa, 0xaa, 0xab, 0x54, 0x94, 0x3d, 0x95, 0xfd, 0xbd, 0xea, 0x09, 0x9c, 0xd1, 0x8f, 0x33, - 0xb9, 0x58, 0x76, 0xb6, 0x14, 0xd7, 0x21, 0x1d, 0x9e, 0xd2, 0xca, 0x61, 0x76, 0x6e, 0xf0, 0x56, - 0x2c, 0x7a, 0x91, 0x1e, 0x0b, 0xee, 0xf2, 0xbd, 0x08, 0xee, 0xc9, 0xfc, 0xc0, 0x46, 0x1b, 0x70, - 0xd2, 0xb2, 0xcd, 0xae, 0x4d, 0x1c, 0x67, 0x99, 0x28, 0x1d, 0x4d, 0x35, 0x88, 0xef, 0xaf, 0x31, - 0x6e, 0xe7, 0x43, 0xfb, 0x7b, 0xd5, 0x93, 0x8d, 0x6c, 0x12, 0x9c, 0xc7, 0x2b, 0x7f, 0x54, 0x84, - 0xe3, 0xc9, 0x55, 0x36, 0xa7, 0x2a, 0x95, 0x06, 0xaa, 0x4a, 0x9f, 0x8c, 0x4c, 0x1b, 0xaf, 0x64, - 0x0f, 0xa2, 0x21, 0x63, 0xea, 0x2c, 0xc2, 0x94, 0xc8, 0x23, 0x7e, 0xa7, 0xa8, 0xcb, 0x83, 0x68, - 0xd8, 0x88, 0x77, 0xe3, 0x24, 0x3d, 0xab, 0x35, 0xc3, 0x12, 0xd2, 0x07, 0x29, 0xc6, 0x6b, 0xcd, - 0xc5, 0x24, 0x01, 0x4e, 0xf3, 0xa0, 0x35, 0x98, 0x71, 0x8d, 0x34, 0x94, 0x17, 0x9d, 0x0f, 0x09, - 0xa8, 0x99, 0x8d, 0x34, 0x09, 0xce, 0xe2, 0x43, 0x3b, 0x00, 0x6d, 0xbf, 0x20, 0x70, 0x2a, 0x47, - 0x79, 0xae, 0xae, 0x0f, 0x3c, 0xb7, 0x82, 0xda, 0x22, 0xcc, 0x88, 0x41, 0x93, 0x83, 0x23, 0x92, - 0xd0, 0x0b, 0x30, 0x61, 0xf3, 0x8d, 0x87, 0x6f, 0x80, 0x57, 0xbc, 0x3f, 0x20, 0xd8, 0x26, 0x70, - 0xb4, 0x13, 0xc7, 0x69, 0xe5, 0x3f, 0x48, 0xd1, 0x25, 0x2a, 0x28, 0xb5, 0xcf, 0xc5, 0xca, 0xaa, - 0xc7, 0x12, 0x65, 0xd5, 0x6c, 0x9a, 0x23, 0x52, 0x55, 0x7d, 0x27, 0xbb, 0xca, 0xbe, 0x78, 0xa8, - 0x2a, 0x3b, 0x5c, 0x6a, 0xef, 0x5c, 0x66, 0x7f, 0x22, 0xc1, 0xec, 0xc5, 0xe6, 0x25, 0xdb, 0x74, - 0x2d, 0x5f, 0xbd, 0x2b, 0x96, 0xe7, 0xab, 0xaf, 0x41, 0xd1, 0x76, 0x35, 0xdf, 0xae, 0xff, 0xf2, - 0xed, 0xc2, 0xae, 0xc6, 0xec, 0x9a, 0x49, 0x70, 0x79, 0x46, 0x31, 0x06, 0xf4, 0x16, 0x1c, 0xb5, - 0x15, 0xa3, 0x4b, 0xfc, 0x45, 0xf8, 0xd9, 0x3e, 0xad, 0x59, 0x59, 0xc6, 0x8c, 0x3d, 0x52, 0x0a, - 0x72, 0x34, 0x2c, 0x50, 0xe5, 0x9f, 0x48, 0x30, 0x75, 0xb9, 0xd5, 0x6a, 0xac, 0x18, 0x7c, 0x16, - 0x37, 0x14, 0xba, 0xcd, 0xea, 0x04, 0x4b, 0xa1, 0xdb, 0xc9, 0x3a, 0x81, 0xf5, 0x61, 0xde, 0x83, - 0xb6, 0x61, 0x94, 0x65, 0x0f, 0x62, 0x74, 0x06, 0x2c, 0xf1, 0x85, 0xb8, 0xba, 0x07, 0x12, 0xd6, - 0x9f, 0xa2, 0x01, 0xfb, 0xf0, 0xf2, 0x7b, 0x70, 0x22, 0xa2, 0x1e, 0xf3, 0x17, 0x3f, 0x9d, 0x44, - 0x6d, 0x28, 0x31, 0x4d, 0xfc, 0xb3, 0xc7, 0x7e, 0x8f, 0xd0, 0x12, 0x26, 0x87, 0x75, 0x14, 0xfb, - 0x72, 0xb0, 0x87, 0x2d, 0xaf, 0xc1, 0xc4, 0x65, 0xd3, 0xa1, 0x0d, 0xd3, 0xa6, 0xdc, 0x6d, 0xe8, - 0x11, 0x28, 0xe8, 0xaa, 0x21, 0x56, 0xe9, 0x71, 0xc1, 0x53, 0x60, 0xeb, 0x08, 0x6b, 0xe7, 0xdd, - 0xca, 0xae, 0xc8, 0x46, 0x61, 0xb7, 0xb2, 0x8b, 0x59, 0xbb, 0x7c, 0x09, 0x46, 0xc5, 0x70, 0x44, - 0x81, 0x0a, 0x07, 0x03, 0x15, 0x32, 0x80, 0x7e, 0x31, 0x02, 0xa3, 0x42, 0xfb, 0x21, 0x6c, 0xe6, - 0xde, 0x88, 0x6d, 0xe6, 0xce, 0x0d, 0x36, 0xd2, 
0xb9, 0x3b, 0xb9, 0x4e, 0x62, 0x27, 0xf7, 0xe2, - 0x80, 0xf8, 0x07, 0x6f, 0xe3, 0x3e, 0x96, 0x60, 0x32, 0x1e, 0x73, 0xe8, 0x2c, 0x8c, 0xb3, 0x35, - 0x45, 0x6d, 0x93, 0xf5, 0xb0, 0x28, 0x0e, 0x0e, 0x56, 0x9a, 0x61, 0x17, 0x8e, 0xd2, 0xa1, 0x6e, - 0xc0, 0xc6, 0xc2, 0x42, 0x38, 0x25, 0xdf, 0xe5, 0x2e, 0x55, 0xb5, 0x9a, 0x77, 0x5f, 0x53, 0x5b, - 0x31, 0xe8, 0x15, 0xbb, 0x49, 0x6d, 0xd5, 0xe8, 0xa6, 0x04, 0xf1, 0x18, 0x8b, 0x22, 0xcb, 0x37, - 0x25, 0x18, 0x17, 0x2a, 0x0f, 0x61, 0x4b, 0xf2, 0x7a, 0x7c, 0x4b, 0xf2, 0xec, 0x80, 0xf3, 0x39, - 0x7b, 0x3f, 0xf2, 0x69, 0x68, 0x0a, 0x9b, 0xc1, 0x2c, 0xc1, 0x6c, 0x9b, 0x0e, 0x4d, 0x26, 0x18, - 0x36, 0xd7, 0x30, 0xef, 0x41, 0xdf, 0x93, 0xe0, 0xb8, 0x9a, 0x98, 0xf3, 0xc2, 0xd7, 0x2f, 0x0f, - 0xa6, 0x5a, 0x00, 0x53, 0xaf, 0x08, 0x79, 0xc7, 0x93, 0x3d, 0x38, 0x25, 0x52, 0x76, 0x21, 0x45, - 0x85, 0x14, 0x28, 0x6e, 0x53, 0x6a, 0x89, 0x41, 0x58, 0x1a, 0x3c, 0xf3, 0x84, 0x2a, 0x95, 0xb9, - 0xf9, 0xad, 0x56, 0x03, 0x73, 0x68, 0xf9, 0xe7, 0x23, 0x81, 0xc3, 0x9a, 0xde, 0x24, 0x09, 0xf2, - 0xad, 0x74, 0x2f, 0xf2, 0xed, 0x78, 0x56, 0xae, 0x45, 0xdf, 0x80, 0x02, 0xd5, 0x06, 0xdd, 0x94, - 0x0a, 0x09, 0xad, 0xd5, 0x66, 0x98, 0xb0, 0x5a, 0xab, 0x4d, 0xcc, 0x20, 0xd1, 0xdb, 0x50, 0x62, - 0xab, 0x19, 0x9b, 0xe3, 0x85, 0xc1, 0x73, 0x08, 0xf3, 0x57, 0x18, 0x61, 0xec, 0xcb, 0xc1, 0x1e, - 0xae, 0xfc, 0x1e, 0x4c, 0xc4, 0x12, 0x01, 0xba, 0x0e, 0xc7, 0x34, 0x53, 0xe9, 0xd4, 0x15, 0x4d, - 0x31, 0xda, 0xc4, 0x4e, 0xa6, 0xc6, 0xec, 0xfd, 0xcc, 0x6a, 0x84, 0x43, 0x24, 0x94, 0xe0, 0x02, - 0x31, 0xda, 0x87, 0x63, 0xd8, 0xb2, 0x02, 0x10, 0x5a, 0x8f, 0xaa, 0x50, 0x62, 0x21, 0xec, 0xad, - 0x4c, 0x63, 0xf5, 0x31, 0xa6, 0x2b, 0x8b, 0x6c, 0x07, 0x7b, 0xed, 0xe8, 0x0c, 0x80, 0x43, 0xda, - 0x36, 0xa1, 0x3c, 0xef, 0x78, 0x27, 0x40, 0x41, 0x06, 0x6e, 0x06, 0x3d, 0x38, 0x42, 0x25, 0xff, - 0x49, 0x82, 0x89, 0x75, 0x42, 0xdf, 0x35, 0xed, 0x1b, 0x0d, 0x53, 0x53, 0xdb, 0xbd, 0x21, 0xe4, - 0xfd, 0xcd, 0x58, 0xde, 0x7f, 0xa5, 0xcf, 0x31, 0x8b, 0x69, 0x9b, 0x97, 0xfd, 0xe5, 0xbf, 0x4b, - 0x50, 0x89, 0x51, 0x46, 0xd3, 0x04, 0x81, 0x92, 0x65, 0xda, 0xd4, 0x5f, 0xe3, 0x0f, 0xa5, 0x01, - 0x4b, 0xa9, 0x91, 0x55, 0x9e, 0xc1, 0x62, 0x0f, 0x9d, 0xd9, 0xb9, 0x65, 0x9b, 0xba, 0x88, 0xfb, - 0xc3, 0x49, 0x21, 0xc4, 0x0e, 0xed, 0xbc, 0x68, 0x9b, 0x3a, 0xe6, 0xd8, 0xf2, 0x1f, 0x25, 0x98, - 0x8e, 0x51, 0x0e, 0x21, 0xa5, 0x2b, 0xf1, 0x94, 0xfe, 0xe2, 0x61, 0x0c, 0xcb, 0x49, 0xec, 0x5f, - 0x25, 0xcd, 0x62, 0x0e, 0x40, 0x5b, 0x30, 0x6e, 0x99, 0x9d, 0xe6, 0x3d, 0xb8, 0x99, 0x9b, 0x62, - 0x2b, 0x64, 0x23, 0xc4, 0xc2, 0x51, 0x60, 0xb4, 0x0b, 0xd3, 0x86, 0xa2, 0x13, 0xc7, 0x52, 0xda, - 0xa4, 0x79, 0x0f, 0xce, 0x45, 0x1e, 0xe0, 0xb7, 0x05, 0x49, 0x44, 0x9c, 0x16, 0x22, 0xff, 0x2a, - 0x65, 0xb7, 0x69, 0x53, 0xf4, 0x2a, 0x94, 0xf9, 0x23, 0x89, 0xb6, 0xa9, 0x89, 0xa5, 0xed, 0x2c, - 0x1b, 0x9a, 0x86, 0x68, 0xbb, 0xbd, 0x57, 0xfd, 0xef, 0x03, 0x8f, 0x75, 0x7d, 0x42, 0x1c, 0xc0, - 0xa0, 0x75, 0x28, 0x5a, 0x87, 0x29, 0x33, 0xf8, 0xc2, 0xc2, 0x6b, 0x0b, 0x8e, 0x23, 0xff, 0x23, - 0xa9, 0x38, 0x5f, 0x5e, 0xae, 0xdf, 0xb3, 0x01, 0x0b, 0xca, 0x9a, 0xdc, 0x41, 0xb3, 0x61, 0x54, - 0xac, 0xb2, 0x22, 0x2e, 0x2f, 0x1d, 0x26, 0x2e, 0xa3, 0x2b, 0x43, 0xb0, 0x89, 0xf0, 0x1b, 0x7d, - 0x41, 0xf2, 0x5f, 0x25, 0x98, 0xe6, 0x0a, 0xb5, 0x5d, 0x5b, 0xa5, 0xbd, 0xa1, 0x65, 0xd0, 0xad, - 0x58, 0x06, 0x5d, 0xee, 0xd3, 0xd0, 0x94, 0xc6, 0xb9, 0x59, 0xf4, 0x73, 0x09, 0x1e, 0x48, 0x51, - 0x0f, 0x21, 0xc3, 0x90, 0x78, 0x86, 0x79, 0xe5, 0xb0, 0x06, 0xe6, 0x64, 0x99, 0x9b, 0x90, 0x61, - 0x1e, 0x0f, 0xdc, 0x33, 0x00, 0x96, 0xad, 0xee, 0xa8, 0x1a, 0xe9, 0x8a, 
0xcb, 0xe0, 0x72, 0x38, - 0x24, 0x8d, 0xa0, 0x07, 0x47, 0xa8, 0xd0, 0xb7, 0x60, 0xb6, 0x43, 0xb6, 0x14, 0x57, 0xa3, 0x8b, - 0x9d, 0xce, 0x92, 0x62, 0x29, 0x9b, 0xaa, 0xa6, 0x52, 0x55, 0xec, 0xb0, 0xc7, 0xea, 0x17, 0xbc, - 0x4b, 0xda, 0x2c, 0x8a, 0xdb, 0x7b, 0xd5, 0xc7, 0x0f, 0xbe, 0x98, 0xf1, 0x89, 0x7b, 0x38, 0x47, - 0x08, 0xfa, 0xae, 0x04, 0x15, 0x9b, 0xbc, 0xe3, 0xaa, 0x36, 0xe9, 0x2c, 0xdb, 0xa6, 0x15, 0xd3, - 0xa0, 0xc0, 0x35, 0xb8, 0xb4, 0xbf, 0x57, 0xad, 0xe0, 0x1c, 0x9a, 0x7e, 0x74, 0xc8, 0x15, 0x84, - 0x28, 0xcc, 0x28, 0x9a, 0x66, 0xbe, 0x4b, 0xe2, 0x1e, 0x28, 0x72, 0xf9, 0xf5, 0xfd, 0xbd, 0xea, - 0xcc, 0x62, 0xba, 0xbb, 0x1f, 0xd1, 0x59, 0xf0, 0x68, 0x01, 0x46, 0x77, 0x4c, 0xcd, 0xd5, 0x89, - 0x53, 0x29, 0x71, 0x49, 0x2c, 0xe3, 0x8e, 0x5e, 0xf5, 0x9a, 0x6e, 0xef, 0x55, 0x8f, 0x5e, 0x6c, - 0xf2, 0xa3, 0x0f, 0x9f, 0x8a, 0xed, 0xd1, 0x58, 0xcd, 0x24, 0xa6, 0x3c, 0x3f, 0x77, 0x2d, 0x87, - 0x39, 0xe6, 0x72, 0xd8, 0x85, 0xa3, 0x74, 0x48, 0x87, 0xb1, 0x6d, 0xb1, 0x6f, 0x77, 0x2a, 0xa3, - 0x03, 0xad, 0x7e, 0xb1, 0x7d, 0x7f, 0x7d, 0x5a, 0x88, 0x1c, 0xf3, 0x9b, 0x1d, 0x1c, 0x4a, 0x40, - 0x4f, 0xc0, 0x28, 0xff, 0x58, 0x59, 0xe6, 0xa7, 0xb5, 0xe5, 0x30, 0x13, 0x5d, 0xf6, 0x9a, 0xb1, - 0xdf, 0xef, 0x93, 0xae, 0x34, 0x96, 0xf8, 0xe1, 0x6a, 0x82, 0x74, 0xa5, 0xb1, 0x84, 0xfd, 0x7e, - 0x64, 0xc1, 0xa8, 0x43, 0x56, 0x55, 0xc3, 0xdd, 0xad, 0xc0, 0x40, 0xd7, 0xc5, 0xcd, 0x0b, 0x9c, - 0x3b, 0x71, 0x14, 0x15, 0x4a, 0x14, 0xfd, 0xd8, 0x17, 0x83, 0x76, 0x61, 0xcc, 0x76, 0x8d, 0x45, - 0x67, 0xc3, 0x21, 0x76, 0x65, 0x9c, 0xcb, 0xec, 0x37, 0x39, 0x63, 0x9f, 0x3f, 0x29, 0x35, 0xf0, - 0x60, 0x40, 0x81, 0x43, 0x61, 0xe8, 0x23, 0x09, 0x90, 0xe3, 0x5a, 0x96, 0x46, 0x74, 0x62, 0x50, - 0x45, 0xe3, 0xa7, 0x61, 0x4e, 0xe5, 0x18, 0xd7, 0xa1, 0xd1, 0xaf, 0xdd, 0x29, 0xa0, 0xa4, 0x32, - 0xc1, 0x51, 0x73, 0x9a, 0x14, 0x67, 0xe8, 0xc1, 0x86, 0x62, 0xcb, 0xe1, 0xbf, 0x2b, 0x13, 0x03, - 0x0d, 0x45, 0xf6, 0xa9, 0x60, 0x38, 0x14, 0xa2, 0x1f, 0xfb, 0x62, 0xd0, 0x55, 0x98, 0xb5, 0x89, - 0xd2, 0xb9, 0x62, 0x68, 0x3d, 0x6c, 0x9a, 0xf4, 0xa2, 0xaa, 0x11, 0xa7, 0xe7, 0x50, 0xa2, 0x57, - 0x26, 0x79, 0xd8, 0x04, 0x4f, 0x2e, 0x70, 0x26, 0x15, 0xce, 0xe1, 0xe6, 0x2f, 0x01, 0xc4, 0x19, - 0xec, 0x70, 0xde, 0xd2, 0x1d, 0xee, 0x25, 0x40, 0xa8, 0xea, 0x7d, 0x7b, 0x09, 0x10, 0x11, 0x71, - 0xf0, 0x11, 0xd2, 0x57, 0x23, 0x30, 0x13, 0x12, 0xdf, 0xf5, 0x4b, 0x80, 0x0c, 0x96, 0x21, 0xbc, - 0x04, 0xc8, 0xbe, 0x4a, 0x2f, 0xdc, 0xef, 0xab, 0xf4, 0xfb, 0xf0, 0x02, 0x81, 0xdf, 0xce, 0x87, - 0x4e, 0xfc, 0xf7, 0xbf, 0x9d, 0x0f, 0x75, 0xcd, 0x29, 0x67, 0x7e, 0x33, 0x12, 0x35, 0xe8, 0x3f, - 0xe8, 0x0a, 0xf8, 0xf0, 0x2f, 0x0d, 0xe5, 0xcf, 0x0b, 0x70, 0x3c, 0x39, 0x63, 0x63, 0x37, 0x81, - 0xd2, 0x1d, 0x6f, 0x02, 0x1b, 0x70, 0x62, 0xcb, 0xd5, 0xb4, 0x1e, 0x77, 0x48, 0xe4, 0x3a, 0xd0, - 0x3b, 0xb5, 0x7f, 0x58, 0x70, 0x9e, 0xb8, 0x98, 0x41, 0x83, 0x33, 0x39, 0x73, 0x6e, 0x35, 0x0b, - 0x03, 0xdd, 0x6a, 0xa6, 0x2e, 0xd5, 0x8a, 0x77, 0x7f, 0xa9, 0x96, 0x7d, 0x43, 0x59, 0x1a, 0xe0, - 0x86, 0xf2, 0x5e, 0x5c, 0x29, 0x66, 0x24, 0xbe, 0x3b, 0x5d, 0x29, 0xca, 0x0f, 0xc3, 0x9c, 0x60, - 0x63, 0xdf, 0x4b, 0xa6, 0x41, 0x6d, 0x53, 0xd3, 0x88, 0xbd, 0xec, 0xea, 0x7a, 0x4f, 0x3e, 0x0f, - 0x93, 0xf1, 0x7b, 0x6d, 0x6f, 0xe4, 0xbd, 0xab, 0x76, 0x71, 0x97, 0x12, 0x19, 0x79, 0xaf, 0x1d, - 0x07, 0x14, 0xf2, 0x07, 0x12, 0xcc, 0x66, 0xbf, 0xa1, 0x43, 0x1a, 0x4c, 0xea, 0xca, 0x6e, 0xf4, - 0x11, 0xa1, 0x34, 0xe0, 0x8e, 0x1b, 0xed, 0xef, 0x55, 0x27, 0xd7, 0x62, 0x58, 0x38, 0x81, 0x2d, - 0x7f, 0x21, 0xc1, 0xc9, 0x9c, 0x6b, 0xc6, 0xe1, 0x6a, 0x82, 0xae, 0x41, 0x59, 0x57, 0x76, 0x9b, - 
0xae, 0xdd, 0x25, 0x03, 0x9f, 0x31, 0xf0, 0x5c, 0xb2, 0x26, 0x50, 0x70, 0x80, 0x27, 0x7f, 0x22, - 0x41, 0x25, 0xaf, 0x1e, 0x44, 0x67, 0x63, 0x17, 0xa2, 0x8f, 0x26, 0x2e, 0x44, 0xa7, 0x53, 0x7c, - 0x43, 0xba, 0x0e, 0xfd, 0x54, 0x82, 0xd9, 0xec, 0xba, 0x19, 0x3d, 0x13, 0xd3, 0xb8, 0x9a, 0xd0, - 0x78, 0x2a, 0xc1, 0x25, 0xf4, 0xdd, 0x86, 0x49, 0x51, 0x5d, 0x0b, 0x18, 0xe1, 0xe5, 0x27, 0x0f, - 0xce, 0xaa, 0x02, 0xcc, 0xaf, 0x13, 0xf9, 0x48, 0xc6, 0xdb, 0x70, 0x02, 0x57, 0xfe, 0xd9, 0x08, - 0x94, 0x9a, 0x6d, 0x45, 0x23, 0x43, 0x28, 0xea, 0xae, 0xc5, 0x8a, 0xba, 0x7e, 0xdf, 0xf9, 0x73, - 0x2d, 0x73, 0xeb, 0xb9, 0xcd, 0x44, 0x3d, 0x77, 0x6e, 0x20, 0xf4, 0x83, 0x4b, 0xb9, 0xe7, 0x61, - 0x2c, 0x50, 0xa2, 0xbf, 0xd5, 0x43, 0xfe, 0x78, 0x04, 0xc6, 0x23, 0x22, 0xfa, 0x5c, 0x7b, 0x76, - 0x62, 0xab, 0xf7, 0x20, 0x7f, 0x29, 0x8a, 0xc8, 0xae, 0xf9, 0xeb, 0xb7, 0xf7, 0x86, 0x2e, 0x7c, - 0x0b, 0x95, 0x5e, 0xd6, 0xcf, 0xc3, 0x24, 0xe5, 0xff, 0xb0, 0x09, 0xce, 0xf8, 0x0a, 0x3c, 0x8a, - 0x83, 0x97, 0x99, 0xad, 0x58, 0x2f, 0x4e, 0x50, 0xcf, 0xbd, 0x00, 0x13, 0x31, 0x61, 0x7d, 0x3d, - 0x79, 0xfb, 0xad, 0x04, 0x8f, 0xde, 0x71, 0x4f, 0x86, 0xea, 0xb1, 0xe9, 0x55, 0x4b, 0x4c, 0xaf, - 0xf9, 0x7c, 0x80, 0x21, 0x3e, 0x96, 0xf8, 0xd1, 0x08, 0xa0, 0xd6, 0xb6, 0x6a, 0x77, 0x1a, 0x8a, - 0x4d, 0x7b, 0x58, 0xfc, 0x8f, 0x6a, 0x08, 0x13, 0xee, 0x2c, 0x8c, 0x77, 0x88, 0xd3, 0xb6, 0x55, - 0xee, 0x2c, 0xb1, 0x57, 0x08, 0xce, 0x41, 0x96, 0xc3, 0x2e, 0x1c, 0xa5, 0x43, 0x5d, 0x28, 0xef, - 0x78, 0xff, 0xd4, 0xf3, 0x6f, 0xde, 0xfa, 0x2d, 0x66, 0xc3, 0xff, 0xfa, 0x85, 0xf1, 0x25, 0x1a, - 0x1c, 0x1c, 0x80, 0xcb, 0x1f, 0x4a, 0x30, 0x9b, 0x76, 0xcc, 0x32, 0x53, 0xfd, 0xfe, 0x3b, 0xe7, - 0x61, 0x28, 0x72, 0x74, 0xe6, 0x95, 0x63, 0xde, 0x89, 0x37, 0x93, 0x8c, 0x79, 0xab, 0xfc, 0xa5, - 0x04, 0x73, 0xd9, 0xaa, 0x0d, 0x61, 0x2b, 0x71, 0x3d, 0xbe, 0x95, 0xe8, 0xf7, 0xd8, 0x20, 0x5b, - 0xef, 0x9c, 0x6d, 0xc5, 0x5e, 0xe6, 0x18, 0x0c, 0xc1, 0xc8, 0xad, 0xb8, 0x91, 0x8b, 0x87, 0x36, - 0x32, 0xdb, 0xc0, 0xfa, 0x13, 0x37, 0x6f, 0xcd, 0x1f, 0xf9, 0xec, 0xd6, 0xfc, 0x91, 0x3f, 0xdf, - 0x9a, 0x3f, 0xf2, 0xfe, 0xfe, 0xbc, 0x74, 0x73, 0x7f, 0x5e, 0xfa, 0x6c, 0x7f, 0x5e, 0xfa, 0xdb, - 0xfe, 0xbc, 0xf4, 0x83, 0x2f, 0xe6, 0x8f, 0x5c, 0x1b, 0x15, 0x98, 0xff, 0x0a, 0x00, 0x00, 0xff, - 0xff, 0x56, 0x18, 0xbd, 0xf5, 0xb1, 0x3c, 0x00, 0x00, +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 3637 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcb, 0x6f, 0x1c, 0x47, + 0x73, 0xd7, 0xec, 0x2e, 0xb9, 0xcb, 0xa2, 0xf8, 0x6a, 0xd2, 0xe4, 0x7e, 0x94, 0xc5, 0xd5, 0x37, + 0x06, 0x14, 0xc9, 0x91, 0x76, 0x2d, 0xd9, 0xd2, 0xa7, 0x58, 0x88, 0x6d, 0x2e, 0x29, 0x4a, 0x74, + 0xf8, 0x52, 0x2f, 0xa9, 0x38, 0x46, 0xe4, 0x78, 0xb8, 0xdb, 0x5c, 0x8e, 0x38, 0x3b, 0x33, 0x9e, + 0xe9, 0xa1, 0xb9, 0x40, 0x10, 0xe4, 0x10, 0x04, 0x08, 0x90, 0x20, 0xc9, 0xc1, 0x79, 0xdc, 0xe2, + 0x4b, 0x4e, 0x09, 0x92, 0x5b, 0x72, 0x30, 0x0c, 0x04, 0x70, 0x00, 0x21, 0x70, 0x00, 0xdf, 0xe2, + 0x13, 0x11, 0xd3, 0xa7, 0x20, 0xff, 0x40, 0xa0, 0x43, 0x10, 0x74, 0x4f, 0xcf, 0x7b, 0x86, 0xbb, + 0x4b, 0x4b, 0x44, 0x10, 0xe4, 0xc6, 0xed, 0xaa, 0xfa, 0x55, 0x75, 0x75, 0x75, 0x55, 0x4d, 0x77, + 0x13, 0x56, 0x0e, 0xee, 0xd9, 0x55, 0xd5, 0xa8, 0x1d, 0x38, 0xbb, 0xc4, 0xd2, 0x09, 0x25, 0x76, + 0xed, 0x90, 0xe8, 0x2d, 0xc3, 0xaa, 0x09, 0x82, 0x62, 0xaa, 0x35, 0x72, 0x44, 0x89, 0x6e, 0xab, + 0x86, 0x6e, 0xd7, 0x0e, 0x6f, 0xed, 
0x12, 0xaa, 0xdc, 0xaa, 0xb5, 0x89, 0x4e, 0x2c, 0x85, 0x92, + 0x56, 0xd5, 0xb4, 0x0c, 0x6a, 0xa0, 0xcb, 0x2e, 0x7b, 0x55, 0x31, 0xd5, 0x6a, 0xc0, 0x5e, 0x15, + 0xec, 0xf3, 0x37, 0xdb, 0x2a, 0xdd, 0x77, 0x76, 0xab, 0x4d, 0xa3, 0x53, 0x6b, 0x1b, 0x6d, 0xa3, + 0xc6, 0xa5, 0x76, 0x9d, 0x3d, 0xfe, 0x8b, 0xff, 0xe0, 0x7f, 0xb9, 0x68, 0xf3, 0x72, 0x48, 0x79, + 0xd3, 0xb0, 0x48, 0xed, 0x30, 0xa1, 0x71, 0xfe, 0x7a, 0x88, 0xc7, 0x34, 0x34, 0xb5, 0xd9, 0xcd, + 0x32, 0x6e, 0xfe, 0x9d, 0x80, 0xb5, 0xa3, 0x34, 0xf7, 0x55, 0x9d, 0x58, 0xdd, 0x9a, 0x79, 0xd0, + 0xe6, 0xb2, 0x16, 0xb1, 0x0d, 0xc7, 0x6a, 0x92, 0x81, 0xa4, 0xec, 0x5a, 0x87, 0x50, 0x25, 0xcd, + 0xac, 0x5a, 0x96, 0x94, 0xe5, 0xe8, 0x54, 0xed, 0x24, 0xd5, 0xdc, 0xed, 0x25, 0x60, 0x37, 0xf7, + 0x49, 0x47, 0x49, 0xc8, 0xbd, 0x9d, 0x25, 0xe7, 0x50, 0x55, 0xab, 0xa9, 0x3a, 0xb5, 0xa9, 0x15, + 0x17, 0x92, 0xef, 0xc3, 0xd4, 0xa2, 0xa6, 0x19, 0x9f, 0x93, 0xd6, 0x8a, 0x46, 0x8e, 0x9e, 0x18, + 0x9a, 0xd3, 0x21, 0xe8, 0x2a, 0x0c, 0xb7, 0x2c, 0xf5, 0x90, 0x58, 0x65, 0xe9, 0x8a, 0x74, 0x6d, + 0xa4, 0x3e, 0xfe, 0xfc, 0xb8, 0x72, 0xe1, 0xe4, 0xb8, 0x32, 0xbc, 0xcc, 0x47, 0xb1, 0xa0, 0xca, + 0x36, 0x4c, 0x08, 0xe1, 0x47, 0x86, 0x4d, 0xb7, 0x14, 0xba, 0x8f, 0x6e, 0x03, 0x98, 0x0a, 0xdd, + 0xdf, 0xb2, 0xc8, 0x9e, 0x7a, 0x24, 0xc4, 0x91, 0x10, 0x87, 0x2d, 0x9f, 0x82, 0x43, 0x5c, 0xe8, + 0x06, 0x94, 0x2c, 0xa2, 0xb4, 0x36, 0x75, 0xad, 0x5b, 0xce, 0x5d, 0x91, 0xae, 0x95, 0xea, 0x93, + 0x42, 0xa2, 0x84, 0xc5, 0x38, 0xf6, 0x39, 0xe4, 0xbf, 0x94, 0xe0, 0x67, 0x4b, 0x8e, 0x4d, 0x8d, + 0xce, 0x3a, 0xa1, 0x96, 0xda, 0x5c, 0x72, 0x2c, 0x8b, 0xe8, 0xb4, 0x41, 0x15, 0xea, 0xd8, 0xe8, + 0x0a, 0x14, 0x74, 0xa5, 0x43, 0x84, 0xe6, 0x8b, 0x02, 0xa7, 0xb0, 0xa1, 0x74, 0x08, 0xe6, 0x14, + 0xf4, 0x31, 0x0c, 0x1d, 0x2a, 0x9a, 0x43, 0xb8, 0xaa, 0xd1, 0xdb, 0xd5, 0x6a, 0x10, 0xa8, 0xbe, + 0xdb, 0xaa, 0xe6, 0x41, 0x9b, 0x47, 0xae, 0x17, 0x0b, 0xd5, 0xc7, 0x8e, 0xa2, 0x53, 0x95, 0x76, + 0xeb, 0x33, 0x02, 0xf2, 0xa2, 0xd0, 0xfb, 0x84, 0x61, 0x61, 0x17, 0x52, 0xfe, 0x1d, 0xb8, 0x9c, + 0x69, 0xda, 0x9a, 0x6a, 0x53, 0xf4, 0x14, 0x86, 0x54, 0x4a, 0x3a, 0x76, 0x59, 0xba, 0x92, 0xbf, + 0x36, 0x7a, 0xfb, 0x5e, 0xf5, 0xd4, 0x5d, 0x52, 0xcd, 0x04, 0xab, 0x8f, 0x09, 0x33, 0x86, 0x56, + 0x19, 0x1c, 0x76, 0x51, 0xe5, 0x3f, 0x95, 0x00, 0x85, 0x65, 0xb6, 0x15, 0xab, 0x4d, 0x68, 0x1f, + 0x4e, 0xf9, 0x8d, 0x9f, 0xe6, 0x94, 0x69, 0x01, 0x39, 0xea, 0x2a, 0x8c, 0xf8, 0xc4, 0x84, 0xd9, + 0xa4, 0x49, 0xdc, 0x19, 0x4f, 0xa2, 0xce, 0xb8, 0x35, 0x80, 0x33, 0x5c, 0x94, 0x0c, 0x2f, 0x7c, + 0x91, 0x83, 0x91, 0x65, 0x85, 0x74, 0x0c, 0xbd, 0x41, 0x28, 0xfa, 0x14, 0x4a, 0x6c, 0x6b, 0xb6, + 0x14, 0xaa, 0x70, 0x07, 0x8c, 0xde, 0x7e, 0xeb, 0xb4, 0xd9, 0xd9, 0x55, 0xc6, 0x5d, 0x3d, 0xbc, + 0x55, 0xdd, 0xdc, 0x7d, 0x46, 0x9a, 0x74, 0x9d, 0x50, 0x25, 0x88, 0xe0, 0x60, 0x0c, 0xfb, 0xa8, + 0x68, 0x03, 0x0a, 0xb6, 0x49, 0x9a, 0xc2, 0x77, 0x37, 0x7a, 0x4c, 0xc3, 0xb7, 0xac, 0x61, 0x92, + 0x66, 0xb0, 0x18, 0xec, 0x17, 0xe6, 0x38, 0xe8, 0x09, 0x0c, 0xdb, 0x7c, 0x95, 0xcb, 0xf9, 0xc4, + 0x6a, 0x9c, 0x8e, 0xe8, 0xc6, 0x86, 0xbf, 0x5d, 0xdd, 0xdf, 0x58, 0xa0, 0xc9, 0xff, 0x91, 0x03, + 0xe4, 0xf3, 0x2e, 0x19, 0x7a, 0x4b, 0xa5, 0xaa, 0xa1, 0xa3, 0x77, 0xa1, 0x40, 0xbb, 0xa6, 0x17, + 0x1d, 0x57, 0x3d, 0x83, 0xb6, 0xbb, 0x26, 0x79, 0x71, 0x5c, 0x99, 0x4d, 0x4a, 0x30, 0x0a, 0xe6, + 0x32, 0x68, 0xcd, 0x37, 0x35, 0xc7, 0xa5, 0xdf, 0x89, 0xaa, 0x7e, 0x71, 0x5c, 0x49, 0xc9, 0xdc, + 0x55, 0x1f, 0x29, 0x6a, 0x20, 0x3a, 0x04, 0xa4, 0x29, 0x36, 0xdd, 0xb6, 0x14, 0xdd, 0x76, 0x35, + 0xa9, 0x1d, 0x22, 0x9c, 0xf0, 0x66, 0x7f, 0x8b, 0xc6, 0x24, 
0xea, 0xf3, 0xc2, 0x0a, 0xb4, 0x96, + 0x40, 0xc3, 0x29, 0x1a, 0x58, 0xbe, 0xb3, 0x88, 0x62, 0x1b, 0x7a, 0xb9, 0x10, 0xcd, 0x77, 0x98, + 0x8f, 0x62, 0x41, 0x45, 0xd7, 0xa1, 0xd8, 0x21, 0xb6, 0xad, 0xb4, 0x49, 0x79, 0x88, 0x33, 0x4e, + 0x08, 0xc6, 0xe2, 0xba, 0x3b, 0x8c, 0x3d, 0xba, 0xfc, 0x95, 0x04, 0x63, 0xbe, 0xe7, 0x78, 0xb4, + 0xff, 0x66, 0x22, 0x0e, 0xab, 0xfd, 0x4d, 0x89, 0x49, 0xf3, 0x28, 0xf4, 0xb3, 0xa2, 0x37, 0x12, + 0x8a, 0xc1, 0x75, 0x6f, 0x2f, 0xe5, 0xf8, 0x5e, 0xba, 0xd6, 0x6f, 0xc8, 0x64, 0x6c, 0xa1, 0x3f, + 0x2b, 0x84, 0xcc, 0x67, 0xa1, 0x89, 0x9e, 0x42, 0xc9, 0x26, 0x1a, 0x69, 0x52, 0xc3, 0x12, 0xe6, + 0xbf, 0xdd, 0xa7, 0xf9, 0xca, 0x2e, 0xd1, 0x1a, 0x42, 0xb4, 0x7e, 0x91, 0xd9, 0xef, 0xfd, 0xc2, + 0x3e, 0x24, 0x7a, 0x0c, 0x25, 0x4a, 0x3a, 0xa6, 0xa6, 0x50, 0x2f, 0x07, 0xbd, 0x11, 0x9e, 0x02, + 0x8b, 0x1c, 0x06, 0xb6, 0x65, 0xb4, 0xb6, 0x05, 0x1b, 0xdf, 0x3e, 0xbe, 0x4b, 0xbc, 0x51, 0xec, + 0xc3, 0xa0, 0x43, 0x18, 0x77, 0xcc, 0x16, 0xe3, 0xa4, 0xac, 0xe2, 0xb5, 0xbb, 0x22, 0x92, 0xee, + 0xf6, 0xeb, 0x9b, 0x9d, 0x88, 0x74, 0x7d, 0x56, 0xe8, 0x1a, 0x8f, 0x8e, 0xe3, 0x98, 0x16, 0xb4, + 0x08, 0x13, 0x1d, 0x55, 0x67, 0x95, 0xab, 0xdb, 0x20, 0x4d, 0x43, 0x6f, 0xd9, 0x3c, 0xac, 0x86, + 0xea, 0x73, 0x02, 0x60, 0x62, 0x3d, 0x4a, 0xc6, 0x71, 0x7e, 0xf4, 0x21, 0x20, 0x6f, 0x1a, 0x0f, + 0xdd, 0x82, 0xad, 0x1a, 0x3a, 0x8f, 0xb9, 0x7c, 0x10, 0xdc, 0xdb, 0x09, 0x0e, 0x9c, 0x22, 0x85, + 0xd6, 0x60, 0xc6, 0x22, 0x87, 0x2a, 0x9b, 0xe3, 0x23, 0xd5, 0xa6, 0x86, 0xd5, 0x5d, 0x53, 0x3b, + 0x2a, 0x2d, 0x0f, 0x73, 0x9b, 0xca, 0x27, 0xc7, 0x95, 0x19, 0x9c, 0x42, 0xc7, 0xa9, 0x52, 0xf2, + 0x9f, 0x0f, 0xc3, 0x44, 0x2c, 0xdf, 0xa0, 0x27, 0x30, 0xdb, 0x74, 0x8b, 0xd3, 0x86, 0xd3, 0xd9, + 0x25, 0x56, 0xa3, 0xb9, 0x4f, 0x5a, 0x8e, 0x46, 0x5a, 0x3c, 0x50, 0x86, 0xea, 0x0b, 0xc2, 0xe2, + 0xd9, 0xa5, 0x54, 0x2e, 0x9c, 0x21, 0xcd, 0xbc, 0xa0, 0xf3, 0xa1, 0x75, 0xd5, 0xb6, 0x7d, 0xcc, + 0x1c, 0xc7, 0xf4, 0xbd, 0xb0, 0x91, 0xe0, 0xc0, 0x29, 0x52, 0xcc, 0xc6, 0x16, 0xb1, 0x55, 0x8b, + 0xb4, 0xe2, 0x36, 0xe6, 0xa3, 0x36, 0x2e, 0xa7, 0x72, 0xe1, 0x0c, 0x69, 0x74, 0x07, 0x46, 0x5d, + 0x6d, 0x7c, 0xfd, 0xc4, 0x42, 0xfb, 0xe5, 0x70, 0x23, 0x20, 0xe1, 0x30, 0x1f, 0x9b, 0x9a, 0xb1, + 0x6b, 0x13, 0xeb, 0x90, 0xb4, 0xb2, 0x17, 0x78, 0x33, 0xc1, 0x81, 0x53, 0xa4, 0xd8, 0xd4, 0xdc, + 0x08, 0x4c, 0x4c, 0x6d, 0x38, 0x3a, 0xb5, 0x9d, 0x54, 0x2e, 0x9c, 0x21, 0xcd, 0xe2, 0xd8, 0x35, + 0x79, 0xf1, 0x50, 0x51, 0x35, 0x65, 0x57, 0x23, 0xe5, 0x62, 0x34, 0x8e, 0x37, 0xa2, 0x64, 0x1c, + 0xe7, 0x47, 0x0f, 0x61, 0xca, 0x1d, 0xda, 0xd1, 0x15, 0x1f, 0xa4, 0xc4, 0x41, 0x7e, 0x26, 0x40, + 0xa6, 0x36, 0xe2, 0x0c, 0x38, 0x29, 0x83, 0xde, 0x85, 0xf1, 0xa6, 0xa1, 0x69, 0x3c, 0x1e, 0x97, + 0x0c, 0x47, 0xa7, 0xe5, 0x11, 0x8e, 0x82, 0xd8, 0x7e, 0x5c, 0x8a, 0x50, 0x70, 0x8c, 0x13, 0x11, + 0x80, 0xa6, 0x57, 0x70, 0xec, 0x32, 0xf4, 0xd5, 0x6b, 0x24, 0x8b, 0x5e, 0xd0, 0x03, 0xf8, 0x43, + 0x36, 0x0e, 0x01, 0xcb, 0xff, 0x22, 0xc1, 0x5c, 0x46, 0xea, 0x40, 0xef, 0x47, 0x4a, 0xec, 0x2f, + 0xc7, 0x4a, 0xec, 0xa5, 0x0c, 0xb1, 0x50, 0x9d, 0xd5, 0x61, 0xcc, 0x62, 0xb3, 0xd2, 0xdb, 0x2e, + 0x8b, 0xc8, 0x91, 0x77, 0x7a, 0x4c, 0x03, 0x87, 0x65, 0x82, 0x9c, 0x3f, 0x75, 0x72, 0x5c, 0x19, + 0x8b, 0xd0, 0x70, 0x14, 0x5e, 0xfe, 0x8b, 0x1c, 0xc0, 0x32, 0x31, 0x35, 0xa3, 0xdb, 0x21, 0xfa, + 0x79, 0xf4, 0x50, 0x9b, 0x91, 0x1e, 0xea, 0x66, 0xaf, 0xe5, 0xf1, 0x4d, 0xcb, 0x6c, 0xa2, 0x7e, + 0x3d, 0xd6, 0x44, 0xd5, 0xfa, 0x87, 0x3c, 0xbd, 0x8b, 0xfa, 0xb7, 0x3c, 0x4c, 0x07, 0xcc, 0x41, + 0x1b, 0x75, 0x3f, 0xb2, 0xc6, 0xbf, 0x14, 0x5b, 0xe3, 0xb9, 0x14, 0x91, 0x57, 0xd6, 
0x47, 0xbd, + 0xfc, 0x7e, 0x06, 0x3d, 0x83, 0x71, 0xd6, 0x38, 0xb9, 0xe1, 0xc1, 0xdb, 0xb2, 0xe1, 0x81, 0xdb, + 0x32, 0xbf, 0x80, 0xae, 0x45, 0x90, 0x70, 0x0c, 0x39, 0xa3, 0x0d, 0x2c, 0xbe, 0xea, 0x36, 0x50, + 0xfe, 0x5a, 0x82, 0xf1, 0x60, 0x99, 0xce, 0xa1, 0x69, 0xdb, 0x88, 0x36, 0x6d, 0xd7, 0xfb, 0x0e, + 0xd1, 0x8c, 0xae, 0xed, 0xbf, 0x58, 0x83, 0xef, 0x33, 0xb1, 0x0d, 0xbe, 0xab, 0x34, 0x0f, 0xfa, + 0xf8, 0xfc, 0xfb, 0x42, 0x02, 0x24, 0xaa, 0xc0, 0xa2, 0xae, 0x1b, 0x54, 0x71, 0x73, 0xa5, 0x6b, + 0xd6, 0x6a, 0xdf, 0x66, 0x79, 0x1a, 0xab, 0x3b, 0x09, 0xac, 0x07, 0x3a, 0xb5, 0xba, 0xc1, 0x8a, + 0x24, 0x19, 0x70, 0x8a, 0x01, 0x48, 0x01, 0xb0, 0x04, 0xe6, 0xb6, 0x21, 0x36, 0xf2, 0xcd, 0x3e, + 0x72, 0x1e, 0x13, 0x58, 0x32, 0xf4, 0x3d, 0xb5, 0x1d, 0xa4, 0x1d, 0xec, 0x03, 0xe1, 0x10, 0xe8, + 0xfc, 0x03, 0x98, 0xcb, 0xb0, 0x16, 0x4d, 0x42, 0xfe, 0x80, 0x74, 0x5d, 0xb7, 0x61, 0xf6, 0x27, + 0x9a, 0x09, 0x7f, 0x26, 0x8f, 0x88, 0x2f, 0xdc, 0x77, 0x73, 0xf7, 0x24, 0xf9, 0xab, 0xa1, 0x70, + 0xec, 0xf0, 0x8e, 0xf9, 0x1a, 0x94, 0x2c, 0x62, 0x6a, 0x6a, 0x53, 0xb1, 0x45, 0x23, 0x74, 0xd1, + 0x3d, 0xd2, 0x70, 0xc7, 0xb0, 0x4f, 0x8d, 0xf4, 0xd6, 0xb9, 0x57, 0xdb, 0x5b, 0xe7, 0x5f, 0x4e, + 0x6f, 0xfd, 0x5b, 0x50, 0xb2, 0xbd, 0xae, 0xba, 0xc0, 0x21, 0x6f, 0x0d, 0x90, 0x5f, 0x45, 0x43, + 0xed, 0x2b, 0xf0, 0x5b, 0x69, 0x1f, 0x34, 0xad, 0x89, 0x1e, 0x1a, 0xb0, 0x89, 0x7e, 0xa9, 0x8d, + 0x2f, 0xcb, 0xa9, 0xa6, 0xe2, 0xd8, 0xa4, 0xc5, 0x13, 0x51, 0x29, 0xc8, 0xa9, 0x5b, 0x7c, 0x14, + 0x0b, 0x2a, 0x7a, 0x1a, 0x09, 0xd9, 0xd2, 0x59, 0x42, 0x76, 0x3c, 0x3b, 0x5c, 0xd1, 0x0e, 0xcc, + 0x99, 0x96, 0xd1, 0xb6, 0x88, 0x6d, 0x2f, 0x13, 0xa5, 0xa5, 0xa9, 0x3a, 0xf1, 0xfc, 0xe3, 0x76, + 0x44, 0x97, 0x4e, 0x8e, 0x2b, 0x73, 0x5b, 0xe9, 0x2c, 0x38, 0x4b, 0x56, 0x7e, 0x5e, 0x80, 0xc9, + 0x78, 0x05, 0xcc, 0x68, 0x52, 0xa5, 0x33, 0x35, 0xa9, 0x37, 0x42, 0x9b, 0xc1, 0xed, 0xe0, 0x43, + 0x67, 0x7c, 0x89, 0x0d, 0xb1, 0x08, 0x13, 0x22, 0x1b, 0x78, 0x44, 0xd1, 0xa6, 0xfb, 0xab, 0xbf, + 0x13, 0x25, 0xe3, 0x38, 0x3f, 0x6b, 0x3d, 0x83, 0x8e, 0xd2, 0x03, 0x29, 0x44, 0x5b, 0xcf, 0xc5, + 0x38, 0x03, 0x4e, 0xca, 0xa0, 0x75, 0x98, 0x76, 0xf4, 0x24, 0x94, 0x1b, 0x8d, 0x97, 0x04, 0xd4, + 0xf4, 0x4e, 0x92, 0x05, 0xa7, 0xc9, 0xa1, 0xbd, 0x48, 0x37, 0x3a, 0xcc, 0x33, 0xec, 0xed, 0xbe, + 0xf7, 0x4e, 0xdf, 0xed, 0x28, 0xba, 0x0f, 0x63, 0x16, 0xff, 0xee, 0xf0, 0x0c, 0x76, 0x7b, 0xf7, + 0xd7, 0x84, 0xd8, 0x18, 0x0e, 0x13, 0x71, 0x94, 0x37, 0xa5, 0xdd, 0x2e, 0xf5, 0xdb, 0x6e, 0xcb, + 0xff, 0x24, 0x85, 0x8b, 0x90, 0xdf, 0x02, 0xf7, 0x3a, 0x65, 0x4a, 0x48, 0x84, 0xba, 0x23, 0x23, + 0xbd, 0xfb, 0xbd, 0x3b, 0x50, 0xf7, 0x1b, 0x14, 0xcf, 0xde, 0xed, 0xef, 0x97, 0x12, 0xcc, 0xae, + 0x34, 0x1e, 0x5a, 0x86, 0x63, 0x7a, 0xe6, 0x6c, 0x9a, 0xae, 0x5f, 0x7f, 0x01, 0x05, 0xcb, 0xd1, + 0xbc, 0x79, 0xbc, 0xe1, 0xcd, 0x03, 0x3b, 0x1a, 0x9b, 0xc7, 0x74, 0x4c, 0xca, 0x9d, 0x04, 0x13, + 0x40, 0x1b, 0x30, 0x6c, 0x29, 0x7a, 0x9b, 0x78, 0x65, 0xf5, 0x6a, 0x0f, 0xeb, 0x57, 0x97, 0x31, + 0x63, 0x0f, 0x35, 0x6f, 0x5c, 0x1a, 0x0b, 0x14, 0xf9, 0x8f, 0x24, 0x98, 0x78, 0xb4, 0xbd, 0xbd, + 0xb5, 0xaa, 0xf3, 0x1d, 0xcd, 0x4f, 0xdf, 0xaf, 0x40, 0xc1, 0x54, 0xe8, 0x7e, 0xbc, 0xd2, 0x33, + 0x1a, 0xe6, 0x14, 0xf4, 0x11, 0x14, 0x59, 0x26, 0x21, 0x7a, 0xab, 0xcf, 0x56, 0x5b, 0xc0, 0xd7, + 0x5d, 0xa1, 0xa0, 0x43, 0x14, 0x03, 0xd8, 0x83, 0x93, 0x0f, 0x60, 0x26, 0x64, 0x0e, 0xf3, 0x07, + 0x3f, 0x06, 0x46, 0x0d, 0x18, 0x62, 0x9a, 0xbd, 0x53, 0xde, 0x5e, 0x87, 0x99, 0xb1, 0x29, 0x05, + 0x9d, 0x0e, 0xfb, 0x65, 0x63, 0x17, 0x4b, 0x5e, 0x87, 0x31, 0x7e, 0xe5, 0x60, 0x58, 0x94, 0xbb, + 0x05, 0x5d, 
0x86, 0x7c, 0x47, 0xd5, 0x45, 0x9d, 0x1d, 0x15, 0x32, 0x79, 0x56, 0x23, 0xd8, 0x38, + 0x27, 0x2b, 0x47, 0x22, 0xf3, 0x04, 0x64, 0xe5, 0x08, 0xb3, 0x71, 0xf9, 0x21, 0x14, 0x85, 0xbb, + 0xc3, 0x40, 0xf9, 0xd3, 0x81, 0xf2, 0x29, 0x40, 0x9b, 0x50, 0x5c, 0xdd, 0xaa, 0x6b, 0x86, 0xdb, + 0x75, 0x35, 0xd5, 0x96, 0x15, 0x5f, 0x8b, 0xa5, 0xd5, 0x65, 0x8c, 0x39, 0x05, 0xc9, 0x30, 0x4c, + 0x8e, 0x9a, 0xc4, 0xa4, 0x3c, 0x22, 0x46, 0xea, 0xc0, 0x56, 0xf9, 0x01, 0x1f, 0xc1, 0x82, 0x22, + 0xff, 0x71, 0x0e, 0x8a, 0xc2, 0x1d, 0xe7, 0xf0, 0x15, 0xb6, 0x16, 0xf9, 0x0a, 0x7b, 0xb3, 0xbf, + 0xd0, 0xc8, 0xfc, 0x04, 0xdb, 0x8e, 0x7d, 0x82, 0xdd, 0xe8, 0x13, 0xef, 0xf4, 0xef, 0xaf, 0xbf, + 0x97, 0x60, 0x3c, 0x1a, 0x94, 0xe8, 0x0e, 0x8c, 0xb2, 0x82, 0xa3, 0x36, 0xc9, 0x46, 0xd0, 0xe7, + 0xfa, 0x87, 0x30, 0x8d, 0x80, 0x84, 0xc3, 0x7c, 0xa8, 0xed, 0x8b, 0xb1, 0x38, 0x12, 0x93, 0xce, + 0x76, 0xa9, 0x43, 0x55, 0xad, 0xea, 0x5e, 0xa3, 0x55, 0x57, 0x75, 0xba, 0x69, 0x35, 0xa8, 0xa5, + 0xea, 0xed, 0x84, 0x22, 0x1e, 0x94, 0x61, 0x64, 0xf9, 0x1f, 0x25, 0x18, 0x15, 0x26, 0x9f, 0xc3, + 0x57, 0xc5, 0xaf, 0x45, 0xbf, 0x2a, 0xae, 0xf6, 0xb9, 0xc1, 0xd3, 0x3f, 0x29, 0xfe, 0x3a, 0x30, + 0x9d, 0x6d, 0x69, 0x16, 0xd5, 0xfb, 0x86, 0x4d, 0xe3, 0x51, 0xcd, 0x36, 0x23, 0xe6, 0x14, 0xe4, + 0xc0, 0xa4, 0x1a, 0xcb, 0x01, 0xc2, 0xb5, 0xb5, 0xfe, 0x2c, 0xf1, 0xc5, 0xea, 0x65, 0x01, 0x3f, + 0x19, 0xa7, 0xe0, 0x84, 0x0a, 0x99, 0x40, 0x82, 0x0b, 0x3d, 0x86, 0xc2, 0x3e, 0xa5, 0x66, 0xca, + 0x79, 0x75, 0x8f, 0xcc, 0x13, 0x98, 0x50, 0xe2, 0xb3, 0xdb, 0xde, 0xde, 0xc2, 0x1c, 0x4a, 0xfe, + 0xef, 0xc0, 0x1f, 0x0d, 0x37, 0xc6, 0xfd, 0x7c, 0x2a, 0x9d, 0x25, 0x9f, 0x8e, 0xa6, 0xe5, 0x52, + 0xf4, 0x08, 0xf2, 0x54, 0xeb, 0xf7, 0xb3, 0x50, 0x20, 0x6e, 0xaf, 0x35, 0x82, 0x84, 0xb4, 0xbd, + 0xd6, 0xc0, 0x0c, 0x02, 0x6d, 0xc2, 0x10, 0xab, 0x3e, 0x6c, 0x0b, 0xe6, 0xfb, 0xdf, 0xd2, 0x6c, + 0xfe, 0x41, 0x40, 0xb0, 0x5f, 0x36, 0x76, 0x71, 0xe4, 0xcf, 0x60, 0x2c, 0xb2, 0x4f, 0xd1, 0xa7, + 0x70, 0x51, 0x33, 0x94, 0x56, 0x5d, 0xd1, 0x14, 0xbd, 0x49, 0xbc, 0xcb, 0x81, 0xab, 0x69, 0x5f, + 0x18, 0x6b, 0x21, 0x3e, 0xb1, 0xcb, 0xfd, 0xeb, 0xd4, 0x30, 0x0d, 0x47, 0x10, 0x65, 0x05, 0x20, + 0x98, 0x23, 0xaa, 0xc0, 0x10, 0x8b, 0x33, 0xb7, 0x9e, 0x8c, 0xd4, 0x47, 0x98, 0x85, 0x2c, 0xfc, + 0x6c, 0xec, 0x8e, 0xa3, 0xdb, 0x00, 0x36, 0x69, 0x5a, 0x84, 0xf2, 0x64, 0x90, 0x8b, 0x5e, 0x41, + 0x37, 0x7c, 0x0a, 0x0e, 0x71, 0xc9, 0xff, 0x2c, 0xc1, 0xd8, 0x06, 0xa1, 0x9f, 0x1b, 0xd6, 0xc1, + 0x16, 0x7f, 0x3a, 0x70, 0x0e, 0xc9, 0x16, 0x47, 0x92, 0xed, 0x5b, 0x3d, 0x56, 0x26, 0x62, 0x5d, + 0x56, 0xca, 0x95, 0xbf, 0x96, 0x60, 0x2e, 0xc2, 0xf9, 0x20, 0xd8, 0xba, 0x3b, 0x30, 0x64, 0x1a, + 0x16, 0xf5, 0x0a, 0xf1, 0x40, 0x0a, 0x59, 0x1a, 0x0b, 0x95, 0x62, 0x06, 0x83, 0x5d, 0x34, 0xb4, + 0x06, 0x39, 0x6a, 0x88, 0x50, 0x1d, 0x0c, 0x93, 0x10, 0xab, 0x0e, 0x02, 0x33, 0xb7, 0x6d, 0xe0, + 0x1c, 0x35, 0xd8, 0x42, 0x94, 0x23, 0x5c, 0xe1, 0xe4, 0xf3, 0x8a, 0x66, 0x80, 0xa1, 0xb0, 0x67, + 0x19, 0x9d, 0x33, 0xcf, 0xc1, 0x5f, 0x88, 0x15, 0xcb, 0xe8, 0x60, 0x8e, 0x25, 0x7f, 0x23, 0xc1, + 0x54, 0x84, 0xf3, 0x1c, 0x12, 0xff, 0xe3, 0x68, 0xe2, 0xbf, 0x31, 0xc8, 0x44, 0x32, 0xd2, 0xff, + 0x37, 0xb9, 0xd8, 0x34, 0xd8, 0x84, 0xd1, 0x1e, 0x8c, 0x9a, 0x46, 0xab, 0xf1, 0x12, 0xae, 0x03, + 0x27, 0x58, 0xdd, 0xdc, 0x0a, 0xb0, 0x70, 0x18, 0x18, 0x1d, 0xc1, 0x94, 0xae, 0x74, 0x88, 0x6d, + 0x2a, 0x4d, 0xd2, 0x78, 0x09, 0x07, 0x24, 0xaf, 0xf1, 0xfb, 0x86, 0x38, 0x22, 0x4e, 0x2a, 0x41, + 0xeb, 0x50, 0x54, 0x4d, 0xde, 0xc7, 0x89, 0xde, 0xa5, 0x67, 0x15, 0x75, 0xbb, 0x3e, 0x37, 0x9f, + 0x8b, 0x1f, 0xd8, 0xc3, 0x90, 0xff, 
0x26, 0x1e, 0x0d, 0x2c, 0xfe, 0xd0, 0x43, 0x28, 0xf1, 0x47, + 0x38, 0x4d, 0x43, 0xf3, 0x6e, 0x06, 0xd8, 0xca, 0x6e, 0x89, 0xb1, 0x17, 0xc7, 0x95, 0x4b, 0x29, + 0x87, 0xbe, 0x1e, 0x19, 0xfb, 0xc2, 0x68, 0x03, 0x0a, 0xe6, 0x4f, 0xe9, 0x60, 0x78, 0x91, 0xe3, + 0x6d, 0x0b, 0xc7, 0x91, 0x7f, 0x2f, 0x1f, 0x33, 0x97, 0x97, 0xba, 0x67, 0x2f, 0x6d, 0xd5, 0xfd, + 0x8e, 0x29, 0x73, 0xe5, 0x77, 0xa1, 0x28, 0x2a, 0xbc, 0x08, 0xe6, 0x5f, 0x0c, 0x12, 0xcc, 0xe1, + 0x2a, 0xe6, 0x7f, 0xb0, 0x78, 0x83, 0x1e, 0x30, 0xfa, 0x04, 0x86, 0x89, 0xab, 0xc2, 0xad, 0x8d, + 0x77, 0x07, 0x51, 0x11, 0xe4, 0xd5, 0xa0, 0x51, 0x15, 0x63, 0x02, 0x15, 0xbd, 0xcf, 0xfc, 0xc5, + 0x78, 0xd9, 0x47, 0xa0, 0x5d, 0x2e, 0xf0, 0x72, 0x75, 0xd9, 0x9d, 0xb6, 0x3f, 0xfc, 0xe2, 0xb8, + 0x02, 0xc1, 0x4f, 0x1c, 0x96, 0x90, 0xff, 0x55, 0x82, 0x29, 0xee, 0xa1, 0xa6, 0x63, 0xa9, 0xb4, + 0x7b, 0x6e, 0x85, 0xe9, 0x49, 0xa4, 0x30, 0xbd, 0xd3, 0xc3, 0x2d, 0x09, 0x0b, 0x33, 0x8b, 0xd3, + 0xb7, 0x12, 0xbc, 0x96, 0xe0, 0x3e, 0x87, 0xbc, 0xb8, 0x13, 0xcd, 0x8b, 0x6f, 0x0d, 0x3a, 0xa1, + 0xac, 0xd6, 0x78, 0x3c, 0x65, 0x3a, 0x7c, 0xa7, 0xdc, 0x06, 0x30, 0x2d, 0xf5, 0x50, 0xd5, 0x48, + 0x5b, 0x5c, 0x82, 0x97, 0x42, 0x8f, 0xe0, 0x7c, 0x0a, 0x0e, 0x71, 0x21, 0x1b, 0x66, 0x5b, 0x64, + 0x4f, 0x71, 0x34, 0xba, 0xd8, 0x6a, 0x2d, 0x29, 0xa6, 0xb2, 0xab, 0x6a, 0x2a, 0x55, 0xc5, 0x71, + 0xc1, 0x48, 0xfd, 0xbe, 0x7b, 0x39, 0x9d, 0xc6, 0xf1, 0xe2, 0xb8, 0x72, 0x39, 0xed, 0x76, 0xc8, + 0x63, 0xe9, 0xe2, 0x0c, 0x68, 0xd4, 0x85, 0xb2, 0x45, 0x3e, 0x73, 0x54, 0x8b, 0xb4, 0x96, 0x2d, + 0xc3, 0x8c, 0xa8, 0xcd, 0x73, 0xb5, 0xbf, 0x7a, 0x72, 0x5c, 0x29, 0xe3, 0x0c, 0x9e, 0xde, 0x8a, + 0x33, 0xe1, 0xd1, 0x33, 0x98, 0x56, 0xdc, 0xb7, 0x83, 0x11, 0xad, 0xee, 0x2e, 0xb9, 0x77, 0x72, + 0x5c, 0x99, 0x5e, 0x4c, 0x92, 0x7b, 0x2b, 0x4c, 0x03, 0x45, 0x35, 0x28, 0x1e, 0xf2, 0x97, 0x8d, + 0x76, 0x79, 0x88, 0xe3, 0xb3, 0x42, 0x50, 0x74, 0x1f, 0x3b, 0x32, 0xcc, 0xe1, 0x95, 0x06, 0xdf, + 0x7d, 0x1e, 0x17, 0xfb, 0xa0, 0x64, 0xbd, 0xa4, 0xd8, 0xf1, 0xfc, 0xc4, 0xb8, 0x14, 0x64, 0xad, + 0x47, 0x01, 0x09, 0x87, 0xf9, 0xd0, 0x53, 0x18, 0xd9, 0x17, 0xa7, 0x12, 0x76, 0xb9, 0xd8, 0x57, + 0x11, 0x8e, 0x9c, 0x62, 0xd4, 0xa7, 0x84, 0x8a, 0x11, 0x6f, 0xd8, 0xc6, 0x01, 0x22, 0xba, 0x0e, + 0x45, 0xfe, 0x63, 0x75, 0x99, 0x1f, 0xc7, 0x95, 0x82, 0xdc, 0xf6, 0xc8, 0x1d, 0xc6, 0x1e, 0xdd, + 0x63, 0x5d, 0xdd, 0x5a, 0xe2, 0xc7, 0xc2, 0x31, 0xd6, 0xd5, 0xad, 0x25, 0xec, 0xd1, 0xd1, 0xa7, + 0x50, 0xb4, 0xc9, 0x9a, 0xaa, 0x3b, 0x47, 0x65, 0xe8, 0xeb, 0x52, 0xb9, 0xf1, 0x80, 0x73, 0xc7, + 0x0e, 0xc6, 0x02, 0x0d, 0x82, 0x8e, 0x3d, 0x58, 0xb4, 0x0f, 0x23, 0x96, 0xa3, 0x2f, 0xda, 0x3b, + 0x36, 0xb1, 0xca, 0xa3, 0x5c, 0x47, 0xaf, 0x74, 0x8e, 0x3d, 0xfe, 0xb8, 0x16, 0xdf, 0x43, 0x3e, + 0x07, 0x0e, 0xc0, 0xd1, 0x1f, 0x4a, 0x80, 0x6c, 0xc7, 0x34, 0x35, 0xd2, 0x21, 0x3a, 0x55, 0x34, + 0x7e, 0x16, 0x67, 0x97, 0x2f, 0x72, 0x9d, 0x1f, 0xf4, 0x9a, 0x57, 0x42, 0x30, 0xae, 0xdc, 0x3f, + 0xf4, 0x4e, 0xb2, 0xe2, 0x14, 0xbd, 0xcc, 0xb5, 0x7b, 0x36, 0xff, 0xbb, 0x3c, 0xd6, 0x97, 0x6b, + 0xd3, 0xcf, 0x1c, 0x03, 0xd7, 0x0a, 0x3a, 0xf6, 0x60, 0xd1, 0x13, 0x98, 0xf5, 0x1e, 0xc6, 0x62, + 0xc3, 0xa0, 0x2b, 0xaa, 0x46, 0xec, 0xae, 0x4d, 0x49, 0xa7, 0x3c, 0xce, 0x97, 0xdd, 0x7f, 0xfb, + 0x81, 0x53, 0xb9, 0x70, 0x86, 0x34, 0xea, 0x40, 0xc5, 0x4b, 0x19, 0x6c, 0x3f, 0xf9, 0x39, 0xeb, + 0x81, 0xdd, 0x54, 0x34, 0xf7, 0x1e, 0x60, 0x82, 0x2b, 0x78, 0xe3, 0xe4, 0xb8, 0x52, 0x59, 0x3e, + 0x9d, 0x15, 0xf7, 0xc2, 0x42, 0x1f, 0x41, 0x59, 0xc9, 0xd2, 0x33, 0xc9, 0xf5, 0xbc, 0xce, 0xf2, + 0x50, 0xa6, 0x82, 0x4c, 0x69, 0x44, 0x61, 0x52, 0x89, 0x3e, 
0x51, 0xb6, 0xcb, 0x53, 0x7d, 0x1d, + 0x44, 0xc6, 0x5e, 0x36, 0x07, 0x87, 0x11, 0x31, 0x82, 0x8d, 0x13, 0x1a, 0xd0, 0x6f, 0x03, 0x52, + 0xe2, 0xaf, 0xaa, 0xed, 0x32, 0xea, 0xab, 0xfc, 0x24, 0x9e, 0x63, 0x07, 0x61, 0x97, 0x20, 0xd9, + 0x38, 0x45, 0x0f, 0x5a, 0x83, 0x19, 0x31, 0xba, 0xa3, 0xdb, 0xca, 0x1e, 0x69, 0x74, 0xed, 0x26, + 0xd5, 0xec, 0xf2, 0x34, 0xcf, 0x7d, 0xfc, 0xe2, 0x6b, 0x31, 0x85, 0x8e, 0x53, 0xa5, 0xd0, 0x07, + 0x30, 0xb9, 0x67, 0x58, 0xbb, 0x6a, 0xab, 0x45, 0x74, 0x0f, 0x69, 0x86, 0x23, 0xcd, 0x30, 0x6f, + 0xac, 0xc4, 0x68, 0x38, 0xc1, 0xcd, 0x1f, 0x93, 0x88, 0xab, 0x85, 0xf3, 0x79, 0x90, 0x3b, 0xd8, + 0x63, 0x92, 0xc0, 0xb4, 0x97, 0xf6, 0x98, 0x24, 0x04, 0x79, 0xfa, 0x61, 0xe6, 0x7f, 0xe6, 0x60, + 0x3a, 0x60, 0xee, 0xfb, 0x31, 0x49, 0x8a, 0xc8, 0xff, 0x3f, 0xca, 0xed, 0xfd, 0x28, 0xf7, 0x6b, + 0x09, 0xc6, 0x03, 0xd7, 0xfd, 0xef, 0x7b, 0xe0, 0x11, 0xd8, 0x96, 0xd1, 0x72, 0xfe, 0x5d, 0x2e, + 0x3c, 0x81, 0xff, 0xf3, 0xaf, 0x0c, 0x7e, 0xfa, 0x4b, 0x5a, 0xf9, 0xdb, 0x3c, 0x4c, 0xc6, 0x77, + 0x63, 0xe4, 0x32, 0x5a, 0xea, 0x79, 0x19, 0xbd, 0x05, 0x33, 0x7b, 0x8e, 0xa6, 0x75, 0xb9, 0x1b, + 0x42, 0x37, 0xd2, 0xee, 0x65, 0xd2, 0xeb, 0x42, 0x72, 0x66, 0x25, 0x85, 0x07, 0xa7, 0x4a, 0x66, + 0x5c, 0xac, 0xe7, 0xcf, 0x74, 0xb1, 0x9e, 0xb8, 0xe7, 0x2d, 0x0c, 0x70, 0xcf, 0x9b, 0x7a, 0x49, + 0x3e, 0x74, 0x86, 0x4b, 0xf2, 0xb3, 0xdc, 0x6a, 0xa7, 0x24, 0xb1, 0x9e, 0x8f, 0x2c, 0x5f, 0x87, + 0x79, 0x21, 0x46, 0xf9, 0x85, 0xb3, 0x4e, 0x2d, 0x43, 0xd3, 0x88, 0xb5, 0xec, 0x74, 0x3a, 0x5d, + 0xf9, 0x3d, 0x18, 0x8f, 0x3e, 0xa5, 0x70, 0x57, 0xda, 0x7d, 0xcd, 0x21, 0xae, 0xf4, 0x42, 0x2b, + 0xed, 0x8e, 0x63, 0x9f, 0x43, 0xfe, 0x7d, 0x09, 0x66, 0xd3, 0x9f, 0x4c, 0x22, 0x0d, 0xc6, 0x3b, + 0xca, 0x51, 0xf8, 0x19, 0xab, 0x74, 0xc6, 0xc3, 0x16, 0x7e, 0x87, 0xbe, 0x1e, 0xc1, 0xc2, 0x31, + 0x6c, 0xf9, 0x47, 0x09, 0xe6, 0x32, 0x6e, 0xaf, 0xcf, 0xd7, 0x12, 0xf4, 0x31, 0x94, 0x3a, 0xca, + 0x51, 0xc3, 0xb1, 0xda, 0xe4, 0xcc, 0xc7, 0x4b, 0x3c, 0x63, 0xac, 0x0b, 0x14, 0xec, 0xe3, 0xc9, + 0x5f, 0x4a, 0x50, 0xce, 0x6a, 0xf4, 0xd1, 0x9d, 0xc8, 0x3d, 0xfb, 0xcf, 0x63, 0xf7, 0xec, 0x53, + 0x09, 0xb9, 0x57, 0x74, 0xcb, 0xfe, 0xb7, 0x12, 0xcc, 0xa6, 0x7f, 0xf0, 0xa0, 0xb7, 0x23, 0x16, + 0x56, 0x62, 0x16, 0x4e, 0xc4, 0xa4, 0x84, 0x7d, 0x9f, 0xc0, 0xb8, 0xf8, 0x2c, 0x12, 0x30, 0xc2, + 0xab, 0x72, 0x5a, 0xae, 0x14, 0x10, 0xde, 0x67, 0x00, 0x5f, 0xaf, 0xe8, 0x18, 0x8e, 0xa1, 0xc9, + 0x7f, 0x90, 0x83, 0xa1, 0x46, 0x53, 0xd1, 0xc8, 0x39, 0xb4, 0x59, 0x1f, 0x46, 0xda, 0xac, 0x5e, + 0xff, 0x72, 0xc2, 0xad, 0xca, 0xec, 0xb0, 0x70, 0xac, 0xc3, 0x7a, 0xb3, 0x2f, 0xb4, 0xd3, 0x9b, + 0xab, 0x5f, 0x81, 0x11, 0x5f, 0xe9, 0x60, 0x39, 0x5f, 0xfe, 0xab, 0x1c, 0x8c, 0x86, 0x54, 0x0c, + 0x58, 0x31, 0xf6, 0x22, 0x95, 0xb6, 0x9f, 0x7f, 0xf4, 0x0b, 0xe9, 0xaa, 0x7a, 0xb5, 0xd5, 0x7d, + 0x32, 0x19, 0x3c, 0x92, 0x4b, 0x96, 0xdc, 0xf7, 0x60, 0x9c, 0xf2, 0x7f, 0x84, 0xf3, 0x0f, 0x65, + 0xf3, 0x3c, 0x16, 0xfd, 0x87, 0xb6, 0xdb, 0x11, 0x2a, 0x8e, 0x71, 0xcf, 0xdf, 0x87, 0xb1, 0x88, + 0xb2, 0x81, 0x5e, 0x3c, 0xfe, 0x83, 0x04, 0x3f, 0xef, 0xf9, 0xc9, 0x8c, 0xea, 0x91, 0x4d, 0x52, + 0x8d, 0x6d, 0x92, 0x85, 0x6c, 0x80, 0x57, 0xf7, 0x72, 0xa6, 0x7e, 0xf3, 0xf9, 0x0f, 0x0b, 0x17, + 0xbe, 0xfb, 0x61, 0xe1, 0xc2, 0xf7, 0x3f, 0x2c, 0x5c, 0xf8, 0xdd, 0x93, 0x05, 0xe9, 0xf9, 0xc9, + 0x82, 0xf4, 0xdd, 0xc9, 0x82, 0xf4, 0xfd, 0xc9, 0x82, 0xf4, 0xef, 0x27, 0x0b, 0xd2, 0x9f, 0xfc, + 0xb8, 0x70, 0xe1, 0xe3, 0xa2, 0x80, 0xfb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xc2, 0x4a, + 0x40, 0x12, 0x3d, 0x00, 0x00, } diff --git 
a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto similarity index 64% rename from vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.proto rename to vendor/k8s.io/api/extensions/v1beta1/generated.proto index 8ba21bc9b..3a87fbe5e 100644 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,24 +19,42 @@ limitations under the License. syntax = 'proto2'; -package k8s.io.kubernetes.pkg.apis.extensions.v1beta1; +package k8s.io.api.extensions.v1beta1; +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/api/policy/v1beta1/generated.proto"; import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; -// An APIVersion represents a single concrete version of an object model. -message APIVersion { - // Name of this version (e.g. 'v1'). +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +// Deprecated: use AllowedFlexVolume from policy API Group instead. +message AllowedFlexVolume { + // driver is the name of the Flexvolume driver. + optional string driver = 1; +} + +// AllowedHostPath defines the host volume conditions that will be enabled by a policy +// for pods to use. It requires the path prefix to be defined. +// Deprecated: use AllowedHostPath from policy API Group instead. +message AllowedHostPath { + // pathPrefix is the path prefix that the host volume must match. + // It does not support `*`. + // Trailing slashes are trimmed when validating the path prefix with a host path. + // + // Examples: + // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` + // `/foo` would not allow `/food` or `/etc/foo` + optional string pathPrefix = 1; + + // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. // +optional - optional string name = 1; + optional bool readOnly = 2; } message CustomMetricCurrentStatus { @@ -64,15 +82,17 @@ message CustomMetricTargetList { repeated CustomMetricTarget items = 1; } +// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for +// more information. // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional DaemonSetSpec spec = 2; @@ -80,15 +100,36 @@ message DaemonSet { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional DaemonSetStatus status = 3; } +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +message DaemonSetCondition { + // Type of DaemonSet condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -101,7 +142,7 @@ message DaemonSetSpec { // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. // If empty, defaulted to labels on Pod template. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; @@ -109,8 +150,8 @@ message DaemonSetSpec { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). - // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 2; + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + optional k8s.io.api.core.v1.PodTemplateSpec template = 2; // An update strategy to replace existing DaemonSet pods with new pods. // +optional @@ -123,27 +164,34 @@ message DaemonSetSpec { // +optional optional int32 minReadySeconds = 4; + // DEPRECATED. // A sequence number representing a specific generation of the template. // Populated by the system. It can be set only during the creation. // +optional optional int64 templateGeneration = 5; + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + optional int32 revisionHistoryLimit = 6; } // DaemonSetStatus represents the current status of a daemon set. message DaemonSetStatus { // The number of nodes that are running at least 1 // daemon pod and are supposed to run the daemon pod. 
- // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ optional int32 currentNumberScheduled = 1; // The number of nodes that are running the daemon pod, but are // not supposed to run the daemon pod. - // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ optional int32 numberMisscheduled = 2; // The total number of nodes that should be running the daemon // pod (including nodes correctly running the daemon pod). - // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ optional int32 desiredNumberScheduled = 3; // The number of nodes that should be running the daemon pod and have one @@ -169,6 +217,18 @@ message DaemonSetStatus { // (ready for at least spec.minReadySeconds) // +optional optional int32 numberUnavailable = 8; + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated DaemonSetCondition conditions = 10; } message DaemonSetUpdateStrategy { @@ -180,12 +240,14 @@ message DaemonSetUpdateStrategy { // Rolling update config params. Present only if type = "RollingUpdate". // --- // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. Same as DeploymentStrategy.RollingUpdate. + // to be. Same as Deployment `strategy.rollingUpdate`. // See https://github.com/kubernetes/kubernetes/issues/35345 // +optional optional RollingUpdateDaemonSet rollingUpdate = 2; } +// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for +// more information. // Deployment enables declarative updates for Pods and ReplicaSets. message Deployment { // Standard object metadata. @@ -232,6 +294,7 @@ message DeploymentList { repeated Deployment items = 2; } +// DEPRECATED. // DeploymentRollback stores the information required to rollback a deployment. message DeploymentRollback { // Required: This must match the Name of a deployment. @@ -258,10 +321,11 @@ message DeploymentSpec { optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional + // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; // Minimum number of seconds for which a newly created pod should be ready @@ -280,6 +344,7 @@ message DeploymentSpec { // +optional optional bool paused = 7; + // DEPRECATED. // The config this deployment is rolling back to. Will be cleared after rollback is done. // +optional optional RollbackConfig rollbackTo = 8; @@ -287,10 +352,10 @@ message DeploymentSpec { // The maximum time in seconds for a deployment to make progress before it // is considered to be failed. 
The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Once autoRollback is - // implemented, the deployment controller will automatically rollback failed - // deployments. Note that progress will not be estimated during the time a - // deployment is paused. This is not set by default. + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. This is not set + // by default. + // +optional optional int32 progressDeadlineSeconds = 9; } @@ -316,12 +381,22 @@ message DeploymentStatus { // +optional optional int32 availableReplicas = 4; - // Total number of unavailable pods targeted by this deployment. + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. // +optional optional int32 unavailableReplicas = 5; // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge repeated DeploymentCondition conditions = 6; + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + optional int32 collisionCount = 8; } // DeploymentStrategy describes how to replace existing pods with new ones. @@ -340,13 +415,14 @@ message DeploymentStrategy { } // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +// Deprecated: use FSGroupStrategyOptions from policy API Group instead. message FSGroupStrategyOptions { - // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. // +optional optional string rule = 1; - // Ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. + // ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. Required for MustRunAs. // +optional repeated IDRange ranges = 2; } @@ -379,8 +455,9 @@ message HTTPIngressRuleValue { repeated HTTPIngressPath paths = 1; } -// Host Port Range defines a range of host ports that will be enabled by a policy +// HostPortRange defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. +// Deprecated: use HostPortRange from policy API Group instead. message HostPortRange { // min is the start of the range, inclusive. optional int32 min = 1; @@ -389,32 +466,49 @@ message HostPortRange { optional int32 max = 2; } -// ID Range provides a min/max of an allowed range of IDs. +// IDRange provides a min/max of an allowed range of IDs. +// Deprecated: use IDRange from policy API Group instead. message IDRange { - // Min is the start of the range, inclusive. + // min is the start of the range, inclusive. optional int64 min = 1; - // Max is the end of the range, inclusive. + // max is the end of the range, inclusive. 
optional int64 max = 2; } +// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should +// not be included within this rule. +message IPBlock { + // CIDR is a string representing the IP Block + // Valid examples are "192.168.1.1/24" + optional string cidr = 1; + + // Except is a slice of CIDRs that should not be included within an IP Block + // Valid examples are "192.168.1.1/24" + // Except values will be rejected if they are outside the CIDR range + // +optional + repeated string except = 2; +} + // Ingress is a collection of rules that allow inbound connections to reach the // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name // based virtual hosting etc. message Ingress { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is the desired state of the Ingress. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; // Status is the current state of the Ingress. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; } @@ -431,7 +525,7 @@ message IngressBackend { // IngressList is a collection of Ingress. message IngressList { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -503,7 +597,7 @@ message IngressSpec { message IngressStatus { // LoadBalancer contains the current status of the load-balancer. // +optional - optional k8s.io.kubernetes.pkg.api.v1.LoadBalancerStatus loadBalancer = 1; + optional k8s.io.api.core.v1.LoadBalancerStatus loadBalancer = 1; } // IngressTLS describes the transport layer security associated with an Ingress. @@ -524,9 +618,11 @@ message IngressTLS { optional string secretName = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. +// NetworkPolicy describes what network traffic is allowed for a set of Pods message NetworkPolicy { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -535,33 +631,53 @@ message NetworkPolicy { optional NetworkPolicySpec spec = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. +// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods +// matched by a NetworkPolicySpec's podSelector. 
The traffic must match both ports and to. +// This type is beta-level in 1.8 +message NetworkPolicyEgressRule { + // List of destination ports for outgoing traffic. + // Each item in this list is combined using a logical OR. If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. + // +optional + repeated NetworkPolicyPort ports = 1; + + // List of destinations for outgoing traffic of pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all destinations (traffic not restricted by + // destination). If this field is present and contains at least one item, this rule + // allows traffic only if the traffic matches at least one item in the to list. + // +optional + repeated NetworkPolicyPeer to = 2; +} + +// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. // This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. message NetworkPolicyIngressRule { // List of ports which should be made accessible on the pods selected for this rule. // Each item in this list is combined using a logical OR. - // If this field is not provided, this rule matches all ports (traffic not restricted by port). - // If this field is empty, this rule matches no ports (no traffic matches). + // If this field is empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows traffic // only if the traffic matches at least one port in the list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. // +optional repeated NetworkPolicyPort ports = 1; // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. - // If this field is not provided, this rule matches all sources (traffic not restricted by source). - // If this field is empty, this rule matches no sources (no traffic matches). + // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). // If this field is present and contains at least on item, this rule allows traffic only if the // traffic matches at least one item in the from list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. // +optional repeated NetworkPolicyPeer from = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. // Network Policy List is a list of NetworkPolicy objects. message NetworkPolicyList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -569,23 +685,33 @@ message NetworkPolicyList { repeated NetworkPolicy items = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer. message NetworkPolicyPeer { - // This is a label selector which selects Pods in this namespace. - // This field follows standard label selector semantics. 
- // If not provided, this selector selects no pods. - // If present but empty, this selector selects all pods in this namespace. + // This is a label selector which selects Pods. This field follows standard label + // selector semantics; if present but empty, it selects all pods. + // + // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; - // Selects Namespaces using cluster scoped-labels. This - // matches all pods in all namespaces selected by this label selector. - // This field follows standard label selector semantics. - // If omitted, this selector selects no namespaces. - // If present but empty, this selector selects all namespaces. + // Selects Namespaces using cluster-scoped labels. This field follows standard label + // selector semantics; if present but empty, it selects all namespaces. + // + // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; + + // IPBlock defines policy on a particular IPBlock. If this field is set then + // neither of the other fields can be. + // +optional + optional IPBlock ipBlock = 3; } +// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort. message NetworkPolicyPort { // Optional. The protocol (TCP or UDP) which traffic must match. // If not specified, this field defaults to TCP. @@ -601,6 +727,7 @@ message NetworkPolicyPort { optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec. message NetworkPolicySpec { // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules // is applied to any pods selected by this field. Multiple network policies can select the @@ -610,22 +737,45 @@ message NetworkPolicySpec { optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // List of ingress rules to be applied to the selected pods. - // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod // OR if the traffic source is the pod's local node, // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy // objects whose podSelector matches the pod. - // If this field is empty then this NetworkPolicy does not affect ingress isolation. - // If this field is present and contains at least one rule, this policy allows any traffic - // which matches at least one of the ingress rules in this list. + // If this field is empty then this NetworkPolicy does not allow any traffic + // (and serves solely to ensure that the pods it selects are isolated by default). // +optional repeated NetworkPolicyIngressRule ingress = 2; + + // List of egress rules to be applied to the selected pods. 
Outgoing traffic is + // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // otherwise allows the traffic), OR if the traffic matches at least one egress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. If + // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves + // solely to ensure that the pods it selects are isolated by default). + // This field is beta-level in 1.8 + // +optional + repeated NetworkPolicyEgressRule egress = 3; + + // List of rule types that the NetworkPolicy relates to. + // Valid options are Ingress, Egress, or Ingress,Egress. + // If this field is not specified, it will default based on the existence of Ingress or Egress rules; + // policies that contain an Egress section are assumed to affect Egress, and all policies + // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. + // Likewise, if you want to write a policy that specifies that no egress is allowed, + // you must specify a policyTypes value that include "Egress" (since such a policy would not include + // an Egress section and would otherwise default to just [ "Ingress" ]). + // This field is beta-level in 1.8 + // +optional + repeated string policyTypes = 4; } -// Pod Security Policy governs the ability to make requests that affect the Security Context +// PodSecurityPolicy governs the ability to make requests that affect the Security Context // that will be applied to a pod and container. +// Deprecated: use PodSecurityPolicy from policy API Group instead. message PodSecurityPolicy { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -634,42 +784,45 @@ message PodSecurityPolicy { optional PodSecurityPolicySpec spec = 2; } -// Pod Security Policy List is a list of PodSecurityPolicy objects. +// PodSecurityPolicyList is a list of PodSecurityPolicy objects. +// Deprecated: use PodSecurityPolicyList from policy API Group instead. message PodSecurityPolicyList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated PodSecurityPolicy items = 2; } -// Pod Security Policy Spec defines the policy enforced. +// PodSecurityPolicySpec defines the policy enforced. +// Deprecated: use PodSecurityPolicySpec from policy API Group instead. message PodSecurityPolicySpec { // privileged determines if a pod can request to be run as privileged. // +optional optional bool privileged = 1; - // DefaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capabiility in both - // DefaultAddCapabilities and RequiredDropCapabilities. + // defaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. 
You may not list a capability in both + // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the allowedCapabilities list. // +optional repeated string defaultAddCapabilities = 2; - // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + // requiredDropCapabilities are the capabilities that will be dropped from the container. These // are required to be dropped and cannot be added. // +optional repeated string requiredDropCapabilities = 3; - // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + // allowedCapabilities is a list of capabilities that can be requested to add to the container. // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. + // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. // +optional repeated string allowedCapabilities = 4; - // volumes is a white list of allowed volume plugins. Empty indicates that all plugins - // may be used. + // volumes is a white list of allowed volume plugins. Empty indicates that + // no volumes may be used. To allow all volumes you may use '*'. // +optional repeated string volumes = 5; @@ -695,31 +848,75 @@ message PodSecurityPolicySpec { // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. optional RunAsUserStrategyOptions runAsUser = 11; - // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. optional SupplementalGroupsStrategyOptions supplementalGroups = 12; - // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. optional FSGroupStrategyOptions fsGroup = 13; - // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file // system. If the container specifically requests to run with a non-read only root file system // the PSP should deny the pod. // If set to false the container may run with a read only root file system if it wishes but it // will not be forced to. // +optional optional bool readOnlyRootFilesystem = 14; + + // defaultAllowPrivilegeEscalation controls the default setting for whether a + // process can gain more privileges than its parent process. + // +optional + optional bool defaultAllowPrivilegeEscalation = 15; + + // allowPrivilegeEscalation determines if a pod can request to allow + // privilege escalation. If unspecified, defaults to true. + // +optional + optional bool allowPrivilegeEscalation = 16; + + // allowedHostPaths is a white list of allowed host paths. Empty indicates + // that all host paths may be used. + // +optional + repeated AllowedHostPath allowedHostPaths = 17; + + // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "volumes" field. 
+ // +optional + repeated AllowedFlexVolume allowedFlexVolumes = 18; + + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. + // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. + // + // Examples: + // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. + // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. + // +optional + repeated string allowedUnsafeSysctls = 19; + + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. + // + // Examples: + // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. + // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. + // +optional + repeated string forbiddenSysctls = 20; } -// ReplicaSet represents the configuration of a ReplicaSet. +// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for +// more information. +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ReplicaSetSpec spec = 2; @@ -727,7 +924,7 @@ message ReplicaSet { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ReplicaSetStatus status = 3; } @@ -756,12 +953,12 @@ message ReplicaSetCondition { // ReplicaSetList is a collection of ReplicaSets. message ReplicaSetList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ReplicaSets. - // More info: http://kubernetes.io/docs/user-guide/replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller repeated ReplicaSet items = 2; } @@ -770,7 +967,7 @@ message ReplicaSetSpec { // Replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. 
- // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller // +optional optional int32 replicas = 1; @@ -783,21 +980,21 @@ message ReplicaSetSpec { // Selector is a label query over pods that should match the replica count. // If the selector is empty, it is defaulted to the labels present on the pod template. // Label keys and values that must match in order to be controlled by this replica set. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. - // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; } // ReplicaSetStatus represents the current status of a ReplicaSet. message ReplicaSetStatus { // Replicas is the most recently oberved number of replicas. - // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller optional int32 replicas = 1; // The number of pods that have labels matching the labels of the pod template of the replicaset. @@ -818,6 +1015,8 @@ message ReplicaSetStatus { // Represents the latest available observations of a replica set's current state. // +optional + // +patchMergeKey=type + // +patchStrategy=merge repeated ReplicaSetCondition conditions = 6; } @@ -825,8 +1024,9 @@ message ReplicaSetStatus { message ReplicationControllerDummy { } +// DEPRECATED. message RollbackConfig { - // The revision to rollback to. If set to 0, rollbck to the last revision. + // The revision to rollback to. If set to 0, rollback to the last revision. // +optional optional int64 revision = 1; } @@ -881,38 +1081,41 @@ message RollingUpdateDeployment { optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } -// Run A sUser Strategy Options defines the strategy type and any options used to create the strategy. +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. +// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. message RunAsUserStrategyOptions { - // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. + // rule is the strategy that will dictate the allowable RunAsUser values that may be set. optional string rule = 1; - // Ranges are the allowed ranges of uids that may be used. + // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid + // then supply a single range with the same start and end. Required for MustRunAs. // +optional repeated IDRange ranges = 2; } -// SELinux Strategy Options defines the strategy type and any options used to create the strategy. +// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. 
+// Deprecated: use SELinuxStrategyOptions from policy API Group instead. message SELinuxStrategyOptions { - // type is the strategy that will dictate the allowable labels that may be set. + // rule is the strategy that will dictate the allowable labels that may be set. optional string rule = 1; // seLinuxOptions required to run as; required for MustRunAs - // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ // +optional - optional k8s.io.kubernetes.pkg.api.v1.SELinuxOptions seLinuxOptions = 2; + optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; } // represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } @@ -938,68 +1141,21 @@ message ScaleStatus { // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this // field and map-based selector field are populated. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional optional string targetSelector = 3; } // SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. message SupplementalGroupsStrategyOptions { - // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. // +optional optional string rule = 1; - // Ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. + // ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. Required for MustRunAs. // +optional repeated IDRange ranges = 2; } -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. -message ThirdPartyResource { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Description is the description of this object. 
- // +optional - optional string description = 2; - - // Versions are versions for this third party object - // +optional - repeated APIVersion versions = 3; -} - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -message ThirdPartyResourceData { - // Standard object metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Data is the raw JSON data for this data. - // +optional - optional bytes data = 2; -} - -// ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. -message ThirdPartyResourceDataList { - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of ThirdpartyResourceData. - repeated ThirdPartyResourceData items = 2; -} - -// ThirdPartyResourceList is a list of ThirdPartyResources. -message ThirdPartyResourceList { - // Standard list metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of ThirdPartyResources. - repeated ThirdPartyResource items = 2; -} - diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/register.go b/vendor/k8s.io/api/extensions/v1beta1/register.go similarity index 82% rename from vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/register.go rename to vendor/k8s.io/api/extensions/v1beta1/register.go index fa5b2e1b2..7625f6781 100644 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/register.go +++ b/vendor/k8s.io/api/extensions/v1beta1/register.go @@ -34,11 +34,14 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Deployment{}, @@ -46,12 +49,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &DeploymentRollback{}, &ReplicationControllerDummy{}, &Scale{}, - &ThirdPartyResource{}, - &ThirdPartyResourceList{}, &DaemonSetList{}, &DaemonSet{}, - &ThirdPartyResourceData{}, - &ThirdPartyResourceDataList{}, &Ingress{}, &IngressList{}, &ReplicaSet{}, diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go similarity index 65% rename from vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.go rename to vendor/k8s.io/api/extensions/v1beta1/types.go index 43dd2a9b0..3a86ef43a 100644 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -17,10 +17,11 @@ limitations under the License. 
package v1beta1 import ( + appsv1beta1 "k8s.io/api/apps/v1beta1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/pkg/api/v1" ) // describes the attributes of a scale subresource @@ -44,30 +45,33 @@ type ScaleStatus struct { // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this // field and map-based selector field are populated. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } -// +genclient=true -// +noMethods=true +// +genclient +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // Dummy definition type ReplicationControllerDummy struct { metav1.TypeMeta `json:",inline"` @@ -96,60 +100,13 @@ type CustomMetricCurrentStatusList struct { Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"` } -// +genclient=true -// +nonNamespaced=true - -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. -type ThirdPartyResource struct { - metav1.TypeMeta `json:",inline"` - - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Description is the description of this object. - // +optional - Description string `json:"description,omitempty" protobuf:"bytes,2,opt,name=description"` - - // Versions are versions for this third party object - // +optional - Versions []APIVersion `json:"versions,omitempty" protobuf:"bytes,3,rep,name=versions"` -} - -// ThirdPartyResourceList is a list of ThirdPartyResources. -type ThirdPartyResourceList struct { - metav1.TypeMeta `json:",inline"` - - // Standard list metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ThirdPartyResources. 
- Items []ThirdPartyResource `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// An APIVersion represents a single concrete version of an object model. -type APIVersion struct { - // Name of this version (e.g. 'v1'). - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` -} - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -type ThirdPartyResourceData struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Data is the raw JSON data for this data. - // +optional - Data []byte `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` -} - -// +genclient=true +// +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for +// more information. // Deployment enables declarative updates for Pods and ReplicaSets. type Deployment struct { metav1.TypeMeta `json:",inline"` @@ -183,7 +140,8 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. // +optional - Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + // +patchStrategy=retainKeys + Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. @@ -201,6 +159,7 @@ type DeploymentSpec struct { // +optional Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + // DEPRECATED. // The config this deployment is rolling back to. Will be cleared after rollback is done. // +optional RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` @@ -208,13 +167,16 @@ type DeploymentSpec struct { // The maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Once autoRollback is - // implemented, the deployment controller will automatically rollback failed - // deployments. Note that progress will not be estimated during the time a - // deployment is paused. This is not set by default. + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. This is not set + // by default. + // +optional ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED. // DeploymentRollback stores the information required to rollback a deployment. type DeploymentRollback struct { metav1.TypeMeta `json:",inline"` @@ -227,8 +189,9 @@ type DeploymentRollback struct { RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"` } +// DEPRECATED. type RollbackConfig struct { - // The revision to rollback to. If set to 0, rollbck to the last revision. 
+ // The revision to rollback to. If set to 0, rollback to the last revision. // +optional Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"` } @@ -317,12 +280,22 @@ type DeploymentStatus struct { // +optional AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` - // Total number of unavailable pods targeted by this deployment. + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. // +optional UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"` } type DeploymentConditionType string @@ -347,7 +320,7 @@ type DeploymentCondition struct { // Type of deployment condition. Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` // The last time this condition was updated. LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` // Last time the condition transitioned from one status to another. @@ -358,6 +331,8 @@ type DeploymentCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // DeploymentList is a list of Deployments. type DeploymentList struct { metav1.TypeMeta `json:",inline"` @@ -378,7 +353,7 @@ type DaemonSetUpdateStrategy struct { // Rolling update config params. Present only if type = "RollingUpdate". //--- // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. Same as DeploymentStrategy.RollingUpdate. + // to be. Same as Deployment `strategy.rollingUpdate`. // See https://github.com/kubernetes/kubernetes/issues/35345 // +optional RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` @@ -419,7 +394,7 @@ type DaemonSetSpec struct { // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. // If empty, defaulted to labels on Pod template. 
- // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` @@ -427,7 +402,7 @@ type DaemonSetSpec struct { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). - // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` // An update strategy to replace existing DaemonSet pods with new pods. @@ -441,27 +416,34 @@ type DaemonSetSpec struct { // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + // DEPRECATED. // A sequence number representing a specific generation of the template. // Populated by the system. It can be set only during the creation. // +optional TemplateGeneration int64 `json:"templateGeneration,omitempty" protobuf:"varint,5,opt,name=templateGeneration"` + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` } // DaemonSetStatus represents the current status of a daemon set. type DaemonSetStatus struct { // The number of nodes that are running at least 1 // daemon pod and are supposed to run the daemon pod. - // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` // The number of nodes that are running the daemon pod, but are // not supposed to run the daemon pod. - // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` // The total number of nodes that should be running the daemon // pod (including nodes correctly running the daemon pod). - // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` // The number of nodes that should be running the daemon pod and have one @@ -487,20 +469,56 @@ type DaemonSetStatus struct { // (ready for at least spec.minReadySeconds) // +optional NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"` + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a DaemonSet's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` } -// +genclient=true +type DaemonSetConditionType string +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for +// more information. // DaemonSet represents the configuration of a daemon set. type DaemonSet struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The desired behavior of this daemon set. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -508,23 +526,31 @@ type DaemonSet struct { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } const ( + // DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead. // DaemonSetTemplateGenerationKey is the key of the labels that is added // to daemon set pods to distinguish between old and new pod templates // during DaemonSet template update. DaemonSetTemplateGenerationKey string = "pod-template-generation" + + // DefaultDaemonSetUniqueLabelKey is the default label key that is added + // to existing DaemonSet pods to distinguish between old and new + // DaemonSet pods during DaemonSet template updates. + DefaultDaemonSetUniqueLabelKey = appsv1beta1.ControllerRevisionHashLabelKey ) +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // DaemonSetList is a collection of daemon sets. type DaemonSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -532,19 +558,8 @@ type DaemonSetList struct { Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` } -// ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. -type ThirdPartyResourceDataList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ThirdpartyResourceData. - Items []ThirdPartyResourceData `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Ingress is a collection of rules that allow inbound connections to reach the // endpoints defined by a backend. An Ingress can be configured to give services @@ -553,26 +568,28 @@ type ThirdPartyResourceDataList struct { type Ingress struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is the desired state of the Ingress. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the current state of the Ingress. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // IngressList is a collection of Ingress. type IngressList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -710,20 +727,25 @@ type IngressBackend struct { ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"` } -// +genclient=true +// +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ReplicaSet represents the configuration of a ReplicaSet. +// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for +// more information. +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. 
type ReplicaSet struct { metav1.TypeMeta `json:",inline"` // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -731,21 +753,23 @@ type ReplicaSet struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ReplicaSetList is a collection of ReplicaSets. type ReplicaSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ReplicaSets. - // More info: http://kubernetes.io/docs/user-guide/replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -754,7 +778,7 @@ type ReplicaSetSpec struct { // Replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. - // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller // +optional Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` @@ -767,13 +791,13 @@ type ReplicaSetSpec struct { // Selector is a label query over pods that should match the replica count. // If the selector is empty, it is defaulted to the labels present on the pod template. // Label keys and values that must match in order to be controlled by this replica set. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // Template is the object that describes the pod that will be created if // insufficient replicas are detected. 
- // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` } @@ -781,7 +805,7 @@ type ReplicaSetSpec struct { // ReplicaSetStatus represents the current status of a ReplicaSet. type ReplicaSetStatus struct { // Replicas is the most recently oberved number of replicas. - // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` // The number of pods that have labels matching the labels of the pod template of the replicaset. @@ -802,6 +826,8 @@ type ReplicaSetStatus struct { // Represents the latest available observations of a replica set's current state. // +optional + // +patchMergeKey=type + // +patchStrategy=merge Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` } @@ -820,7 +846,7 @@ type ReplicaSetCondition struct { // Type of replica set condition. Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"` // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` // The last time the condition transitioned from one status to another. // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` @@ -832,15 +858,17 @@ type ReplicaSetCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } -// +genclient=true -// +nonNamespaced=true +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Pod Security Policy governs the ability to make requests that affect the Security Context +// PodSecurityPolicy governs the ability to make requests that affect the Security Context // that will be applied to a pod and container. +// Deprecated: use PodSecurityPolicy from policy API Group instead. type PodSecurityPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -849,27 +877,29 @@ type PodSecurityPolicy struct { Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } -// Pod Security Policy Spec defines the policy enforced. +// PodSecurityPolicySpec defines the policy enforced. +// Deprecated: use PodSecurityPolicySpec from policy API Group instead. type PodSecurityPolicySpec struct { // privileged determines if a pod can request to be run as privileged. 
// +optional Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` - // DefaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capabiility in both - // DefaultAddCapabilities and RequiredDropCapabilities. + // defaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the allowedCapabilities list. // +optional - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"` - // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` + // requiredDropCapabilities are the capabilities that will be dropped from the container. These // are required to be dropped and cannot be added. // +optional - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"` - // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` + // allowedCapabilities is a list of capabilities that can be requested to add to the container. // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. + // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. // +optional - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"` - // volumes is a white list of allowed volume plugins. Empty indicates that all plugins - // may be used. + AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` + // volumes is a white list of allowed volume plugins. Empty indicates that + // no volumes may be used. To allow all volumes you may use '*'. // +optional Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. @@ -888,20 +918,75 @@ type PodSecurityPolicySpec struct { SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. 
SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` - // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` - // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file // system. If the container specifically requests to run with a non-read only root file system // the PSP should deny the pod. // If set to false the container may run with a read only root file system if it wishes but it // will not be forced to. // +optional ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` + // defaultAllowPrivilegeEscalation controls the default setting for whether a + // process can gain more privileges than its parent process. + // +optional + DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` + // allowPrivilegeEscalation determines if a pod can request to allow + // privilege escalation. If unspecified, defaults to true. + // +optional + AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` + // allowedHostPaths is a white list of allowed host paths. Empty indicates + // that all host paths may be used. + // +optional + AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` + // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "volumes" field. + // +optional + AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. + // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. + // + // Examples: + // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. + // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. + // +optional + AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. + // + // Examples: + // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. + // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. + // +optional + ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` } -// FS Type gives strong typing to different file systems that are used by volumes. +// AllowedHostPath defines the host volume conditions that will be enabled by a policy +// for pods to use. 
It requires the path prefix to be defined. +// Deprecated: use AllowedHostPath from policy API Group instead. +type AllowedHostPath struct { + // pathPrefix is the path prefix that the host volume must match. + // It does not support `*`. + // Trailing slashes are trimmed when validating the path prefix with a host path. + // + // Examples: + // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` + // `/foo` would not allow `/food` or `/etc/foo` + PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` + + // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` +} + +// FSType gives strong typing to different file systems that are used by volumes. +// Deprecated: use FSType from policy API Group instead. type FSType string var ( @@ -929,8 +1014,16 @@ var ( All FSType = "*" ) -// Host Port Range defines a range of host ports that will be enabled by a policy +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +// Deprecated: use AllowedFlexVolume from policy API Group instead. +type AllowedFlexVolume struct { + // driver is the name of the Flexvolume driver. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` +} + +// HostPortRange defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. +// Deprecated: use HostPortRange from policy API Group instead. type HostPortRange struct { // min is the start of the range, inclusive. Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` @@ -938,117 +1031,143 @@ type HostPortRange struct { Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` } -// SELinux Strategy Options defines the strategy type and any options used to create the strategy. +// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. +// Deprecated: use SELinuxStrategyOptions from policy API Group instead. type SELinuxStrategyOptions struct { - // type is the strategy that will dictate the allowable labels that may be set. + // rule is the strategy that will dictate the allowable labels that may be set. Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` // seLinuxOptions required to run as; required for MustRunAs - // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ // +optional SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` } // SELinuxStrategy denotes strategy types for generating SELinux options for a // Security Context. +// Deprecated: use SELinuxStrategy from policy API Group instead. type SELinuxStrategy string const ( - // container must have SELinux labels of X applied. + // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. + // Deprecated: use SELinuxStrategyMustRunAs from policy API Group instead. SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" - // container may make requests for any SELinux context labels. + // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. + // Deprecated: use SELinuxStrategyRunAsAny from policy API Group instead. 
SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" ) -// Run A sUser Strategy Options defines the strategy type and any options used to create the strategy. +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. +// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. type RunAsUserStrategyOptions struct { - // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. + // rule is the strategy that will dictate the allowable RunAsUser values that may be set. Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` - // Ranges are the allowed ranges of uids that may be used. + // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid + // then supply a single range with the same start and end. Required for MustRunAs. // +optional Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` } -// ID Range provides a min/max of an allowed range of IDs. +// IDRange provides a min/max of an allowed range of IDs. +// Deprecated: use IDRange from policy API Group instead. type IDRange struct { - // Min is the start of the range, inclusive. + // min is the start of the range, inclusive. Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` - // Max is the end of the range, inclusive. + // max is the end of the range, inclusive. Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` } // RunAsUserStrategy denotes strategy types for generating RunAsUser values for a // Security Context. +// Deprecated: use RunAsUserStrategy from policy API Group instead. type RunAsUserStrategy string const ( - // container must run as a particular uid. + // RunAsUserStrategyMustRunAs means that container must run as a particular uid. + // Deprecated: use RunAsUserStrategyMustRunAs from policy API Group instead. RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" - // container must run as a non-root uid + // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. + // Deprecated: use RunAsUserStrategyMustRunAsNonRoot from policy API Group instead. RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" - // container may make requests for any uid. + // RunAsUserStrategyRunAsAny means that container may make requests for any uid. + // Deprecated: use RunAsUserStrategyRunAsAny from policy API Group instead. RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" ) // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +// Deprecated: use FSGroupStrategyOptions from policy API Group instead. type FSGroupStrategyOptions struct { - // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. // +optional Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` - // Ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. + // ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. Required for MustRunAs. 
// +optional Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` } // FSGroupStrategyType denotes strategy types for generating FSGroup values for a // SecurityContext +// Deprecated: use FSGroupStrategyType from policy API Group instead. type FSGroupStrategyType string const ( - // container must have FSGroup of X applied. + // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. + // Deprecated: use FSGroupStrategyMustRunAs from policy API Group instead. FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" - // container may make requests for any FSGroup labels. + // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. + // Deprecated: use FSGroupStrategyRunAsAny from policy API Group instead. FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" ) // SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. type SupplementalGroupsStrategyOptions struct { - // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. // +optional Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` - // Ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. + // ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. Required for MustRunAs. // +optional Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` } // SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental // groups for a SecurityContext. +// Deprecated: use SupplementalGroupsStrategyType from policy API Group instead. type SupplementalGroupsStrategyType string const ( - // container must run as a particular gid. + // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. + // Deprecated: use SupplementalGroupsStrategyMustRunAs from policy API Group instead. SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" - // container may make requests for any gid. + // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. + // Deprecated: use SupplementalGroupsStrategyRunAsAny from policy API Group instead. SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" ) -// Pod Security Policy List is a list of PodSecurityPolicy objects. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodSecurityPolicyList is a list of PodSecurityPolicy objects. +// Deprecated: use PodSecurityPolicyList from policy API Group instead. type PodSecurityPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. 
Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. +// NetworkPolicy describes what network traffic is allowed for a set of Pods type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -1057,6 +1176,19 @@ type NetworkPolicy struct { Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } +// DEPRECATED 1.9 - This group version of PolicyType is deprecated by networking/v1/PolicyType. +// Policy Type string describes the NetworkPolicy type +// This type is beta-level in 1.8 +type PolicyType string + +const ( + // PolicyTypeIngress is a NetworkPolicy that affects ingress traffic on selected pods + PolicyTypeIngress PolicyType = "Ingress" + // PolicyTypeEgress is a NetworkPolicy that affects egress traffic on selected pods + PolicyTypeEgress PolicyType = "Egress" +) + +// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec. type NetworkPolicySpec struct { // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules // is applied to any pods selected by this field. Multiple network policies can select the @@ -1066,45 +1198,87 @@ type NetworkPolicySpec struct { PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` // List of ingress rules to be applied to the selected pods. - // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod // OR if the traffic source is the pod's local node, // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy // objects whose podSelector matches the pod. - // If this field is empty then this NetworkPolicy does not affect ingress isolation. - // If this field is present and contains at least one rule, this policy allows any traffic - // which matches at least one of the ingress rules in this list. + // If this field is empty then this NetworkPolicy does not allow any traffic + // (and serves solely to ensure that the pods it selects are isolated by default). // +optional Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` + + // List of egress rules to be applied to the selected pods. Outgoing traffic is + // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // otherwise allows the traffic), OR if the traffic matches at least one egress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. If + // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves + // solely to ensure that the pods it selects are isolated by default). + // This field is beta-level in 1.8 + // +optional + Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` + + // List of rule types that the NetworkPolicy relates to. + // Valid options are Ingress, Egress, or Ingress,Egress. 
+ // If this field is not specified, it will default based on the existence of Ingress or Egress rules; + // policies that contain an Egress section are assumed to affect Egress, and all policies + // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. + // Likewise, if you want to write a policy that specifies that no egress is allowed, + // you must specify a policyTypes value that include "Egress" (since such a policy would not include + // an Egress section and would otherwise default to just [ "Ingress" ]). + // This field is beta-level in 1.8 + // +optional + PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` } +// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. // This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. type NetworkPolicyIngressRule struct { // List of ports which should be made accessible on the pods selected for this rule. // Each item in this list is combined using a logical OR. - // If this field is not provided, this rule matches all ports (traffic not restricted by port). - // If this field is empty, this rule matches no ports (no traffic matches). + // If this field is empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows traffic // only if the traffic matches at least one port in the list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. // +optional Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. - // If this field is not provided, this rule matches all sources (traffic not restricted by source). - // If this field is empty, this rule matches no sources (no traffic matches). + // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). // If this field is present and contains at least on item, this rule allows traffic only if the // traffic matches at least one item in the from list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. // +optional From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` } +// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. +// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods +// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. +// This type is beta-level in 1.8 +type NetworkPolicyEgressRule struct { + // List of destination ports for outgoing traffic. + // Each item in this list is combined using a logical OR. If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. 
+ // +optional + Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` + + // List of destinations for outgoing traffic of pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all destinations (traffic not restricted by + // destination). If this field is present and contains at least one item, this rule + // allows traffic only if the traffic matches at least one item in the to list. + // +optional + To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"` +} + +// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort. type NetworkPolicyPort struct { // Optional. The protocol (TCP or UDP) which traffic must match. // If not specified, this field defaults to TCP. // +optional - Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/kubernetes/pkg/api/v1.Protocol"` + Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"` // If specified, the port on the given protocol. This can // either be a numerical or named port on a pod. If this field is not provided, @@ -1115,30 +1289,55 @@ type NetworkPolicyPort struct { Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` } -type NetworkPolicyPeer struct { - // Exactly one of the following must be specified. +// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should +// not be included within this rule. +type IPBlock struct { + // CIDR is a string representing the IP Block + // Valid examples are "192.168.1.1/24" + CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` + // Except is a slice of CIDRs that should not be included within an IP Block + // Valid examples are "192.168.1.1/24" + // Except values will be rejected if they are outside the CIDR range + // +optional + Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` +} - // This is a label selector which selects Pods in this namespace. - // This field follows standard label selector semantics. - // If not provided, this selector selects no pods. - // If present but empty, this selector selects all pods in this namespace. +// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer. +type NetworkPolicyPeer struct { + // This is a label selector which selects Pods. This field follows standard label + // selector semantics; if present but empty, it selects all pods. + // + // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. // +optional PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` - // Selects Namespaces using cluster scoped-labels. This - // matches all pods in all namespaces selected by this label selector. - // This field follows standard label selector semantics. - // If omitted, this selector selects no namespaces. - // If present but empty, this selector selects all namespaces. 
+ // Selects Namespaces using cluster-scoped labels. This field follows standard label + // selector semantics; if present but empty, it selects all namespaces. + // + // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` + + // IPBlock defines policy on a particular IPBlock. If this field is set then + // neither of the other fields can be. + // +optional + IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. // Network Policy List is a list of NetworkPolicy objects. type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go similarity index 55% rename from vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 587615590..d261b4247 100644 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,14 +26,24 @@ package v1beta1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE -var map_APIVersion = map[string]string{ - "": "An APIVersion represents a single concrete version of an object model.", - "name": "Name of this version (e.g. 'v1').", +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AllowedFlexVolume = map[string]string{ + "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used. Deprecated: use AllowedFlexVolume from policy API Group instead.", + "driver": "driver is the name of the Flexvolume driver.", } -func (APIVersion) SwaggerDoc() map[string]string { - return map_APIVersion +func (AllowedFlexVolume) SwaggerDoc() map[string]string { + return map_AllowedFlexVolume +} + +var map_AllowedHostPath = map[string]string{ + "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined. Deprecated: use AllowedHostPath from policy API Group instead.", + "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. 
Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", + "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", +} + +func (AllowedHostPath) SwaggerDoc() map[string]string { + return map_AllowedHostPath } var map_CustomMetricCurrentStatus = map[string]string{ @@ -56,19 +66,32 @@ func (CustomMetricTarget) SwaggerDoc() map[string]string { } var map_DaemonSet = map[string]string{ - "": "DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { return map_DaemonSet } +var map_DaemonSetCondition = map[string]string{ + "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "Type of DaemonSet condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DaemonSetCondition) SwaggerDoc() map[string]string { + return map_DaemonSetCondition +} + var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "A list of daemon sets.", } @@ -77,12 +100,13 @@ func (DaemonSetList) SwaggerDoc() map[string]string { } var map_DaemonSetSpec = map[string]string{ - "": "DaemonSetSpec is the specification of a daemon set.", - "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). 
More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template", - "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", - "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", - "templateGeneration": "A sequence number representing a specific generation of the template. Populated by the system. It can be set only during the creation.", + "": "DaemonSetSpec is the specification of a daemon set.", + "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", + "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", + "templateGeneration": "DEPRECATED. A sequence number representing a specific generation of the template. Populated by the system. It can be set only during the creation.", + "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", } func (DaemonSetSpec) SwaggerDoc() map[string]string { @@ -91,14 +115,16 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { var map_DaemonSetStatus = map[string]string{ "": "DaemonSetStatus represents the current status of a daemon set.", - "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", - "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", - "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", + "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", "observedGeneration": "The most recent generation observed by the daemon set controller.", "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a DaemonSet's current state.", } func (DaemonSetStatus) SwaggerDoc() map[string]string { @@ -115,7 +141,7 @@ func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { } var map_Deployment = map[string]string{ - "": "Deployment enables declarative updates for Pods and ReplicaSets.", + "": "DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for more information. Deployment enables declarative updates for Pods and ReplicaSets.", "metadata": "Standard object metadata.", "spec": "Specification of the desired behavior of the Deployment.", "status": "Most recently observed status of the Deployment.", @@ -150,7 +176,7 @@ func (DeploymentList) SwaggerDoc() map[string]string { } var map_DeploymentRollback = map[string]string{ - "": "DeploymentRollback stores the information required to rollback a deployment.", + "": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.", "name": "Required: This must match the Name of a deployment.", "updatedAnnotations": "The annotations to be updated to a deployment", "rollbackTo": "The config of this deployment rollback.", @@ -169,8 +195,8 @@ var map_DeploymentSpec = map[string]string{ "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.", "paused": "Indicates that the deployment is paused and will not be processed by the deployment controller.", - "rollbackTo": "The config this deployment is rolling back to. Will be cleared after rollback is done.", - "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Once autoRollback is implemented, the deployment controller will automatically rollback failed deployments. Note that progress will not be estimated during the time a deployment is paused. This is not set by default.", + "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. 
Will be cleared after rollback is done.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. This is not set by default.", } func (DeploymentSpec) SwaggerDoc() map[string]string { @@ -184,8 +210,9 @@ var map_DeploymentStatus = map[string]string{ "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", "readyReplicas": "Total number of ready pods targeted by this deployment.", "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", - "unavailableReplicas": "Total number of unavailable pods targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", "conditions": "Represents the latest available observations of a deployment's current state.", + "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", } func (DeploymentStatus) SwaggerDoc() map[string]string { @@ -203,9 +230,9 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { } var map_FSGroupStrategyOptions = map[string]string{ - "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", - "rule": "Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", - "ranges": "Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.", + "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use FSGroupStrategyOptions from policy API Group instead.", + "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs.", } func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { @@ -232,7 +259,7 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { } var map_HostPortRange = map[string]string{ - "": "Host Port Range defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined.", + "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined. 
Deprecated: use HostPortRange from policy API Group instead.", "min": "min is the start of the range, inclusive.", "max": "max is the end of the range, inclusive.", } @@ -242,20 +269,30 @@ func (HostPortRange) SwaggerDoc() map[string]string { } var map_IDRange = map[string]string{ - "": "ID Range provides a min/max of an allowed range of IDs.", - "min": "Min is the start of the range, inclusive.", - "max": "Max is the end of the range, inclusive.", + "": "IDRange provides a min/max of an allowed range of IDs. Deprecated: use IDRange from policy API Group instead.", + "min": "min is the start of the range, inclusive.", + "max": "max is the end of the range, inclusive.", } func (IDRange) SwaggerDoc() map[string]string { return map_IDRange } +var map_IPBlock = map[string]string{ + "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", + "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"", + "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range", +} + +func (IPBlock) SwaggerDoc() map[string]string { + return map_IPBlock +} + var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -274,7 +311,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is the list of Ingress.", } @@ -330,7 +367,8 @@ func (IngressTLS) SwaggerDoc() map[string]string { } var map_NetworkPolicy = map[string]string{ - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "": "DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. 
NetworkPolicy describes what network traffic is allowed for a set of Pods", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", } @@ -338,10 +376,20 @@ func (NetworkPolicy) SwaggerDoc() map[string]string { return map_NetworkPolicy } +var map_NetworkPolicyEgressRule = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", + "ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "to": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", +} + +func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { + return map_NetworkPolicyEgressRule +} + var map_NetworkPolicyIngressRule = map[string]string{ - "": "This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", - "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is not provided, this rule matches all ports (traffic not restricted by port). If this field is empty, this rule matches no ports (no traffic matches). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is not provided, this rule matches all sources (traffic not restricted by source). If this field is empty, this rule matches no sources (no traffic matches). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "": "DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", + "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. 
If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { @@ -349,8 +397,8 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { } var map_NetworkPolicyList = map[string]string{ - "": "Network Policy List is a list of NetworkPolicy objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "": "DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. Network Policy List is a list of NetworkPolicy objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -359,8 +407,10 @@ func (NetworkPolicyList) SwaggerDoc() map[string]string { } var map_NetworkPolicyPeer = map[string]string{ - "podSelector": "This is a label selector which selects Pods in this namespace. This field follows standard label selector semantics. If not provided, this selector selects no pods. If present but empty, this selector selects all pods in this namespace.", - "namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If omitted, this selector selects no namespaces. If present but empty, this selector selects all namespaces.", + "": "DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer.", + "podSelector": "This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.", + "namespaceSelector": "Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.", + "ipBlock": "IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.", } func (NetworkPolicyPeer) SwaggerDoc() map[string]string { @@ -368,6 +418,7 @@ func (NetworkPolicyPeer) SwaggerDoc() map[string]string { } var map_NetworkPolicyPort = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.", "protocol": "Optional. The protocol (TCP or UDP) which traffic must match. If not specified, this field defaults to TCP.", "port": "If specified, the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. 
If present, only traffic on the specified protocol AND port will be matched.", } @@ -377,8 +428,11 @@ func (NetworkPolicyPort) SwaggerDoc() map[string]string { } var map_NetworkPolicySpec = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec.", "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", - "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not affect ingress isolation. If this field is present and contains at least one rule, this policy allows any traffic which matches at least one of the ingress rules in this list.", + "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default).", + "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", + "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress, Egress, or Ingress,Egress. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", } func (NetworkPolicySpec) SwaggerDoc() map[string]string { @@ -386,8 +440,8 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { } var map_PodSecurityPolicy = map[string]string{ - "": "Pod Security Policy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.", - "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated: use PodSecurityPolicy from policy API Group instead.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "spec defines the policy enforced.", } @@ -396,9 +450,9 @@ func (PodSecurityPolicy) SwaggerDoc() map[string]string { } var map_PodSecurityPolicyList = map[string]string{ - "": "Pod Security Policy List is a list of PodSecurityPolicy objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects. Deprecated: use PodSecurityPolicyList from policy API Group instead.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "items is a list of schema objects.", } func (PodSecurityPolicyList) SwaggerDoc() map[string]string { @@ -406,21 +460,27 @@ func (PodSecurityPolicyList) SwaggerDoc() map[string]string { } var map_PodSecurityPolicySpec = map[string]string{ - "": "Pod Security Policy Spec defines the policy enforced.", - "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.", - "requiredDropCapabilities": "RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", - "allowedCapabilities": "AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.", - "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that all plugins may be used.", - "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", - "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", - "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", - "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", - "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "supplementalGroups": "SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", - "fsGroup": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.", - "readOnlyRootFilesystem": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. 
If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", + "": "PodSecurityPolicySpec defines the policy enforced. Deprecated: use PodSecurityPolicySpec from policy API Group instead.", + "privileged": "privileged determines if a pod can request to be run as privileged.", + "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", + "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", + "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", + "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'.", + "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", + "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", + "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", + "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", + "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", + "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", + "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", + "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", + "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", + "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", + "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", + "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", + "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", + "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. 
Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", + "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", } func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { @@ -428,10 +488,10 @@ func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { } var map_ReplicaSet = map[string]string{ - "": "ReplicaSet represents the configuration of a ReplicaSet.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (ReplicaSet) SwaggerDoc() map[string]string { @@ -453,8 +513,8 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "List of ReplicaSets. More info: http://kubernetes.io/docs/user-guide/replication-controller", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } func (ReplicaSetList) SwaggerDoc() map[string]string { @@ -463,10 +523,10 @@ func (ReplicaSetList) SwaggerDoc() map[string]string { var map_ReplicaSetSpec = map[string]string{ "": "ReplicaSetSpec is the specification of a ReplicaSet.", - "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. 
More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template", + "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", } func (ReplicaSetSpec) SwaggerDoc() map[string]string { @@ -475,7 +535,7 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string { var map_ReplicaSetStatus = map[string]string{ "": "ReplicaSetStatus represents the current status of a ReplicaSet.", - "replicas": "Replicas is the most recently oberved number of replicas. More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller", + "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", "readyReplicas": "The number of ready replicas for this replica set.", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", @@ -496,7 +556,8 @@ func (ReplicationControllerDummy) SwaggerDoc() map[string]string { } var map_RollbackConfig = map[string]string{ - "revision": "The revision to rollback to. If set to 0, rollbck to the last revision.", + "": "DEPRECATED.", + "revision": "The revision to rollback to. 
If set to 0, rollback to the last revision.", } func (RollbackConfig) SwaggerDoc() map[string]string { @@ -523,9 +584,9 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { } var map_RunAsUserStrategyOptions = map[string]string{ - "": "Run A sUser Strategy Options defines the strategy type and any options used to create the strategy.", - "rule": "Rule is the strategy that will dictate the allowable RunAsUser values that may be set.", - "ranges": "Ranges are the allowed ranges of uids that may be used.", + "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.", + "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", + "ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.", } func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { @@ -533,9 +594,9 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { } var map_SELinuxStrategyOptions = map[string]string{ - "": "SELinux Strategy Options defines the strategy type and any options used to create the strategy.", - "rule": "type is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context", + "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use SELinuxStrategyOptions from policy API Group instead.", + "rule": "rule is the strategy that will dictate the allowable labels that may be set.", + "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", } func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { @@ -544,9 +605,9 @@ func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -566,7 +627,7 @@ var map_ScaleStatus = map[string]string{ "": "represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "targetSelector": "label selector for pods that should match the replicas count. 
This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } func (ScaleStatus) SwaggerDoc() map[string]string { @@ -574,54 +635,13 @@ func (ScaleStatus) SwaggerDoc() map[string]string { } var map_SupplementalGroupsStrategyOptions = map[string]string{ - "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", - "rule": "Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", - "ranges": "Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.", + "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead.", + "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs.", } func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { return map_SupplementalGroupsStrategyOptions } -var map_ThirdPartyResource = map[string]string{ - "": "A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource types to the API. It consists of one or more Versions of the api.", - "metadata": "Standard object metadata", - "description": "Description is the description of this object.", - "versions": "Versions are versions for this third party object", -} - -func (ThirdPartyResource) SwaggerDoc() map[string]string { - return map_ThirdPartyResource -} - -var map_ThirdPartyResourceData = map[string]string{ - "": "An internal object, used for versioned storage in etcd. 
Not exposed to the end user.", - "metadata": "Standard object metadata.", - "data": "Data is the raw JSON data for this data.", -} - -func (ThirdPartyResourceData) SwaggerDoc() map[string]string { - return map_ThirdPartyResourceData -} - -var map_ThirdPartyResourceDataList = map[string]string{ - "": "ThirdPartyResrouceDataList is a list of ThirdPartyResourceData.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of ThirdpartyResourceData.", -} - -func (ThirdPartyResourceDataList) SwaggerDoc() map[string]string { - return map_ThirdPartyResourceDataList -} - -var map_ThirdPartyResourceList = map[string]string{ - "": "ThirdPartyResourceList is a list of ThirdPartyResources.", - "metadata": "Standard list metadata.", - "items": "Items is the list of ThirdPartyResources.", -} - -func (ThirdPartyResourceList) SwaggerDoc() map[string]string { - return map_ThirdPartyResourceList -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..8ec2b1ff7 --- /dev/null +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,1598 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. +func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { + if in == nil { + return nil + } + out := new(AllowedFlexVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. +func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { + if in == nil { + return nil + } + out := new(AllowedHostPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomMetricCurrentStatus) DeepCopyInto(out *CustomMetricCurrentStatus) { + *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatus. 
+func (in *CustomMetricCurrentStatus) DeepCopy() *CustomMetricCurrentStatus { + if in == nil { + return nil + } + out := new(CustomMetricCurrentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomMetricCurrentStatusList) DeepCopyInto(out *CustomMetricCurrentStatusList) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricCurrentStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatusList. +func (in *CustomMetricCurrentStatusList) DeepCopy() *CustomMetricCurrentStatusList { + if in == nil { + return nil + } + out := new(CustomMetricCurrentStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomMetricTarget) DeepCopyInto(out *CustomMetricTarget) { + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTarget. +func (in *CustomMetricTarget) DeepCopy() *CustomMetricTarget { + if in == nil { + return nil + } + out := new(CustomMetricTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomMetricTargetList) DeepCopyInto(out *CustomMetricTargetList) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricTarget, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTargetList. +func (in *CustomMetricTargetList) DeepCopy() *CustomMetricTargetList { + if in == nil { + return nil + } + out := new(CustomMetricTargetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet. +func (in *DaemonSet) DeepCopy() *DaemonSet { + if in == nil { + return nil + } + out := new(DaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. 
+func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList. +func (in *DaemonSetList) DeepCopy() *DaemonSetList { + if in == nil { + return nil + } + out := new(DaemonSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec. +func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec { + if in == nil { + return nil + } + out := new(DaemonSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus. +func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus { + if in == nil { + return nil + } + out := new(DaemonSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateDaemonSet) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy. 
+func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { + if in == nil { + return nil + } + out := new(DaemonSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Deployment) DeepCopyInto(out *Deployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. +func (in *Deployment) DeepCopy() *Deployment { + if in == nil { + return nil + } + out := new(Deployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Deployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. +func (in *DeploymentList) DeepCopy() *DeploymentList { + if in == nil { + return nil + } + out := new(DeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.RollbackTo = in.RollbackTo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback. +func (in *DeploymentRollback) DeepCopy() *DeploymentRollback { + if in == nil { + return nil + } + out := new(DeploymentRollback) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DeploymentRollback) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.RollbackTo != nil { + in, out := &in.RollbackTo, &out.RollbackTo + if *in == nil { + *out = nil + } else { + *out = new(RollbackConfig) + **out = **in + } + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. +func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. +func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { + if in == nil { + return nil + } + out := new(DeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateDeployment) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. 
+func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { + if in == nil { + return nil + } + out := new(FSGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { + *out = *in + out.Backend = in.Backend + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressPath. +func (in *HTTPIngressPath) DeepCopy() *HTTPIngressPath { + if in == nil { + return nil + } + out := new(HTTPIngressPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]HTTPIngressPath, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressRuleValue. +func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { + if in == nil { + return nil + } + out := new(HTTPIngressRuleValue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. +func (in *HostPortRange) DeepCopy() *HostPortRange { + if in == nil { + return nil + } + out := new(HostPortRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IDRange) DeepCopyInto(out *IDRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. +func (in *IDRange) DeepCopy() *IDRange { + if in == nil { + return nil + } + out := new(IDRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPBlock) DeepCopyInto(out *IPBlock) { + *out = *in + if in.Except != nil { + in, out := &in.Except, &out.Except + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPBlock. +func (in *IPBlock) DeepCopy() *IPBlock { + if in == nil { + return nil + } + out := new(IPBlock) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Ingress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressBackend) DeepCopyInto(out *IngressBackend) { + *out = *in + out.ServicePort = in.ServicePort + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressBackend. +func (in *IngressBackend) DeepCopy() *IngressBackend { + if in == nil { + return nil + } + out := new(IngressBackend) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressList) DeepCopyInto(out *IngressList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList. +func (in *IngressList) DeepCopy() *IngressList { + if in == nil { + return nil + } + out := new(IngressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressRule) DeepCopyInto(out *IngressRule) { + *out = *in + in.IngressRuleValue.DeepCopyInto(&out.IngressRuleValue) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule. +func (in *IngressRule) DeepCopy() *IngressRule { + if in == nil { + return nil + } + out := new(IngressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressRuleValue) DeepCopyInto(out *IngressRuleValue) { + *out = *in + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + if *in == nil { + *out = nil + } else { + *out = new(HTTPIngressRuleValue) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRuleValue. +func (in *IngressRuleValue) DeepCopy() *IngressRuleValue { + if in == nil { + return nil + } + out := new(IngressRuleValue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { + *out = *in + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + if *in == nil { + *out = nil + } else { + *out = new(IngressBackend) + **out = **in + } + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]IngressTLS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]IngressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec. 
+func (in *IngressSpec) DeepCopy() *IngressSpec { + if in == nil { + return nil + } + out := new(IngressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { + *out = *in + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus. +func (in *IngressStatus) DeepCopy() *IngressStatus { + if in == nil { + return nil + } + out := new(IngressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressTLS) DeepCopyInto(out *IngressTLS) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressTLS. +func (in *IngressTLS) DeepCopy() *IngressTLS { + if in == nil { + return nil + } + out := new(IngressTLS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicy. +func (in *NetworkPolicy) DeepCopy() *NetworkPolicy { + if in == nil { + return nil + } + out := new(NetworkPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyEgressRule) DeepCopyInto(out *NetworkPolicyEgressRule) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.To != nil { + in, out := &in.To, &out.To + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyEgressRule. +func (in *NetworkPolicyEgressRule) DeepCopy() *NetworkPolicyEgressRule { + if in == nil { + return nil + } + out := new(NetworkPolicyEgressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkPolicyIngressRule) DeepCopyInto(out *NetworkPolicyIngressRule) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.From != nil { + in, out := &in.From, &out.From + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyIngressRule. +func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { + if in == nil { + return nil + } + out := new(NetworkPolicyIngressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyList. +func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList { + if in == nil { + return nil + } + out := new(NetworkPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) { + *out = *in + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.IPBlock != nil { + in, out := &in.IPBlock, &out.IPBlock + if *in == nil { + *out = nil + } else { + *out = new(IPBlock) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPeer. +func (in *NetworkPolicyPeer) DeepCopy() *NetworkPolicyPeer { + if in == nil { + return nil + } + out := new(NetworkPolicyPeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) { + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + if *in == nil { + *out = nil + } else { + *out = new(core_v1.Protocol) + **out = **in + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPort. 
+func (in *NetworkPolicyPort) DeepCopy() *NetworkPolicyPort { + if in == nil { + return nil + } + out := new(NetworkPolicyPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) { + *out = *in + in.PodSelector.DeepCopyInto(&out.PodSelector) + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]NetworkPolicyIngressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]NetworkPolicyEgressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PolicyTypes != nil { + in, out := &in.PolicyTypes, &out.PolicyTypes + *out = make([]PolicyType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicySpec. +func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec { + if in == nil { + return nil + } + out := new(NetworkPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. +func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { + if in == nil { + return nil + } + out := new(PodSecurityPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSecurityPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. +func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { + if in == nil { + return nil + } + out := new(PodSecurityPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { + *out = *in + if in.DefaultAddCapabilities != nil { + in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = make([]core_v1.Capability, len(*in)) + copy(*out, *in) + } + if in.RequiredDropCapabilities != nil { + in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]core_v1.Capability, len(*in)) + copy(*out, *in) + } + if in.AllowedCapabilities != nil { + in, out := &in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]core_v1.Capability, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]FSType, len(*in)) + copy(*out, *in) + } + if in.HostPorts != nil { + in, out := &in.HostPorts, &out.HostPorts + *out = make([]HostPortRange, len(*in)) + copy(*out, *in) + } + in.SELinux.DeepCopyInto(&out.SELinux) + in.RunAsUser.DeepCopyInto(&out.RunAsUser) + in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) + in.FSGroup.DeepCopyInto(&out.FSGroup) + if in.DefaultAllowPrivilegeEscalation != nil { + in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.AllowPrivilegeEscalation != nil { + in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.AllowedHostPaths != nil { + in, out := &in.AllowedHostPaths, &out.AllowedHostPaths + *out = make([]AllowedHostPath, len(*in)) + copy(*out, *in) + } + if in.AllowedFlexVolumes != nil { + in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes + *out = make([]AllowedFlexVolume, len(*in)) + copy(*out, *in) + } + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ForbiddenSysctls != nil { + in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. +func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { + if in == nil { + return nil + } + out := new(PodSecurityPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. +func (in *ReplicaSet) DeepCopy() *ReplicaSet { + if in == nil { + return nil + } + out := new(ReplicaSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. +func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { + if in == nil { + return nil + } + out := new(ReplicaSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. +func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { + if in == nil { + return nil + } + out := new(ReplicaSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. +func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { + if in == nil { + return nil + } + out := new(ReplicaSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. +func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { + if in == nil { + return nil + } + out := new(ReplicaSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerDummy) DeepCopyInto(out *ReplicationControllerDummy) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerDummy. 
+func (in *ReplicationControllerDummy) DeepCopy() *ReplicationControllerDummy { + if in == nil { + return nil + } + out := new(ReplicationControllerDummy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicationControllerDummy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig. +func (in *RollbackConfig) DeepCopy() *RollbackConfig { + if in == nil { + return nil + } + out := new(RollbackConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet. +func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { + if in == nil { + return nil + } + out := new(RollingUpdateDaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. +func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { + if in == nil { + return nil + } + out := new(RollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. +func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsUserStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + if *in == nil { + *out = nil + } else { + *out = new(core_v1.SELinuxOptions) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. +func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { + if in == nil { + return nil + } + out := new(SELinuxStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scale) DeepCopyInto(out *Scale) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. +func (in *Scale) DeepCopy() *Scale { + if in == nil { + return nil + } + out := new(Scale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scale) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. +func (in *ScaleSpec) DeepCopy() *ScaleSpec { + if in == nil { + return nil + } + out := new(ScaleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. +func (in *ScaleStatus) DeepCopy() *ScaleStatus { + if in == nil { + return nil + } + out := new(ScaleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. +func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { + if in == nil { + return nil + } + out := new(SupplementalGroupsStrategyOptions) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go new file mode 100644 index 000000000..ef9ae2ae4 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +groupName=networking.k8s.io +package v1 // import "k8s.io/api/networking/v1" diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go new file mode 100644 index 000000000..089d31963 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -0,0 +1,1869 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto + + It has these top-level messages: + IPBlock + NetworkPolicy + NetworkPolicyEgressRule + NetworkPolicyIngressRule + NetworkPolicyList + NetworkPolicyPeer + NetworkPolicyPort + NetworkPolicySpec +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *IPBlock) Reset() { *m = IPBlock{} } +func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } +func (*NetworkPolicyEgressRule) ProtoMessage() {} +func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } +func (*NetworkPolicyIngressRule) ProtoMessage() {} +func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{3} +} + +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func init() { + proto.RegisterType((*IPBlock)(nil), "k8s.io.api.networking.v1.IPBlock") + proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.api.networking.v1.NetworkPolicy") + proto.RegisterType((*NetworkPolicyEgressRule)(nil), "k8s.io.api.networking.v1.NetworkPolicyEgressRule") + proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.api.networking.v1.NetworkPolicyIngressRule") + proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.api.networking.v1.NetworkPolicyList") + proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.networking.v1.NetworkPolicyPeer") + proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.networking.v1.NetworkPolicyPort") + proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.networking.v1.NetworkPolicySpec") +} +func (m *IPBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i += copy(dAtA[i:], m.CIDR) + if len(m.Except) > 0 { + for _, s := range m.Except { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + 
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.To) > 0 { + for _, msg := range m.To { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.From) > 0 { + for _, msg := range m.From { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PodSelector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) + n4, err := m.PodSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.NamespaceSelector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) + n5, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.IPBlock != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.IPBlock.Size())) + n6, err := m.IPBlock.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Protocol != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i += copy(dAtA[i:], *m.Protocol) + } + if m.Port != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) + n7, err := m.Port.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) + n8, err := m.PodSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Ingress) > 0 { + for _, msg := range m.Ingress { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Egress) > 0 { + for _, msg := range m.Egress { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *IPBlock) Size() (n int) { + var l int + _ = l + l = len(m.CIDR) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Except) > 0 { + for _, s := range m.Except { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicy) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NetworkPolicyEgressRule) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.To) > 0 { + for _, e := range m.To { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyIngressRule) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.From) > 0 { + for _, e := range m.From { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyPeer) Size() (n int) { + var l int + _ = l + if m.PodSelector != nil { + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IPBlock != nil { + l = m.IPBlock.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkPolicyPort) Size() (n int) { + var l int + _ = l + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkPolicySpec) Size() (n int) { + var l int + _ = l + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Egress) > 0 { + for _, e := range m.Egress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *IPBlock) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPBlock{`, + `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, + `Except:` + fmt.Sprintf("%v", this.Except) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyEgressRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyEgressRule{`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, + `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyIngressRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyIngressRule{`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", 
"NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyPeer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyPeer{`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(fmt.Sprintf("%v", this.IPBlock), "IPBlock", "IPBlock", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyPort{`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicySpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicySpec{`, + `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, + `Egress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Egress), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + `,`, + `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *IPBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex 
:= iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CIDR = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.To = append(m.To, NetworkPolicyPeer{}) + if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = append(m.From, NetworkPolicyPeer{}) + if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodSelector == nil { + m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IPBlock == nil { + m.IPBlock = &IPBlock{} + } + if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + 
} + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { 
+ return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + 
iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 829 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xf6, 0x3a, 0xce, 0x47, 0x27, 0x94, 0x92, 0x41, 0x08, 0x2b, 0x88, 0x75, 0xd8, 0x0b, 0x41, + 0x55, 0x67, 0x71, 0x8b, 0x10, 0x37, 0xc4, 0x42, 0x29, 0x96, 0x9a, 0xc4, 0x9a, 0xf4, 0x02, 0x02, + 0x89, 0xf5, 0xfa, 0xcd, 0x66, 0x6a, 0xef, 0xce, 0x6a, 0x66, 0x6c, 0x92, 0x1b, 0x3f, 0x81, 0x1f, + 0xc2, 0x91, 0x1b, 0x87, 0x72, 0xcc, 0xb1, 0xc7, 0x9e, 0x56, 0x64, 0xf9, 0x17, 0x39, 0xa1, 0x99, + 0x1d, 0x7b, 0xfd, 0x51, 0x0b, 0xb7, 0xa2, 0x37, 0xcf, 0x3b, 0xcf, 0xf3, 0xbc, 0x1f, 0xf3, 0xf8, + 0x5d, 0xf4, 0xd5, 0xe0, 0x0b, 0x49, 0x18, 0xf7, 0x07, 0xa3, 0x1e, 0x88, 0x14, 0x14, 0x48, 0x7f, + 0x0c, 0x69, 0x9f, 0x0b, 0xdf, 0x5e, 0x84, 0x19, 0xf3, 0x53, 0x50, 0xbf, 0x70, 0x31, 0x60, 0x69, + 0xec, 0x8f, 0xdb, 0x7e, 0x0c, 0x29, 0x88, 0x50, 0x41, 0x9f, 0x64, 0x82, 0x2b, 0x8e, 0x9b, 0x25, + 0x92, 0x84, 0x19, 0x23, 0x15, 0x92, 0x8c, 0xdb, 0xfb, 0xf7, 0x62, 0xa6, 0xce, 0x47, 0x3d, 0x12, + 0xf1, 0xc4, 0x8f, 0x79, 0xcc, 0x7d, 0x43, 0xe8, 0x8d, 0xce, 0xcc, 0xc9, 0x1c, 0xcc, 0xaf, 0x52, + 0x68, 0xdf, 0x9b, 0x49, 0x19, 0x71, 0x01, 0x2f, 0x49, 0xb6, 0x7f, 0x6f, 0x06, 0x03, 0x17, 0x0a, + 0x52, 0xc9, 0x78, 0x2a, 0xfd, 0x71, 0xbb, 0x07, 0x2a, 0x5c, 0x86, 0x7f, 0x32, 0x03, 0xcf, 0xf8, + 0x90, 0x45, 0x97, 0x2b, 0xa1, 0x9f, 0x55, 0xd0, 0x24, 0x8c, 0xce, 0x59, 0x0a, 0xe2, 0xd2, 0xcf, + 0x06, 0xb1, 0x0e, 0x48, 0x3f, 0x01, 0x15, 0xbe, 0xac, 0x1e, 0x7f, 0x15, 0x4b, 0x8c, 0x52, 0xc5, + 0x12, 0x58, 0x22, 0x7c, 0xfe, 0x5f, 0x04, 0x19, 0x9d, 0x43, 0x12, 0x2e, 0xf1, 0x1e, 0xac, 0xe2, + 0x8d, 0x14, 0x1b, 0xfa, 0x2c, 0x55, 0x52, 0x89, 0x45, 0x92, 0x77, 0x82, 0xb6, 0x3b, 0xdd, 0x60, + 0xc8, 0xa3, 0x01, 0x3e, 0x40, 0x8d, 0x88, 0xf5, 0x45, 0xd3, 0x39, 0x70, 0x0e, 0x6f, 0x05, 0x6f, + 0x5d, 0xe5, 0xad, 0x5a, 0x91, 0xb7, 0x1a, 0x5f, 0x77, 0xbe, 0xa1, 0xd4, 0xdc, 0x60, 0x0f, 0x6d, + 0xc1, 0x45, 0x04, 0x99, 0x6a, 0xd6, 0x0f, 0x36, 0x0e, 0x6f, 0x05, 0xa8, 0xc8, 0x5b, 0x5b, 0x0f, + 0x4d, 0x84, 0xda, 0x1b, 0xef, 0x2f, 0x07, 0xdd, 0x3e, 0x2e, 0xdf, 0xb8, 0x6b, 0xc6, 0x89, 0x7f, + 0x46, 0x3b, 0x7a, 0x36, 0xfd, 0x50, 0x85, 0x46, 0x7b, 0xf7, 0xfe, 0xa7, 0xa4, 0x32, 0xc4, 0xb4, + 0x54, 0x92, 0x0d, 0x62, 0x1d, 0x90, 0x44, 0xa3, 0xc9, 0xb8, 0x4d, 0x4e, 0x7a, 0x4f, 0x21, 0x52, + 0x47, 0xa0, 0xc2, 0x00, 0xdb, 0x6a, 0x50, 0x15, 0xa3, 0x53, 0x55, 0x7c, 0x84, 0x1a, 0x32, 0x83, + 0xa8, 0x59, 0x37, 0xea, 0x77, 0xc9, 0x2a, 0xbb, 0x91, 0xb9, 0xc2, 0x4e, 0x33, 0x88, 0xaa, 0x36, + 0xf5, 0x89, 0x1a, 0x19, 0xef, 0x0f, 0x07, 0xbd, 0x3f, 0x87, 0x7c, 0x18, 0x0b, 0x90, 0x92, 0x8e, + 0x86, 0x80, 0xbb, 0x68, 0x33, 0xe3, 0x42, 0xc9, 0xa6, 0x73, 0xb0, 0xf1, 0x0a, 0xb9, 0xba, 0x5c, + 0xa8, 0xe0, 0xb6, 0xcd, 0xb5, 0xa9, 0x4f, 0x92, 0x96, 0x42, 0xf8, 0x11, 0xaa, 0x2b, 0x6e, 0x06, + 0xfa, 0x0a, 0x72, 0x00, 0x22, 0x40, 0x56, 0xae, 0xfe, 0x84, 0xd3, 0xba, 0xe2, 0xde, 0x9f, 0x0e, + 0x6a, 0xce, 0xa1, 0x3a, 0xe9, 0x9b, 0xac, 0xfb, 0x08, 0x35, 0xce, 0x04, 0x4f, 0x5e, 0xa7, 
0xf2, + 0xe9, 0xd0, 0xbf, 0x15, 0x3c, 0xa1, 0x46, 0xc6, 0x7b, 0xe6, 0xa0, 0xbd, 0x39, 0xe4, 0x63, 0x26, + 0x15, 0xfe, 0x71, 0xc9, 0x3b, 0x64, 0x3d, 0xef, 0x68, 0xb6, 0x71, 0xce, 0x3b, 0x36, 0xd7, 0xce, + 0x24, 0x32, 0xe3, 0x9b, 0xc7, 0x68, 0x93, 0x29, 0x48, 0xa4, 0xed, 0xe1, 0xe3, 0x35, 0x7b, 0xa8, + 0x06, 0xd2, 0xd1, 0x6c, 0x5a, 0x8a, 0x78, 0xcf, 0xea, 0x0b, 0x1d, 0xe8, 0x5e, 0xf1, 0x19, 0xda, + 0xcd, 0x78, 0xff, 0x14, 0x86, 0x10, 0x29, 0x2e, 0x6c, 0x13, 0x0f, 0xd6, 0x6c, 0x22, 0xec, 0xc1, + 0x70, 0x42, 0x0d, 0xee, 0x14, 0x79, 0x6b, 0xb7, 0x5b, 0x69, 0xd1, 0x59, 0x61, 0x7c, 0x81, 0xf6, + 0xd2, 0x30, 0x01, 0x99, 0x85, 0x11, 0x4c, 0xb3, 0xd5, 0x5f, 0x3f, 0xdb, 0x7b, 0x45, 0xde, 0xda, + 0x3b, 0x5e, 0x54, 0xa4, 0xcb, 0x49, 0xf0, 0x77, 0x68, 0x9b, 0x65, 0x66, 0x85, 0x34, 0x37, 0x4c, + 0xbe, 0x8f, 0x56, 0xcf, 0xd1, 0xee, 0x9a, 0x60, 0xb7, 0xc8, 0x5b, 0x93, 0xc5, 0x43, 0x27, 0x74, + 0xef, 0xf7, 0x45, 0x0f, 0x68, 0xc3, 0xe1, 0x47, 0x68, 0xc7, 0xec, 0xaa, 0x88, 0x0f, 0xed, 0x6e, + 0xba, 0xab, 0xdf, 0xb3, 0x6b, 0x63, 0x37, 0x79, 0xeb, 0x83, 0xe5, 0xcf, 0x02, 0x99, 0x5c, 0xd3, + 0x29, 0x19, 0x1f, 0xa3, 0x86, 0xb6, 0xae, 0x9d, 0xca, 0xea, 0x25, 0xa4, 0xf7, 0x25, 0x29, 0xf7, + 0x25, 0xe9, 0xa4, 0xea, 0x44, 0x9c, 0x2a, 0xc1, 0xd2, 0x38, 0xd8, 0xd1, 0x96, 0xd5, 0x25, 0x51, + 0xa3, 0xe3, 0xdd, 0x2c, 0x3e, 0xb8, 0xde, 0x21, 0xf8, 0xe9, 0xff, 0xf6, 0xe0, 0xef, 0x5a, 0x9b, + 0xad, 0x7e, 0xf4, 0x9f, 0xd0, 0x36, 0x2b, 0xff, 0xe4, 0xd6, 0xc2, 0xf7, 0xd7, 0xb4, 0xf0, 0xcc, + 0x6a, 0x08, 0xee, 0xd8, 0x34, 0xdb, 0x93, 0xe0, 0x44, 0x13, 0x7f, 0x8f, 0xb6, 0xa0, 0x54, 0xdf, + 0x30, 0xea, 0xed, 0x35, 0xd5, 0xab, 0x7d, 0x19, 0xbc, 0x6d, 0xc5, 0xb7, 0x6c, 0xcc, 0x0a, 0xe2, + 0x2f, 0xf5, 0x94, 0x34, 0xf6, 0xc9, 0x65, 0x06, 0xb2, 0xd9, 0x30, 0xdf, 0x93, 0x0f, 0xcb, 0x66, + 0xa7, 0xe1, 0x9b, 0xbc, 0x85, 0xaa, 0x23, 0x9d, 0x65, 0x04, 0x87, 0x57, 0xd7, 0x6e, 0xed, 0xf9, + 0xb5, 0x5b, 0x7b, 0x71, 0xed, 0xd6, 0x7e, 0x2d, 0x5c, 0xe7, 0xaa, 0x70, 0x9d, 0xe7, 0x85, 0xeb, + 0xbc, 0x28, 0x5c, 0xe7, 0xef, 0xc2, 0x75, 0x7e, 0xfb, 0xc7, 0xad, 0xfd, 0x50, 0x1f, 0xb7, 0xff, + 0x0d, 0x00, 0x00, 0xff, 0xff, 0x48, 0x47, 0x24, 0xc9, 0xc1, 0x08, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto new file mode 100644 index 000000000..f52343cf6 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = 'proto2'; + +package k8s.io.api.networking.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/api/extensions/v1beta1/generated.proto"; +import "k8s.io/api/policy/v1beta1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should +// not be included within this rule. +message IPBlock { + // CIDR is a string representing the IP Block + // Valid examples are "192.168.1.1/24" + optional string cidr = 1; + + // Except is a slice of CIDRs that should not be included within an IP Block + // Valid examples are "192.168.1.1/24" + // Except values will be rejected if they are outside the CIDR range + // +optional + repeated string except = 2; +} + +// NetworkPolicy describes what network traffic is allowed for a set of Pods +message NetworkPolicy { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior for this NetworkPolicy. + // +optional + optional NetworkPolicySpec spec = 2; +} + +// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods +// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. +// This type is beta-level in 1.8 +message NetworkPolicyEgressRule { + // List of destination ports for outgoing traffic. + // Each item in this list is combined using a logical OR. If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. + // +optional + repeated NetworkPolicyPort ports = 1; + + // List of destinations for outgoing traffic of pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all destinations (traffic not restricted by + // destination). If this field is present and contains at least one item, this rule + // allows traffic only if the traffic matches at least one item in the to list. + // +optional + repeated NetworkPolicyPeer to = 2; +} + +// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. +message NetworkPolicyIngressRule { + // List of ports which should be made accessible on the pods selected for this + // rule. Each item in this list is combined using a logical OR. If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. + // +optional + repeated NetworkPolicyPort ports = 1; + + // List of sources which should be able to access the pods selected for this rule. 
+ // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all sources (traffic not restricted by + // source). If this field is present and contains at least on item, this rule + // allows traffic only if the traffic matches at least one item in the from list. + // +optional + repeated NetworkPolicyPeer from = 2; +} + +// NetworkPolicyList is a list of NetworkPolicy objects. +message NetworkPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated NetworkPolicy items = 2; +} + +// NetworkPolicyPeer describes a peer to allow traffic from. Only certain combinations of +// fields are allowed +message NetworkPolicyPeer { + // This is a label selector which selects Pods. This field follows standard label + // selector semantics; if present but empty, it selects all pods. + // + // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + + // Selects Namespaces using cluster-scoped labels. This field follows standard label + // selector semantics; if present but empty, it selects all namespaces. + // + // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; + + // IPBlock defines policy on a particular IPBlock. If this field is set then + // neither of the other fields can be. + // +optional + optional IPBlock ipBlock = 3; +} + +// NetworkPolicyPort describes a port to allow traffic on +message NetworkPolicyPort { + // The protocol (TCP or UDP) which traffic must match. If not specified, this + // field defaults to TCP. + // +optional + optional string protocol = 1; + + // The port on the given protocol. This can either be a numerical or named port on + // a pod. If this field is not provided, this matches all port names and numbers. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; +} + +// NetworkPolicySpec provides the specification of a NetworkPolicy +message NetworkPolicySpec { + // Selects the pods to which this NetworkPolicy object applies. The array of + // ingress rules is applied to any pods selected by this field. Multiple network + // policies can select the same set of pods. In this case, the ingress rules for + // each are combined additively. This field is NOT optional and follows standard + // label selector semantics. An empty podSelector matches all pods in this + // namespace. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + + // List of ingress rules to be applied to the selected pods. 
Traffic is allowed to + // a pod if there are no NetworkPolicies selecting the pod + // (and cluster policy otherwise allows the traffic), OR if the traffic source is + // the pod's local node, OR if the traffic matches at least one ingress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. If + // this field is empty then this NetworkPolicy does not allow any traffic (and serves + // solely to ensure that the pods it selects are isolated by default) + // +optional + repeated NetworkPolicyIngressRule ingress = 2; + + // List of egress rules to be applied to the selected pods. Outgoing traffic is + // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // otherwise allows the traffic), OR if the traffic matches at least one egress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. If + // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves + // solely to ensure that the pods it selects are isolated by default). + // This field is beta-level in 1.8 + // +optional + repeated NetworkPolicyEgressRule egress = 3; + + // List of rule types that the NetworkPolicy relates to. + // Valid options are Ingress, Egress, or Ingress,Egress. + // If this field is not specified, it will default based on the existence of Ingress or Egress rules; + // policies that contain an Egress section are assumed to affect Egress, and all policies + // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. + // Likewise, if you want to write a policy that specifies that no egress is allowed, + // you must specify a policyTypes value that include "Egress" (since such a policy would not include + // an Egress section and would otherwise default to just [ "Ingress" ]). + // This field is beta-level in 1.8 + // +optional + repeated string policyTypes = 4; +} + diff --git a/vendor/k8s.io/api/networking/v1/register.go b/vendor/k8s.io/api/networking/v1/register.go new file mode 100644 index 000000000..f47f22e9e --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "networking.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. 
+ // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &NetworkPolicy{}, + &NetworkPolicyList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go new file mode 100644 index 000000000..e1b81fdc7 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -0,0 +1,203 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetworkPolicy describes what network traffic is allowed for a set of Pods +type NetworkPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior for this NetworkPolicy. + // +optional + Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// Policy Type string describes the NetworkPolicy type +// This type is beta-level in 1.8 +type PolicyType string + +const ( + // PolicyTypeIngress is a NetworkPolicy that affects ingress traffic on selected pods + PolicyTypeIngress PolicyType = "Ingress" + // PolicyTypeEgress is a NetworkPolicy that affects egress traffic on selected pods + PolicyTypeEgress PolicyType = "Egress" +) + +// NetworkPolicySpec provides the specification of a NetworkPolicy +type NetworkPolicySpec struct { + // Selects the pods to which this NetworkPolicy object applies. The array of + // ingress rules is applied to any pods selected by this field. Multiple network + // policies can select the same set of pods. In this case, the ingress rules for + // each are combined additively. This field is NOT optional and follows standard + // label selector semantics. An empty podSelector matches all pods in this + // namespace. + PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` + + // List of ingress rules to be applied to the selected pods. Traffic is allowed to + // a pod if there are no NetworkPolicies selecting the pod + // (and cluster policy otherwise allows the traffic), OR if the traffic source is + // the pod's local node, OR if the traffic matches at least one ingress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. 
If + // this field is empty then this NetworkPolicy does not allow any traffic (and serves + // solely to ensure that the pods it selects are isolated by default) + // +optional + Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` + + // List of egress rules to be applied to the selected pods. Outgoing traffic is + // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // otherwise allows the traffic), OR if the traffic matches at least one egress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. If + // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves + // solely to ensure that the pods it selects are isolated by default). + // This field is beta-level in 1.8 + // +optional + Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` + + // List of rule types that the NetworkPolicy relates to. + // Valid options are Ingress, Egress, or Ingress,Egress. + // If this field is not specified, it will default based on the existence of Ingress or Egress rules; + // policies that contain an Egress section are assumed to affect Egress, and all policies + // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. + // Likewise, if you want to write a policy that specifies that no egress is allowed, + // you must specify a policyTypes value that include "Egress" (since such a policy would not include + // an Egress section and would otherwise default to just [ "Ingress" ]). + // This field is beta-level in 1.8 + // +optional + PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` +} + +// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. +type NetworkPolicyIngressRule struct { + // List of ports which should be made accessible on the pods selected for this + // rule. Each item in this list is combined using a logical OR. If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. + // +optional + Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` + + // List of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all sources (traffic not restricted by + // source). If this field is present and contains at least on item, this rule + // allows traffic only if the traffic matches at least one item in the from list. + // +optional + From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` +} + +// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods +// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. +// This type is beta-level in 1.8 +type NetworkPolicyEgressRule struct { + // List of destination ports for outgoing traffic. + // Each item in this list is combined using a logical OR. 
If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. + // +optional + Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` + + // List of destinations for outgoing traffic of pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all destinations (traffic not restricted by + // destination). If this field is present and contains at least one item, this rule + // allows traffic only if the traffic matches at least one item in the to list. + // +optional + To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"` +} + +// NetworkPolicyPort describes a port to allow traffic on +type NetworkPolicyPort struct { + // The protocol (TCP or UDP) which traffic must match. If not specified, this + // field defaults to TCP. + // +optional + Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"` + + // The port on the given protocol. This can either be a numerical or named port on + // a pod. If this field is not provided, this matches all port names and numbers. + // +optional + Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` +} + +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should +// not be included within this rule. +type IPBlock struct { + // CIDR is a string representing the IP Block + // Valid examples are "192.168.1.1/24" + CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` + // Except is a slice of CIDRs that should not be included within an IP Block + // Valid examples are "192.168.1.1/24" + // Except values will be rejected if they are outside the CIDR range + // +optional + Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` +} + +// NetworkPolicyPeer describes a peer to allow traffic from. Only certain combinations of +// fields are allowed +type NetworkPolicyPeer struct { + // This is a label selector which selects Pods. This field follows standard label + // selector semantics; if present but empty, it selects all pods. + // + // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // +optional + PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` + + // Selects Namespaces using cluster-scoped labels. This field follows standard label + // selector semantics; if present but empty, it selects all namespaces. + // + // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` + + // IPBlock defines policy on a particular IPBlock. If this field is set then + // neither of the other fields can be. 
+ // +optional + IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetworkPolicyList is a list of NetworkPolicy objects. +type NetworkPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..af2553a9d --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -0,0 +1,113 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_IPBlock = map[string]string{ + "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", + "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"", + "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range", +} + +func (IPBlock) SwaggerDoc() map[string]string { + return map_IPBlock +} + +var map_NetworkPolicy = map[string]string{ + "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior for this NetworkPolicy.", +} + +func (NetworkPolicy) SwaggerDoc() map[string]string { + return map_NetworkPolicy +} + +var map_NetworkPolicyEgressRule = map[string]string{ + "": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", + "ports": "List of destination ports for outgoing traffic. 
Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "to": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", +} + +func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { + return map_NetworkPolicyEgressRule +} + +var map_NetworkPolicyIngressRule = map[string]string{ + "": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", + "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", +} + +func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { + return map_NetworkPolicyIngressRule +} + +var map_NetworkPolicyList = map[string]string{ + "": "NetworkPolicyList is a list of NetworkPolicy objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (NetworkPolicyList) SwaggerDoc() map[string]string { + return map_NetworkPolicyList +} + +var map_NetworkPolicyPeer = map[string]string{ + "": "NetworkPolicyPeer describes a peer to allow traffic from. Only certain combinations of fields are allowed", + "podSelector": "This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.", + "namespaceSelector": "Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.", + "ipBlock": "IPBlock defines policy on a particular IPBlock. 
If this field is set then neither of the other fields can be.", +} + +func (NetworkPolicyPeer) SwaggerDoc() map[string]string { + return map_NetworkPolicyPeer +} + +var map_NetworkPolicyPort = map[string]string{ + "": "NetworkPolicyPort describes a port to allow traffic on", + "protocol": "The protocol (TCP or UDP) which traffic must match. If not specified, this field defaults to TCP.", + "port": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.", +} + +func (NetworkPolicyPort) SwaggerDoc() map[string]string { + return map_NetworkPolicyPort +} + +var map_NetworkPolicySpec = map[string]string{ + "": "NetworkPolicySpec provides the specification of a NetworkPolicy", + "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", + "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", + "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", + "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress, Egress, or Ingress,Egress. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", +} + +func (NetworkPolicySpec) SwaggerDoc() map[string]string { + return map_NetworkPolicySpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..0037638a8 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -0,0 +1,282 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPBlock) DeepCopyInto(out *IPBlock) { + *out = *in + if in.Except != nil { + in, out := &in.Except, &out.Except + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPBlock. +func (in *IPBlock) DeepCopy() *IPBlock { + if in == nil { + return nil + } + out := new(IPBlock) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicy. +func (in *NetworkPolicy) DeepCopy() *NetworkPolicy { + if in == nil { + return nil + } + out := new(NetworkPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyEgressRule) DeepCopyInto(out *NetworkPolicyEgressRule) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.To != nil { + in, out := &in.To, &out.To + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyEgressRule. +func (in *NetworkPolicyEgressRule) DeepCopy() *NetworkPolicyEgressRule { + if in == nil { + return nil + } + out := new(NetworkPolicyEgressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkPolicyIngressRule) DeepCopyInto(out *NetworkPolicyIngressRule) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.From != nil { + in, out := &in.From, &out.From + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyIngressRule. +func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { + if in == nil { + return nil + } + out := new(NetworkPolicyIngressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyList. +func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList { + if in == nil { + return nil + } + out := new(NetworkPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) { + *out = *in + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.IPBlock != nil { + in, out := &in.IPBlock, &out.IPBlock + if *in == nil { + *out = nil + } else { + *out = new(IPBlock) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPeer. +func (in *NetworkPolicyPeer) DeepCopy() *NetworkPolicyPeer { + if in == nil { + return nil + } + out := new(NetworkPolicyPeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) { + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + if *in == nil { + *out = nil + } else { + *out = new(core_v1.Protocol) + **out = **in + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPort. 
+func (in *NetworkPolicyPort) DeepCopy() *NetworkPolicyPort { + if in == nil { + return nil + } + out := new(NetworkPolicyPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) { + *out = *in + in.PodSelector.DeepCopyInto(&out.PodSelector) + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]NetworkPolicyIngressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]NetworkPolicyEgressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PolicyTypes != nil { + in, out := &in.PolicyTypes, &out.PolicyTypes + *out = make([]PolicyType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicySpec. +func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec { + if in == nil { + return nil + } + out := new(NetworkPolicySpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go similarity index 87% rename from vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/doc.go rename to vendor/k8s.io/api/policy/v1beta1/doc.go index aadbe1416..9c456f923 100644 --- a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/doc.go +++ b/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -14,7 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:deepcopy-gen=package + // Package policy is for any kind of policy object. Suitable examples, even if // they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, // NetworkPolicy, etc. -package v1beta1 +// +k8s:openapi-gen=true +package v1beta1 // import "k8s.io/api/policy/v1beta1" diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go new file mode 100644 index 000000000..505fb0e03 --- /dev/null +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -0,0 +1,4056 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto + + It has these top-level messages: + AllowedFlexVolume + AllowedHostPath + Eviction + FSGroupStrategyOptions + HostPortRange + IDRange + PodDisruptionBudget + PodDisruptionBudgetList + PodDisruptionBudgetSpec + PodDisruptionBudgetStatus + PodSecurityPolicy + PodSecurityPolicyList + PodSecurityPolicySpec + RunAsUserStrategyOptions + SELinuxStrategyOptions + SupplementalGroupsStrategyOptions +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_api_core_v1 "k8s.io/api/core/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (*AllowedHostPath) ProtoMessage() {} +func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *Eviction) Reset() { *m = Eviction{} } +func (*Eviction) ProtoMessage() {} +func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } +func (*PodDisruptionBudget) ProtoMessage() {} +func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } +func (*PodDisruptionBudgetList) ProtoMessage() {} +func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } +func (*PodDisruptionBudgetSpec) ProtoMessage() {} +func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } +func (*PodDisruptionBudgetStatus) ProtoMessage() {} +func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { + 
return fileDescriptorGenerated, []int{9} +} + +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } +func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } +func (*RunAsUserStrategyOptions) ProtoMessage() {} +func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{13} +} + +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } +func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} +func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{15} +} + +func init() { + proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.policy.v1beta1.AllowedFlexVolume") + proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.policy.v1beta1.AllowedHostPath") + proto.RegisterType((*Eviction)(nil), "k8s.io.api.policy.v1beta1.Eviction") + proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.FSGroupStrategyOptions") + proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.policy.v1beta1.HostPortRange") + proto.RegisterType((*IDRange)(nil), "k8s.io.api.policy.v1beta1.IDRange") + proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudget") + proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetList") + proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec") + proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus") + proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy") + proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicyList") + proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec") + proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsUserStrategyOptions") + proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") + proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") +} +func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) + return i, nil +} + 
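
An aside for readers of the generated marshaling code above (this sketch is not part of the vendored file): the MarshalTo methods emit each field as a protobuf tag byte (field number shifted left by three bits, OR'd with the wire type) followed, for length-delimited fields, by a base-128 varint length and the raw bytes; the shift-by-7 loops in the Unmarshal functions earlier in this file decode the same varints. The standalone Go sketch below uses illustrative helper names and assumes encodeVarintGenerated writes standard base-128 varints, consistent with that decoding; it reproduces the layout used for a string field such as AllowedFlexVolume.Driver.

package main

import "fmt"

// putUvarint appends v to buf using protobuf's base-128 varint encoding:
// seven payload bits per byte, high bit set on every byte except the last.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 0x80 {
		buf = append(buf, byte(v)|0x80)
		v >>= 7
	}
	return append(buf, byte(v))
}

// appendStringField mimics the pattern the generated MarshalTo methods use
// for a length-delimited (wire type 2) string field: tag, length, bytes.
func appendStringField(buf []byte, fieldNum int, s string) []byte {
	tag := uint64(fieldNum)<<3 | 2 // wire type 2 = length-delimited
	buf = putUvarint(buf, tag)
	buf = putUvarint(buf, uint64(len(s)))
	return append(buf, s...)
}

func main() {
	// Field 1 with a 17-byte string prints "0a 11" followed by the UTF-8
	// bytes, matching the hard-coded 0xa tag in AllowedFlexVolume.MarshalTo.
	b := appendStringField(nil, 1, "flexvolume-driver")
	fmt.Printf("%% x -> % x\n", b)
}
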
+func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i += copy(dAtA[i:], m.PathPrefix) + dAtA[i] = 0x10 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *Eviction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.DeleteOptions != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeleteOptions.Size())) + n2, err := m.DeleteOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HostPortRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + return i, nil +} + +func (m *IDRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + return i, nil +} + +func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDisruptionBudget) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { 
+ return 0, err + } + i += n4 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n5, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *PodDisruptionBudgetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDisruptionBudgetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodDisruptionBudgetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDisruptionBudgetSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MinAvailable != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinAvailable.Size())) + n7, err := m.MinAvailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n8, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.MaxUnavailable != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n9, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + +func (m *PodDisruptionBudgetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDisruptionBudgetStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + if len(m.DisruptedPods) > 0 { + keysForDisruptedPods := make([]string, 0, len(m.DisruptedPods)) + for k := range m.DisruptedPods { + keysForDisruptedPods = append(keysForDisruptedPods, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) + for _, k := range keysForDisruptedPods { + dAtA[i] = 0x12 + i++ + v := m.DisruptedPods[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n10, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + } + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodDisruptionsAllowed)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentHealthy)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredHealthy)) + 
dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpectedPods)) + return i, nil +} + +func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n11, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n12, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + return i, nil +} + +func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n13, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x30 + i++ + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.HostPorts) > 0 { + for _, msg := range m.HostPorts { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x40 + i++ + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x48 + i++ + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, 
i, uint64(m.SELinux.Size())) + n14, err := m.SELinux.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) + n15, err := m.RunAsUser.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) + n16, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) + n17, err := m.FSGroup.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + dAtA[i] = 0x70 + i++ + if m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.DefaultAllowPrivilegeEscalation != nil { + dAtA[i] = 0x78 + i++ + if *m.DefaultAllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.AllowPrivilegeEscalation != nil { + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.AllowedHostPaths) > 0 { + for _, msg := range m.AllowedHostPaths { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.AllowedFlexVolumes) > 0 { + for _, msg := range m.AllowedFlexVolumes { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.AllowedUnsafeSysctls) > 0 { + for _, s := range m.AllowedUnsafeSysctls { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ForbiddenSysctls) > 0 { + for _, s := range m.ForbiddenSysctls { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) + if m.SELinuxOptions != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) + n18, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) 
+ if err != nil { + return 0, err + } + i += n18 + } + return i, nil +} + +func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *AllowedFlexVolume) Size() (n int) { + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AllowedHostPath) Size() (n int) { + var l int + _ = l + l = len(m.PathPrefix) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Eviction) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DeleteOptions != nil { + l = m.DeleteOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *FSGroupStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HostPortRange) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *IDRange) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *PodDisruptionBudget) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodDisruptionBudgetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodDisruptionBudgetSpec) Size() (n int) { + var l int + _ = l + if m.MinAvailable != nil { + l = m.MinAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + return n +} + +func (m *PodDisruptionBudgetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if len(m.DisruptedPods) > 0 { + for k, v := range m.DisruptedPods { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + n += 1 + sovGenerated(uint64(m.PodDisruptionsAllowed)) + n += 1 + sovGenerated(uint64(m.CurrentHealthy)) + n += 1 + sovGenerated(uint64(m.DesiredHealthy)) + n += 1 + sovGenerated(uint64(m.ExpectedPods)) + return n +} + +func (m *PodSecurityPolicy) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityPolicyList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicySpec) Size() (n int) { + var l int + _ = l + n += 2 + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.HostPorts) > 0 { + for _, e := range m.HostPorts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + n += 2 + l = m.SELinux.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.RunAsUser.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.SupplementalGroups.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FSGroup.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.DefaultAllowPrivilegeEscalation != nil { + n += 2 + } + if m.AllowPrivilegeEscalation != nil { + n += 3 + } + if len(m.AllowedHostPaths) > 0 { + for _, e := range m.AllowedHostPaths { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedFlexVolumes) > 0 { + for _, e := range m.AllowedFlexVolumes { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedUnsafeSysctls) > 0 { + for _, s := range m.AllowedUnsafeSysctls { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.ForbiddenSysctls) > 0 { + for _, s := range m.ForbiddenSysctls { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RunAsUserStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SELinuxStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { 
+ for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AllowedFlexVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedFlexVolume{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `}`, + }, "") + return s +} +func (this *AllowedHostPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedHostPath{`, + `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Eviction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Eviction{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *FSGroupStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FSGroupStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HostPortRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostPortRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *IDRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IDRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudget) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDisruptionBudget{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDisruptionBudgetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, + `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) 
+ `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetStatus) String() string { + if this == nil { + return "nil" + } + keysForDisruptedPods := make([]string, 0, len(this.DisruptedPods)) + for k := range this.DisruptedPods { + keysForDisruptedPods = append(keysForDisruptedPods, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) + mapStringForDisruptedPods := "map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time{" + for _, k := range keysForDisruptedPods { + mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) + } + mapStringForDisruptedPods += "}" + s := strings.Join([]string{`&PodDisruptionBudgetStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `DisruptedPods:` + mapStringForDisruptedPods + `,`, + `PodDisruptionsAllowed:` + fmt.Sprintf("%v", this.PodDisruptionsAllowed) + `,`, + `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, + `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, + `ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicyList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicySpec{`, + `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, + `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, + `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, + `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, + `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, + `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, + `SupplementalGroups:` + 
strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, + `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, + `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, + `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, + `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, + `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, + `AllowedFlexVolumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, + `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, + `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, + `}`, + }, "") + return s +} +func (this *RunAsUserStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RunAsUserStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_api_core_v1.SELinuxOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SupplementalGroupsStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PathPrefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Eviction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Eviction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) 
& 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteOptions == nil { + m.DeleteOptions = &k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions{} + } + if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostPortRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Min |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IDRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Min |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") + } + 
if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodDisruptionBudget{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MinAvailable == nil { + m.MinAvailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.DisruptedPods == nil { + m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.DisruptedPods[mapkey] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_apis_meta_v1.Time + m.DisruptedPods[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PodDisruptionsAllowed", wireType) + } + m.PodDisruptionsAllowed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PodDisruptionsAllowed |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) + } + m.CurrentHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentHealthy |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) + } + m.DesiredHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredHealthy |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) + } + m.ExpectedPods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpectedPods |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodSecurityPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Privileged = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDropCapabilities = 
append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostNetwork = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostPorts = append(m.HostPorts, HostPortRange{}) + if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostPID = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostIPC = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SELinux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnlyRootFilesystem = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DefaultAllowPrivilegeEscalation = &b + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { 
+ break + } + } + b := bool(v != 0) + m.AllowPrivilegeEscalation = &b + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) + if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) + if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = SELinuxStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &k8s_io_api_core_v1.SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 1679 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0x23, 0xb7, + 0x15, 0xf7, 0xac, 0x6c, 0x4b, 0xa6, 0x6d, 0xad, 0x4d, 0x7b, 0xdd, 0x89, 0xd1, 0xd5, 0x24, 0x0a, + 0x50, 0x6c, 0x83, 0x64, 0x14, 0x7b, 0x93, 0xd6, 0x68, 0xda, 0x22, 0x1e, 0xcb, 0xff, 0x02, 0xbb, + 0x56, 0xa9, 0xdd, 0xa0, 0x2d, 0xb6, 0x45, 0x29, 0x0d, 0x2d, 0x31, 0x1e, 0xcd, 0x4c, 0x49, 0x8e, + 0x22, 0xdd, 0x7a, 0xe8, 0xa1, 0xe8, 0xa9, 0x5f, 0xa0, 0x9f, 0xa0, 0xe8, 0xa9, 0x5f, 0xc2, 0x05, + 0x8a, 0x22, 0xc7, 0xa0, 0x07, 0xa1, 0xab, 0xa2, 0x5f, 0x22, 0xa7, 0x62, 0x28, 0x8e, 0xa4, 0xf9, + 0x23, 0x79, 0x1d, 0x60, 0xf7, 0xa6, 0xe1, 0xfb, 0xfd, 0x7e, 0xef, 0xf1, 0xf1, 0xf1, 0x91, 0x14, + 0xb0, 0x6e, 0x0e, 0xb8, 0x49, 0xbd, 0xca, 0x4d, 0xd0, 0x20, 0xcc, 0x25, 0x82, 0xf0, 0x4a, 0x97, + 0xb8, 0xb6, 0xc7, 0x2a, 0xca, 0x80, 0x7d, 0x5a, 0xf1, 0x3d, 0x87, 0x36, 0xfb, 0x95, 0xee, 0x5e, + 0x83, 0x08, 0xbc, 0x57, 0x69, 0x11, 0x97, 0x30, 0x2c, 0x88, 0x6d, 0xfa, 0xcc, 0x13, 0x1e, 0x7c, + 0x6b, 0x04, 0x35, 0xb1, 0x4f, 0xcd, 0x11, 0xd4, 0x54, 0xd0, 0xdd, 0x0f, 0x5a, 0x54, 0xb4, 0x83, + 0x86, 0xd9, 0xf4, 0x3a, 0x95, 0x96, 0xd7, 0xf2, 0x2a, 0x92, 0xd1, 0x08, 0xae, 0xe5, 0x97, 0xfc, + 0x90, 0xbf, 0x46, 0x4a, 0xbb, 0xe5, 0x29, 0xa7, 0x4d, 0x8f, 0x91, 0x4a, 0x37, 0xe5, 0x6d, 0xf7, + 0xa3, 0x09, 0xa6, 0x83, 0x9b, 0x6d, 0xea, 0x12, 0xd6, 0xaf, 0xf8, 0x37, 0xad, 0x70, 0x80, 0x57, + 0x3a, 0x44, 0xe0, 0x2c, 0x56, 0x65, 0x16, 0x8b, 0x05, 0xae, 0xa0, 0x1d, 0x92, 0x22, 0xfc, 0xe0, + 0x2e, 0x02, 0x6f, 0xb6, 0x49, 0x07, 0xa7, 0x78, 0x4f, 0x67, 0xf1, 0x02, 0x41, 0x9d, 0x0a, 0x75, + 0x05, 0x17, 0x2c, 0x49, 0x2a, 0x7f, 0x02, 0x36, 
0x0f, 0x1d, 0xc7, 0xfb, 0x92, 0xd8, 0x27, 0x0e, + 0xe9, 0x7d, 0xee, 0x39, 0x41, 0x87, 0xc0, 0xef, 0x81, 0x65, 0x9b, 0xd1, 0x2e, 0x61, 0xba, 0xf6, + 0xb6, 0xf6, 0x64, 0xc5, 0x2a, 0xde, 0x0e, 0x8c, 0x85, 0xe1, 0xc0, 0x58, 0xae, 0xca, 0x51, 0xa4, + 0xac, 0x65, 0x0e, 0x1e, 0x2a, 0xf2, 0x99, 0xc7, 0x45, 0x0d, 0x8b, 0x36, 0xdc, 0x07, 0xc0, 0xc7, + 0xa2, 0x5d, 0x63, 0xe4, 0x9a, 0xf6, 0x14, 0x1d, 0x2a, 0x3a, 0xa8, 0x8d, 0x2d, 0x68, 0x0a, 0x05, + 0xdf, 0x07, 0x05, 0x46, 0xb0, 0x7d, 0xe5, 0x3a, 0x7d, 0xfd, 0xc1, 0xdb, 0xda, 0x93, 0x82, 0xb5, + 0xa1, 0x18, 0x05, 0xa4, 0xc6, 0xd1, 0x18, 0x51, 0xfe, 0xb7, 0x06, 0x0a, 0xc7, 0x5d, 0xda, 0x14, + 0xd4, 0x73, 0xe1, 0x6f, 0x41, 0x21, 0xcc, 0xbb, 0x8d, 0x05, 0x96, 0xce, 0x56, 0xf7, 0x3f, 0x34, + 0x27, 0x35, 0x31, 0x4e, 0x83, 0xe9, 0xdf, 0xb4, 0xc2, 0x01, 0x6e, 0x86, 0x68, 0xb3, 0xbb, 0x67, + 0x5e, 0x35, 0xbe, 0x20, 0x4d, 0x71, 0x49, 0x04, 0x9e, 0x84, 0x37, 0x19, 0x43, 0x63, 0x55, 0xe8, + 0x80, 0x75, 0x9b, 0x38, 0x44, 0x90, 0x2b, 0x3f, 0xf4, 0xc8, 0x65, 0x84, 0xab, 0xfb, 0x4f, 0x5f, + 0xcd, 0x4d, 0x75, 0x9a, 0x6a, 0x6d, 0x0e, 0x07, 0xc6, 0x7a, 0x6c, 0x08, 0xc5, 0xc5, 0xcb, 0x7f, + 0xd1, 0xc0, 0xce, 0x49, 0xfd, 0x94, 0x79, 0x81, 0x5f, 0x17, 0xe1, 0x3a, 0xb5, 0xfa, 0xca, 0x04, + 0x7f, 0x08, 0x16, 0x59, 0xe0, 0x10, 0x95, 0xd3, 0x77, 0x55, 0xd0, 0x8b, 0x28, 0x70, 0xc8, 0x37, + 0x03, 0x63, 0x2b, 0xc1, 0x7a, 0xd6, 0xf7, 0x09, 0x92, 0x04, 0xf8, 0x19, 0x58, 0x66, 0xd8, 0x6d, + 0x91, 0x30, 0xf4, 0xdc, 0x93, 0xd5, 0xfd, 0xb2, 0x39, 0x73, 0xd7, 0x98, 0xe7, 0x55, 0x14, 0x42, + 0x27, 0x2b, 0x2e, 0x3f, 0x39, 0x52, 0x0a, 0xe5, 0x4b, 0xb0, 0x2e, 0x97, 0xda, 0x63, 0x42, 0x5a, + 0xe0, 0x63, 0x90, 0xeb, 0x50, 0x57, 0x06, 0xb5, 0x64, 0xad, 0x2a, 0x56, 0xee, 0x92, 0xba, 0x28, + 0x1c, 0x97, 0x66, 0xdc, 0x93, 0x39, 0x9b, 0x36, 0xe3, 0x1e, 0x0a, 0xc7, 0xcb, 0xa7, 0x20, 0xaf, + 0x3c, 0x4e, 0x0b, 0xe5, 0xe6, 0x0b, 0xe5, 0x32, 0x84, 0xfe, 0xfa, 0x00, 0x6c, 0xd5, 0x3c, 0xbb, + 0x4a, 0x39, 0x0b, 0x64, 0xbe, 0xac, 0xc0, 0x6e, 0x11, 0xf1, 0x06, 0xea, 0xe3, 0x19, 0x58, 0xe4, + 0x3e, 0x69, 0xaa, 0xb2, 0xd8, 0x9f, 0x93, 0xdb, 0x8c, 0xf8, 0xea, 0x3e, 0x69, 0x5a, 0x6b, 0xd1, + 0x52, 0x86, 0x5f, 0x48, 0xaa, 0xc1, 0x17, 0x60, 0x99, 0x0b, 0x2c, 0x02, 0xae, 0xe7, 0xa4, 0xee, + 0x47, 0xf7, 0xd4, 0x95, 0xdc, 0xc9, 0x2a, 0x8e, 0xbe, 0x91, 0xd2, 0x2c, 0xff, 0x53, 0x03, 0xdf, + 0xc9, 0x60, 0x5d, 0x50, 0x2e, 0xe0, 0x8b, 0x54, 0xc6, 0xcc, 0x57, 0xcb, 0x58, 0xc8, 0x96, 0xf9, + 0x1a, 0x6f, 0xde, 0x68, 0x64, 0x2a, 0x5b, 0x75, 0xb0, 0x44, 0x05, 0xe9, 0x44, 0xa5, 0x68, 0xde, + 0x6f, 0x5a, 0xd6, 0xba, 0x92, 0x5e, 0x3a, 0x0f, 0x45, 0xd0, 0x48, 0xab, 0xfc, 0xaf, 0x07, 0x99, + 0xd3, 0x09, 0xd3, 0x09, 0xaf, 0xc1, 0x5a, 0x87, 0xba, 0x87, 0x5d, 0x4c, 0x1d, 0xdc, 0x50, 0xbb, + 0x67, 0x5e, 0x11, 0x84, 0xbd, 0xd2, 0x1c, 0xf5, 0x4a, 0xf3, 0xdc, 0x15, 0x57, 0xac, 0x2e, 0x18, + 0x75, 0x5b, 0xd6, 0xc6, 0x70, 0x60, 0xac, 0x5d, 0x4e, 0x29, 0xa1, 0x98, 0x2e, 0xfc, 0x35, 0x28, + 0x70, 0xe2, 0x90, 0xa6, 0xf0, 0xd8, 0xfd, 0x3a, 0xc4, 0x05, 0x6e, 0x10, 0xa7, 0xae, 0xa8, 0xd6, + 0x5a, 0x98, 0xb7, 0xe8, 0x0b, 0x8d, 0x25, 0xa1, 0x03, 0x8a, 0x1d, 0xdc, 0x7b, 0xee, 0xe2, 0xf1, + 0x44, 0x72, 0xdf, 0x72, 0x22, 0x70, 0x38, 0x30, 0x8a, 0x97, 0x31, 0x2d, 0x94, 0xd0, 0x2e, 0xff, + 0x6f, 0x11, 0xbc, 0x35, 0xb3, 0xaa, 0xe0, 0x67, 0x00, 0x7a, 0x0d, 0x4e, 0x58, 0x97, 0xd8, 0xa7, + 0xa3, 0xd3, 0x84, 0x7a, 0xd1, 0xc6, 0xdd, 0x55, 0x0b, 0x04, 0xaf, 0x52, 0x08, 0x94, 0xc1, 0x82, + 0x7f, 0xd0, 0xc0, 0xba, 0x3d, 0x72, 0x43, 0xec, 0x9a, 0x67, 0x47, 0x85, 0x71, 0xfa, 0x6d, 0xea, + 0xdd, 0xac, 0x4e, 0x2b, 0x1d, 0xbb, 0x82, 0xf5, 0xad, 0x47, 0x2a, 0xa0, 
0xf5, 0x98, 0x0d, 0xc5, + 0x9d, 0xc2, 0x4b, 0x00, 0xed, 0xb1, 0x24, 0x57, 0x67, 0x9a, 0x4c, 0xf1, 0x92, 0xf5, 0x58, 0x29, + 0x3c, 0x8a, 0xf9, 0x8d, 0x40, 0x28, 0x83, 0x08, 0x7f, 0x0a, 0x8a, 0xcd, 0x80, 0x31, 0xe2, 0x8a, + 0x33, 0x82, 0x1d, 0xd1, 0xee, 0xeb, 0x8b, 0x52, 0x6a, 0x47, 0x49, 0x15, 0x8f, 0x62, 0x56, 0x94, + 0x40, 0x87, 0x7c, 0x9b, 0x70, 0xca, 0x88, 0x1d, 0xf1, 0x97, 0xe2, 0xfc, 0x6a, 0xcc, 0x8a, 0x12, + 0x68, 0x78, 0x00, 0xd6, 0x48, 0xcf, 0x27, 0xcd, 0x28, 0xa7, 0xcb, 0x92, 0xbd, 0xad, 0xd8, 0x6b, + 0xc7, 0x53, 0x36, 0x14, 0x43, 0xee, 0x3a, 0x00, 0xa6, 0x93, 0x08, 0x37, 0x40, 0xee, 0x86, 0xf4, + 0x47, 0x27, 0x0f, 0x0a, 0x7f, 0xc2, 0x4f, 0xc1, 0x52, 0x17, 0x3b, 0x01, 0x51, 0xb5, 0xfe, 0xde, + 0xab, 0xd5, 0xfa, 0x33, 0xda, 0x21, 0x68, 0x44, 0xfc, 0xd1, 0x83, 0x03, 0xad, 0xfc, 0x0f, 0x0d, + 0x6c, 0xd6, 0x3c, 0xbb, 0x4e, 0x9a, 0x01, 0xa3, 0xa2, 0x5f, 0x93, 0xeb, 0xfc, 0x06, 0x7a, 0x36, + 0x8a, 0xf5, 0xec, 0x0f, 0xe7, 0xd7, 0x5a, 0x3c, 0xba, 0x59, 0x1d, 0xbb, 0x7c, 0xab, 0x81, 0x47, + 0x29, 0xf4, 0x1b, 0xe8, 0xa8, 0x3f, 0x8f, 0x77, 0xd4, 0xf7, 0xef, 0x33, 0x99, 0x19, 0xfd, 0xf4, + 0x4f, 0xc5, 0x8c, 0xa9, 0xc8, 0x6e, 0x1a, 0xde, 0xee, 0x18, 0xed, 0x52, 0x87, 0xb4, 0x88, 0x2d, + 0x27, 0x53, 0x98, 0xba, 0xdd, 0x8d, 0x2d, 0x68, 0x0a, 0x05, 0x39, 0xd8, 0xb1, 0xc9, 0x35, 0x0e, + 0x1c, 0x71, 0x68, 0xdb, 0x47, 0xd8, 0xc7, 0x0d, 0xea, 0x50, 0x41, 0xd5, 0x75, 0x64, 0xc5, 0xfa, + 0x64, 0x38, 0x30, 0x76, 0xaa, 0x99, 0x88, 0x6f, 0x06, 0xc6, 0xe3, 0xf4, 0xbd, 0xdc, 0x1c, 0x43, + 0xfa, 0x68, 0x86, 0x34, 0xec, 0x03, 0x9d, 0x91, 0xdf, 0x05, 0xe1, 0xa6, 0xa8, 0x32, 0xcf, 0x8f, + 0xb9, 0xcd, 0x49, 0xb7, 0x3f, 0x19, 0x0e, 0x0c, 0x1d, 0xcd, 0xc0, 0xdc, 0xed, 0x78, 0xa6, 0x3c, + 0xfc, 0x02, 0x6c, 0xe1, 0x51, 0x1f, 0x88, 0x79, 0x5d, 0x94, 0x5e, 0x0f, 0x86, 0x03, 0x63, 0xeb, + 0x30, 0x6d, 0xbe, 0xdb, 0x61, 0x96, 0x28, 0xac, 0x80, 0x7c, 0x57, 0x5e, 0xd9, 0xb9, 0xbe, 0x24, + 0xf5, 0x1f, 0x0d, 0x07, 0x46, 0x7e, 0x74, 0x8b, 0x0f, 0x35, 0x97, 0x4f, 0xea, 0xf2, 0x22, 0x18, + 0xa1, 0xe0, 0xc7, 0x60, 0xb5, 0xed, 0x71, 0xf1, 0x33, 0x22, 0xbe, 0xf4, 0xd8, 0x8d, 0x6c, 0x0c, + 0x05, 0x6b, 0x4b, 0xad, 0xe0, 0xea, 0xd9, 0xc4, 0x84, 0xa6, 0x71, 0xf0, 0x97, 0x60, 0xa5, 0xad, + 0xae, 0x7d, 0x5c, 0xcf, 0xcb, 0x42, 0x7b, 0x32, 0xa7, 0xd0, 0x62, 0x57, 0x44, 0x6b, 0x53, 0xc9, + 0xaf, 0x44, 0xc3, 0x1c, 0x4d, 0xd4, 0xe0, 0xf7, 0x41, 0x5e, 0x7e, 0x9c, 0x57, 0xf5, 0x82, 0x8c, + 0xe6, 0xa1, 0x82, 0xe7, 0xcf, 0x46, 0xc3, 0x28, 0xb2, 0x47, 0xd0, 0xf3, 0xda, 0x91, 0xbe, 0x92, + 0x86, 0x9e, 0xd7, 0x8e, 0x50, 0x64, 0x87, 0x2f, 0x40, 0x9e, 0x93, 0x0b, 0xea, 0x06, 0x3d, 0x1d, + 0xc8, 0x2d, 0xb7, 0x37, 0x27, 0xdc, 0xfa, 0xb1, 0x44, 0x26, 0x2e, 0xdc, 0x13, 0x75, 0x65, 0x47, + 0x91, 0x24, 0xb4, 0xc1, 0x0a, 0x0b, 0xdc, 0x43, 0xfe, 0x9c, 0x13, 0xa6, 0xaf, 0xa6, 0x4e, 0xfb, + 0xa4, 0x3e, 0x8a, 0xb0, 0x49, 0x0f, 0xe3, 0xcc, 0x8c, 0x11, 0x68, 0x22, 0x0c, 0xff, 0xa8, 0x01, + 0xc8, 0x03, 0xdf, 0x77, 0x48, 0x87, 0xb8, 0x02, 0x3b, 0xf2, 0x7e, 0xcf, 0xf5, 0x35, 0xe9, 0xef, + 0xc7, 0xf3, 0xe6, 0x93, 0x22, 0x25, 0x1d, 0x8f, 0x8f, 0xe9, 0x34, 0x14, 0x65, 0xf8, 0x0c, 0xd3, + 0x79, 0xcd, 0xe5, 0x6f, 0x7d, 0xfd, 0xce, 0x74, 0x66, 0xbf, 0x5f, 0x26, 0xe9, 0x54, 0x76, 0x14, + 0x49, 0xc2, 0xcf, 0xc1, 0x4e, 0xf4, 0xba, 0x43, 0x9e, 0x27, 0x4e, 0xa8, 0x43, 0x78, 0x9f, 0x0b, + 0xd2, 0xd1, 0x8b, 0x72, 0x99, 0x4b, 0x8a, 0xb9, 0x83, 0x32, 0x51, 0x68, 0x06, 0x1b, 0x76, 0x80, + 0x11, 0xb5, 0x87, 0x70, 0xef, 0x8c, 0xfb, 0xd3, 0x31, 0x6f, 0x62, 0x67, 0x74, 0x6b, 0x79, 0x28, + 0x1d, 0xbc, 0x3b, 0x1c, 0x18, 0x46, 0x75, 0x3e, 0x14, 0xdd, 0xa5, 0x05, 0x7f, 0x01, 0x74, 0x3c, + 
0xcb, 0xcf, 0x86, 0xf4, 0xf3, 0xdd, 0xb0, 0xe7, 0xcc, 0x74, 0x30, 0x93, 0x0d, 0x7d, 0xb0, 0x81, + 0xe3, 0xef, 0x6c, 0xae, 0x6f, 0xca, 0x5d, 0xf8, 0xde, 0x9c, 0x75, 0x48, 0x3c, 0xcd, 0x2d, 0x5d, + 0xa5, 0x71, 0x23, 0x61, 0xe0, 0x28, 0xa5, 0x0e, 0x7b, 0x00, 0xe2, 0xe4, 0xdf, 0x02, 0x5c, 0x87, + 0x77, 0x1e, 0x31, 0xa9, 0xff, 0x12, 0x26, 0xa5, 0x96, 0x32, 0x71, 0x94, 0xe1, 0x03, 0x5e, 0x80, + 0x6d, 0x35, 0xfa, 0xdc, 0xe5, 0xf8, 0x9a, 0xd4, 0xfb, 0xbc, 0x29, 0x1c, 0xae, 0x6f, 0xc9, 0xfe, + 0xa6, 0x0f, 0x07, 0xc6, 0xf6, 0x61, 0x86, 0x1d, 0x65, 0xb2, 0xe0, 0xa7, 0x60, 0xe3, 0xda, 0x63, + 0x0d, 0x6a, 0xdb, 0xc4, 0x8d, 0x94, 0xb6, 0xa5, 0xd2, 0x76, 0x98, 0x89, 0x93, 0x84, 0x0d, 0xa5, + 0xd0, 0xe1, 0x8b, 0x5c, 0x9f, 0xb5, 0x81, 0xe1, 0xc7, 0xb1, 0x37, 0xf9, 0x3b, 0x89, 0x37, 0xf9, + 0x66, 0x8a, 0xf7, 0x1a, 0x5e, 0xe4, 0x7f, 0xd3, 0xc0, 0x4e, 0x76, 0x03, 0x83, 0x4f, 0x63, 0xd1, + 0x19, 0x89, 0xe8, 0x1e, 0x26, 0x58, 0x2a, 0xb6, 0xdf, 0x80, 0xa2, 0x6a, 0x73, 0xf1, 0x3f, 0x3c, + 0x62, 0x31, 0x86, 0xe7, 0x53, 0x78, 0x43, 0x51, 0x12, 0xd1, 0x16, 0x97, 0x6f, 0x8b, 0xf8, 0x18, + 0x4a, 0xa8, 0x95, 0xff, 0xae, 0x81, 0x77, 0xee, 0x6c, 0x50, 0xd0, 0x8a, 0x85, 0x6e, 0x26, 0x42, + 0x2f, 0xcd, 0x16, 0x78, 0x3d, 0xff, 0x7b, 0x58, 0x1f, 0xdc, 0xbe, 0x2c, 0x2d, 0x7c, 0xf5, 0xb2, + 0xb4, 0xf0, 0xf5, 0xcb, 0xd2, 0xc2, 0xef, 0x87, 0x25, 0xed, 0x76, 0x58, 0xd2, 0xbe, 0x1a, 0x96, + 0xb4, 0xaf, 0x87, 0x25, 0xed, 0x3f, 0xc3, 0x92, 0xf6, 0xe7, 0xff, 0x96, 0x16, 0x7e, 0x95, 0x57, + 0x72, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x91, 0xe5, 0x7f, 0xdc, 0x14, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto new file mode 100644 index 000000000..1a14d946f --- /dev/null +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -0,0 +1,334 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.policy.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +message AllowedFlexVolume { + // driver is the name of the Flexvolume driver. + optional string driver = 1; +} + +// AllowedHostPath defines the host volume conditions that will be enabled by a policy +// for pods to use. It requires the path prefix to be defined. +message AllowedHostPath { + // pathPrefix is the path prefix that the host volume must match. + // It does not support `*`. + // Trailing slashes are trimmed when validating the path prefix with a host path. 
+ // + // Examples: + // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` + // `/foo` would not allow `/food` or `/etc/foo` + optional string pathPrefix = 1; + + // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. + // +optional + optional bool readOnly = 2; +} + +// Eviction evicts a pod from its node subject to certain policies and safety constraints. +// This is a subresource of Pod. A request to cause such an eviction is +// created by POSTing to .../pods//evictions. +message Eviction { + // ObjectMeta describes the pod that is being evicted. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // DeleteOptions may be provided + optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; +} + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +message FSGroupStrategyOptions { + // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // +optional + optional string rule = 1; + + // ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + +// HostPortRange defines a range of host ports that will be enabled by a policy +// for pods to use. It requires both the start and end to be defined. +message HostPortRange { + // min is the start of the range, inclusive. + optional int32 min = 1; + + // max is the end of the range, inclusive. + optional int32 max = 2; +} + +// IDRange provides a min/max of an allowed range of IDs. +message IDRange { + // min is the start of the range, inclusive. + optional int64 min = 1; + + // max is the end of the range, inclusive. + optional int64 max = 2; +} + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +message PodDisruptionBudget { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the PodDisruptionBudget. + optional PodDisruptionBudgetSpec spec = 2; + + // Most recently observed status of the PodDisruptionBudget. + optional PodDisruptionBudgetStatus status = 3; +} + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +message PodDisruptionBudgetList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated PodDisruptionBudget items = 2; +} + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +message PodDisruptionBudgetSpec { + // An eviction is allowed if at least "minAvailable" pods selected by + // "selector" will still be available after the eviction, i.e. even in the + // absence of the evicted pod. So for example you can prevent all voluntary + // evictions by specifying "100%". + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; + + // Label query over pods whose evictions are managed by the disruption + // budget. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // An eviction is allowed if at most "maxUnavailable" pods selected by + // "selector" are unavailable after the eviction, i.e. even in absence of + // the evicted pod. For example, one can prevent all voluntary evictions + // by specifying 0. This is a mutually exclusive setting with "minAvailable". 
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +message PodDisruptionBudgetStatus { + // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other + // status informatio is valid only if observedGeneration equals to PDB's object generation. + // +optional + optional int64 observedGeneration = 1; + + // DisruptedPods contains information about pods whose eviction was + // processed by the API server eviction subresource handler but has not + // yet been observed by the PodDisruptionBudget controller. + // A pod will be in this map from the time when the API server processed the + // eviction request to the time when the pod is seen by PDB controller + // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod + // and the value is the time when the API server processed the eviction request. If + // the deletion didn't occur and a pod is still there it will be removed from + // the list automatically by PodDisruptionBudget controller after some time. + // If everything goes smooth this map should be empty for the most of the time. + // Large number of entries in the map may indicate problems with pod deletions. + map disruptedPods = 2; + + // Number of pod disruptions that are currently allowed. + optional int32 disruptionsAllowed = 3; + + // current number of healthy pods + optional int32 currentHealthy = 4; + + // minimum desired number of healthy pods + optional int32 desiredHealthy = 5; + + // total number of pods counted by this disruption budget + optional int32 expectedPods = 6; +} + +// PodSecurityPolicy governs the ability to make requests that affect the Security Context +// that will be applied to a pod and container. +message PodSecurityPolicy { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec defines the policy enforced. + // +optional + optional PodSecurityPolicySpec spec = 2; +} + +// PodSecurityPolicyList is a list of PodSecurityPolicy objects. +message PodSecurityPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of schema objects. + repeated PodSecurityPolicy items = 2; +} + +// PodSecurityPolicySpec defines the policy enforced. +message PodSecurityPolicySpec { + // privileged determines if a pod can request to be run as privileged. + // +optional + optional bool privileged = 1; + + // defaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the allowedCapabilities list. + // +optional + repeated string defaultAddCapabilities = 2; + + // requiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. 
+ // +optional + repeated string requiredDropCapabilities = 3; + + // allowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. + // +optional + repeated string allowedCapabilities = 4; + + // volumes is a white list of allowed volume plugins. Empty indicates that + // no volumes may be used. To allow all volumes you may use '*'. + // +optional + repeated string volumes = 5; + + // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // +optional + optional bool hostNetwork = 6; + + // hostPorts determines which host port ranges are allowed to be exposed. + // +optional + repeated HostPortRange hostPorts = 7; + + // hostPID determines if the policy allows the use of HostPID in the pod spec. + // +optional + optional bool hostPID = 8; + + // hostIPC determines if the policy allows the use of HostIPC in the pod spec. + // +optional + optional bool hostIPC = 9; + + // seLinux is the strategy that will dictate the allowable labels that may be set. + optional SELinuxStrategyOptions seLinux = 10; + + // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. + optional RunAsUserStrategyOptions runAsUser = 11; + + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + optional SupplementalGroupsStrategyOptions supplementalGroups = 12; + + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. + optional FSGroupStrategyOptions fsGroup = 13; + + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + // +optional + optional bool readOnlyRootFilesystem = 14; + + // defaultAllowPrivilegeEscalation controls the default setting for whether a + // process can gain more privileges than its parent process. + // +optional + optional bool defaultAllowPrivilegeEscalation = 15; + + // allowPrivilegeEscalation determines if a pod can request to allow + // privilege escalation. If unspecified, defaults to true. + // +optional + optional bool allowPrivilegeEscalation = 16; + + // allowedHostPaths is a white list of allowed host paths. Empty indicates + // that all host paths may be used. + // +optional + repeated AllowedHostPath allowedHostPaths = 17; + + // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "volumes" field. + // +optional + repeated AllowedFlexVolume allowedFlexVolumes = 18; + + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. + // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. + // + // Examples: + // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. + // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. 
+ // +optional + repeated string allowedUnsafeSysctls = 19; + + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. + // + // Examples: + // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. + // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. + // +optional + repeated string forbiddenSysctls = 20; +} + +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. +message RunAsUserStrategyOptions { + // rule is the strategy that will dictate the allowable RunAsUser values that may be set. + optional string rule = 1; + + // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + +// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. +message SELinuxStrategyOptions { + // rule is the strategy that will dictate the allowable labels that may be set. + optional string rule = 1; + + // seLinuxOptions required to run as; required for MustRunAs + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; +} + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +message SupplementalGroupsStrategyOptions { + // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // +optional + optional string rule = 1; + + // ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/register.go b/vendor/k8s.io/api/policy/v1beta1/register.go similarity index 78% rename from vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/register.go rename to vendor/k8s.io/api/policy/v1beta1/register.go index 52bd65c8b..b3efd6326 100644 --- a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/register.go +++ b/vendor/k8s.io/api/policy/v1beta1/register.go @@ -34,15 +34,20 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PodDisruptionBudget{}, &PodDisruptionBudgetList{}, + &PodSecurityPolicy{}, + &PodSecurityPolicyList{}, &Eviction{}, ) // Add the watch version that applies diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go new file mode 100644 index 000000000..ba1e4ff31 --- /dev/null +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -0,0 +1,393 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +type PodDisruptionBudgetSpec struct { + // An eviction is allowed if at least "minAvailable" pods selected by + // "selector" will still be available after the eviction, i.e. even in the + // absence of the evicted pod. So for example you can prevent all voluntary + // evictions by specifying "100%". + MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"` + + // Label query over pods whose evictions are managed by the disruption + // budget. + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // An eviction is allowed if at most "maxUnavailable" pods selected by + // "selector" are unavailable after the eviction, i.e. even in absence of + // the evicted pod. For example, one can prevent all voluntary evictions + // by specifying 0. This is a mutually exclusive setting with "minAvailable". + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,3,opt,name=maxUnavailable"` +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +type PodDisruptionBudgetStatus struct { + // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other + // status informatio is valid only if observedGeneration equals to PDB's object generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // DisruptedPods contains information about pods whose eviction was + // processed by the API server eviction subresource handler but has not + // yet been observed by the PodDisruptionBudget controller. + // A pod will be in this map from the time when the API server processed the + // eviction request to the time when the pod is seen by PDB controller + // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod + // and the value is the time when the API server processed the eviction request. If + // the deletion didn't occur and a pod is still there it will be removed from + // the list automatically by PodDisruptionBudget controller after some time. + // If everything goes smooth this map should be empty for the most of the time. + // Large number of entries in the map may indicate problems with pod deletions. + DisruptedPods map[string]metav1.Time `json:"disruptedPods" protobuf:"bytes,2,rep,name=disruptedPods"` + + // Number of pod disruptions that are currently allowed. 
+ PodDisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,3,opt,name=disruptionsAllowed"` + + // current number of healthy pods + CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,4,opt,name=currentHealthy"` + + // minimum desired number of healthy pods + DesiredHealthy int32 `json:"desiredHealthy" protobuf:"varint,5,opt,name=desiredHealthy"` + + // total number of pods counted by this disruption budget + ExpectedPods int32 `json:"expectedPods" protobuf:"varint,6,opt,name=expectedPods"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +type PodDisruptionBudget struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the PodDisruptionBudget. + Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Most recently observed status of the PodDisruptionBudget. + Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +type PodDisruptionBudgetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Eviction evicts a pod from its node subject to certain policies and safety constraints. +// This is a subresource of Pod. A request to cause such an eviction is +// created by POSTing to .../pods//evictions. +type Eviction struct { + metav1.TypeMeta `json:",inline"` + + // ObjectMeta describes the pod that is being evicted. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // DeleteOptions may be provided + DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodSecurityPolicy governs the ability to make requests that affect the Security Context +// that will be applied to a pod and container. +type PodSecurityPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec defines the policy enforced. + // +optional + Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// PodSecurityPolicySpec defines the policy enforced. +type PodSecurityPolicySpec struct { + // privileged determines if a pod can request to be run as privileged. + // +optional + Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` + // defaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // defaultAddCapabilities and requiredDropCapabilities. 
Capabilities added here are implicitly + // allowed, and need not be included in the allowedCapabilities list. + // +optional + DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` + // requiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. + // +optional + RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` + // allowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. + // +optional + AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` + // volumes is a white list of allowed volume plugins. Empty indicates that + // no volumes may be used. To allow all volumes you may use '*'. + // +optional + Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` + // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // +optional + HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` + // hostPorts determines which host port ranges are allowed to be exposed. + // +optional + HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` + // hostPID determines if the policy allows the use of HostPID in the pod spec. + // +optional + HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` + // hostIPC determines if the policy allows the use of HostIPC in the pod spec. + // +optional + HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` + // seLinux is the strategy that will dictate the allowable labels that may be set. + SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` + // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. + RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. + FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + // +optional + ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` + // defaultAllowPrivilegeEscalation controls the default setting for whether a + // process can gain more privileges than its parent process. 
+ // +optional + DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` + // allowPrivilegeEscalation determines if a pod can request to allow + // privilege escalation. If unspecified, defaults to true. + // +optional + AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` + // allowedHostPaths is a white list of allowed host paths. Empty indicates + // that all host paths may be used. + // +optional + AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` + // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "volumes" field. + // +optional + AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. + // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. + // + // Examples: + // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. + // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. + // +optional + AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. + // + // Examples: + // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. + // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. + // +optional + ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` +} + +// AllowedHostPath defines the host volume conditions that will be enabled by a policy +// for pods to use. It requires the path prefix to be defined. +type AllowedHostPath struct { + // pathPrefix is the path prefix that the host volume must match. + // It does not support `*`. + // Trailing slashes are trimmed when validating the path prefix with a host path. + // + // Examples: + // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` + // `/foo` would not allow `/food` or `/etc/foo` + PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` + + // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` +} + +// FSType gives strong typing to different file systems that are used by volumes. 
+type FSType string + +var ( + AzureFile FSType = "azureFile" + Flocker FSType = "flocker" + FlexVolume FSType = "flexVolume" + HostPath FSType = "hostPath" + EmptyDir FSType = "emptyDir" + GCEPersistentDisk FSType = "gcePersistentDisk" + AWSElasticBlockStore FSType = "awsElasticBlockStore" + GitRepo FSType = "gitRepo" + Secret FSType = "secret" + NFS FSType = "nfs" + ISCSI FSType = "iscsi" + Glusterfs FSType = "glusterfs" + PersistentVolumeClaim FSType = "persistentVolumeClaim" + RBD FSType = "rbd" + Cinder FSType = "cinder" + CephFS FSType = "cephFS" + DownwardAPI FSType = "downwardAPI" + FC FSType = "fc" + ConfigMap FSType = "configMap" + Quobyte FSType = "quobyte" + AzureDisk FSType = "azureDisk" + All FSType = "*" +) + +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +type AllowedFlexVolume struct { + // driver is the name of the Flexvolume driver. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` +} + +// HostPortRange defines a range of host ports that will be enabled by a policy +// for pods to use. It requires both the start and end to be defined. +type HostPortRange struct { + // min is the start of the range, inclusive. + Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` + // max is the end of the range, inclusive. + Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` +} + +// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. +type SELinuxStrategyOptions struct { + // rule is the strategy that will dictate the allowable labels that may be set. + Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` + // seLinuxOptions required to run as; required for MustRunAs + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` +} + +// SELinuxStrategy denotes strategy types for generating SELinux options for a +// Security Context. +type SELinuxStrategy string + +const ( + // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. + SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" + // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. + SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" +) + +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. +type RunAsUserStrategyOptions struct { + // rule is the strategy that will dictate the allowable RunAsUser values that may be set. + Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` + // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// IDRange provides a min/max of an allowed range of IDs. +type IDRange struct { + // min is the start of the range, inclusive. + Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` + // max is the end of the range, inclusive. + Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` +} + +// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a +// Security Context. +type RunAsUserStrategy string + +const ( + // RunAsUserStrategyMustRunAs means that container must run as a particular uid. 
+ RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" + // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. + RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" + // RunAsUserStrategyRunAsAny means that container may make requests for any uid. + RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" +) + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +type FSGroupStrategyOptions struct { + // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // +optional + Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` + // ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// FSGroupStrategyType denotes strategy types for generating FSGroup values for a +// SecurityContext +type FSGroupStrategyType string + +const ( + // FSGroupStrategyMustRunAs means that container must have FSGroup of X applied. + FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" + // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. + FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" +) + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +type SupplementalGroupsStrategyOptions struct { + // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // +optional + Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` + // ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental +// groups for a SecurityContext. +type SupplementalGroupsStrategyType string + +const ( + // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. + SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" + // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. + SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodSecurityPolicyList is a list of PodSecurityPolicy objects. +type PodSecurityPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of schema objects.
+ Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..122287645 --- /dev/null +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,210 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AllowedFlexVolume = map[string]string{ + "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", + "driver": "driver is the name of the Flexvolume driver.", +} + +func (AllowedFlexVolume) SwaggerDoc() map[string]string { + return map_AllowedFlexVolume +} + +var map_AllowedHostPath = map[string]string{ + "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined.", + "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", + "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", +} + +func (AllowedHostPath) SwaggerDoc() map[string]string { + return map_AllowedHostPath +} + +var map_Eviction = map[string]string{ + "": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods//evictions.", + "metadata": "ObjectMeta describes the pod that is being evicted.", + "deleteOptions": "DeleteOptions may be provided", +} + +func (Eviction) SwaggerDoc() map[string]string { + return map_Eviction +} + +var map_FSGroupStrategyOptions = map[string]string{ + "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", + "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. 
Required for MustRunAs.", +} + +func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_FSGroupStrategyOptions +} + +var map_HostPortRange = map[string]string{ + "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined.", + "min": "min is the start of the range, inclusive.", + "max": "max is the end of the range, inclusive.", +} + +func (HostPortRange) SwaggerDoc() map[string]string { + return map_HostPortRange +} + +var map_IDRange = map[string]string{ + "": "IDRange provides a min/max of an allowed range of IDs.", + "min": "min is the start of the range, inclusive.", + "max": "max is the end of the range, inclusive.", +} + +func (IDRange) SwaggerDoc() map[string]string { + return map_IDRange +} + +var map_PodDisruptionBudget = map[string]string{ + "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", + "spec": "Specification of the desired behavior of the PodDisruptionBudget.", + "status": "Most recently observed status of the PodDisruptionBudget.", +} + +func (PodDisruptionBudget) SwaggerDoc() map[string]string { + return map_PodDisruptionBudget +} + +var map_PodDisruptionBudgetList = map[string]string{ + "": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.", +} + +func (PodDisruptionBudgetList) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetList +} + +var map_PodDisruptionBudgetSpec = map[string]string{ + "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", + "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", + "selector": "Label query over pods whose evictions are managed by the disruption budget.", + "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", +} + +func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetSpec +} + +var map_PodDisruptionBudgetStatus = map[string]string{ + "": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", + "observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status informatio is valid only if observedGeneration equals to PDB's object generation.", + "disruptedPods": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. 
If everything goes smoothly this map should be empty most of the time. A large number of entries in the map may indicate problems with pod deletions.", + "disruptionsAllowed": "Number of pod disruptions that are currently allowed.", + "currentHealthy": "current number of healthy pods", + "desiredHealthy": "minimum desired number of healthy pods", + "expectedPods": "total number of pods counted by this disruption budget", +} + +func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetStatus +} + +var map_PodSecurityPolicy = map[string]string{ + "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "spec defines the policy enforced.", +} + +func (PodSecurityPolicy) SwaggerDoc() map[string]string { + return map_PodSecurityPolicy +} + +var map_PodSecurityPolicyList = map[string]string{ + "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "items is a list of schema objects.", +} + +func (PodSecurityPolicyList) SwaggerDoc() map[string]string { + return map_PodSecurityPolicyList +} + +var map_PodSecurityPolicySpec = map[string]string{ + "": "PodSecurityPolicySpec defines the policy enforced.", + "privileged": "privileged determines if a pod can request to be run as privileged.", + "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", + "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", + "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", + "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that no volumes may be used.
To allow all volumes you may use '*'.", + "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", + "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", + "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", + "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", + "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", + "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", + "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", + "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", + "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", + "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", + "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", + "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", + "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", + "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", + "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", +} + +func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySpec +} + +var map_RunAsUserStrategyOptions = map[string]string{ + "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", + "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", + "ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. 
Required for MustRunAs.", +} + +func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsUserStrategyOptions +} + +var map_SELinuxStrategyOptions = map[string]string{ + "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.", + "rule": "rule is the strategy that will dictate the allowable labels that may be set.", + "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", +} + +func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { + return map_SELinuxStrategyOptions +} + +var map_SupplementalGroupsStrategyOptions = map[string]string{ + "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", + "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { + return map_SupplementalGroupsStrategyOptions +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..1980bd161 --- /dev/null +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,485 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. +func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { + if in == nil { + return nil + } + out := new(AllowedFlexVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. +func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { + if in == nil { + return nil + } + out := new(AllowedHostPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Eviction) DeepCopyInto(out *Eviction) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.DeleteOptions != nil { + in, out := &in.DeleteOptions, &out.DeleteOptions + if *in == nil { + *out = nil + } else { + *out = new(v1.DeleteOptions) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Eviction. +func (in *Eviction) DeepCopy() *Eviction { + if in == nil { + return nil + } + out := new(Eviction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Eviction) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. +func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { + if in == nil { + return nil + } + out := new(FSGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. +func (in *HostPortRange) DeepCopy() *HostPortRange { + if in == nil { + return nil + } + out := new(HostPortRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IDRange) DeepCopyInto(out *IDRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. +func (in *IDRange) DeepCopy() *IDRange { + if in == nil { + return nil + } + out := new(IDRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDisruptionBudget) DeepCopyInto(out *PodDisruptionBudget) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudget. +func (in *PodDisruptionBudget) DeepCopy() *PodDisruptionBudget { + if in == nil { + return nil + } + out := new(PodDisruptionBudget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodDisruptionBudget) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodDisruptionBudgetList) DeepCopyInto(out *PodDisruptionBudgetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodDisruptionBudget, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetList. +func (in *PodDisruptionBudgetList) DeepCopy() *PodDisruptionBudgetList { + if in == nil { + return nil + } + out := new(PodDisruptionBudgetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodDisruptionBudgetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDisruptionBudgetSpec) DeepCopyInto(out *PodDisruptionBudgetSpec) { + *out = *in + if in.MinAvailable != nil { + in, out := &in.MinAvailable, &out.MinAvailable + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetSpec. +func (in *PodDisruptionBudgetSpec) DeepCopy() *PodDisruptionBudgetSpec { + if in == nil { + return nil + } + out := new(PodDisruptionBudgetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDisruptionBudgetStatus) DeepCopyInto(out *PodDisruptionBudgetStatus) { + *out = *in + if in.DisruptedPods != nil { + in, out := &in.DisruptedPods, &out.DisruptedPods + *out = make(map[string]v1.Time, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetStatus. +func (in *PodDisruptionBudgetStatus) DeepCopy() *PodDisruptionBudgetStatus { + if in == nil { + return nil + } + out := new(PodDisruptionBudgetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. +func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { + if in == nil { + return nil + } + out := new(PodSecurityPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSecurityPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. +func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { + if in == nil { + return nil + } + out := new(PodSecurityPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { + *out = *in + if in.DefaultAddCapabilities != nil { + in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = make([]core_v1.Capability, len(*in)) + copy(*out, *in) + } + if in.RequiredDropCapabilities != nil { + in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]core_v1.Capability, len(*in)) + copy(*out, *in) + } + if in.AllowedCapabilities != nil { + in, out := &in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]core_v1.Capability, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]FSType, len(*in)) + copy(*out, *in) + } + if in.HostPorts != nil { + in, out := &in.HostPorts, &out.HostPorts + *out = make([]HostPortRange, len(*in)) + copy(*out, *in) + } + in.SELinux.DeepCopyInto(&out.SELinux) + in.RunAsUser.DeepCopyInto(&out.RunAsUser) + in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) + in.FSGroup.DeepCopyInto(&out.FSGroup) + if in.DefaultAllowPrivilegeEscalation != nil { + in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.AllowPrivilegeEscalation != nil { + in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.AllowedHostPaths != nil { + in, out := &in.AllowedHostPaths, &out.AllowedHostPaths + *out = make([]AllowedHostPath, len(*in)) + copy(*out, *in) + } + if in.AllowedFlexVolumes != nil { + in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes + *out = make([]AllowedFlexVolume, len(*in)) + copy(*out, *in) + } + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ForbiddenSysctls != nil { + in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. 
+func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { + if in == nil { + return nil + } + out := new(PodSecurityPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. +func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsUserStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + if *in == nil { + *out = nil + } else { + *out = new(core_v1.SELinuxOptions) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. +func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { + if in == nil { + return nil + } + out := new(SELinuxStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. +func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { + if in == nil { + return nil + } + out := new(SupplementalGroupsStrategyOptions) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go similarity index 86% rename from vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/doc.go rename to vendor/k8s.io/api/rbac/v1/doc.go index a9ad4296e..28ceb269b 100644 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/doc.go +++ b/vendor/k8s.io/api/rbac/v1/doc.go @@ -14,5 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + // +groupName=rbac.authorization.k8s.io -package v1beta1 +package v1 // import "k8s.io/api/rbac/v1" diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go new file mode 100644 index 000000000..6dd7d727b --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -0,0 +1,2749 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto + + It has these top-level messages: + AggregationRule + ClusterRole + ClusterRoleBinding + ClusterRoleBindingList + ClusterRoleList + PolicyRule + Role + RoleBinding + RoleBindingList + RoleList + RoleRef + Subject +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func init() { + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1.AggregationRule") + proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.api.rbac.v1.ClusterRoleList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.api.rbac.v1.PolicyRule") + proto.RegisterType((*Role)(nil), "k8s.io.api.rbac.v1.Role") + proto.RegisterType((*RoleBinding)(nil), "k8s.io.api.rbac.v1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "k8s.io.api.rbac.v1.RoleBindingList") + proto.RegisterType((*RoleList)(nil), "k8s.io.api.rbac.v1.RoleList") + proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1.RoleRef") + proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1.Subject") +} +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, msg := range m.ClusterRoleSelectors { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterRole) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.AggregationRule != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *Role) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Role) MarshalTo(dAtA 
[]byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + return i, nil +} + +func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n10, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i += copy(dAtA[i:], m.APIGroup) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} + +func (m 
*Subject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Subject) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i += copy(dAtA[i:], m.APIGroup) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *AggregationRule) Size() (n int) { + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRole) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ClusterRoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if 
len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Role) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleRef) Size() (n int) { + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AggregationRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AggregationRule{`, + `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRole) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRole{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*ClusterRoleBindingList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *Role) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Role{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleRef{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} 
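The MarshalTo and Size helpers above all lean on protobuf's base-128 varint encoding: encodeVarintGenerated emits seven payload bits per byte with the high bit set as a continuation flag, sovGenerated computes how many bytes that takes, and sozGenerated zigzag-encodes signed values so small negative numbers stay short. A minimal standalone Go sketch of the same scheme follows; it is not part of the vendored file, and the names putUvarint and uvarintLen are illustrative only.

package main

import "fmt"

// putUvarint appends v as a base-128 varint: 7 bits per byte,
// high bit set on every byte except the last (the same loop as
// encodeVarintGenerated, but appending instead of indexing).
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarintLen mirrors sovGenerated: the byte count putUvarint would produce.
func uvarintLen(v uint64) (n int) {
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

func main() {
	b := putUvarint(nil, 300) // 300 encodes as 0xac 0x02
	fmt.Printf("% x (len %d)\n", b, uvarintLen(300))
}

The standard library's encoding/binary.PutUvarint and Uvarint implement the same format; the generated Unmarshal functions below decode it inline with the shift-and-continuation-bit loop visible in each field case.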
+func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AggregationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } 
+ return nil +} +func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRole{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Role) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 
4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*RoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Role{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer 
overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 827 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x8b, 0x23, 0x45, + 0x18, 0x4d, 0x65, 0x12, 0x26, 0x5d, 0x31, 0xc4, 0x2d, 0x17, 0x69, 0xa2, 0x74, 0x86, 0x16, 0x24, + 0xa0, 0x76, 0x9b, 0x5d, 0x51, 0x41, 0xf6, 0xb0, 0xbd, 0xa2, 0x0c, 0x3b, 0x8e, 0x4b, 0x2d, 0x7a, + 0x10, 0x0f, 0x56, 0x77, 0x6a, 0x3b, 0x65, 0xfa, 0x17, 0x55, 0xd5, 0x81, 0xc5, 0x8b, 0x08, 0x1e, + 0xbc, 0x79, 0xd4, 0xbf, 0xc0, 0x8b, 0x1e, 0xfd, 0x0b, 0xbc, 0xcc, 0x71, 0x8f, 0x7b, 0x0a, 0x4e, + 0xfb, 0x87, 0x28, 0xfd, 0x2b, 0x9d, 0xa4, 0x3b, 0x4e, 0x4e, 0x01, 0xf1, 0x34, 0x53, 0xdf, 0xf7, + 0xde, 0xfb, 0x5e, 0xbf, 0xa9, 0xaf, 0x06, 0x7e, 0xb0, 0x78, 0x5f, 0x18, 0x2c, 0x34, 0x17, 0xb1, + 0x4d, 0x79, 0x40, 0x25, 0x15, 0xe6, 0x92, 0x06, 0xb3, 0x90, 0x9b, 0x45, 0x83, 0x44, 0xcc, 0xe4, + 0x36, 0x71, 0xcc, 0xe5, 0xd4, 0x74, 0x69, 0x40, 0x39, 0x91, 0x74, 0x66, 0x44, 0x3c, 0x94, 0x21, + 0x42, 0x39, 0xc6, 0x20, 0x11, 0x33, 0x52, 0x8c, 0xb1, 0x9c, 0x8e, 0xde, 0x72, 0x99, 0x9c, 0xc7, + 0xb6, 0xe1, 0x84, 0xbe, 0xe9, 0x86, 0x6e, 0x68, 0x66, 0x50, 0x3b, 0x7e, 0x92, 0x9d, 0xb2, 0x43, + 0xf6, 0x5b, 0x2e, 0x31, 0x9a, 0xd4, 0xc7, 0x10, 0x2f, 0x9a, 0x93, 0xda, 0xb0, 0xd1, 0x3b, 0x15, + 0xd2, 0x27, 0xce, 0x9c, 0x05, 0x94, 0x3f, 0x35, 0xa3, 0x85, 0x9b, 0x16, 0x84, 0xe9, 0x53, 0x49, + 0x1a, 0x2c, 0x8e, 0xcc, 0x7d, 0x2c, 0x1e, 0x07, 0x92, 0xf9, 0xb4, 0x46, 0x78, 0xf7, 0x26, 0x82, + 0x70, 0xe6, 0xd4, 0x27, 0x35, 0xde, 0xdd, 0x7d, 0xbc, 0x58, 0x32, 0xcf, 0x64, 0x81, 0x14, 0x92, + 0xef, 0x92, 0xf4, 0x9f, 0x01, 0x1c, 0xde, 0x77, 0x5d, 0x4e, 0x5d, 0x22, 0x59, 0x18, 0xe0, 0xd8, + 0xa3, 0xe8, 0x7b, 0x00, 0x6f, 0x3b, 0x5e, 0x2c, 0x24, 0xe5, 0x38, 0xf4, 0xe8, 0x63, 0xea, 0x51, + 0x47, 0x86, 0x5c, 0xa8, 0xe0, 0xec, 0x64, 0xd2, 0xbf, 0x73, 0xd7, 0xa8, 0x42, 0x5f, 0x0f, 0x32, + 0xa2, 0x85, 0x9b, 0x16, 0x84, 0x91, 0xe6, 0x60, 0x2c, 0xa7, 0xc6, 0x05, 0xb1, 0xa9, 0x57, 0x72, + 0xad, 0x57, 0xaf, 0x56, 0xe3, 0x56, 0xb2, 0x1a, 0xdf, 0x7e, 0xd0, 0x20, 0x8c, 0x1b, 0xc7, 0xe9, + 0x3f, 0xb5, 0x61, 0x7f, 0x03, 0x8e, 0xbe, 0x82, 0xbd, 0x54, 0x7c, 0x46, 0x24, 0x51, 0xc1, 0x19, + 0x98, 0xf4, 0xef, 0xbc, 0x7d, 0x98, 0x95, 0x4f, 0xed, 0xaf, 0xa9, 0x23, 0x3f, 0xa1, 0x92, 0x58, + 0xa8, 0xf0, 0x01, 0xab, 0x1a, 0x5e, 0xab, 0xa2, 0x07, 0xb0, 0xcb, 0x63, 0x8f, 0x0a, 0xb5, 0x9d, + 0x7d, 0xa9, 0x66, 0xd4, 0xaf, 0x97, 0xf1, 0x28, 0xf4, 0x98, 0xf3, 0x34, 0x0d, 0xca, 0x1a, 0x14, + 0x62, 0xdd, 0xf4, 0x24, 0x70, 0xce, 0x45, 0x36, 0x1c, 0x92, 0xed, 0x44, 0xd5, 0x93, 0xcc, 0xed, + 0x6b, 0x4d, 0x72, 0x3b, 0xe1, 0x5b, 0x2f, 0x25, 0xab, 0xf1, 0xee, 0x5f, 0x04, 0xef, 0x0a, 0xea, + 0x3f, 0xb4, 0x21, 0xda, 0x88, 0xc6, 0x62, 0xc1, 0x8c, 0x05, 0xee, 0x11, 0x12, 0x3a, 0x87, 0x3d, + 0x11, 0x67, 0x8d, 0x32, 0xa4, 0x57, 0x9a, 0xbe, 0xea, 0x71, 0x8e, 0xb1, 0x5e, 0x2c, 0xc4, 0x7a, + 0x45, 0x41, 0xe0, 0x35, 0x1d, 0x7d, 0x04, 0x4f, 0x79, 0xe8, 0x51, 0x4c, 0x9f, 0x14, 0xf9, 0x34, + 0x2a, 0xe1, 0x1c, 0x62, 0x0d, 0x0b, 0xa5, 0xd3, 0xa2, 0x80, 0x4b, 0xb2, 0xfe, 0x07, 0x80, 0x2f, + 0xd7, 0xb3, 0xb8, 0x60, 0x42, 0xa2, 0x2f, 0x6b, 0x79, 0x18, 0x07, 0x5e, 0x5e, 0x26, 0xf2, 0x34, + 0xd6, 0x1f, 0x50, 0x56, 0x36, 0xb2, 0x78, 0x08, 0xbb, 0x4c, 0x52, 0xbf, 0x0c, 0xe2, 0xf5, 0x26, + 0xfb, 0x75, 0x63, 0xd5, 0xad, 0x39, 0x4f, 0xc9, 0x38, 0xd7, 0xd0, 0x7f, 0x07, 0x70, 0xb8, 0x01, + 0x3e, 0x82, 0xfd, 0x0f, 0xb7, 0xed, 0x8f, 0x6f, 0xb2, 0xdf, 0xec, 0xfb, 0x6f, 0x00, 
0x61, 0xb5, + 0x12, 0x68, 0x0c, 0xbb, 0x4b, 0xca, 0xed, 0xfc, 0xad, 0x50, 0x2c, 0x25, 0xc5, 0x7f, 0x9e, 0x16, + 0x70, 0x5e, 0x47, 0x6f, 0x40, 0x85, 0x44, 0xec, 0x63, 0x1e, 0xc6, 0x51, 0x3e, 0x59, 0xb1, 0x06, + 0xc9, 0x6a, 0xac, 0xdc, 0x7f, 0x74, 0x9e, 0x17, 0x71, 0xd5, 0x4f, 0xc1, 0x9c, 0x8a, 0x30, 0xe6, + 0x0e, 0x15, 0xea, 0x49, 0x05, 0xc6, 0x65, 0x11, 0x57, 0x7d, 0xf4, 0x1e, 0x1c, 0x94, 0x87, 0x4b, + 0xe2, 0x53, 0xa1, 0x76, 0x32, 0xc2, 0xad, 0x64, 0x35, 0x1e, 0xe0, 0xcd, 0x06, 0xde, 0xc6, 0xa1, + 0x7b, 0x70, 0x18, 0x84, 0x41, 0x09, 0xf9, 0x0c, 0x5f, 0x08, 0xb5, 0x9b, 0x51, 0xb3, 0x5d, 0xbc, + 0xdc, 0x6e, 0xe1, 0x5d, 0xac, 0xfe, 0x1b, 0x80, 0x9d, 0xff, 0xd0, 0xfb, 0xa4, 0x7f, 0xd7, 0x86, + 0xfd, 0xff, 0xfd, 0xa3, 0x91, 0xae, 0xdb, 0x71, 0x5f, 0x8b, 0x43, 0xd6, 0xed, 0xe6, 0x67, 0xe2, + 0x17, 0x00, 0x7b, 0x47, 0x7a, 0x1f, 0xee, 0x6d, 0x1b, 0x56, 0xf7, 0x1a, 0x6e, 0x76, 0xfa, 0x0d, + 0x2c, 0x53, 0x47, 0x6f, 0xc2, 0x5e, 0xb9, 0xd3, 0x99, 0x4f, 0xa5, 0x9a, 0x5b, 0xae, 0x3d, 0x5e, + 0x23, 0xd0, 0x19, 0xec, 0x2c, 0x58, 0x30, 0x53, 0xdb, 0x19, 0xf2, 0x85, 0x02, 0xd9, 0x79, 0xc8, + 0x82, 0x19, 0xce, 0x3a, 0x29, 0x22, 0x20, 0x7e, 0xfe, 0x6f, 0x75, 0x03, 0x91, 0x6e, 0x33, 0xce, + 0x3a, 0xfa, 0xaf, 0x00, 0x9e, 0x16, 0xb7, 0x67, 0xad, 0x07, 0xf6, 0xea, 0x6d, 0xfa, 0x6b, 0x1f, + 0xe2, 0xef, 0xdf, 0xa7, 0x23, 0x13, 0x2a, 0xe9, 0x4f, 0x11, 0x11, 0x87, 0xaa, 0x9d, 0x0c, 0x76, + 0xab, 0x80, 0x29, 0x97, 0x65, 0x03, 0x57, 0x18, 0x6b, 0x72, 0x75, 0xad, 0xb5, 0x9e, 0x5d, 0x6b, + 0xad, 0xe7, 0xd7, 0x5a, 0xeb, 0xdb, 0x44, 0x03, 0x57, 0x89, 0x06, 0x9e, 0x25, 0x1a, 0x78, 0x9e, + 0x68, 0xe0, 0xcf, 0x44, 0x03, 0x3f, 0xfe, 0xa5, 0xb5, 0xbe, 0x68, 0x2f, 0xa7, 0xff, 0x04, 0x00, + 0x00, 0xff, 0xff, 0x32, 0xe3, 0x23, 0xf8, 0x2e, 0x0b, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto similarity index 88% rename from vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.proto rename to vendor/k8s.io/api/rbac/v1/generated.proto index 542e2b027..78aca15bc 100644 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,17 +19,24 @@ limitations under the License. syntax = 'proto2'; -package k8s.io.kubernetes.pkg.apis.rbac.v1beta1; +package k8s.io.api.rbac.v1; +import "k8s.io/api/rbac/v1alpha1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "v1"; + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +message AggregationRule { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
+ // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; +} // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. message ClusterRole { @@ -39,6 +46,12 @@ message ClusterRole { // Rules holds all the PolicyRules for this ClusterRole repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + optional AggregationRule aggregationRule = 3; } // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, @@ -49,6 +62,7 @@ message ClusterRoleBinding { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. + // +optional repeated Subject subjects = 2; // RoleRef can only reference a ClusterRole in the global namespace. @@ -56,14 +70,6 @@ message ClusterRoleBinding { optional RoleRef roleRef = 3; } -// +k8s:deepcopy-gen=false -// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. -// We use it to construct bindings in code. It's more compact than trying to write them -// out in a literal. -message ClusterRoleBindingBuilder { - optional ClusterRoleBinding clusterRoleBinding = 1; -} - // ClusterRoleBindingList is a collection of ClusterRoleBindings message ClusterRoleBindingList { // Standard object's metadata. @@ -110,14 +116,6 @@ message PolicyRule { repeated string nonResourceURLs = 5; } -// +k8s:deepcopy-gen=false -// PolicyRuleBuilder let's us attach methods. A no-no for API types. -// We use it to construct rules in code. It's more compact than trying to write them -// out in a literal and allows us to perform some basic checking during construction -message PolicyRuleBuilder { - optional PolicyRule policyRule = 1; -} - // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. message Role { // Standard object's metadata. @@ -137,6 +135,7 @@ message RoleBinding { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. + // +optional repeated Subject subjects = 2; // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/register.go b/vendor/k8s.io/api/rbac/v1/register.go similarity index 71% rename from vendor/k8s.io/client-go/pkg/apis/rbac/register.go rename to vendor/k8s.io/api/rbac/v1/register.go index f4a838bd8..8f1fd460a 100644 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/register.go +++ b/vendor/k8s.io/api/rbac/v1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,9 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package rbac +package v1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -24,12 +25,7 @@ import ( const GroupName = "rbac.authorization.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { @@ -37,11 +33,14 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Role{}, @@ -54,5 +53,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ClusterRoleBindingList{}, &ClusterRoleList{}, ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1/types.go similarity index 83% rename from vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.go rename to vendor/k8s.io/api/rbac/v1/types.go index 89a47db9e..17163cbb2 100644 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.go +++ b/vendor/k8s.io/api/rbac/v1/types.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -97,7 +97,8 @@ type RoleRef struct { Name string `json:"name" protobuf:"bytes,3,opt,name=name"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. type Role struct { @@ -110,7 +111,8 @@ type Role struct { Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given @@ -122,13 +124,16 @@ type RoleBinding struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Subjects holds references to the objects the role applies to. - Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` + // +optional + Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
// If the RoleRef cannot be resolved, the Authorizer must return an error. RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // RoleBindingList is a collection of RoleBindings type RoleBindingList struct { metav1.TypeMeta `json:",inline"` @@ -140,6 +145,8 @@ type RoleBindingList struct { Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // RoleList is a collection of Roles type RoleList struct { metav1.TypeMeta `json:",inline"` @@ -151,8 +158,9 @@ type RoleList struct { Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +genclient=true -// +nonNamespaced=true +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. type ClusterRole struct { @@ -163,10 +171,25 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` } -// +genclient=true -// +nonNamespaced=true +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. @@ -177,13 +200,16 @@ type ClusterRoleBinding struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Subjects holds references to the objects the role applies to. - Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` + // +optional + Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can only reference a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. 
RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ClusterRoleBindingList is a collection of ClusterRoleBindings type ClusterRoleBindingList struct { metav1.TypeMeta `json:",inline"` @@ -195,6 +221,8 @@ type ClusterRoleBindingList struct { Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ClusterRoleList is a collection of ClusterRoles type ClusterRoleList struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go similarity index 86% rename from vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 1463d8fea..0ec20c88e 100644 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more @@ -26,11 +26,21 @@ package v1beta1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AggregationRule = map[string]string{ + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", +} + +func (AggregationRule) SwaggerDoc() map[string]string { + return map_AggregationRule +} + var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this ClusterRole", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", } func (ClusterRole) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..be1592f77 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go @@ -0,0 +1,393 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]meta_v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + if *in == nil { + *out = nil + } else { + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole. +func (in *ClusterRole) DeepCopy() *ClusterRole { + if in == nil { + return nil + } + out := new(ClusterRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRole) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding. +func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding { + if in == nil { + return nil + } + out := new(ClusterRoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList. +func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList { + if in == nil { + return nil + } + out := new(ClusterRoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList. +func (in *ClusterRoleList) DeepCopy() *ClusterRoleList { + if in == nil { + return nil + } + out := new(ClusterRoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. +func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Role) DeepCopyInto(out *Role) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. +func (in *Role) DeepCopy() *Role { + if in == nil { + return nil + } + out := new(Role) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Role) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBinding) DeepCopyInto(out *RoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. +func (in *RoleBinding) DeepCopy() *RoleBinding { + if in == nil { + return nil + } + out := new(RoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList. +func (in *RoleBindingList) DeepCopy() *RoleBindingList { + if in == nil { + return nil + } + out := new(RoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleList) DeepCopyInto(out *RoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. +func (in *RoleList) DeepCopy() *RoleList { + if in == nil { + return nil + } + out := new(RoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *RoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleRef) DeepCopyInto(out *RoleRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef. +func (in *RoleRef) DeepCopy() *RoleRef { + if in == nil { + return nil + } + out := new(RoleRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. +func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/doc.go b/vendor/k8s.io/api/rbac/v1alpha1/doc.go similarity index 84% rename from vendor/k8s.io/client-go/pkg/apis/rbac/doc.go rename to vendor/k8s.io/api/rbac/v1alpha1/doc.go index c5f057484..5236a477f 100644 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/doc.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/doc.go @@ -14,5 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + // +groupName=rbac.authorization.k8s.io -package rbac +package v1alpha1 // import "k8s.io/api/rbac/v1alpha1" diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go similarity index 69% rename from vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.pb.go rename to vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index 406c12c17..b8cf5431a 100644 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,23 +15,22 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto // DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto + k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto It has these top-level messages: + AggregationRule ClusterRole ClusterRoleBinding - ClusterRoleBindingBuilder ClusterRoleBindingList ClusterRoleList PolicyRule - PolicyRuleBuilder Role RoleBinding RoleBindingList @@ -45,6 +44,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + import strings "strings" import reflect "reflect" @@ -57,21 +58,21 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ClusterRoleBindingBuilder) Reset() { *m = ClusterRoleBindingBuilder{} } -func (*ClusterRoleBindingBuilder) ProtoMessage() {} -func (*ClusterRoleBindingBuilder) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} @@ -85,152 +86,161 @@ func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -func (m *PolicyRuleBuilder) Reset() { *m = PolicyRuleBuilder{} } -func (*PolicyRuleBuilder) ProtoMessage() {} -func (*PolicyRuleBuilder) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func init() { - proto.RegisterType((*ClusterRole)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.ClusterRole") - proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.ClusterRoleBinding") - 
proto.RegisterType((*ClusterRoleBindingBuilder)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.ClusterRoleBindingBuilder") - proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.ClusterRoleBindingList") - proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.ClusterRoleList") - proto.RegisterType((*PolicyRule)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.PolicyRule") - proto.RegisterType((*PolicyRuleBuilder)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.PolicyRuleBuilder") - proto.RegisterType((*Role)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.Role") - proto.RegisterType((*RoleBinding)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.RoleBinding") - proto.RegisterType((*RoleBindingList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.RoleBindingList") - proto.RegisterType((*RoleList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.RoleList") - proto.RegisterType((*RoleRef)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.RoleRef") - proto.RegisterType((*Subject)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.Subject") + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1alpha1.AggregationRule") + proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.api.rbac.v1alpha1.PolicyRule") + proto.RegisterType((*Role)(nil), "k8s.io.api.rbac.v1alpha1.Role") + proto.RegisterType((*RoleBinding)(nil), "k8s.io.api.rbac.v1alpha1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "k8s.io.api.rbac.v1alpha1.RoleBindingList") + proto.RegisterType((*RoleList)(nil), "k8s.io.api.rbac.v1alpha1.RoleList") + proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1alpha1.RoleRef") + proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1alpha1.Subject") } -func (m *ClusterRole) Marshal() (data []byte, err error) { +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ClusterRole) MarshalTo(data []byte) (int, error) { +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + if len(m.ClusterRoleSelectors) > 0 { + for _, msg := range m.ClusterRoleSelectors { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterRole) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 if len(m.Rules) > 0 { for _, msg := range m.Rules { - data[i] 
= 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } + if m.AggregationRule != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } return i, nil } -func (m *ClusterRoleBinding) Marshal() (data []byte, err error) { +func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ClusterRoleBinding) MarshalTo(data []byte) (int, error) { +func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) - n3, err := m.RoleRef.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n3 - return i, nil -} - -func (m *ClusterRoleBindingBuilder) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - return data[:n], nil -} - -func (m *ClusterRoleBindingBuilder) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.ClusterRoleBinding.Size())) - n4, err := m.ClusterRoleBinding.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -238,35 +248,35 @@ func (m *ClusterRoleBindingBuilder) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *ClusterRoleBindingList) Marshal() (data []byte, err error) { +func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ClusterRoleBindingList) MarshalTo(data []byte) (int, error) { +func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n5 if 
len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -276,35 +286,35 @@ func (m *ClusterRoleBindingList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *ClusterRoleList) Marshal() (data []byte, err error) { +func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ClusterRoleList) MarshalTo(data []byte) (int, error) { +func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -314,238 +324,250 @@ func (m *ClusterRoleList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *PolicyRule) Marshal() (data []byte, err error) { +func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PolicyRule) MarshalTo(data []byte) (int, error) { +func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Verbs) > 0 { for _, s := range m.Verbs { - data[i] = 0xa + dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if len(m.APIGroups) > 0 { for _, s := range m.APIGroups { - data[i] = 0x1a + dAtA[i] = 0x1a i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if len(m.Resources) > 0 { for _, s := range m.Resources { - data[i] = 0x22 + dAtA[i] = 0x22 i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if len(m.ResourceNames) > 0 { for _, s := range m.ResourceNames { - data[i] = 0x2a + dAtA[i] = 0x2a i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if len(m.NonResourceURLs) > 0 { for _, s := range m.NonResourceURLs { - data[i] = 0x32 + dAtA[i] = 0x32 i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 
0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } return i, nil } -func (m *PolicyRuleBuilder) Marshal() (data []byte, err error) { +func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PolicyRuleBuilder) MarshalTo(data []byte) (int, error) { +func (m *Role) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.PolicyRule.Size())) - n7, err := m.PolicyRule.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n7 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } -func (m *Role) Marshal() (data []byte, err error) { +func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Role) MarshalTo(data []byte) (int, error) { +func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n8 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - data[i] = 0x12 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - return i, nil -} - -func (m *RoleBinding) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *RoleBinding) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n9 - if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - data[i] = 0x12 + return i, nil +} + +func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n10, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + 
return 0, err + } + i += n10 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) - n10, err := m.RoleRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n10 return i, nil } -func (m *RoleBindingList) Marshal() (data []byte, err error) { +func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *RoleBindingList) MarshalTo(data []byte) (int, error) { +func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -555,135 +577,109 @@ func (m *RoleBindingList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *RoleList) Marshal() (data []byte, err error) { +func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *RoleList) MarshalTo(data []byte) (int, error) { +func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n12, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n12 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i += copy(dAtA[i:], m.APIGroup) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) return i, nil } -func (m *RoleRef) Marshal() (data []byte, err error) { +func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *RoleRef) MarshalTo(data []byte) (int, error) { +func (m *Subject) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIGroup))) - i += copy(data[i:], 
m.APIGroup) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) return i, nil } -func (m *Subject) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Subject) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) - i += copy(data[i:], m.Namespace) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintGenerated(data []byte, offset int, v uint64) int { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } +func (m *AggregationRule) Size() (n int) { + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ClusterRole) Size() (n int) { var l int _ = l @@ -695,6 +691,10 @@ func (m *ClusterRole) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -714,14 +714,6 @@ func (m *ClusterRoleBinding) Size() (n int) { return n } -func (m *ClusterRoleBindingBuilder) Size() (n int) { - var l int - _ = l - l = 
m.ClusterRoleBinding.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *ClusterRoleBindingList) Size() (n int) { var l int _ = l @@ -786,14 +778,6 @@ func (m *PolicyRule) Size() (n int) { return n } -func (m *PolicyRuleBuilder) Size() (n int) { - var l int - _ = l - l = m.PolicyRule.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *Role) Size() (n int) { var l int _ = l @@ -891,6 +875,16 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *AggregationRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AggregationRule{`, + `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ClusterRole) String() string { if this == nil { return "nil" @@ -898,6 +892,7 @@ func (this *ClusterRole) String() string { s := strings.Join([]string{`&ClusterRole{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -914,16 +909,6 @@ func (this *ClusterRoleBinding) String() string { }, "") return s } -func (this *ClusterRoleBindingBuilder) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterRoleBindingBuilder{`, - `ClusterRoleBinding:` + strings.Replace(strings.Replace(this.ClusterRoleBinding.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" @@ -960,16 +945,6 @@ func (this *PolicyRule) String() string { }, "") return s } -func (this *PolicyRuleBuilder) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PolicyRuleBuilder{`, - `PolicyRule:` + strings.Replace(strings.Replace(this.PolicyRule.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *Role) String() string { if this == nil { return "nil" @@ -1048,8 +1023,8 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *ClusterRole) Unmarshal(data []byte) error { - l := len(data) +func (m *AggregationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1061,7 +1036,88 @@ func (m *ClusterRole) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1089,7 +1145,7 @@ func (m *ClusterRole) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1103,7 +1159,7 @@ func (m *ClusterRole) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1119,7 +1175,7 @@ func (m *ClusterRole) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1134,13 +1190,46 @@ func (m *ClusterRole) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Rules = append(m.Rules, PolicyRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1159,8 +1248,8 @@ func (m *ClusterRole) Unmarshal(data []byte) error { } return nil } -func (m *ClusterRoleBinding) Unmarshal(data []byte) error { - l := len(data) +func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ 
-1172,7 +1261,7 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1200,7 +1289,7 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1214,7 +1303,7 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1230,7 +1319,7 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1245,7 +1334,7 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Subjects = append(m.Subjects, Subject{}) - if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1261,7 +1350,7 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1275,13 +1364,13 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1300,8 +1389,8 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { } return nil } -func (m *ClusterRoleBindingBuilder) Unmarshal(data []byte) error { - l := len(data) +func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1313,87 +1402,7 @@ func (m *ClusterRoleBindingBuilder) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleBindingBuilder: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleBindingBuilder: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleBinding", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ClusterRoleBinding.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err 
:= skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1421,7 +1430,7 @@ func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1435,7 +1444,7 @@ func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1451,7 +1460,7 @@ func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1466,13 +1475,13 @@ func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, ClusterRoleBinding{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1491,8 +1500,8 @@ func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { } return nil } -func (m *ClusterRoleList) Unmarshal(data []byte) error { - l := len(data) +func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1504,7 +1513,7 @@ func (m *ClusterRoleList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1532,7 +1541,7 @@ func (m *ClusterRoleList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1546,7 +1555,7 @@ func (m *ClusterRoleList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1562,7 +1571,7 @@ func (m *ClusterRoleList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1577,13 +1586,13 @@ func (m *ClusterRoleList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, ClusterRole{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = 
preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1602,8 +1611,8 @@ func (m *ClusterRoleList) Unmarshal(data []byte) error { } return nil } -func (m *PolicyRule) Unmarshal(data []byte) error { - l := len(data) +func (m *PolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1615,7 +1624,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1643,7 +1652,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1658,7 +1667,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Verbs = append(m.Verbs, string(data[iNdEx:postIndex])) + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { @@ -1672,7 +1681,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1687,7 +1696,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroups = append(m.APIGroups, string(data[iNdEx:postIndex])) + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 4: if wireType != 2 { @@ -1701,7 +1710,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1716,7 +1725,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Resources = append(m.Resources, string(data[iNdEx:postIndex])) + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 5: if wireType != 2 { @@ -1730,7 +1739,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1745,7 +1754,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceNames = append(m.ResourceNames, string(data[iNdEx:postIndex])) + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 6: if wireType != 2 { @@ -1759,7 +1768,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1774,11 +1783,11 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.NonResourceURLs = append(m.NonResourceURLs, string(data[iNdEx:postIndex])) + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1797,8 +1806,8 @@ func (m *PolicyRule) Unmarshal(data []byte) error { } return nil } -func (m *PolicyRuleBuilder) Unmarshal(data []byte) error { - l := len(data) +func (m *Role) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1810,87 +1819,7 @@ func (m *PolicyRuleBuilder) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PolicyRuleBuilder: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PolicyRuleBuilder: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PolicyRule", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PolicyRule.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Role) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1918,7 +1847,7 @@ func (m *Role) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1932,7 +1861,7 @@ func (m *Role) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1948,7 +1877,7 @@ func (m *Role) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1963,13 +1892,13 @@ func (m *Role) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Rules = append(m.Rules, PolicyRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1988,8 +1917,8 @@ func (m *Role) Unmarshal(data []byte) error { } return nil } -func (m *RoleBinding) Unmarshal(data []byte) error { - l := len(data) +func (m *RoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2001,7 +1930,7 @@ func (m *RoleBinding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := 
dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2029,7 +1958,7 @@ func (m *RoleBinding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2043,7 +1972,7 @@ func (m *RoleBinding) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2059,7 +1988,7 @@ func (m *RoleBinding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2074,7 +2003,7 @@ func (m *RoleBinding) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Subjects = append(m.Subjects, Subject{}) - if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2090,7 +2019,7 @@ func (m *RoleBinding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2104,13 +2033,13 @@ func (m *RoleBinding) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2129,8 +2058,8 @@ func (m *RoleBinding) Unmarshal(data []byte) error { } return nil } -func (m *RoleBindingList) Unmarshal(data []byte) error { - l := len(data) +func (m *RoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2142,7 +2071,7 @@ func (m *RoleBindingList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2170,7 +2099,7 @@ func (m *RoleBindingList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2184,7 +2113,7 @@ func (m *RoleBindingList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2200,7 +2129,7 @@ func (m *RoleBindingList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2215,13 +2144,13 @@ func (m *RoleBindingList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, RoleBinding{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2240,8 
+2169,8 @@ func (m *RoleBindingList) Unmarshal(data []byte) error { } return nil } -func (m *RoleList) Unmarshal(data []byte) error { - l := len(data) +func (m *RoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2253,7 +2182,7 @@ func (m *RoleList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2281,7 +2210,7 @@ func (m *RoleList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2295,7 +2224,7 @@ func (m *RoleList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2311,7 +2240,7 @@ func (m *RoleList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2326,13 +2255,13 @@ func (m *RoleList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, Role{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2351,8 +2280,8 @@ func (m *RoleList) Unmarshal(data []byte) error { } return nil } -func (m *RoleRef) Unmarshal(data []byte) error { - l := len(data) +func (m *RoleRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2364,7 +2293,7 @@ func (m *RoleRef) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2392,7 +2321,7 @@ func (m *RoleRef) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2407,7 +2336,7 @@ func (m *RoleRef) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroup = string(data[iNdEx:postIndex]) + m.APIGroup = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2421,7 +2350,7 @@ func (m *RoleRef) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2436,7 +2365,7 @@ func (m *RoleRef) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(data[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -2450,7 +2379,7 @@ func (m *RoleRef) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2465,11 +2394,11 @@ func (m *RoleRef) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - 
skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2488,8 +2417,8 @@ func (m *RoleRef) Unmarshal(data []byte) error { } return nil } -func (m *Subject) Unmarshal(data []byte) error { - l := len(data) +func (m *Subject) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2501,7 +2430,7 @@ func (m *Subject) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2529,7 +2458,7 @@ func (m *Subject) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2544,7 +2473,7 @@ func (m *Subject) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(data[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2558,7 +2487,7 @@ func (m *Subject) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2573,7 +2502,7 @@ func (m *Subject) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(data[iNdEx:postIndex]) + m.APIVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -2587,7 +2516,7 @@ func (m *Subject) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2602,7 +2531,7 @@ func (m *Subject) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -2616,7 +2545,7 @@ func (m *Subject) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2631,11 +2560,11 @@ func (m *Subject) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(data[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2654,8 +2583,8 @@ func (m *Subject) Unmarshal(data []byte) error { } return nil } -func skipGenerated(data []byte) (n int, err error) { - l := len(data) +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -2666,7 +2595,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2684,7 +2613,7 @@ func skipGenerated(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -2701,7 +2630,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2724,7 +2653,7 @@ func skipGenerated(data []byte) (n int, err error) { if 
iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2735,7 +2664,7 @@ func skipGenerated(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipGenerated(data[start:]) + next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } @@ -2759,59 +2688,63 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -var fileDescriptorGenerated = []byte{ - // 847 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x54, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xce, 0xb4, 0x09, 0x4d, 0x5e, 0xa9, 0x4a, 0x07, 0x09, 0x99, 0x1e, 0x9c, 0xca, 0xa7, 0x0a, - 0x16, 0x9b, 0x96, 0x05, 0xf6, 0x00, 0x87, 0x35, 0x07, 0x54, 0xb1, 0x94, 0x6a, 0x56, 0xac, 0xc4, - 0x6a, 0x25, 0x98, 0x38, 0xb3, 0xc9, 0x10, 0xff, 0xd2, 0x8c, 0x1d, 0xb1, 0x42, 0x48, 0x1c, 0x39, - 0xf2, 0x57, 0x70, 0xe4, 0x80, 0xc4, 0x91, 0x13, 0x97, 0x0a, 0x2e, 0x3d, 0xc2, 0x25, 0xa2, 0xe6, - 0x0f, 0x01, 0x79, 0x3c, 0xfe, 0x51, 0x9c, 0xaa, 0x3f, 0x90, 0x22, 0x21, 0x71, 0x4a, 0xfc, 0xde, - 0xf7, 0x7d, 0xf3, 0xbe, 0x79, 0xf6, 0x07, 0xf7, 0x66, 0xf7, 0xa4, 0xcd, 0x23, 0x67, 0x96, 0x8e, - 0x98, 0x08, 0x59, 0xc2, 0xa4, 0x13, 0xcf, 0x26, 0x0e, 0x8d, 0xb9, 0x74, 0xc4, 0x88, 0x7a, 0xce, - 0xfc, 0x80, 0xfa, 0xf1, 0x94, 0x1e, 0x38, 0x13, 0x16, 0x32, 0x41, 0x13, 0x36, 0xb6, 0x63, 0x11, - 0x25, 0x11, 0xde, 0x2f, 0x98, 0x76, 0xcd, 0xb4, 0xe3, 0xd9, 0xc4, 0xce, 0x99, 0x76, 0xce, 0xb4, - 0x4b, 0xe6, 0xee, 0x6b, 0x13, 0x9e, 0x4c, 0xd3, 0x91, 0xed, 0x45, 0x81, 0x33, 0x89, 0x26, 0x91, - 0xa3, 0x04, 0x46, 0xe9, 0x53, 0xf5, 0xa4, 0x1e, 0xd4, 0xbf, 0x42, 0x78, 0xf7, 0xae, 0x1e, 0x89, - 0xc6, 0x3c, 0xa0, 0xde, 0x94, 0x87, 0x4c, 0x3c, 0xab, 0x87, 0x0a, 0x58, 0x42, 0x9d, 0x79, 0x6b, - 0x9c, 0x5d, 0xe7, 0x32, 0x96, 0x48, 0xc3, 0x84, 0x07, 0xac, 0x45, 0x78, 0xeb, 0x2a, 0x82, 0xf4, - 0xa6, 0x2c, 0xa0, 0x2d, 0xde, 0x1b, 0x97, 0xf1, 0xd2, 0x84, 0xfb, 0x0e, 0x0f, 0x13, 0x99, 0x88, - 0x16, 0xa9, 0xe1, 0x49, 0x32, 0x31, 0x67, 0xa2, 0x36, 0xc4, 0xbe, 0xa0, 0x41, 0xec, 0xb3, 0x65, - 0x9e, 0xee, 0x5c, 0xba, 0x9c, 0x25, 0x68, 0xeb, 0x17, 0x04, 0x9b, 0xef, 0xf9, 0xa9, 0x4c, 0x98, - 0x20, 0x91, 0xcf, 0xf0, 0x67, 0xd0, 0xcf, 0x2f, 0x6b, 0x4c, 0x13, 0x6a, 0xa0, 0x3d, 0xb4, 0xbf, - 0x79, 0xf8, 0xba, 0xad, 0x77, 0xd6, 0x9c, 0xbd, 0xde, 0x5a, 0x8e, 0xb6, 0xe7, 0x07, 0xf6, 0x47, - 0xa3, 0xcf, 0x99, 0x97, 0x7c, 0xc8, 0x12, 0xea, 0xe2, 0xd3, 0xc5, 0xb0, 0x93, 0x2d, 0x86, 0x50, - 0xd7, 0x48, 0xa5, 0x8a, 0x3f, 0x81, 0x9e, 0x48, 0x7d, 0x26, 0x8d, 0xb5, 0xbd, 0xf5, 0xfd, 0xcd, - 0xc3, 0xbb, 0xf6, 0x75, 0x5f, 0x09, 0xfb, 0x24, 0xf2, 0xb9, 0xf7, 0x8c, 0xa4, 0x3e, 0x73, 0xb7, - 0xf4, 0x11, 0xbd, 0xfc, 0x49, 0x92, 0x42, 0xd1, 0xfa, 0x71, 0x0d, 0x70, 0xc3, 0x8c, 0xcb, 0xc3, - 0x31, 0x0f, 0x27, 0x2b, 0xf0, 0xf4, 0x29, 0xf4, 0x65, 0xaa, 0x1a, 0xa5, 0xad, 0x83, 0xeb, 0xdb, - 0x7a, 0x58, 0x30, 0xdd, 0x17, 0xf4, 0x11, 0x7d, 0x5d, 0x90, 0xa4, 0x12, 0xc5, 0x4f, 0x60, 0x43, - 0x44, 0x3e, 0x23, 0xec, 0xa9, 0xb1, 0xae, 0x1c, 0xdc, 0x40, 0x9f, 0x14, 0x44, 0x77, 0x5b, 0xeb, - 0x6f, 0xe8, 0x02, 0x29, 0x25, 0xad, 0xef, 0x10, 0xbc, 0xdc, 0xbe, 0x37, 0x37, 0xe5, 0xfe, 0x98, - 0x09, 0xfc, 0x0d, 0x02, 0xec, 0xb5, 0xba, 0xfa, 0x26, 0xdf, 0xb9, 0xfe, 0x1c, 0x4b, 0x4e, 0xd8, - 0xd5, 0x23, 0x2d, 0xd9, 0x1a, 0x59, 0x72, 0xa6, 0xf5, 0x3b, 0x82, 0x97, 0xda, 0xd0, 0x07, 0x5c, - 0x26, 0xf8, 0x49, 0x6b, 0xc9, 0xf6, 0xf5, 0x96, 0x9c, 0xb3, 0xd5, 0x8a, 0xab, 0xfb, 0x2f, 0x2b, - 0x8d, 0x05, 0x53, 0xe8, 0xf1, 0x84, 0x05, 0xe5, 0x76, 
0xff, 0x9d, 0xeb, 0xea, 0xe5, 0x3d, 0xca, - 0x25, 0x49, 0xa1, 0x6c, 0xfd, 0x8a, 0x60, 0xbb, 0x01, 0x5e, 0x81, 0xa9, 0xc7, 0x17, 0x4d, 0xbd, - 0x79, 0x3b, 0x53, 0xcb, 0xdd, 0xfc, 0x85, 0x00, 0xea, 0xef, 0x15, 0x0f, 0xa1, 0x37, 0x67, 0x62, - 0x24, 0x0d, 0xb4, 0xb7, 0xbe, 0x3f, 0x70, 0x07, 0x39, 0xfe, 0x51, 0x5e, 0x20, 0x45, 0x1d, 0xbf, - 0x0a, 0x03, 0x1a, 0xf3, 0xf7, 0x45, 0x94, 0xc6, 0xd2, 0x58, 0x57, 0xa0, 0xad, 0x6c, 0x31, 0x1c, - 0xdc, 0x3f, 0x39, 0x2a, 0x8a, 0xa4, 0xee, 0xe7, 0x60, 0xc1, 0x64, 0x94, 0x0a, 0x8f, 0x49, 0xa3, - 0x5b, 0x83, 0x49, 0x59, 0x24, 0x75, 0x1f, 0xbf, 0x0d, 0x5b, 0xe5, 0xc3, 0x31, 0x0d, 0x98, 0x34, - 0x7a, 0x8a, 0xb0, 0x93, 0x2d, 0x86, 0x5b, 0xa4, 0xd9, 0x20, 0x17, 0x71, 0xf8, 0x5d, 0xd8, 0x0e, - 0xa3, 0xb0, 0x84, 0x7c, 0x4c, 0x1e, 0x48, 0xe3, 0x39, 0x45, 0x7d, 0x31, 0x5b, 0x0c, 0xb7, 0x8f, - 0x2f, 0xb6, 0xc8, 0x3f, 0xb1, 0xd6, 0x57, 0xb0, 0xd3, 0x08, 0x2c, 0xfd, 0x2d, 0x4d, 0x01, 0xe2, - 0xaa, 0xa8, 0x57, 0x7a, 0xbb, 0x04, 0xac, 0x02, 0xa9, 0xae, 0x91, 0x86, 0xb6, 0xf5, 0x33, 0x82, - 0xee, 0x7f, 0x3f, 0xd1, 0xbf, 0x5f, 0x83, 0xcd, 0xff, 0xa3, 0xfc, 0x06, 0x51, 0x9e, 0xa7, 0xc8, - 0x6a, 0xa3, 0xf1, 0xf6, 0x29, 0x72, 0x75, 0x26, 0xfe, 0x84, 0xa0, 0xbf, 0xa2, 0x30, 0x7c, 0x78, - 0xd1, 0x86, 0x7d, 0x43, 0x1b, 0xcb, 0xe7, 0xff, 0x12, 0xca, 0x0d, 0xe1, 0x3b, 0xd0, 0x2f, 0x03, - 0x4c, 0x4d, 0x3f, 0xa8, 0xa7, 0x29, 0x33, 0x8e, 0x54, 0x08, 0xbc, 0x07, 0xdd, 0x19, 0x0f, 0xc7, - 0xc6, 0x9a, 0x42, 0x3e, 0xaf, 0x91, 0xdd, 0x0f, 0x78, 0x38, 0x26, 0xaa, 0x93, 0x23, 0x42, 0x1a, - 0x30, 0xf5, 0x0e, 0x35, 0x10, 0x79, 0x74, 0x11, 0xd5, 0xb1, 0x7e, 0x40, 0xb0, 0xa1, 0xdf, 0xbf, - 0x4a, 0x0f, 0x5d, 0xaa, 0x77, 0x08, 0x40, 0x63, 0xfe, 0x88, 0x09, 0xc9, 0xa3, 0x50, 0x9f, 0x5b, - 0x7d, 0x29, 0xf7, 0x4f, 0x8e, 0x74, 0x87, 0x34, 0x50, 0x57, 0xcf, 0x80, 0x1d, 0x18, 0xe4, 0xbf, - 0x32, 0xa6, 0x1e, 0x33, 0xba, 0x0a, 0xb6, 0xa3, 0x61, 0x83, 0xe3, 0xb2, 0x41, 0x6a, 0x8c, 0xfb, - 0xca, 0xe9, 0xb9, 0xd9, 0x39, 0x3b, 0x37, 0x3b, 0xbf, 0x9d, 0x9b, 0x9d, 0xaf, 0x33, 0x13, 0x9d, - 0x66, 0x26, 0x3a, 0xcb, 0x4c, 0xf4, 0x47, 0x66, 0xa2, 0x6f, 0xff, 0x34, 0x3b, 0x8f, 0xfb, 0xe5, - 0xc5, 0xff, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x53, 0x64, 0x1d, 0x0e, 0x87, 0x0c, 0x00, 0x00, +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 844 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x65, 0x15, 0x6e, 0x38, 0x21, 0x6b, 0x85, 0x9c, 0xc5, 0x02, + 0xe9, 0x10, 0x87, 0xcd, 0xee, 0x21, 0xa0, 0xa1, 0x58, 0x5f, 0x81, 0x16, 0x96, 0xbd, 0x65, 0x4e, + 0x5c, 0x81, 0x28, 0x98, 0x38, 0x73, 0xce, 0x10, 0xdb, 0x63, 0xcd, 0x8c, 0x23, 0x9d, 0x68, 0x68, + 0x68, 0x11, 0x0d, 0x05, 0x3d, 0x2d, 0x0d, 0x94, 0xfc, 0x03, 0x4b, 0x77, 0xe5, 0x56, 0x11, 0x6b, + 0xfe, 0x10, 0x90, 0xc7, 0x76, 0xec, 0xfc, 0x22, 0xa9, 0x22, 0x21, 0x51, 0x25, 0xf3, 0xde, 0xf7, + 0xbe, 0xf7, 0xde, 0x37, 0xf3, 0x9e, 0xe1, 0xd9, 0xf8, 0x03, 0x69, 0x33, 0xee, 0x8c, 0x93, 0x01, + 0x15, 0x11, 0x55, 0x54, 0x3a, 0x13, 0x1a, 0x0d, 0xb9, 0x70, 0x0a, 0x07, 0x89, 0x99, 0x23, 0x06, + 0xc4, 0x73, 0x26, 0x27, 0x24, 0x88, 0x47, 0xe4, 0xc4, 0xf1, 0x69, 0x44, 0x05, 0x51, 0x74, 0x68, + 0xc7, 0x82, 0x2b, 0x8e, 0x8c, 0x1c, 0x69, 0x93, 0x98, 0xd9, 0x19, 0xd2, 0x2e, 0x91, 0x47, 0x6f, + 0xfb, 0x4c, 0x8d, 0x92, 0x81, 0xed, 0xf1, 0xd0, 0xf1, 0xb9, 0xcf, 0x1d, 0x1d, 0x30, 0x48, 0x9e, + 0xea, 0x93, 0x3e, 0xe8, 0x7f, 0x39, 0xd1, 0xd1, 0xbb, 0x55, 
0xca, 0x90, 0x78, 0x23, 0x16, 0x51, + 0xf1, 0xcc, 0x89, 0xc7, 0x7e, 0x66, 0x90, 0x4e, 0x48, 0x15, 0x71, 0x26, 0x4b, 0xe9, 0x8f, 0x9c, + 0x75, 0x51, 0x22, 0x89, 0x14, 0x0b, 0xe9, 0x52, 0xc0, 0x7b, 0x9b, 0x02, 0xa4, 0x37, 0xa2, 0x21, + 0x59, 0x8a, 0x7b, 0xb0, 0x2e, 0x2e, 0x51, 0x2c, 0x70, 0x58, 0xa4, 0xa4, 0x12, 0x8b, 0x41, 0xd6, + 0x4f, 0x00, 0xf6, 0xce, 0x7c, 0x5f, 0x50, 0x9f, 0x28, 0xc6, 0x23, 0x9c, 0x04, 0x14, 0x7d, 0x07, + 0xe0, 0x5d, 0x2f, 0x48, 0xa4, 0xa2, 0x02, 0xf3, 0x80, 0x3e, 0xa6, 0x01, 0xf5, 0x14, 0x17, 0xd2, + 0x00, 0xc7, 0x7b, 0xf7, 0x0e, 0x4e, 0x1f, 0xd8, 0x95, 0xa0, 0xb3, 0x44, 0x76, 0x3c, 0xf6, 0x33, + 0x83, 0xb4, 0x33, 0x1d, 0xec, 0xc9, 0x89, 0x7d, 0x41, 0x06, 0x34, 0x28, 0x63, 0xdd, 0x57, 0xaf, + 0xa7, 0xfd, 0x46, 0x3a, 0xed, 0xdf, 0x7d, 0xb8, 0x82, 0x18, 0xaf, 0x4c, 0x67, 0xfd, 0xdc, 0x84, + 0x07, 0x35, 0x38, 0xfa, 0x0a, 0x76, 0x32, 0xf2, 0x21, 0x51, 0xc4, 0x00, 0xc7, 0xe0, 0xde, 0xc1, + 0xe9, 0x3b, 0xdb, 0x95, 0xf2, 0x68, 0xf0, 0x35, 0xf5, 0xd4, 0xa7, 0x54, 0x11, 0x17, 0x15, 0x75, + 0xc0, 0xca, 0x86, 0x67, 0xac, 0xe8, 0x1c, 0xb6, 0x45, 0x12, 0x50, 0x69, 0x34, 0x75, 0xa7, 0xaf, + 0xdb, 0xeb, 0x9e, 0x8e, 0x7d, 0xc5, 0x03, 0xe6, 0x3d, 0xcb, 0xe4, 0x72, 0x0f, 0x0b, 0xca, 0x76, + 0x76, 0x92, 0x38, 0x67, 0x40, 0x23, 0xd8, 0x23, 0xf3, 0xba, 0x1a, 0x7b, 0xba, 0xe6, 0x37, 0xd7, + 0x93, 0x2e, 0x5c, 0x84, 0xfb, 0x72, 0x3a, 0xed, 0x2f, 0xde, 0x0e, 0x5e, 0xa4, 0xb5, 0x7e, 0x6c, + 0x42, 0x54, 0x93, 0xc9, 0x65, 0xd1, 0x90, 0x45, 0xfe, 0x0e, 0xd4, 0x7a, 0x04, 0x3b, 0x32, 0xd1, + 0x8e, 0x52, 0xb0, 0xd7, 0xd6, 0xf7, 0xf6, 0x38, 0x47, 0xba, 0x2f, 0x15, 0x94, 0x9d, 0xc2, 0x20, + 0xf1, 0x8c, 0x04, 0x5d, 0xc0, 0x7d, 0xc1, 0x03, 0x8a, 0xe9, 0xd3, 0x42, 0xab, 0x7f, 0xe1, 0xc3, + 0x39, 0xd0, 0xed, 0x15, 0x7c, 0xfb, 0x85, 0x01, 0x97, 0x14, 0xd6, 0x1f, 0x00, 0xbe, 0xb2, 0xac, + 0xcb, 0x05, 0x93, 0x0a, 0x7d, 0xb9, 0xa4, 0x8d, 0xbd, 0xe5, 0xa3, 0x66, 0x32, 0x57, 0x66, 0xd6, + 0x46, 0x69, 0xa9, 0xe9, 0xf2, 0x19, 0x6c, 0x33, 0x45, 0xc3, 0x52, 0x94, 0xfb, 0xeb, 0x9b, 0x58, + 0x2e, 0xaf, 0x7a, 0x4d, 0xe7, 0x19, 0x05, 0xce, 0x99, 0xac, 0xdf, 0x01, 0xec, 0xd5, 0xc0, 0x3b, + 0x68, 0xe2, 0xe3, 0xf9, 0x26, 0xde, 0xd8, 0xae, 0x89, 0xd5, 0xd5, 0xff, 0x0d, 0x20, 0xac, 0x06, + 0x06, 0xf5, 0x61, 0x7b, 0x42, 0xc5, 0x20, 0xdf, 0x27, 0x5d, 0xb7, 0x9b, 0xe1, 0x9f, 0x64, 0x06, + 0x9c, 0xdb, 0xd1, 0x5b, 0xb0, 0x4b, 0x62, 0xf6, 0x91, 0xe0, 0x49, 0x2c, 0x8d, 0x3d, 0x0d, 0x3a, + 0x4c, 0xa7, 0xfd, 0xee, 0xd9, 0xd5, 0x79, 0x6e, 0xc4, 0x95, 0x3f, 0x03, 0x0b, 0x2a, 0x79, 0x22, + 0x3c, 0x2a, 0x8d, 0x56, 0x05, 0xc6, 0xa5, 0x11, 0x57, 0x7e, 0xf4, 0x3e, 0x3c, 0x2c, 0x0f, 0x97, + 0x24, 0xa4, 0xd2, 0x68, 0xeb, 0x80, 0x3b, 0xe9, 0xb4, 0x7f, 0x88, 0xeb, 0x0e, 0x3c, 0x8f, 0x43, + 0x1f, 0xc2, 0x5e, 0xc4, 0xa3, 0x12, 0xf2, 0x39, 0xbe, 0x90, 0xc6, 0x0b, 0x3a, 0x54, 0xcf, 0xe8, + 0xe5, 0xbc, 0x0b, 0x2f, 0x62, 0xad, 0xdf, 0x00, 0x6c, 0xfd, 0xe7, 0x76, 0x98, 0xf5, 0x7d, 0x13, + 0x1e, 0xfc, 0xbf, 0x52, 0x6a, 0x2b, 0x25, 0x1b, 0xc3, 0xdd, 0xee, 0x92, 0xed, 0xc7, 0x70, 0xf3, + 0x12, 0xf9, 0x05, 0xc0, 0xce, 0x8e, 0xb6, 0xc7, 0xc3, 0xf9, 0xb2, 0xcd, 0x0d, 0x65, 0xaf, 0xae, + 0xf7, 0x1b, 0x58, 0xde, 0x00, 0xba, 0x0f, 0x3b, 0xe5, 0xc4, 0xeb, 0x6a, 0xbb, 0x55, 0xf6, 0x72, + 0x29, 0xe0, 0x19, 0x02, 0x1d, 0xc3, 0xd6, 0x98, 0x45, 0x43, 0xa3, 0xa9, 0x91, 0x2f, 0x16, 0xc8, + 0xd6, 0x27, 0x2c, 0x1a, 0x62, 0xed, 0xc9, 0x10, 0x11, 0x09, 0xf3, 0x4f, 0x72, 0x0d, 0x91, 0xcd, + 0x3a, 0xd6, 0x1e, 0xeb, 0x57, 0x00, 0xf7, 0x8b, 0xf7, 0x34, 0xe3, 0x03, 0x6b, 0xf9, 0x4e, 0x21, + 0x24, 0x31, 0x7b, 0x42, 0x85, 0x64, 0x3c, 0x2a, 0xf2, 0xce, 0x5e, 0xfa, 0xd9, 0xd5, 
0x79, 0xe1, + 0xc1, 0x35, 0xd4, 0xe6, 0x1a, 0x90, 0x03, 0xbb, 0xd9, 0xaf, 0x8c, 0x89, 0x47, 0x8d, 0x96, 0x86, + 0xdd, 0x29, 0x60, 0xdd, 0xcb, 0xd2, 0x81, 0x2b, 0x8c, 0x6b, 0x5f, 0xdf, 0x9a, 0x8d, 0xe7, 0xb7, + 0x66, 0xe3, 0xe6, 0xd6, 0x6c, 0x7c, 0x9b, 0x9a, 0xe0, 0x3a, 0x35, 0xc1, 0xf3, 0xd4, 0x04, 0x37, + 0xa9, 0x09, 0xfe, 0x4c, 0x4d, 0xf0, 0xc3, 0x5f, 0x66, 0xe3, 0x8b, 0x4e, 0x29, 0xfe, 0x3f, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xeb, 0xcc, 0xe2, 0x61, 0x5e, 0x0b, 0x00, 0x00, } diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto similarity index 89% rename from vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.proto rename to vendor/k8s.io/api/rbac/v1alpha1/generated.proto index 5a75fb240..d7b294863 100644 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,18 +19,24 @@ limitations under the License. syntax = 'proto2'; -package k8s.io.kubernetes.pkg.apis.rbac.v1alpha1; +package k8s.io.api.rbac.v1alpha1; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1alpha1"; +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +message AggregationRule { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; +} + // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. message ClusterRole { // Standard object's metadata. @@ -39,6 +45,12 @@ message ClusterRole { // Rules holds all the PolicyRules for this ClusterRole repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + optional AggregationRule aggregationRule = 3; } // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, @@ -49,6 +61,7 @@ message ClusterRoleBinding { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. + // +optional repeated Subject subjects = 2; // RoleRef can only reference a ClusterRole in the global namespace. @@ -56,14 +69,6 @@ message ClusterRoleBinding { optional RoleRef roleRef = 3; } -// +k8s:deepcopy-gen=false -// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. -// We use it to construct bindings in code. It's more compact than trying to write them -// out in a literal. 
-message ClusterRoleBindingBuilder { - optional ClusterRoleBinding clusterRoleBinding = 1; -} - // ClusterRoleBindingList is a collection of ClusterRoleBindings message ClusterRoleBindingList { // Standard object's metadata. @@ -111,14 +116,6 @@ message PolicyRule { repeated string nonResourceURLs = 6; } -// +k8s:deepcopy-gen=false -// PolicyRuleBuilder let's us attach methods. A no-no for API types. -// We use it to construct rules in code. It's more compact than trying to write them -// out in a literal and allows us to perform some basic checking during construction -message PolicyRuleBuilder { - optional PolicyRule policyRule = 1; -} - // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. message Role { // Standard object's metadata. @@ -138,6 +135,7 @@ message RoleBinding { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. + // +optional repeated Subject subjects = 2; // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/register.go b/vendor/k8s.io/api/rbac/v1alpha1/register.go similarity index 80% rename from vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/register.go rename to vendor/k8s.io/api/rbac/v1alpha1/register.go index 3977e99c5..0c6977685 100644 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/register.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/register.go @@ -33,11 +33,14 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Role{}, diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go similarity index 83% rename from vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.go rename to vendor/k8s.io/api/rbac/v1alpha1/types.go index e9f8efb3b..398d6a169 100644 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -99,7 +99,8 @@ type RoleRef struct { Name string `json:"name" protobuf:"bytes,3,opt,name=name"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. type Role struct { @@ -112,7 +113,8 @@ type Role struct { Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. 
RoleBindings in a given @@ -124,13 +126,16 @@ type RoleBinding struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Subjects holds references to the objects the role applies to. - Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` + // +optional + Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // RoleBindingList is a collection of RoleBindings type RoleBindingList struct { metav1.TypeMeta `json:",inline"` @@ -142,6 +147,8 @@ type RoleBindingList struct { Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // RoleList is a collection of Roles type RoleList struct { metav1.TypeMeta `json:",inline"` @@ -153,8 +160,9 @@ type RoleList struct { Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +genclient=true -// +nonNamespaced=true +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. type ClusterRole struct { @@ -165,10 +173,25 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` } -// +genclient=true -// +nonNamespaced=true +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. @@ -179,13 +202,16 @@ type ClusterRoleBinding struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Subjects holds references to the objects the role applies to. - Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` + // +optional + Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can only reference a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. 
RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ClusterRoleBindingList is a collection of ClusterRoleBindings type ClusterRoleBindingList struct { metav1.TypeMeta `json:",inline"` @@ -197,6 +223,8 @@ type ClusterRoleBindingList struct { Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ClusterRoleList is a collection of ClusterRoles type ClusterRoleList struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go similarity index 86% rename from vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index d58a722af..1d6ef30b0 100644 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,11 +26,21 @@ package v1alpha1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AggregationRule = map[string]string{ + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", +} + +func (AggregationRule) SwaggerDoc() map[string]string { + return map_AggregationRule +} + var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this ClusterRole", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", } func (ClusterRole) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..1d29acff3 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,393 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + if *in == nil { + *out = nil + } else { + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole. +func (in *ClusterRole) DeepCopy() *ClusterRole { + if in == nil { + return nil + } + out := new(ClusterRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRole) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding. +func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding { + if in == nil { + return nil + } + out := new(ClusterRoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList. +func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList { + if in == nil { + return nil + } + out := new(ClusterRoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList. +func (in *ClusterRoleList) DeepCopy() *ClusterRoleList { + if in == nil { + return nil + } + out := new(ClusterRoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. +func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Role) DeepCopyInto(out *Role) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. 
+func (in *Role) DeepCopy() *Role { + if in == nil { + return nil + } + out := new(Role) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Role) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBinding) DeepCopyInto(out *RoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. +func (in *RoleBinding) DeepCopy() *RoleBinding { + if in == nil { + return nil + } + out := new(RoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList. +func (in *RoleBindingList) DeepCopy() *RoleBindingList { + if in == nil { + return nil + } + out := new(RoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleList) DeepCopyInto(out *RoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. +func (in *RoleList) DeepCopy() *RoleList { + if in == nil { + return nil + } + out := new(RoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleRef) DeepCopyInto(out *RoleRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef. 
+func (in *RoleRef) DeepCopy() *RoleRef { + if in == nil { + return nil + } + out := new(RoleRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. +func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1beta1/doc.go new file mode 100644 index 000000000..4b77c9c6b --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1beta1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// +groupName=rbac.authorization.k8s.io +package v1beta1 // import "k8s.io/api/rbac/v1beta1" diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go similarity index 69% rename from vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.pb.go rename to vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index 5f553a57e..e4ddbcde3 100644 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,23 +15,22 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto // DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.proto + k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto It has these top-level messages: + AggregationRule ClusterRole ClusterRoleBinding - ClusterRoleBindingBuilder ClusterRoleBindingList ClusterRoleList PolicyRule - PolicyRuleBuilder Role RoleBinding RoleBindingList @@ -45,6 +44,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + import strings "strings" import reflect "reflect" @@ -57,21 +58,21 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ClusterRoleBindingBuilder) Reset() { *m = ClusterRoleBindingBuilder{} } -func (*ClusterRoleBindingBuilder) ProtoMessage() {} -func (*ClusterRoleBindingBuilder) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} @@ -85,152 +86,161 @@ func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -func (m *PolicyRuleBuilder) Reset() { *m = PolicyRuleBuilder{} } -func (*PolicyRuleBuilder) ProtoMessage() {} -func (*PolicyRuleBuilder) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func init() { - proto.RegisterType((*ClusterRole)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.ClusterRole") - proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.ClusterRoleBinding") - 
proto.RegisterType((*ClusterRoleBindingBuilder)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.ClusterRoleBindingBuilder") - proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.ClusterRoleBindingList") - proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.ClusterRoleList") - proto.RegisterType((*PolicyRule)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.PolicyRule") - proto.RegisterType((*PolicyRuleBuilder)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.PolicyRuleBuilder") - proto.RegisterType((*Role)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.Role") - proto.RegisterType((*RoleBinding)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.RoleBinding") - proto.RegisterType((*RoleBindingList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.RoleBindingList") - proto.RegisterType((*RoleList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.RoleList") - proto.RegisterType((*RoleRef)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.RoleRef") - proto.RegisterType((*Subject)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.Subject") + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1beta1.AggregationRule") + proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1beta1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.api.rbac.v1beta1.PolicyRule") + proto.RegisterType((*Role)(nil), "k8s.io.api.rbac.v1beta1.Role") + proto.RegisterType((*RoleBinding)(nil), "k8s.io.api.rbac.v1beta1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "k8s.io.api.rbac.v1beta1.RoleBindingList") + proto.RegisterType((*RoleList)(nil), "k8s.io.api.rbac.v1beta1.RoleList") + proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1beta1.RoleRef") + proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1beta1.Subject") } -func (m *ClusterRole) Marshal() (data []byte, err error) { +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ClusterRole) MarshalTo(data []byte) (int, error) { +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + if len(m.ClusterRoleSelectors) > 0 { + for _, msg := range m.ClusterRoleSelectors { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterRole) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 if len(m.Rules) > 0 { for _, msg := range m.Rules { - data[i] = 0x12 + dAtA[i] = 
0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } + if m.AggregationRule != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } return i, nil } -func (m *ClusterRoleBinding) Marshal() (data []byte, err error) { +func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ClusterRoleBinding) MarshalTo(data []byte) (int, error) { +func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) - n3, err := m.RoleRef.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n3 - return i, nil -} - -func (m *ClusterRoleBindingBuilder) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - return data[:n], nil -} - -func (m *ClusterRoleBindingBuilder) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.ClusterRoleBinding.Size())) - n4, err := m.ClusterRoleBinding.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -238,35 +248,35 @@ func (m *ClusterRoleBindingBuilder) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *ClusterRoleBindingList) Marshal() (data []byte, err error) { +func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ClusterRoleBindingList) MarshalTo(data []byte) (int, error) { +func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n5 if len(m.Items) > 0 { 
for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -276,35 +286,35 @@ func (m *ClusterRoleBindingList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *ClusterRoleList) Marshal() (data []byte, err error) { +func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ClusterRoleList) MarshalTo(data []byte) (int, error) { +func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -314,238 +324,250 @@ func (m *ClusterRoleList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *PolicyRule) Marshal() (data []byte, err error) { +func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PolicyRule) MarshalTo(data []byte) (int, error) { +func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Verbs) > 0 { for _, s := range m.Verbs { - data[i] = 0xa + dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if len(m.APIGroups) > 0 { for _, s := range m.APIGroups { - data[i] = 0x12 + dAtA[i] = 0x12 i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if len(m.Resources) > 0 { for _, s := range m.Resources { - data[i] = 0x1a + dAtA[i] = 0x1a i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if len(m.ResourceNames) > 0 { for _, s := range m.ResourceNames { - data[i] = 0x22 + dAtA[i] = 0x22 i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if len(m.NonResourceURLs) > 0 { for _, s := range m.NonResourceURLs { - data[i] = 0x2a + dAtA[i] = 0x2a i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = 
uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } return i, nil } -func (m *PolicyRuleBuilder) Marshal() (data []byte, err error) { +func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PolicyRuleBuilder) MarshalTo(data []byte) (int, error) { +func (m *Role) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.PolicyRule.Size())) - n7, err := m.PolicyRule.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n7 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } -func (m *Role) Marshal() (data []byte, err error) { +func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Role) MarshalTo(data []byte) (int, error) { +func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n8 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - data[i] = 0x12 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - return i, nil -} - -func (m *RoleBinding) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *RoleBinding) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n9 - if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - data[i] = 0x12 + return i, nil +} + +func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n10, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } 
+ i += n10 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) - n10, err := m.RoleRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n10 return i, nil } -func (m *RoleBindingList) Marshal() (data []byte, err error) { +func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *RoleBindingList) MarshalTo(data []byte) (int, error) { +func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -555,135 +577,109 @@ func (m *RoleBindingList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *RoleList) Marshal() (data []byte, err error) { +func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *RoleList) MarshalTo(data []byte) (int, error) { +func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n12, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n12 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i += copy(dAtA[i:], m.APIGroup) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) return i, nil } -func (m *RoleRef) Marshal() (data []byte, err error) { +func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *RoleRef) MarshalTo(data []byte) (int, error) { +func (m *Subject) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIGroup))) - i += copy(data[i:], m.APIGroup) - data[i] 
= 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i += copy(dAtA[i:], m.APIGroup) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) return i, nil } -func (m *Subject) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Subject) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIGroup))) - i += copy(data[i:], m.APIGroup) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) - i += copy(data[i:], m.Namespace) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintGenerated(data []byte, offset int, v uint64) int { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } +func (m *AggregationRule) Size() (n int) { + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ClusterRole) Size() (n int) { var l int _ = l @@ -695,6 +691,10 @@ func (m *ClusterRole) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -714,14 +714,6 @@ func (m *ClusterRoleBinding) Size() (n int) { return n } -func (m *ClusterRoleBindingBuilder) Size() (n int) { - var l int - _ = l - l = m.ClusterRoleBinding.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - return n -} - func (m *ClusterRoleBindingList) Size() (n int) { var l int _ = l @@ -786,14 +778,6 @@ func (m *PolicyRule) Size() (n int) { return n } -func (m *PolicyRuleBuilder) Size() (n int) { - var l int - _ = l - l = m.PolicyRule.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *Role) Size() (n int) { var l int _ = l @@ -891,6 +875,16 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *AggregationRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AggregationRule{`, + `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ClusterRole) String() string { if this == nil { return "nil" @@ -898,6 +892,7 @@ func (this *ClusterRole) String() string { s := strings.Join([]string{`&ClusterRole{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -914,16 +909,6 @@ func (this *ClusterRoleBinding) String() string { }, "") return s } -func (this *ClusterRoleBindingBuilder) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterRoleBindingBuilder{`, - `ClusterRoleBinding:` + strings.Replace(strings.Replace(this.ClusterRoleBinding.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" @@ -960,16 +945,6 @@ func (this *PolicyRule) String() string { }, "") return s } -func (this *PolicyRuleBuilder) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PolicyRuleBuilder{`, - `PolicyRule:` + strings.Replace(strings.Replace(this.PolicyRule.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *Role) String() string { if this == nil { return "nil" @@ -1048,8 +1023,8 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *ClusterRole) Unmarshal(data []byte) error { - l := len(data) +func (m *AggregationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1061,7 +1036,88 @@ func (m *ClusterRole) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1089,7 +1145,7 @@ func (m *ClusterRole) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1103,7 +1159,7 @@ func (m *ClusterRole) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1119,7 +1175,7 @@ func (m *ClusterRole) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1134,13 +1190,46 @@ func (m *ClusterRole) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Rules = append(m.Rules, PolicyRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1159,8 +1248,8 @@ func (m *ClusterRole) Unmarshal(data []byte) error { } return nil } -func (m *ClusterRoleBinding) Unmarshal(data []byte) error { - l := len(data) +func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1172,7 +1261,7 @@ func (m *ClusterRoleBinding) 
Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1200,7 +1289,7 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1214,7 +1303,7 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1230,7 +1319,7 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1245,7 +1334,7 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Subjects = append(m.Subjects, Subject{}) - if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1261,7 +1350,7 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1275,13 +1364,13 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1300,8 +1389,8 @@ func (m *ClusterRoleBinding) Unmarshal(data []byte) error { } return nil } -func (m *ClusterRoleBindingBuilder) Unmarshal(data []byte) error { - l := len(data) +func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1313,87 +1402,7 @@ func (m *ClusterRoleBindingBuilder) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleBindingBuilder: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleBindingBuilder: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleBinding", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ClusterRoleBinding.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { 
- return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1421,7 +1430,7 @@ func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1435,7 +1444,7 @@ func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1451,7 +1460,7 @@ func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1466,13 +1475,13 @@ func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, ClusterRoleBinding{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1491,8 +1500,8 @@ func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { } return nil } -func (m *ClusterRoleList) Unmarshal(data []byte) error { - l := len(data) +func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1504,7 +1513,7 @@ func (m *ClusterRoleList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1532,7 +1541,7 @@ func (m *ClusterRoleList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1546,7 +1555,7 @@ func (m *ClusterRoleList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1562,7 +1571,7 @@ func (m *ClusterRoleList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1577,13 +1586,13 @@ func (m *ClusterRoleList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, ClusterRole{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := 
skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1602,8 +1611,8 @@ func (m *ClusterRoleList) Unmarshal(data []byte) error { } return nil } -func (m *PolicyRule) Unmarshal(data []byte) error { - l := len(data) +func (m *PolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1615,7 +1624,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1643,7 +1652,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1658,7 +1667,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Verbs = append(m.Verbs, string(data[iNdEx:postIndex])) + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { @@ -1672,7 +1681,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1687,7 +1696,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroups = append(m.APIGroups, string(data[iNdEx:postIndex])) + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { @@ -1701,7 +1710,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1716,7 +1725,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Resources = append(m.Resources, string(data[iNdEx:postIndex])) + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 4: if wireType != 2 { @@ -1730,7 +1739,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1745,7 +1754,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceNames = append(m.ResourceNames, string(data[iNdEx:postIndex])) + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 5: if wireType != 2 { @@ -1759,7 +1768,7 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1774,11 +1783,11 @@ func (m *PolicyRule) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.NonResourceURLs = append(m.NonResourceURLs, string(data[iNdEx:postIndex])) + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1797,8 +1806,8 @@ func (m *PolicyRule) Unmarshal(data []byte) error { } return nil } -func (m *PolicyRuleBuilder) Unmarshal(data []byte) error { - l := len(data) +func (m *Role) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1810,87 +1819,7 @@ func (m *PolicyRuleBuilder) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PolicyRuleBuilder: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PolicyRuleBuilder: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PolicyRule", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PolicyRule.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Role) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1918,7 +1847,7 @@ func (m *Role) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1932,7 +1861,7 @@ func (m *Role) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1948,7 +1877,7 @@ func (m *Role) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1963,13 +1892,13 @@ func (m *Role) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Rules = append(m.Rules, PolicyRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -1988,8 +1917,8 @@ func (m *Role) Unmarshal(data []byte) error { } return nil } -func (m *RoleBinding) Unmarshal(data []byte) error { - l := len(data) +func (m *RoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2001,7 +1930,7 @@ func (m *RoleBinding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ 
wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2029,7 +1958,7 @@ func (m *RoleBinding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2043,7 +1972,7 @@ func (m *RoleBinding) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2059,7 +1988,7 @@ func (m *RoleBinding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2074,7 +2003,7 @@ func (m *RoleBinding) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Subjects = append(m.Subjects, Subject{}) - if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2090,7 +2019,7 @@ func (m *RoleBinding) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2104,13 +2033,13 @@ func (m *RoleBinding) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2129,8 +2058,8 @@ func (m *RoleBinding) Unmarshal(data []byte) error { } return nil } -func (m *RoleBindingList) Unmarshal(data []byte) error { - l := len(data) +func (m *RoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2142,7 +2071,7 @@ func (m *RoleBindingList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2170,7 +2099,7 @@ func (m *RoleBindingList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2184,7 +2113,7 @@ func (m *RoleBindingList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2200,7 +2129,7 @@ func (m *RoleBindingList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2215,13 +2144,13 @@ func (m *RoleBindingList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, RoleBinding{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2240,8 +2169,8 @@ func (m 
*RoleBindingList) Unmarshal(data []byte) error { } return nil } -func (m *RoleList) Unmarshal(data []byte) error { - l := len(data) +func (m *RoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2253,7 +2182,7 @@ func (m *RoleList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2281,7 +2210,7 @@ func (m *RoleList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2295,7 +2224,7 @@ func (m *RoleList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2311,7 +2240,7 @@ func (m *RoleList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2326,13 +2255,13 @@ func (m *RoleList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, Role{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2351,8 +2280,8 @@ func (m *RoleList) Unmarshal(data []byte) error { } return nil } -func (m *RoleRef) Unmarshal(data []byte) error { - l := len(data) +func (m *RoleRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2364,7 +2293,7 @@ func (m *RoleRef) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2392,7 +2321,7 @@ func (m *RoleRef) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2407,7 +2336,7 @@ func (m *RoleRef) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroup = string(data[iNdEx:postIndex]) + m.APIGroup = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2421,7 +2350,7 @@ func (m *RoleRef) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2436,7 +2365,7 @@ func (m *RoleRef) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(data[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -2450,7 +2379,7 @@ func (m *RoleRef) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2465,11 +2394,11 @@ func (m *RoleRef) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := 
skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2488,8 +2417,8 @@ func (m *RoleRef) Unmarshal(data []byte) error { } return nil } -func (m *Subject) Unmarshal(data []byte) error { - l := len(data) +func (m *Subject) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2501,7 +2430,7 @@ func (m *Subject) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2529,7 +2458,7 @@ func (m *Subject) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2544,7 +2473,7 @@ func (m *Subject) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(data[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2558,7 +2487,7 @@ func (m *Subject) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2573,7 +2502,7 @@ func (m *Subject) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroup = string(data[iNdEx:postIndex]) + m.APIGroup = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -2587,7 +2516,7 @@ func (m *Subject) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2602,7 +2531,7 @@ func (m *Subject) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -2616,7 +2545,7 @@ func (m *Subject) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2631,11 +2560,11 @@ func (m *Subject) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(data[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2654,8 +2583,8 @@ func (m *Subject) Unmarshal(data []byte) error { } return nil } -func skipGenerated(data []byte) (n int, err error) { - l := len(data) +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -2666,7 +2595,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2684,7 +2613,7 @@ func skipGenerated(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -2701,7 +2630,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2724,7 +2653,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { 
return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2735,7 +2664,7 @@ func skipGenerated(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipGenerated(data[start:]) + next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } @@ -2759,58 +2688,63 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -var fileDescriptorGenerated = []byte{ - // 830 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x54, 0xbf, 0x8f, 0xe3, 0x44, - 0x14, 0xce, 0x64, 0x13, 0x6d, 0x3c, 0xcb, 0x2a, 0xec, 0x20, 0x21, 0x93, 0xc2, 0x89, 0xdc, 0xb0, - 0x88, 0x3b, 0xfb, 0xf6, 0xee, 0xc4, 0x21, 0x21, 0x0a, 0x4c, 0x81, 0x4e, 0x1c, 0xcb, 0x69, 0x10, - 0x88, 0x5f, 0x42, 0x37, 0x71, 0xe6, 0xbc, 0x43, 0xfc, 0x4b, 0x33, 0xe3, 0x88, 0x13, 0x14, 0x74, - 0xb4, 0xfc, 0x13, 0x74, 0xd7, 0xd1, 0x52, 0x51, 0x2d, 0x54, 0x57, 0x6e, 0x15, 0xb1, 0xe6, 0x0f, - 0x01, 0xd9, 0x1e, 0xff, 0x08, 0x4e, 0xd8, 0xb0, 0x48, 0x91, 0x90, 0xa8, 0x92, 0x79, 0xef, 0xfb, - 0xde, 0xbc, 0xef, 0xbd, 0xf1, 0x07, 0xef, 0xcd, 0x5f, 0x17, 0x16, 0x8b, 0xec, 0x79, 0x32, 0xa5, - 0x3c, 0xa4, 0x92, 0x0a, 0x3b, 0x9e, 0x7b, 0x36, 0x89, 0x99, 0xb0, 0xf9, 0x94, 0xb8, 0xf6, 0xe2, - 0x64, 0x4a, 0x25, 0x39, 0xb1, 0x3d, 0x1a, 0x52, 0x4e, 0x24, 0x9d, 0x59, 0x31, 0x8f, 0x64, 0x84, - 0x5e, 0x2e, 0x88, 0x56, 0x4d, 0xb4, 0xe2, 0xb9, 0x67, 0x65, 0x44, 0x2b, 0x23, 0x5a, 0x8a, 0x38, - 0xba, 0xe9, 0x31, 0x79, 0x96, 0x4c, 0x2d, 0x37, 0x0a, 0x6c, 0x2f, 0xf2, 0x22, 0x3b, 0xe7, 0x4f, - 0x93, 0xc7, 0xf9, 0x29, 0x3f, 0xe4, 0xff, 0x8a, 0xba, 0xa3, 0xbb, 0xaa, 0x21, 0x12, 0xb3, 0x80, - 0xb8, 0x67, 0x2c, 0xa4, 0xfc, 0x49, 0xdd, 0x52, 0x40, 0x25, 0xb1, 0x17, 0xad, 0x6e, 0x46, 0xf6, - 0x26, 0x16, 0x4f, 0x42, 0xc9, 0x02, 0xda, 0x22, 0xbc, 0x76, 0x15, 0x41, 0xb8, 0x67, 0x34, 0x20, - 0x2d, 0xde, 0x9d, 0x4d, 0xbc, 0x44, 0x32, 0xdf, 0x66, 0xa1, 0x14, 0x92, 0xb7, 0x48, 0x0d, 0x4d, - 0x82, 0xf2, 0x05, 0xe5, 0xb5, 0x20, 0xfa, 0x15, 0x09, 0x62, 0x9f, 0xae, 0xd3, 0x74, 0x63, 0xe3, - 0x6a, 0xd6, 0xa0, 0xcd, 0x5f, 0x00, 0x3c, 0x78, 0xdb, 0x4f, 0x84, 0xa4, 0x1c, 0x47, 0x3e, 0x45, - 0x8f, 0xe0, 0x20, 0x1b, 0xd6, 0x8c, 0x48, 0xa2, 0x83, 0x09, 0x38, 0x3e, 0xb8, 0x7d, 0xcb, 0x52, - 0x2b, 0x6b, 0xf6, 0x5e, 0x2f, 0x2d, 0x43, 0x5b, 0x8b, 0x13, 0xeb, 0xfd, 0xe9, 0x97, 0xd4, 0x95, - 0xef, 0x51, 0x49, 0x1c, 0x74, 0xbe, 0x1c, 0x77, 0xd2, 0xe5, 0x18, 0xd6, 0x31, 0x5c, 0x55, 0x45, - 0x1f, 0xc3, 0x3e, 0x4f, 0x7c, 0x2a, 0xf4, 0xee, 0x64, 0xef, 0xf8, 0xe0, 0xf6, 0x1d, 0x6b, 0xcb, - 0x17, 0x61, 0x3d, 0x8c, 0x7c, 0xe6, 0x3e, 0xc1, 0x89, 0x4f, 0x9d, 0x43, 0x75, 0x43, 0x3f, 0x3b, - 0x09, 0x5c, 0x14, 0x34, 0x7f, 0xec, 0x42, 0xd4, 0xd0, 0xe2, 0xb0, 0x70, 0xc6, 0x42, 0x6f, 0x07, - 0x92, 0xbe, 0x80, 0x03, 0x91, 0xe4, 0x89, 0x52, 0xd5, 0xad, 0xad, 0x55, 0x7d, 0x50, 0x10, 0x9d, - 0xe7, 0xd5, 0x0d, 0x03, 0x15, 0x10, 0xb8, 0xaa, 0x89, 0x3e, 0x83, 0xfb, 0x3c, 0xf2, 0x29, 0xa6, - 0x8f, 0xf5, 0xbd, 0x55, 0x01, 0x57, 0x96, 0xc7, 0x05, 0xcf, 0x19, 0xaa, 0xf2, 0xfb, 0x2a, 0x80, - 0xcb, 0x8a, 0xe6, 0x0f, 0x00, 0xbe, 0xd4, 0x9e, 0x9a, 0x93, 0x30, 0x7f, 0x46, 0x39, 0xfa, 0x0e, - 0x40, 0xe4, 0xb6, 0xb2, 0x6a, 0x8e, 0x6f, 0x6c, 0xdd, 0xc6, 0x9a, 0x0b, 0x46, 0xaa, 0xa3, 0x35, - 0x2b, 0xc3, 0x6b, 0xae, 0x34, 0x2f, 0x00, 0x7c, 0xb1, 0x0d, 0x7d, 0xc0, 0x84, 0x44, 0x9f, 0xb7, - 0x36, 0x6c, 0x6d, 0xb7, 0xe1, 0x8c, 0x9d, 0xef, 0xb7, 0x9a, 0x7e, 0x19, 0x69, 0x6c, 0xf7, 0x11, - 0xec, 0x33, 0x49, 0x83, 0x72, 0xb5, 0xff, 0x4a, 0x74, 0xf5, 0x70, 0xef, 
0x67, 0x15, 0x71, 0x51, - 0xd8, 0xfc, 0x15, 0xc0, 0x61, 0x03, 0xbc, 0x03, 0x4d, 0x9f, 0xac, 0x6a, 0xba, 0x7b, 0x2d, 0x4d, - 0xeb, 0xc5, 0xfc, 0x01, 0x20, 0xac, 0x3f, 0x55, 0x34, 0x86, 0xfd, 0x05, 0xe5, 0x53, 0xa1, 0x83, - 0xc9, 0xde, 0xb1, 0xe6, 0x68, 0x19, 0xfe, 0xa3, 0x2c, 0x80, 0x8b, 0x38, 0x7a, 0x15, 0x6a, 0x24, - 0x66, 0xef, 0xf0, 0x28, 0x89, 0x8b, 0x76, 0x34, 0xe7, 0x30, 0x5d, 0x8e, 0xb5, 0xb7, 0x1e, 0xde, - 0x2f, 0x82, 0xb8, 0xce, 0x67, 0x60, 0x4e, 0x45, 0x94, 0x70, 0x97, 0x0a, 0x7d, 0xaf, 0x06, 0xe3, - 0x32, 0x88, 0xeb, 0x3c, 0xba, 0x07, 0x0f, 0xcb, 0xc3, 0x29, 0x09, 0xa8, 0xd0, 0x7b, 0x39, 0xe1, - 0x28, 0x5d, 0x8e, 0x0f, 0x71, 0x33, 0x81, 0x57, 0x71, 0xe8, 0x4d, 0x38, 0x0c, 0xa3, 0xb0, 0x84, - 0x7c, 0x88, 0x1f, 0x08, 0xbd, 0x9f, 0x53, 0x5f, 0x48, 0x97, 0xe3, 0xe1, 0xe9, 0x6a, 0x0a, 0xff, - 0x15, 0x6b, 0x7e, 0x03, 0x8f, 0x1a, 0x5e, 0xa5, 0x3e, 0x24, 0x0f, 0xc2, 0xb8, 0x0a, 0xaa, 0x8d, - 0x5e, 0xcb, 0xfb, 0x2a, 0x2b, 0xaa, 0x63, 0xb8, 0x51, 0xda, 0xfc, 0x19, 0xc0, 0xde, 0x7f, 0xde, - 0xca, 0x9f, 0x76, 0xe1, 0xc1, 0xff, 0x1e, 0xbe, 0xb5, 0x87, 0x67, 0x06, 0xb2, 0x5b, 0x53, 0xbc, - 0xb6, 0x81, 0x5c, 0xed, 0x86, 0x3f, 0x01, 0x38, 0xd8, 0x91, 0x0d, 0xe2, 0x55, 0x15, 0x37, 0xff, - 0x99, 0x8a, 0xf5, 0xed, 0x7f, 0x0d, 0xcb, 0xfd, 0xa0, 0x1b, 0x70, 0x50, 0x5a, 0x57, 0xde, 0xbc, - 0x56, 0x37, 0x53, 0xba, 0x1b, 0xae, 0x10, 0x68, 0x02, 0x7b, 0x73, 0x16, 0xce, 0xf4, 0x6e, 0x8e, - 0x7c, 0x4e, 0x21, 0x7b, 0xef, 0xb2, 0x70, 0x86, 0xf3, 0x4c, 0x86, 0x08, 0x49, 0x40, 0xf3, 0x07, - 0xd4, 0x40, 0x64, 0xa6, 0x85, 0xf3, 0x8c, 0xf9, 0x14, 0xc0, 0x7d, 0xf5, 0xf8, 0xaa, 0x7a, 0x60, - 0x63, 0xbd, 0x66, 0x7f, 0xdd, 0x6d, 0xfa, 0xfb, 0xfb, 0xdb, 0x91, 0x0d, 0xb5, 0xec, 0x57, 0xc4, - 0xc4, 0xa5, 0x7a, 0x2f, 0x87, 0x1d, 0x29, 0x98, 0x76, 0x5a, 0x26, 0x70, 0x8d, 0x71, 0x5e, 0x39, - 0xbf, 0x34, 0x3a, 0xcf, 0x2e, 0x8d, 0xce, 0xc5, 0xa5, 0xd1, 0xf9, 0x36, 0x35, 0xc0, 0x79, 0x6a, - 0x80, 0x67, 0xa9, 0x01, 0x7e, 0x4b, 0x0d, 0xf0, 0xfd, 0xef, 0x46, 0xe7, 0xd3, 0x7d, 0x35, 0xf1, - 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x96, 0xa1, 0xd4, 0x72, 0x0c, 0x00, 0x00, +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 833 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x6d, 0x3c, 0xcb, 0x2a, 0xdc, 0x70, 0x02, 0x6b, 0x05, 0xce, 0x2a, 0x50, + 0x44, 0x3a, 0xce, 0x66, 0xef, 0x10, 0xd0, 0x20, 0x71, 0xa6, 0x80, 0xd5, 0x2d, 0x61, 0x35, 0x27, + 0x28, 0x10, 0x05, 0x63, 0x67, 0xce, 0x19, 0xe2, 0x5f, 0x9a, 0x19, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, + 0x51, 0x20, 0x51, 0xd1, 0x52, 0x53, 0x51, 0xf2, 0x17, 0xa4, 0xbc, 0xf2, 0xaa, 0x88, 0x35, 0x7f, + 0x08, 0x68, 0xfc, 0x23, 0x4e, 0xe2, 0xf8, 0x2e, 0x55, 0x24, 0xa4, 0xab, 0x76, 0xe7, 0xbd, 0xef, + 0x7d, 0xef, 0x7b, 0x9f, 0x67, 0x5e, 0xe0, 0x27, 0xb3, 0x8f, 0x84, 0xc9, 0x22, 0x6b, 0x96, 0x38, + 0x94, 0x87, 0x54, 0x52, 0x61, 0xcd, 0x69, 0x38, 0x89, 0xb8, 0x55, 0x24, 0x48, 0xcc, 0x2c, 0xee, + 0x10, 0xd7, 0x9a, 0x5f, 0x38, 0x54, 0x92, 0x0b, 0xcb, 0xa3, 0x21, 0xe5, 0x44, 0xd2, 0x89, 0x19, + 0xf3, 0x48, 0x46, 0xe8, 0x8d, 0x1c, 0x68, 0x92, 0x98, 0x99, 0x0a, 0x68, 0x16, 0xc0, 0xb3, 0xbb, + 0x1e, 0x93, 0xd3, 0xc4, 0x31, 0xdd, 0x28, 0xb0, 0xbc, 0xc8, 0x8b, 0xac, 0x0c, 0xef, 0x24, 0x8f, + 0xb3, 0x53, 0x76, 0xc8, 0xfe, 0xcb, 0x79, 0xce, 0x46, 0xf5, 0x86, 0xc4, 0x8f, 0xa7, 0xf5, 0x8e, + 0x67, 0xef, 0x57, 0xc8, 0x80, 0xb8, 0x53, 0x16, 0x52, 0xfe, 0xc4, 0x8a, 0x67, 0x9e, 
0x0a, 0x08, + 0x2b, 0xa0, 0x92, 0x58, 0xf3, 0x7a, 0x95, 0xd5, 0x54, 0xc5, 0x93, 0x50, 0xb2, 0x80, 0xd6, 0x0a, + 0x3e, 0x78, 0x51, 0x81, 0x70, 0xa7, 0x34, 0x20, 0xb5, 0xba, 0xfb, 0x4d, 0x75, 0x89, 0x64, 0xbe, + 0xc5, 0x42, 0x29, 0x24, 0xdf, 0x2e, 0x1a, 0xfe, 0x06, 0x60, 0xff, 0x81, 0xe7, 0x71, 0xea, 0x11, + 0xc9, 0xa2, 0x10, 0x27, 0x3e, 0x45, 0x3f, 0x01, 0x78, 0xdb, 0xf5, 0x13, 0x21, 0x29, 0xc7, 0x91, + 0x4f, 0x1f, 0x51, 0x9f, 0xba, 0x32, 0xe2, 0x42, 0x07, 0xe7, 0x47, 0xa3, 0x93, 0x7b, 0xf7, 0xcd, + 0xca, 0xf9, 0x55, 0x23, 0x33, 0x9e, 0x79, 0x2a, 0x20, 0x4c, 0xe5, 0x83, 0x39, 0xbf, 0x30, 0xaf, + 0x88, 0x43, 0xfd, 0xb2, 0xd6, 0x7e, 0x73, 0xb1, 0x1c, 0xb4, 0xd2, 0xe5, 0xe0, 0xf6, 0xa7, 0x3b, + 0x88, 0xf1, 0xce, 0x76, 0xc3, 0xdf, 0xdb, 0xf0, 0x64, 0x0d, 0x8e, 0xbe, 0x83, 0x3d, 0x45, 0x3e, + 0x21, 0x92, 0xe8, 0xe0, 0x1c, 0x8c, 0x4e, 0xee, 0xbd, 0xb7, 0x9f, 0x94, 0x2f, 0x9d, 0xef, 0xa9, + 0x2b, 0xbf, 0xa0, 0x92, 0xd8, 0xa8, 0xd0, 0x01, 0xab, 0x18, 0x5e, 0xb1, 0xa2, 0xcf, 0x61, 0x97, + 0x27, 0x3e, 0x15, 0x7a, 0x3b, 0x9b, 0xf4, 0x6d, 0xb3, 0xe1, 0x8e, 0x99, 0xd7, 0x91, 0xcf, 0xdc, + 0x27, 0xca, 0x2d, 0xfb, 0xb4, 0x60, 0xec, 0xaa, 0x93, 0xc0, 0x39, 0x01, 0xf2, 0x60, 0x9f, 0x6c, + 0xda, 0xaa, 0x1f, 0x65, 0x92, 0x47, 0x8d, 0x9c, 0x5b, 0x9f, 0xc1, 0x7e, 0x2d, 0x5d, 0x0e, 0xb6, + 0xbf, 0x0d, 0xde, 0x66, 0x1d, 0xfe, 0xda, 0x86, 0x68, 0xcd, 0x24, 0x9b, 0x85, 0x13, 0x16, 0x7a, + 0x07, 0xf0, 0x6a, 0x0c, 0x7b, 0x22, 0xc9, 0x12, 0xa5, 0x5d, 0xe7, 0x8d, 0xa3, 0x3d, 0xca, 0x81, + 0xf6, 0xab, 0x05, 0x63, 0xaf, 0x08, 0x08, 0xbc, 0xe2, 0x40, 0x0f, 0xe1, 0x31, 0x8f, 0x7c, 0x8a, + 0xe9, 0xe3, 0xc2, 0xa9, 0x66, 0x3a, 0x9c, 0xe3, 0xec, 0x7e, 0x41, 0x77, 0x5c, 0x04, 0x70, 0xc9, + 0x30, 0x5c, 0x00, 0xf8, 0x7a, 0xdd, 0x95, 0x2b, 0x26, 0x24, 0xfa, 0xb6, 0xe6, 0x8c, 0xb9, 0xe7, + 0x85, 0x66, 0x22, 0xf7, 0x65, 0x35, 0x45, 0x19, 0x59, 0x73, 0xe5, 0x1a, 0x76, 0x99, 0xa4, 0x41, + 0x69, 0xc9, 0x9d, 0xc6, 0x19, 0xea, 0xea, 0xaa, 0x9b, 0x74, 0xa9, 0x18, 0x70, 0x4e, 0x34, 0xfc, + 0x0b, 0xc0, 0xfe, 0x1a, 0xf8, 0x00, 0x33, 0x5c, 0x6e, 0xce, 0xf0, 0xce, 0x5e, 0x33, 0xec, 0x16, + 0xff, 0x2f, 0x80, 0xb0, 0x7a, 0x2b, 0x68, 0x00, 0xbb, 0x73, 0xca, 0x9d, 0x7c, 0x93, 0x68, 0xb6, + 0xa6, 0xf0, 0x5f, 0xab, 0x00, 0xce, 0xe3, 0xe8, 0x0e, 0xd4, 0x48, 0xcc, 0x3e, 0xe3, 0x51, 0x12, + 0xe7, 0xed, 0x35, 0xfb, 0x34, 0x5d, 0x0e, 0xb4, 0x07, 0xd7, 0x97, 0x79, 0x10, 0x57, 0x79, 0x05, + 0xe6, 0x54, 0x44, 0x09, 0x77, 0xa9, 0xd0, 0x8f, 0x2a, 0x30, 0x2e, 0x83, 0xb8, 0xca, 0xa3, 0x0f, + 0xe1, 0x69, 0x79, 0x18, 0x93, 0x80, 0x0a, 0xbd, 0x93, 0x15, 0xdc, 0x4a, 0x97, 0x83, 0x53, 0xbc, + 0x9e, 0xc0, 0x9b, 0x38, 0xf4, 0x31, 0xec, 0x87, 0x51, 0x58, 0x42, 0xbe, 0xc2, 0x57, 0x42, 0xef, + 0x66, 0xa5, 0xd9, 0xfb, 0x1c, 0x6f, 0xa6, 0xf0, 0x36, 0x76, 0xf8, 0x27, 0x80, 0x9d, 0xff, 0xdb, + 0xf6, 0x1a, 0xfe, 0xdc, 0x86, 0x27, 0x2f, 0xb7, 0xc9, 0x6a, 0x9b, 0xa8, 0x27, 0x78, 0xd8, 0x35, + 0xb2, 0xf7, 0x13, 0x7c, 0xf1, 0xfe, 0xf8, 0x03, 0xc0, 0xde, 0x81, 0x16, 0x87, 0xbd, 0xa9, 0xfa, + 0xad, 0xe7, 0xab, 0xde, 0x2d, 0xf7, 0x07, 0x58, 0xfa, 0x8f, 0xde, 0x85, 0xbd, 0xf2, 0xb1, 0x67, + 0x62, 0xb5, 0xaa, 0x79, 0xb9, 0x0f, 0xf0, 0x0a, 0x81, 0xce, 0x61, 0x67, 0xc6, 0xc2, 0x89, 0xde, + 0xce, 0x90, 0xaf, 0x14, 0xc8, 0xce, 0x43, 0x16, 0x4e, 0x70, 0x96, 0x51, 0x88, 0x90, 0x04, 0xf9, + 0x0f, 0xf1, 0x1a, 0x42, 0x3d, 0x73, 0x9c, 0x65, 0x94, 0x57, 0xc7, 0xc5, 0x65, 0x5a, 0xf1, 0x81, + 0x46, 0xbe, 0x75, 0x7d, 0xed, 0x7d, 0xf4, 0x3d, 0xbf, 0x3b, 0xb2, 0xa0, 0xa6, 0xfe, 0x8a, 0x98, + 0xb8, 0x54, 0xef, 0x64, 0xb0, 0x5b, 0x05, 0x4c, 0x1b, 0x97, 0x09, 0x5c, 0x61, 0xec, 0xbb, 0x8b, + 0x1b, 0xa3, 
0xf5, 0xf4, 0xc6, 0x68, 0x3d, 0xbb, 0x31, 0x5a, 0x3f, 0xa6, 0x06, 0x58, 0xa4, 0x06, + 0x78, 0x9a, 0x1a, 0xe0, 0x59, 0x6a, 0x80, 0xbf, 0x53, 0x03, 0xfc, 0xf2, 0x8f, 0xd1, 0xfa, 0xe6, + 0xb8, 0x70, 0xfd, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x99, 0xaf, 0xff, 0x74, 0x0b, 0x00, + 0x00, } diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto new file mode 100644 index 000000000..494aff8b3 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -0,0 +1,200 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.rbac.v1beta1; + +import "k8s.io/api/rbac/v1alpha1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +message AggregationRule { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; +} + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +message ClusterRole { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this ClusterRole + repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + optional AggregationRule aggregationRule = 3; +} + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +message ClusterRoleBinding { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + // +optional + repeated Subject subjects = 2; + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +message ClusterRoleBindingList { + // Standard object's metadata. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoleBindings + repeated ClusterRoleBinding items = 2; +} + +// ClusterRoleList is a collection of ClusterRoles +message ClusterRoleList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoles + repeated ClusterRole items = 2; +} + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +message PolicyRule { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + repeated string verbs = 1; + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + repeated string apiGroups = 2; + + // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. + // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups. + // +optional + repeated string resources = 3; + + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + repeated string resourceNames = 4; + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + repeated string nonResourceURLs = 5; +} + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +message Role { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this Role + repeated PolicyRule rules = 2; +} + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +message RoleBinding { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + // +optional + repeated Subject subjects = 2; + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// RoleBindingList is a collection of RoleBindings +message RoleBindingList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of RoleBindings + repeated RoleBinding items = 2; +} + +// RoleList is a collection of Roles +message RoleList { + // Standard object's metadata. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of Roles + repeated Role items = 2; +} + +// RoleRef contains information that points to the role being used +message RoleRef { + // APIGroup is the group for the resource being referenced + optional string apiGroup = 1; + + // Kind is the type of resource being referenced + optional string kind = 2; + + // Name is the name of resource being referenced + optional string name = 3; +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +message Subject { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + optional string kind = 1; + + // APIGroup holds the API group of the referenced subject. + // Defaults to "" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. + // +optional + optional string apiGroup = 2; + + // Name of the object being referenced. + optional string name = 3; + + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. + // +optional + optional string namespace = 4; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/register.go b/vendor/k8s.io/api/rbac/v1beta1/register.go similarity index 80% rename from vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/register.go rename to vendor/k8s.io/api/rbac/v1beta1/register.go index cab2e77e4..c8526a656 100644 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/register.go +++ b/vendor/k8s.io/api/rbac/v1beta1/register.go @@ -33,11 +33,14 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Role{}, diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go similarity index 51% rename from vendor/k8s.io/client-go/pkg/apis/rbac/types.go rename to vendor/k8s.io/api/rbac/v1beta1/types.go index ddc2456a0..857b67a6f 100644 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/types.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package rbac +package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -39,25 +39,34 @@ const ( AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate" ) +// Authorization is calculated against +// 1. 
evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. - Verbs []string + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` - // APIGroups is the name of the APIGroup that contains the resources. - // If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. - APIGroups []string - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. - Resources []string + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. + // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. - ResourceNames []string + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path - // If an action is not a resource API request, then the URL is split on '/' and is checked against the NonResourceURLs to look for a match. // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. - NonResourceURLs []string + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"` } // Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, @@ -65,124 +74,162 @@ type PolicyRule struct { type Subject struct { // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". // If the Authorizer does not recognized the kind value, the Authorizer should report an error. - Kind string + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // APIGroup holds the API group of the referenced subject. // Defaults to "" for ServiceAccount subjects. // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. - APIGroup string + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` // Name of the object being referenced. - Name string + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Namespace of the referenced object. 
If the object kind is non-namespace, such as "User" or "Group", and this value is not empty // the Authorizer should report an error. - Namespace string + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` } // RoleRef contains information that points to the role being used type RoleRef struct { // APIGroup is the group for the resource being referenced - APIGroup string + APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` // Kind is the type of resource being referenced - Kind string + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` // Name is the name of resource being referenced - Name string + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. type Role struct { - metav1.TypeMeta + metav1.TypeMeta `json:",inline"` // Standard object's metadata. - metav1.ObjectMeta + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this Role - Rules []PolicyRule + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given // namespace only have effect in that namespace. type RoleBinding struct { - metav1.TypeMeta - metav1.ObjectMeta + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Subjects holds references to the objects the role applies to. - Subjects []Subject + // +optional + Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. - RoleRef RoleRef + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // RoleBindingList is a collection of RoleBindings type RoleBindingList struct { - metav1.TypeMeta + metav1.TypeMeta `json:",inline"` // Standard object's metadata. - metav1.ListMeta + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of roleBindings - Items []RoleBinding + // Items is a list of RoleBindings + Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // RoleList is a collection of Roles type RoleList struct { - metav1.TypeMeta + metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- metav1.ListMeta + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of roles - Items []Role + // Items is a list of Roles + Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +genclient=true -// +nonNamespaced=true +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. type ClusterRole struct { - metav1.TypeMeta + metav1.TypeMeta `json:",inline"` // Standard object's metadata. - metav1.ObjectMeta + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this ClusterRole - Rules []PolicyRule + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` } -// +genclient=true -// +nonNamespaced=true +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. type ClusterRoleBinding struct { - metav1.TypeMeta + metav1.TypeMeta `json:",inline"` // Standard object's metadata. - metav1.ObjectMeta + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Subjects holds references to the objects the role applies to. - Subjects []Subject + // +optional + Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can only reference a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. - RoleRef RoleRef + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ClusterRoleBindingList is a collection of ClusterRoleBindings type ClusterRoleBindingList struct { - metav1.TypeMeta + metav1.TypeMeta `json:",inline"` // Standard object's metadata. - metav1.ListMeta + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of ClusterRoleBindings - Items []ClusterRoleBinding + Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ClusterRoleList is a collection of ClusterRoles type ClusterRoleList struct { - metav1.TypeMeta + metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- metav1.ListMeta + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of ClusterRoles - Items []ClusterRole + Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..66dba6ca1 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,158 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AggregationRule = map[string]string{ + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", +} + +func (AggregationRule) SwaggerDoc() map[string]string { + return map_AggregationRule +} + +var map_ClusterRole = map[string]string{ + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", +} + +func (ClusterRole) SwaggerDoc() map[string]string { + return map_ClusterRole +} + +var map_ClusterRoleBinding = map[string]string{ + "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (ClusterRoleBinding) SwaggerDoc() map[string]string { + return map_ClusterRoleBinding +} + +var map_ClusterRoleBindingList = map[string]string{ + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoleBindings", +} + +func (ClusterRoleBindingList) SwaggerDoc() map[string]string { + return map_ClusterRoleBindingList +} + +var map_ClusterRoleList = map[string]string{ + "": "ClusterRoleList is a collection of ClusterRoles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoles", +} + +func (ClusterRoleList) SwaggerDoc() map[string]string { + return map_ClusterRoleList +} + +var map_PolicyRule = map[string]string{ + "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "resources": "Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", +} + +func (PolicyRule) SwaggerDoc() map[string]string { + return map_PolicyRule +} + +var map_Role = map[string]string{ + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this Role", +} + +func (Role) SwaggerDoc() map[string]string { + return map_Role +} + +var map_RoleBinding = map[string]string{ + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (RoleBinding) SwaggerDoc() map[string]string { + return map_RoleBinding +} + +var map_RoleBindingList = map[string]string{ + "": "RoleBindingList is a collection of RoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of RoleBindings", +} + +func (RoleBindingList) SwaggerDoc() map[string]string { + return map_RoleBindingList +} + +var map_RoleList = map[string]string{ + "": "RoleList is a collection of Roles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of Roles", +} + +func (RoleList) SwaggerDoc() map[string]string { + return map_RoleList +} + +var map_RoleRef = map[string]string{ + "": "RoleRef contains information that points to the role being used", + "apiGroup": "APIGroup is the group for the resource being referenced", + "kind": "Kind is the type of resource being referenced", + "name": "Name is the name of resource being referenced", +} + +func (RoleRef) SwaggerDoc() map[string]string { + return map_RoleRef +} + +var map_Subject = map[string]string{ + "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", + "apiGroup": "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.", + "name": "Name of the object being referenced.", + "namespace": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..86fadd170 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,393 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + if *in == nil { + *out = nil + } else { + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole. +func (in *ClusterRole) DeepCopy() *ClusterRole { + if in == nil { + return nil + } + out := new(ClusterRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRole) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding. +func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding { + if in == nil { + return nil + } + out := new(ClusterRoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList. +func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList { + if in == nil { + return nil + } + out := new(ClusterRoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList. +func (in *ClusterRoleList) DeepCopy() *ClusterRoleList { + if in == nil { + return nil + } + out := new(ClusterRoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. +func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Role) DeepCopyInto(out *Role) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. +func (in *Role) DeepCopy() *Role { + if in == nil { + return nil + } + out := new(Role) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Role) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoleBinding) DeepCopyInto(out *RoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. +func (in *RoleBinding) DeepCopy() *RoleBinding { + if in == nil { + return nil + } + out := new(RoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList. +func (in *RoleBindingList) DeepCopy() *RoleBindingList { + if in == nil { + return nil + } + out := new(RoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleList) DeepCopyInto(out *RoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. +func (in *RoleList) DeepCopy() *RoleList { + if in == nil { + return nil + } + out := new(RoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleRef) DeepCopyInto(out *RoleRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef. +func (in *RoleRef) DeepCopy() *RoleRef { + if in == nil { + return nil + } + out := new(RoleRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. 
+func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go new file mode 100644 index 000000000..e10d07ff7 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// +groupName=scheduling.k8s.io +package v1alpha1 // import "k8s.io/api/scheduling/v1alpha1" diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go new file mode 100644 index 000000000..1f79d7f88 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go @@ -0,0 +1,641 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto + + It has these top-level messages: + PriorityClass + PriorityClassList +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func init() { + proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClass") + proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClassList") +} +func (m *PriorityClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + dAtA[i] = 0x18 + i++ + if m.GlobalDefault { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + return i, nil +} + +func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PriorityClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Value)) + n += 2 + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + 
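The generated Size methods above cost each length-delimited field as one tag byte (the field numbers here are small enough to fit the tag in a single byte) plus a varint length prefix plus the payload, i.e. `n += 1 + l + sovGenerated(uint64(l))`, where `sovGenerated` returns the byte count of the base-128 varint that `encodeVarintGenerated` writes. A minimal, self-contained sketch of that varint scheme, using hypothetical names (`putUvarint`, `uvarintSize`) that are not part of the vendored package:

// Illustrative sketch only; mirrors the behavior of encodeVarintGenerated/sovGenerated.
package main

import "fmt"

// putUvarint appends x using the protobuf base-128 varint scheme:
// seven payload bits per byte, high bit set on every byte except the last.
func putUvarint(buf []byte, x uint64) []byte {
	for x >= 1<<7 {
		buf = append(buf, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(buf, byte(x))
}

// uvarintSize reports how many bytes putUvarint would write for x.
func uvarintSize(x uint64) int {
	n := 1
	for x >= 1<<7 {
		x >>= 7
		n++
	}
	return n
}

func main() {
	for _, v := range []uint64{1, 127, 128, 300} {
		fmt.Printf("%d -> % x (size %d)\n", v, putUvarint(nil, v), uvarintSize(v))
	}
}

For example, 300 encodes as 0xac 0x02, so a 300-byte embedded message behind a single-byte tag contributes 1 + 2 + 300 bytes, which is exactly the arithmetic the generated Size methods perform before Marshal lays the bytes out.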
+func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PriorityClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityClassList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PriorityClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobalDefault", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.GlobalDefault = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { 
+ break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 460 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x8b, 0xd3, 0x40, + 0x18, 0xc6, 0x33, 0x5d, 0x0b, 0x75, 0x4a, 0x41, 0x23, 0x42, 0xe8, 0x61, 0x36, 0xac, 0x97, 0x5c, + 0x76, 0xc6, 0xae, 0x7f, 0x10, 0xbc, 0xc5, 0x85, 0x45, 0x50, 0x94, 0x1c, 0x3c, 0x88, 0x07, 0x27, + 0xc9, 0x6c, 0x3a, 0x36, 0xc9, 0x84, 0x99, 0x37, 0x81, 0xbd, 0x79, 0xf6, 0xe4, 0x97, 0x12, 0x7a, + 0xdc, 0xe3, 0x9e, 0x16, 0x1b, 0xbf, 0x88, 0x24, 0x4d, 0x37, 0xad, 0x65, 0xd5, 0x5b, 0xe6, 0x79, + 0x9f, 0xdf, 0x33, 0xf3, 0x3e, 0x04, 0x9f, 0x2d, 0x5e, 0x18, 0x2a, 0x15, 0x5b, 0x94, 0xa1, 0xd0, + 0xb9, 0x00, 0x61, 0x58, 0x25, 0xf2, 0x58, 0x69, 0xd6, 0x0d, 0x78, 0x21, 0x99, 0x89, 0xe6, 0x22, + 0x2e, 0x53, 0x99, 0x27, 0xac, 0x9a, 0xf1, 0xb4, 0x98, 0xf3, 0x19, 0x4b, 0x44, 0x2e, 0x34, 0x07, + 0x11, 0xd3, 0x42, 0x2b, 0x50, 0x36, 0x59, 0xfb, 0x29, 0x2f, 0x24, 0xed, 0xfd, 0x74, 0xe3, 0x9f, + 0x1e, 0x27, 0x12, 0xe6, 0x65, 0x48, 0x23, 0x95, 0xb1, 0x44, 0x25, 0x8a, 0xb5, 0x58, 0x58, 0x9e, + 0xb7, 0xa7, 0xf6, 0xd0, 0x7e, 0xad, 0xe3, 0xa6, 0x4f, 0xfb, 0xeb, 0x33, 0x1e, 0xcd, 0x65, 0x2e, + 0xf4, 0x05, 0x2b, 0x16, 0x49, 0x23, 0x18, 0x96, 0x09, 0xe0, 0xac, 0xda, 0x7b, 0xc4, 0x94, 0xdd, + 0x46, 0xe9, 0x32, 0x07, 0x99, 0x89, 0x3d, 0xe0, 0xf9, 0xbf, 0x80, 0x66, 0x95, 0x8c, 0xef, 0x71, + 0x4f, 0x6e, 0xe3, 0x4a, 0x90, 0x29, 0x93, 0x39, 0x18, 0xd0, 0x7f, 0x42, 0x47, 0xdf, 0x06, 0x78, + 0xf2, 0x5e, 0x4b, 0xa5, 0x25, 0x5c, 0xbc, 0x4a, 0xb9, 0x31, 0xf6, 0x67, 0x3c, 0x6a, 0x56, 0x89, + 0x39, 0x70, 0x07, 0xb9, 0xc8, 0x1b, 0x9f, 0x3c, 0xa6, 0x7d, 0x8f, 0x37, 0xc9, 0xb4, 0x58, 0x24, + 0x8d, 0x60, 0x68, 0xe3, 0xa6, 0xd5, 0x8c, 0xbe, 0x0b, 0xbf, 0x88, 0x08, 0xde, 0x0a, 0xe0, 0xbe, + 0xbd, 0xbc, 0x3e, 0xb4, 0xea, 0xeb, 0x43, 0xdc, 0x6b, 
0xc1, 0x4d, 0xaa, 0xfd, 0x08, 0x0f, 0x2b, + 0x9e, 0x96, 0xc2, 0x19, 0xb8, 0xc8, 0x1b, 0xfa, 0x93, 0xce, 0x3c, 0xfc, 0xd0, 0x88, 0xc1, 0x7a, + 0x66, 0xbf, 0xc4, 0x93, 0x24, 0x55, 0x21, 0x4f, 0x4f, 0xc5, 0x39, 0x2f, 0x53, 0x70, 0x0e, 0x5c, + 0xe4, 0x8d, 0xfc, 0x87, 0x9d, 0x79, 0x72, 0xb6, 0x3d, 0x0c, 0x76, 0xbd, 0xf6, 0x33, 0x3c, 0x8e, + 0x85, 0x89, 0xb4, 0x2c, 0x40, 0xaa, 0xdc, 0xb9, 0xe3, 0x22, 0xef, 0xae, 0xff, 0xa0, 0x43, 0xc7, + 0xa7, 0xfd, 0x28, 0xd8, 0xf6, 0x1d, 0xfd, 0x40, 0xf8, 0xfe, 0x4e, 0x19, 0x6f, 0xa4, 0x01, 0xfb, + 0xd3, 0x5e, 0x21, 0xf4, 0xff, 0x0a, 0x69, 0xe8, 0xb6, 0x8e, 0x7b, 0xdd, 0xcd, 0xa3, 0x8d, 0xb2, + 0x55, 0x46, 0x80, 0x87, 0x12, 0x44, 0x66, 0x9c, 0x81, 0x7b, 0xe0, 0x8d, 0x4f, 0x8e, 0xe9, 0xdf, + 0xff, 0x59, 0xba, 0xf3, 0xbe, 0xbe, 0xbb, 0xd7, 0x4d, 0x46, 0xb0, 0x8e, 0xf2, 0xe9, 0x72, 0x45, + 0xac, 0xcb, 0x15, 0xb1, 0xae, 0x56, 0xc4, 0xfa, 0x5a, 0x13, 0xb4, 0xac, 0x09, 0xba, 0xac, 0x09, + 0xba, 0xaa, 0x09, 0xfa, 0x59, 0x13, 0xf4, 0xfd, 0x17, 0xb1, 0x3e, 0x8e, 0x36, 0x99, 0xbf, 0x03, + 0x00, 0x00, 0xff, 0xff, 0x44, 0x05, 0xba, 0x7b, 0x71, 0x03, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto new file mode 100644 index 000000000..7dc5cc841 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.scheduling.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// PriorityClass defines mapping from a priority class name to the priority +// integer value. The value can be any valid integer. +message PriorityClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The value of this priority class. This is the actual priority that pods + // receive when they have the name of this class in their pod spec. + optional int32 value = 2; + + // globalDefault specifies whether this PriorityClass should be considered as + // the default priority for pods that do not have any priority class. + // Only one PriorityClass can be marked as `globalDefault`. However, if more than + // one PriorityClasses exists with their `globalDefault` field set to true, + // the smallest value of such global default PriorityClasses will be used as the default priority. 
+ // +optional + optional bool globalDefault = 3; + + // description is an arbitrary string that usually provides guidelines on + // when this priority class should be used. + // +optional + optional string description = 4; +} + +// PriorityClassList is a collection of priority classes. +message PriorityClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of PriorityClasses + repeated PriorityClass items = 2; +} + diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/register.go b/vendor/k8s.io/api/scheduling/v1alpha1/register.go new file mode 100644 index 000000000..24689f0ad --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1alpha1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "scheduling.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PriorityClass{}, + &PriorityClassList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go new file mode 100644 index 000000000..21e3df0af --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityClass defines mapping from a priority class name to the priority +// integer value. The value can be any valid integer. +type PriorityClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The value of this priority class. This is the actual priority that pods + // receive when they have the name of this class in their pod spec. + Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` + + // globalDefault specifies whether this PriorityClass should be considered as + // the default priority for pods that do not have any priority class. + // Only one PriorityClass can be marked as `globalDefault`. However, if more than + // one PriorityClasses exists with their `globalDefault` field set to true, + // the smallest value of such global default PriorityClasses will be used as the default priority. + // +optional + GlobalDefault bool `json:"globalDefault,omitempty" protobuf:"bytes,3,opt,name=globalDefault"` + + // description is an arbitrary string that usually provides guidelines on + // when this priority class should be used. + // +optional + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityClassList is a collection of priority classes. +type PriorityClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of PriorityClasses + Items []PriorityClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..f406f4402 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_PriorityClass = map[string]string{ + "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", +} + +func (PriorityClass) SwaggerDoc() map[string]string { + return map_PriorityClass +} + +var map_PriorityClassList = map[string]string{ + "": "PriorityClassList is a collection of priority classes.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "items is the list of PriorityClasses", +} + +func (PriorityClassList) SwaggerDoc() map[string]string { + return map_PriorityClassList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..fe0c86040 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,84 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass. +func (in *PriorityClass) DeepCopy() *PriorityClass { + if in == nil { + return nil + } + out := new(PriorityClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PriorityClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList. +func (in *PriorityClassList) DeepCopy() *PriorityClassList { + if in == nil { + return nil + } + out := new(PriorityClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/util/umask.go b/vendor/k8s.io/api/scheduling/v1beta1/doc.go similarity index 74% rename from vendor/k8s.io/client-go/pkg/util/umask.go rename to vendor/k8s.io/api/scheduling/v1beta1/doc.go index 35ccce50b..f2dd1cfac 100644 --- a/vendor/k8s.io/client-go/pkg/util/umask.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/doc.go @@ -1,7 +1,5 @@ -// +build !windows - /* -Copyright 2014 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,12 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true -import ( - "syscall" -) - -func Umask(mask int) (old int, err error) { - return syscall.Umask(mask), nil -} +// +groupName=scheduling.k8s.io +package v1beta1 // import "k8s.io/api/scheduling/v1beta1" diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go new file mode 100644 index 000000000..c4fe85fe9 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go @@ -0,0 +1,641 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto + + It has these top-level messages: + PriorityClass + PriorityClassList +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func init() { + proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClass") + proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClassList") +} +func (m *PriorityClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + dAtA[i] = 0x18 + i++ + if m.GlobalDefault { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + return i, nil +} + +func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v 
>> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PriorityClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Value)) + n += 2 + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PriorityClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityClassList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PriorityClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value 
= 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobalDefault", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.GlobalDefault = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityClass{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 462 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xbf, 0x8e, 0xd3, 0x4c, + 0x14, 0xc5, 0x3d, 0xd9, 0x2f, 0xfa, 0x82, 0xa3, 0x48, 0x60, 0x84, 0x64, 0x45, 0xc2, 0x6b, 0x2d, + 0x8d, 0x0b, 0x76, 0x86, 0x2c, 0x7f, 0x84, 0x44, 0x67, 0x56, 0x20, 0x24, 0x10, 0xe0, 0x82, 0x02, + 0x51, 0x30, 0xb6, 0xef, 0x3a, 0x43, 0x6c, 0x8f, 0x35, 0x73, 0x6d, 0x69, 0x3b, 0x6a, 0x2a, 0x1e, + 0x8a, 0x22, 0xe5, 0x96, 0x5b, 0xad, 0x88, 0x79, 0x11, 0x64, 0xc7, 0xac, 0x13, 0xa2, 0x05, 0x3a, + 0xcf, 0xb9, 0xe7, 0x77, 0x66, 0xee, 0x91, 0xcd, 0x67, 0x8b, 0xc7, 0x9a, 0x0a, 0xc9, 0x16, 0x65, + 0x08, 0x2a, 0x07, 0x04, 0xcd, 0x2a, 0xc8, 0x63, 0xa9, 0x58, 0x37, 0xe0, 0x85, 0x60, 0x3a, 0x9a, + 0x43, 0x5c, 0xa6, 0x22, 0x4f, 0x58, 0x35, 0x0b, 0x01, 0xf9, 0x8c, 0x25, 0x90, 0x83, 0xe2, 0x08, + 0x31, 0x2d, 0x94, 0x44, 0x69, 0xdd, 0x5e, 0xdb, 0x29, 0x2f, 0x04, 0xed, 0xed, 0xb4, 0xb3, 0x4f, + 0x0f, 0x13, 0x81, 0xf3, 0x32, 0xa4, 0x91, 0xcc, 0x58, 
0x22, 0x13, 0xc9, 0x5a, 0x2a, 0x2c, 0x4f, + 0xda, 0x53, 0x7b, 0x68, 0xbf, 0xd6, 0x69, 0xd3, 0x07, 0xfd, 0xe5, 0x19, 0x8f, 0xe6, 0x22, 0x07, + 0x75, 0xca, 0x8a, 0x45, 0xd2, 0x08, 0x9a, 0x65, 0x80, 0x9c, 0x55, 0x3b, 0x6f, 0x98, 0xb2, 0xab, + 0x28, 0x55, 0xe6, 0x28, 0x32, 0xd8, 0x01, 0x1e, 0xfd, 0x0d, 0x68, 0x36, 0xc9, 0xf8, 0x0e, 0x77, + 0xff, 0x2a, 0xae, 0x44, 0x91, 0x32, 0x91, 0xa3, 0x46, 0xf5, 0x3b, 0x74, 0xf0, 0x65, 0x60, 0x4e, + 0xde, 0x28, 0x21, 0x95, 0xc0, 0xd3, 0xa7, 0x29, 0xd7, 0xda, 0xfa, 0x68, 0x8e, 0x9a, 0x55, 0x62, + 0x8e, 0xdc, 0x26, 0x2e, 0xf1, 0xc6, 0x47, 0xf7, 0x68, 0x5f, 0xe3, 0x65, 0x32, 0x2d, 0x16, 0x49, + 0x23, 0x68, 0xda, 0xb8, 0x69, 0x35, 0xa3, 0xaf, 0xc3, 0x4f, 0x10, 0xe1, 0x2b, 0x40, 0xee, 0x5b, + 0xcb, 0x8b, 0x7d, 0xa3, 0xbe, 0xd8, 0x37, 0x7b, 0x2d, 0xb8, 0x4c, 0xb5, 0xee, 0x98, 0xc3, 0x8a, + 0xa7, 0x25, 0xd8, 0x03, 0x97, 0x78, 0x43, 0x7f, 0xd2, 0x99, 0x87, 0xef, 0x1a, 0x31, 0x58, 0xcf, + 0xac, 0x27, 0xe6, 0x24, 0x49, 0x65, 0xc8, 0xd3, 0x63, 0x38, 0xe1, 0x65, 0x8a, 0xf6, 0x9e, 0x4b, + 0xbc, 0x91, 0x7f, 0xab, 0x33, 0x4f, 0x9e, 0x6f, 0x0e, 0x83, 0x6d, 0xaf, 0xf5, 0xd0, 0x1c, 0xc7, + 0xa0, 0x23, 0x25, 0x0a, 0x14, 0x32, 0xb7, 0xff, 0x73, 0x89, 0x77, 0xcd, 0xbf, 0xd9, 0xa1, 0xe3, + 0xe3, 0x7e, 0x14, 0x6c, 0xfa, 0x0e, 0xbe, 0x11, 0xf3, 0xc6, 0x56, 0x19, 0x2f, 0x85, 0x46, 0xeb, + 0xc3, 0x4e, 0x21, 0xf4, 0xdf, 0x0a, 0x69, 0xe8, 0xb6, 0x8e, 0xeb, 0xdd, 0xcd, 0xa3, 0x5f, 0xca, + 0x46, 0x19, 0x6f, 0xcd, 0xa1, 0x40, 0xc8, 0xb4, 0x3d, 0x70, 0xf7, 0xbc, 0xf1, 0xd1, 0x5d, 0xfa, + 0xc7, 0x5f, 0x96, 0x6e, 0x3d, 0xaf, 0xaf, 0xee, 0x45, 0x13, 0x11, 0xac, 0x93, 0xfc, 0xc3, 0xe5, + 0xca, 0x31, 0xce, 0x56, 0x8e, 0x71, 0xbe, 0x72, 0x8c, 0xcf, 0xb5, 0x43, 0x96, 0xb5, 0x43, 0xce, + 0x6a, 0x87, 0x9c, 0xd7, 0x0e, 0xf9, 0x5e, 0x3b, 0xe4, 0xeb, 0x0f, 0xc7, 0x78, 0xff, 0x7f, 0x17, + 0xf9, 0x33, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xb3, 0xc6, 0x7a, 0x6d, 0x03, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto new file mode 100644 index 000000000..fe7f21de8 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.scheduling.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// PriorityClass defines mapping from a priority class name to the priority +// integer value. The value can be any valid integer. +message PriorityClass { + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The value of this priority class. This is the actual priority that pods + // receive when they have the name of this class in their pod spec. + optional int32 value = 2; + + // globalDefault specifies whether this PriorityClass should be considered as + // the default priority for pods that do not have any priority class. + // Only one PriorityClass can be marked as `globalDefault`. However, if more than + // one PriorityClasses exists with their `globalDefault` field set to true, + // the smallest value of such global default PriorityClasses will be used as the default priority. + // +optional + optional bool globalDefault = 3; + + // description is an arbitrary string that usually provides guidelines on + // when this priority class should be used. + // +optional + optional string description = 4; +} + +// PriorityClassList is a collection of priority classes. +message PriorityClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of PriorityClasses + repeated PriorityClass items = 2; +} + diff --git a/vendor/k8s.io/api/scheduling/v1beta1/register.go b/vendor/k8s.io/api/scheduling/v1beta1/register.go new file mode 100644 index 000000000..fb26557bb --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1beta1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "scheduling.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PriorityClass{}, + &PriorityClassList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types.go b/vendor/k8s.io/api/scheduling/v1beta1/types.go new file mode 100644 index 000000000..a9aaa8665 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1beta1/types.go @@ -0,0 +1,66 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityClass defines mapping from a priority class name to the priority +// integer value. The value can be any valid integer. +type PriorityClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The value of this priority class. This is the actual priority that pods + // receive when they have the name of this class in their pod spec. + Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` + + // globalDefault specifies whether this PriorityClass should be considered as + // the default priority for pods that do not have any priority class. + // Only one PriorityClass can be marked as `globalDefault`. However, if more than + // one PriorityClasses exists with their `globalDefault` field set to true, + // the smallest value of such global default PriorityClasses will be used as the default priority. + // +optional + GlobalDefault bool `json:"globalDefault,omitempty" protobuf:"bytes,3,opt,name=globalDefault"` + + // description is an arbitrary string that usually provides guidelines on + // when this priority class should be used. + // +optional + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityClassList is a collection of priority classes. +type PriorityClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of PriorityClasses + Items []PriorityClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..c18f54a82 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_PriorityClass = map[string]string{ + "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", +} + +func (PriorityClass) SwaggerDoc() map[string]string { + return map_PriorityClass +} + +var map_PriorityClassList = map[string]string{ + "": "PriorityClassList is a collection of priority classes.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "items is the list of PriorityClasses", +} + +func (PriorityClassList) SwaggerDoc() map[string]string { + return map_PriorityClassList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..6f68e4ac5 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,84 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass. +func (in *PriorityClass) DeepCopy() *PriorityClass { + if in == nil { + return nil + } + out := new(PriorityClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList. +func (in *PriorityClassList) DeepCopy() *PriorityClassList { + if in == nil { + return nil + } + out := new(PriorityClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/doc.go b/vendor/k8s.io/api/settings/v1alpha1/doc.go similarity index 84% rename from vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/doc.go rename to vendor/k8s.io/api/settings/v1alpha1/doc.go index 86390b523..05a62c569 100644 --- a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/api/settings/v1alpha1/doc.go @@ -14,5 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + // +groupName=settings.k8s.io -package v1alpha1 +package v1alpha1 // import "k8s.io/api/settings/v1alpha1" diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go similarity index 65% rename from vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.pb.go rename to vendor/k8s.io/api/settings/v1alpha1/generated.pb.go index 88f85877a..86a7f89a5 100644 --- a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,14 +15,14 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. 
-// source: k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto // DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.proto + k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto It has these top-level messages: PodPreset @@ -35,7 +35,7 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" import strings "strings" import reflect "reflect" @@ -49,7 +49,9 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *PodPreset) Reset() { *m = PodPreset{} } func (*PodPreset) ProtoMessage() {} @@ -64,37 +66,37 @@ func (*PodPresetSpec) ProtoMessage() {} func (*PodPresetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func init() { - proto.RegisterType((*PodPreset)(nil), "k8s.io.client-go.pkg.apis.settings.v1alpha1.PodPreset") - proto.RegisterType((*PodPresetList)(nil), "k8s.io.client-go.pkg.apis.settings.v1alpha1.PodPresetList") - proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.client-go.pkg.apis.settings.v1alpha1.PodPresetSpec") + proto.RegisterType((*PodPreset)(nil), "k8s.io.api.settings.v1alpha1.PodPreset") + proto.RegisterType((*PodPresetList)(nil), "k8s.io.api.settings.v1alpha1.PodPresetList") + proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.api.settings.v1alpha1.PodPresetSpec") } -func (m *PodPreset) Marshal() (data []byte, err error) { +func (m *PodPreset) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodPreset) MarshalTo(data []byte) (int, error) { +func (m *PodPreset) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -102,35 +104,35 @@ func (m *PodPreset) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *PodPresetList) Marshal() (data []byte, err error) { +func (m *PodPresetList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodPresetList) MarshalTo(data []byte) (int, error) { +func (m *PodPresetList) MarshalTo(dAtA []byte) (int, error) { 
var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n3 if len(m.Items) > 0 { for _, msg := range m.Items { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -140,35 +142,35 @@ func (m *PodPresetList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *PodPresetSpec) Marshal() (data []byte, err error) { +func (m *PodPresetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *PodPresetSpec) MarshalTo(data []byte) (int, error) { +func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n4, err := m.Selector.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n4, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n4 if len(m.Env) > 0 { for _, msg := range m.Env { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -177,10 +179,10 @@ func (m *PodPresetSpec) MarshalTo(data []byte) (int, error) { } if len(m.EnvFrom) > 0 { for _, msg := range m.EnvFrom { - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -189,10 +191,10 @@ func (m *PodPresetSpec) MarshalTo(data []byte) (int, error) { } if len(m.Volumes) > 0 { for _, msg := range m.Volumes { - data[i] = 0x22 + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -201,10 +203,10 @@ func (m *PodPresetSpec) MarshalTo(data []byte) (int, error) { } if len(m.VolumeMounts) > 0 { for _, msg := range m.VolumeMounts { - data[i] = 0x2a + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -214,31 +216,31 @@ func (m *PodPresetSpec) MarshalTo(data []byte) (int, error) { return i, nil } -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = 
uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintGenerated(data []byte, offset int, v uint64) int { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } func (m *PodPreset) Size() (n int) { @@ -338,10 +340,10 @@ func (this *PodPresetSpec) String() string { } s := strings.Join([]string{`&PodPresetSpec{`, `Selector:` + strings.Replace(strings.Replace(this.Selector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_kubernetes_pkg_api_v1.EnvVar", 1), `&`, ``, 1) + `,`, - `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "k8s_io_kubernetes_pkg_api_v1.EnvFromSource", 1), `&`, ``, 1) + `,`, - `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "k8s_io_kubernetes_pkg_api_v1.Volume", 1), `&`, ``, 1) + `,`, - `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "k8s_io_kubernetes_pkg_api_v1.VolumeMount", 1), `&`, ``, 1) + `,`, + `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_api_core_v1.EnvVar", 1), `&`, ``, 1) + `,`, + `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "k8s_io_api_core_v1.EnvFromSource", 1), `&`, ``, 1) + `,`, + `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "k8s_io_api_core_v1.Volume", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "k8s_io_api_core_v1.VolumeMount", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -354,8 +356,8 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *PodPreset) Unmarshal(data []byte) error { - l := len(data) +func (m *PodPreset) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -367,7 +369,7 @@ func (m *PodPreset) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -395,7 +397,7 @@ func (m *PodPreset) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -409,7 +411,7 @@ func (m *PodPreset) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := 
m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -425,7 +427,7 @@ func (m *PodPreset) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -439,13 +441,13 @@ func (m *PodPreset) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -464,8 +466,8 @@ func (m *PodPreset) Unmarshal(data []byte) error { } return nil } -func (m *PodPresetList) Unmarshal(data []byte) error { - l := len(data) +func (m *PodPresetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -477,7 +479,7 @@ func (m *PodPresetList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -505,7 +507,7 @@ func (m *PodPresetList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -519,7 +521,7 @@ func (m *PodPresetList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -535,7 +537,7 @@ func (m *PodPresetList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -550,13 +552,13 @@ func (m *PodPresetList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Items = append(m.Items, PodPreset{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -575,8 +577,8 @@ func (m *PodPresetList) Unmarshal(data []byte) error { } return nil } -func (m *PodPresetSpec) Unmarshal(data []byte) error { - l := len(data) +func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -588,7 +590,7 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -616,7 +618,7 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -630,7 +632,7 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -646,7 +648,7 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return 
io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -660,8 +662,8 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, k8s_io_kubernetes_pkg_api_v1.EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + m.Env = append(m.Env, k8s_io_api_core_v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -677,7 +679,7 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -691,8 +693,8 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = append(m.EnvFrom, k8s_io_kubernetes_pkg_api_v1.EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + m.EnvFrom = append(m.EnvFrom, k8s_io_api_core_v1.EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -708,7 +710,7 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -722,8 +724,8 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, k8s_io_kubernetes_pkg_api_v1.Volume{}) - if err := m.Volumes[len(m.Volumes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + m.Volumes = append(m.Volumes, k8s_io_api_core_v1.Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -739,7 +741,7 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -753,14 +755,14 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeMounts = append(m.VolumeMounts, k8s_io_kubernetes_pkg_api_v1.VolumeMount{}) - if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + m.VolumeMounts = append(m.VolumeMounts, k8s_io_api_core_v1.VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -779,8 +781,8 @@ func (m *PodPresetSpec) Unmarshal(data []byte) error { } return nil } -func skipGenerated(data []byte) (n int, err error) { - l := len(data) +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -791,7 +793,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -809,7 +811,7 @@ func skipGenerated(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -826,7 +828,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, 
io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -849,7 +851,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -860,7 +862,7 @@ func skipGenerated(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipGenerated(data[start:]) + next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } @@ -884,41 +886,45 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -var fileDescriptorGenerated = []byte{ - // 550 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x92, 0x41, 0x8b, 0xd3, 0x40, - 0x14, 0xc7, 0x9b, 0x6d, 0x4b, 0xeb, 0xb4, 0x8b, 0x12, 0x3c, 0x84, 0x1e, 0xb2, 0x4b, 0xf1, 0xb0, - 0xea, 0x3a, 0xb1, 0xab, 0xa8, 0xa0, 0xa7, 0xc8, 0x0a, 0x82, 0xcb, 0x2e, 0x29, 0xf4, 0x20, 0x2b, - 0x38, 0x4d, 0x9f, 0x69, 0x6c, 0x93, 0x09, 0x33, 0x93, 0xa0, 0x37, 0x3f, 0x82, 0x5f, 0x4a, 0x28, - 0xe8, 0x61, 0x8f, 0x9e, 0x16, 0x5b, 0xbf, 0x88, 0xcc, 0x74, 0xd2, 0x44, 0xba, 0x65, 0xab, 0xb7, - 0x79, 0x8f, 0xf7, 0xff, 0xbd, 0xff, 0x3f, 0x2f, 0xe8, 0xc5, 0xe4, 0x19, 0xc7, 0x21, 0x75, 0x26, - 0xe9, 0x10, 0x58, 0x0c, 0x02, 0xb8, 0x93, 0x4c, 0x02, 0x87, 0x24, 0x21, 0x77, 0x38, 0x08, 0x11, - 0xc6, 0x01, 0x77, 0xb2, 0x1e, 0x99, 0x26, 0x63, 0xd2, 0x73, 0x02, 0x88, 0x81, 0x11, 0x01, 0x23, - 0x9c, 0x30, 0x2a, 0xa8, 0x79, 0xb8, 0x54, 0xe3, 0x42, 0x8d, 0x93, 0x49, 0x80, 0xa5, 0x1a, 0xe7, - 0x6a, 0x9c, 0xab, 0x3b, 0x0f, 0x82, 0x50, 0x8c, 0xd3, 0x21, 0xf6, 0x69, 0xe4, 0x04, 0x34, 0xa0, - 0x8e, 0x82, 0x0c, 0xd3, 0x0f, 0xaa, 0x52, 0x85, 0x7a, 0x2d, 0xe1, 0x9d, 0xc7, 0xda, 0x1a, 0x49, - 0xc2, 0x88, 0xf8, 0xe3, 0x30, 0x06, 0xf6, 0xb9, 0x30, 0x17, 0x81, 0x20, 0x4e, 0xb6, 0x66, 0xa9, - 0xe3, 0x6c, 0x52, 0xb1, 0x34, 0x16, 0x61, 0x04, 0x6b, 0x82, 0x27, 0xd7, 0x09, 0xb8, 0x3f, 0x86, - 0x88, 0xac, 0xe9, 0x4a, 0xf6, 0x38, 0xb0, 0x0c, 0x58, 0xe1, 0x0d, 0x3e, 0x91, 0x28, 0x99, 0xc2, - 0x55, 0xf6, 0x0e, 0x37, 0x7e, 0xef, 0x2b, 0xa6, 0xbb, 0x3f, 0x0c, 0x74, 0xe3, 0x8c, 0x8e, 0xce, - 0x18, 0x70, 0x10, 0xe6, 0x7b, 0xd4, 0x94, 0xa9, 0x47, 0x44, 0x10, 0xcb, 0xd8, 0x37, 0x0e, 0x5a, - 0x47, 0x0f, 0xb1, 0x3e, 0x40, 0xd9, 0x7c, 0x71, 0x02, 0x39, 0x8d, 0xb3, 0x1e, 0x3e, 0x1d, 0x7e, - 0x04, 0x5f, 0x9c, 0x80, 0x20, 0xae, 0x39, 0xbb, 0xdc, 0xab, 0x2c, 0x2e, 0xf7, 0x50, 0xd1, 0xf3, - 0x56, 0x54, 0xf3, 0x1d, 0xaa, 0xf1, 0x04, 0x7c, 0x6b, 0x47, 0xd1, 0x9f, 0xe3, 0x7f, 0x39, 0x2f, - 0x5e, 0x19, 0xed, 0x27, 0xe0, 0xbb, 0x6d, 0xbd, 0xa8, 0x26, 0x2b, 0x4f, 0x61, 0xbb, 0xdf, 0x0d, - 0xb4, 0xbb, 0x9a, 0x7a, 0x13, 0x72, 0x61, 0x9e, 0xaf, 0x45, 0xc2, 0xdb, 0x45, 0x92, 0x6a, 0x15, - 0xe8, 0x96, 0xde, 0xd3, 0xcc, 0x3b, 0xa5, 0x38, 0xe7, 0xa8, 0x1e, 0x0a, 0x88, 0xb8, 0xb5, 0xb3, - 0x5f, 0x3d, 0x68, 0x1d, 0x3d, 0xfd, 0xcf, 0x3c, 0xee, 0xae, 0xde, 0x51, 0x7f, 0x2d, 0x69, 0xde, - 0x12, 0xda, 0xfd, 0x56, 0x2d, 0xa5, 0x91, 0x29, 0x4d, 0x82, 0x9a, 0x1c, 0xa6, 0xe0, 0x0b, 0xca, - 0x74, 0x9a, 0x47, 0x5b, 0xa6, 0x21, 0x43, 0x98, 0xf6, 0xb5, 0xb4, 0x88, 0x94, 0x77, 0xbc, 0x15, - 0xd6, 0x7c, 0x89, 0xaa, 0x10, 0x67, 0x3a, 0xd0, 0x9d, 0xcd, 0x81, 0x24, 0xf5, 0x38, 0xce, 0x06, - 0x84, 0xb9, 0x2d, 0x8d, 0xab, 0x1e, 0xc7, 0x99, 0x27, 0xd5, 0xe6, 0x00, 0x35, 0x20, 0xce, 0x5e, - 0x31, 0x1a, 0x59, 0x55, 0x05, 0xba, 0x7f, 0x2d, 0x48, 0x0e, 0xf7, 0x69, 0xca, 0x7c, 0x70, 0x6f, - 0x6a, 0x5e, 0x43, 0xb7, 0xbd, 0x1c, 0x66, 0x9e, 0xa2, 0x46, 0x46, 
0xa7, 0x69, 0x04, 0xdc, 0xaa, - 0x6d, 0x63, 0x70, 0xa0, 0x86, 0x0b, 0xe0, 0xb2, 0xe6, 0x5e, 0x4e, 0x31, 0x7d, 0xd4, 0x5e, 0x3e, - 0x4f, 0x68, 0x1a, 0x0b, 0x6e, 0xd5, 0x15, 0xf5, 0xee, 0x36, 0x54, 0xa5, 0x70, 0x6f, 0x6b, 0x74, - 0xbb, 0xd4, 0xe4, 0xde, 0x5f, 0x50, 0xf7, 0xde, 0x6c, 0x6e, 0x57, 0x2e, 0xe6, 0x76, 0xe5, 0xe7, - 0xdc, 0xae, 0x7c, 0x59, 0xd8, 0xc6, 0x6c, 0x61, 0x1b, 0x17, 0x0b, 0xdb, 0xf8, 0xb5, 0xb0, 0x8d, - 0xaf, 0xbf, 0xed, 0xca, 0xdb, 0x66, 0xfe, 0x4f, 0xfc, 0x09, 0x00, 0x00, 0xff, 0xff, 0x91, 0x84, - 0x03, 0x10, 0x2f, 0x05, 0x00, 0x00, +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 556 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x4e, + 0x1c, 0xc7, 0xe9, 0xb2, 0x04, 0xfe, 0x03, 0x9b, 0xbf, 0x69, 0x3c, 0x34, 0xc4, 0x94, 0x95, 0x8b, + 0x9b, 0x18, 0x67, 0x64, 0xd7, 0x18, 0xbd, 0x36, 0xc1, 0xc4, 0x04, 0xe2, 0xa6, 0x24, 0x9b, 0x68, + 0x3c, 0x38, 0x94, 0x9f, 0xa5, 0x42, 0x67, 0x9a, 0x99, 0x69, 0x13, 0x6f, 0x3e, 0x82, 0x2f, 0xe0, + 0x93, 0xe8, 0x03, 0x70, 0xdc, 0xe3, 0x9e, 0x36, 0x52, 0x5f, 0xc4, 0x4c, 0x99, 0x02, 0x8a, 0x28, + 0xb7, 0xce, 0x8f, 0xef, 0xe7, 0x33, 0xbf, 0x2f, 0x2d, 0xea, 0xcf, 0x9e, 0x49, 0x1c, 0x71, 0x32, + 0x4b, 0xc7, 0x20, 0x18, 0x28, 0x90, 0x24, 0x03, 0x36, 0xe1, 0x82, 0x98, 0x1f, 0x68, 0x12, 0x11, + 0x09, 0x4a, 0x45, 0x2c, 0x94, 0x24, 0xeb, 0xd1, 0x79, 0x32, 0xa5, 0x3d, 0x12, 0x02, 0x03, 0x41, + 0x15, 0x4c, 0x70, 0x22, 0xb8, 0xe2, 0xf6, 0xbd, 0x55, 0x1a, 0xd3, 0x24, 0xc2, 0x65, 0x1a, 0x97, + 0xe9, 0xf6, 0xa3, 0x30, 0x52, 0xd3, 0x74, 0x8c, 0x03, 0x1e, 0x93, 0x90, 0x87, 0x9c, 0x14, 0xd0, + 0x38, 0x7d, 0x5f, 0x9c, 0x8a, 0x43, 0xf1, 0xb4, 0x92, 0xb5, 0xbb, 0x5b, 0x57, 0x07, 0x5c, 0x00, + 0xc9, 0x76, 0x2e, 0x6c, 0x3f, 0xd9, 0x64, 0x62, 0x1a, 0x4c, 0x23, 0x06, 0xe2, 0x23, 0x49, 0x66, + 0xa1, 0x1e, 0x48, 0x12, 0x83, 0xa2, 0x7f, 0xa2, 0xc8, 0x3e, 0x4a, 0xa4, 0x4c, 0x45, 0x31, 0xec, + 0x00, 0x4f, 0xff, 0x05, 0xc8, 0x60, 0x0a, 0x31, 0xdd, 0xe1, 0x2e, 0xf6, 0x71, 0xa9, 0x8a, 0xe6, + 0x24, 0x62, 0x4a, 0x2a, 0xf1, 0x3b, 0xd4, 0xfd, 0x66, 0xa1, 0xff, 0x2e, 0xf9, 0xe4, 0x52, 0x80, + 0x04, 0x65, 0xbf, 0x43, 0x0d, 0x5d, 0x63, 0x42, 0x15, 0x75, 0xac, 0x53, 0xeb, 0xac, 0x79, 0xfe, + 0x18, 0x6f, 0xfe, 0xe5, 0xb5, 0x15, 0x27, 0xb3, 0x50, 0x0f, 0x24, 0xd6, 0x69, 0x9c, 0xf5, 0xf0, + 0xab, 0xf1, 0x07, 0x08, 0xd4, 0x10, 0x14, 0xf5, 0xec, 0xc5, 0x6d, 0xa7, 0x92, 0xdf, 0x76, 0xd0, + 0x66, 0xe6, 0xaf, 0xad, 0xf6, 0x10, 0x1d, 0xcb, 0x04, 0x02, 0xe7, 0xa8, 0xb0, 0x3f, 0xc4, 0x7f, + 0x7b, 0x87, 0x78, 0xbd, 0xd8, 0x28, 0x81, 0xc0, 0x6b, 0x19, 0xf1, 0xb1, 0x3e, 0xf9, 0x85, 0xa6, + 0xfb, 0xd5, 0x42, 0x27, 0xeb, 0xd4, 0x20, 0x92, 0xca, 0x7e, 0xbb, 0x53, 0x01, 0x1f, 0x56, 0x41, + 0xd3, 0x45, 0x81, 0x3b, 0xe6, 0x9e, 0x46, 0x39, 0xd9, 0x5a, 0x7f, 0x80, 0x6a, 0x91, 0x82, 0x58, + 0x3a, 0x47, 0xa7, 0xd5, 0xb3, 0xe6, 0xf9, 0x83, 0x03, 0xf7, 0xf7, 0x4e, 0x8c, 0xb3, 0xf6, 0x52, + 0xd3, 0xfe, 0x4a, 0xd2, 0xfd, 0x52, 0xdd, 0xda, 0x5e, 0xb7, 0xb2, 0x29, 0x6a, 0x48, 0x98, 0x43, + 0xa0, 0xb8, 0x30, 0xdb, 0x5f, 0x1c, 0xb8, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xd0, 0x4d, 0x85, 0x72, + 0xe2, 0xaf, 0xb5, 0xf6, 0x73, 0x54, 0x05, 0x96, 0x99, 0x02, 0xed, 0xed, 0x02, 0xfa, 0xbb, 0xd7, + 0xae, 0x3e, 0xcb, 0xae, 0xa8, 0xf0, 0x9a, 0x46, 0x52, 0xed, 0xb3, 0xcc, 0xd7, 0x8c, 0x3d, 0x40, + 0x75, 0x60, 0xd9, 0x0b, 0xc1, 0x63, 0xa7, 0x5a, 0xe0, 0xf7, 0xf7, 0xe0, 0x3a, 0x32, 0xe2, 0xa9, + 0x08, 0xc0, 0xfb, 0xdf, 
0x58, 0xea, 0x66, 0xec, 0x97, 0x0a, 0xbb, 0x8f, 0xea, 0x19, 0x9f, 0xa7, + 0x31, 0x48, 0xe7, 0x78, 0xff, 0x32, 0x57, 0x45, 0x64, 0xa3, 0x59, 0x9d, 0xa5, 0x5f, 0xb2, 0xf6, + 0x6b, 0xd4, 0x5a, 0x3d, 0x0e, 0x79, 0xca, 0x94, 0x74, 0x6a, 0x85, 0xab, 0xb3, 0xdf, 0x55, 0xe4, + 0xbc, 0xbb, 0x46, 0xd8, 0xda, 0x1a, 0x4a, 0xff, 0x17, 0x95, 0x87, 0x17, 0x4b, 0xb7, 0x72, 0xbd, + 0x74, 0x2b, 0x37, 0x4b, 0xb7, 0xf2, 0x29, 0x77, 0xad, 0x45, 0xee, 0x5a, 0xd7, 0xb9, 0x6b, 0xdd, + 0xe4, 0xae, 0xf5, 0x3d, 0x77, 0xad, 0xcf, 0x3f, 0xdc, 0xca, 0x9b, 0x46, 0xf9, 0xbe, 0x7f, 0x06, + 0x00, 0x00, 0xff, 0xff, 0x0e, 0x35, 0x98, 0xb5, 0xd9, 0x04, 0x00, 0x00, } diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.proto b/vendor/k8s.io/api/settings/v1alpha1/generated.proto similarity index 76% rename from vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.proto rename to vendor/k8s.io/api/settings/v1alpha1/generated.proto index b5a80fa63..887d2fa1b 100644 --- a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,13 +19,13 @@ limitations under the License. syntax = 'proto2'; -package k8s.io.kubernetes.pkg.apis.settings.v1alpha1; +package k8s.io.api.settings.v1alpha1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1alpha1"; @@ -43,7 +43,7 @@ message PodPreset { // PodPresetList is a list of PodPreset objects. message PodPresetList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -51,7 +51,7 @@ message PodPresetList { repeated PodPreset items = 2; } -// PodPresetSpec is a description of a pod injection policy. +// PodPresetSpec is a description of a pod preset. message PodPresetSpec { // Selector is a label query over a set of resources, in this case pods. // Required. @@ -59,18 +59,18 @@ message PodPresetSpec { // Env defines the collection of EnvVar to inject into containers. // +optional - repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 2; + repeated k8s.io.api.core.v1.EnvVar env = 2; // EnvFrom defines the collection of EnvFromSource to inject into containers. // +optional - repeated k8s.io.kubernetes.pkg.api.v1.EnvFromSource envFrom = 3; + repeated k8s.io.api.core.v1.EnvFromSource envFrom = 3; // Volumes defines the collection of Volume to inject into the pod. // +optional - repeated k8s.io.kubernetes.pkg.api.v1.Volume volumes = 4; + repeated k8s.io.api.core.v1.Volume volumes = 4; // VolumeMounts defines the collection of VolumeMount to inject into containers. 
// +optional - repeated k8s.io.kubernetes.pkg.api.v1.VolumeMount volumeMounts = 5; + repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 5; } diff --git a/vendor/k8s.io/api/settings/v1alpha1/register.go b/vendor/k8s.io/api/settings/v1alpha1/register.go new file mode 100644 index 000000000..eee278d95 --- /dev/null +++ b/vendor/k8s.io/api/settings/v1alpha1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "settings.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PodPreset{}, + &PodPresetList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types.go b/vendor/k8s.io/api/settings/v1alpha1/types.go similarity index 87% rename from vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types.go rename to vendor/k8s.io/api/settings/v1alpha1/types.go index 5b0c5d19f..506aacf4a 100644 --- a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types.go +++ b/vendor/k8s.io/api/settings/v1alpha1/types.go @@ -17,11 +17,12 @@ limitations under the License. package v1alpha1 import ( + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api/v1" ) -// +genclient=true +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodPreset is a policy resource that defines additional runtime // requirements for a Pod. @@ -34,7 +35,7 @@ type PodPreset struct { Spec PodPresetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } -// PodPresetSpec is a description of a pod injection policy. +// PodPresetSpec is a description of a pod preset. type PodPresetSpec struct { // Selector is a label query over a set of resources, in this case pods. // Required. 
@@ -54,11 +55,13 @@ type PodPresetSpec struct { VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty" protobuf:"bytes,5,rep,name=volumeMounts"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // PodPresetList is a list of PodPreset objects. type PodPresetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go similarity index 88% rename from vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go index 5b4dc665d..508c452f1 100644 --- a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ package v1alpha1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PodPreset = map[string]string{ "": "PodPreset is a policy resource that defines additional runtime requirements for a Pod.", } @@ -37,7 +37,7 @@ func (PodPreset) SwaggerDoc() map[string]string { var map_PodPresetList = map[string]string{ "": "PodPresetList is a list of PodPreset objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -46,7 +46,7 @@ func (PodPresetList) SwaggerDoc() map[string]string { } var map_PodPresetSpec = map[string]string{ - "": "PodPresetSpec is a description of a pod injection policy.", + "": "PodPresetSpec is a description of a pod preset.", "selector": "Selector is a label query over a set of resources, in this case pods. Required.", "env": "Env defines the collection of EnvVar to inject into containers.", "envFrom": "EnvFrom defines the collection of EnvFromSource to inject into containers.", diff --git a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..6397a88ab --- /dev/null +++ b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,131 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodPreset) DeepCopyInto(out *PodPreset) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPreset. +func (in *PodPreset) DeepCopy() *PodPreset { + if in == nil { + return nil + } + out := new(PodPreset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodPreset) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodPresetList) DeepCopyInto(out *PodPresetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodPreset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPresetList. +func (in *PodPresetList) DeepCopy() *PodPresetList { + if in == nil { + return nil + } + out := new(PodPresetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodPresetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodPresetSpec) DeepCopyInto(out *PodPresetSpec) { + *out = *in + in.Selector.DeepCopyInto(&out.Selector) + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]v1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPresetSpec. 
+func (in *PodPresetSpec) DeepCopy() *PodPresetSpec { + if in == nil { + return nil + } + out := new(PodPresetSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go similarity index 91% rename from vendor/k8s.io/client-go/pkg/apis/storage/v1/doc.go rename to vendor/k8s.io/api/storage/v1/doc.go index 2b8d05cfd..8f4a4045c 100644 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1/doc.go +++ b/vendor/k8s.io/api/storage/v1/doc.go @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:deepcopy-gen=package // +groupName=storage.k8s.io +// +k8s:openapi-gen=true package v1 diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go new file mode 100644 index 000000000..b13b03fc7 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -0,0 +1,980 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto + + It has these top-level messages: + StorageClass + StorageClassList +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func init() { + proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") + proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") +} +func (m *StorageClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) + i += copy(dAtA[i:], m.Provisioner) + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for _, k := range keysForParameters { + dAtA[i] = 0x1a + i++ + v := m.Parameters[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.ReclaimPolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i += copy(dAtA[i:], *m.ReclaimPolicy) + } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.AllowVolumeExpansion != nil { + dAtA[i] = 0x30 + i++ + if *m.AllowVolumeExpansion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.VolumeBindingMode != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i += copy(dAtA[i:], *m.VolumeBindingMode) + } + if len(m.AllowedTopologies) > 0 { + for _, msg := range m.AllowedTopologies { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StorageClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 
0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *StorageClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Provisioner) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ReclaimPolicy != nil { + l = len(*m.ReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AllowVolumeExpansion != nil { + n += 2 + } + if m.VolumeBindingMode != nil { + l = len(*m.VolumeBindingMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AllowedTopologies) > 0 { + for _, e := range m.AllowedTopologies { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *StorageClass) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&StorageClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, + `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, + `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, + 
`AllowedTopologies:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedTopologies), "TopologySelectorTerm", "k8s_io_api_core_v1.TopologySelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageClassList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StorageClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provisioner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Parameters[mapkey] = mapvalue + } else { + var mapvalue string + m.Parameters[mapkey] = mapvalue + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) + m.ReclaimPolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + 
m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := VolumeBindingMode(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedTopologies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) + if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 677 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xc1, 0x6e, 0xd3, 0x4a, + 0x14, 0x8d, 0x93, 0x97, 0xbe, 0x74, 0xd2, 0xea, 0x25, 0x7e, 0x45, 0x32, 0x59, 0x38, 0x51, 0xd9, + 0x44, 0x95, 0x18, 0x37, 0x6d, 0x41, 0x15, 0x12, 0x48, 0x35, 0xaa, 0x04, 0x52, 0xab, 0x46, 0x6e, + 0x55, 0x21, 0xc4, 0x82, 0x89, 0x73, 0x71, 0x87, 0xd8, 0x33, 0x66, 0x66, 0x6c, 0xc8, 0x8e, 0x1f, + 0x40, 0xe2, 0x7b, 0xf8, 0x82, 0x2e, 0xbb, 0xec, 0x2a, 0xa2, 0xe6, 0x2f, 0xba, 0x42, 0x76, 0x42, + 0xe3, 0x26, 0xa9, 0xe8, 0x6e, 0xe6, 0xdc, 0x73, 0xce, 0x9d, 0x3b, 0xf7, 0x5e, 0xf4, 0x62, 0xb0, + 0x2b, 0x31, 0xe5, 0xd6, 0x20, 0xea, 0x81, 0x60, 0xa0, 0x40, 0x5a, 0x31, 0xb0, 0x3e, 0x17, 0xd6, + 0x24, 0x40, 0x42, 0x6a, 0x49, 0xc5, 0x05, 0xf1, 0xc0, 0x8a, 0x3b, 0x96, 0x07, 0x0c, 0x04, 0x51, + 0xd0, 0xc7, 0xa1, 0xe0, 0x8a, 0xeb, 
0x0f, 0xc6, 0x34, 0x4c, 0x42, 0x8a, 0x27, 0x34, 0x1c, 0x77, + 0x1a, 0x8f, 0x3d, 0xaa, 0xce, 0xa2, 0x1e, 0x76, 0x79, 0x60, 0x79, 0xdc, 0xe3, 0x56, 0xc6, 0xee, + 0x45, 0x1f, 0xb2, 0x5b, 0x76, 0xc9, 0x4e, 0x63, 0x97, 0xc6, 0x7a, 0x2e, 0x99, 0xcb, 0xc5, 0xa2, + 0x4c, 0x8d, 0x8d, 0x85, 0x0f, 0xea, 0x81, 0x22, 0xf3, 0xdc, 0x9d, 0x29, 0x37, 0x20, 0xee, 0x19, + 0x65, 0x20, 0x86, 0x56, 0x38, 0xf0, 0x52, 0x40, 0x5a, 0x01, 0x28, 0xb2, 0x28, 0x83, 0x75, 0x97, + 0x4a, 0x44, 0x4c, 0xd1, 0x00, 0xe6, 0x04, 0x4f, 0xff, 0x26, 0x90, 0xee, 0x19, 0x04, 0x64, 0x4e, + 0xb7, 0x7d, 0x97, 0x2e, 0x52, 0xd4, 0xb7, 0x28, 0x53, 0x52, 0x89, 0x59, 0xd1, 0xfa, 0xb7, 0x25, + 0xb4, 0x72, 0x3c, 0xae, 0xfb, 0xa5, 0x4f, 0xa4, 0xd4, 0xdf, 0xa3, 0x4a, 0x5a, 0x49, 0x9f, 0x28, + 0x62, 0x68, 0x2d, 0xad, 0x5d, 0xdd, 0xda, 0xc4, 0xd3, 0x6e, 0xdc, 0x18, 0xe3, 0x70, 0xe0, 0xa5, + 0x80, 0xc4, 0x29, 0x1b, 0xc7, 0x1d, 0x7c, 0xd4, 0xfb, 0x08, 0xae, 0x3a, 0x04, 0x45, 0x6c, 0xfd, + 0x7c, 0xd4, 0x2c, 0x24, 0xa3, 0x26, 0x9a, 0x62, 0xce, 0x8d, 0xab, 0xfe, 0x04, 0x55, 0x43, 0xc1, + 0x63, 0x2a, 0x29, 0x67, 0x20, 0x8c, 0x62, 0x4b, 0x6b, 0x2f, 0xdb, 0xff, 0x4f, 0x24, 0xd5, 0xee, + 0x34, 0xe4, 0xe4, 0x79, 0xba, 0x87, 0x50, 0x48, 0x04, 0x09, 0x40, 0x81, 0x90, 0x46, 0xa9, 0x55, + 0x6a, 0x57, 0xb7, 0xb6, 0xf1, 0xc2, 0x41, 0xc1, 0xf9, 0x8a, 0x70, 0xf7, 0x46, 0xb5, 0xcf, 0x94, + 0x18, 0x4e, 0x5f, 0x37, 0x0d, 0x38, 0x39, 0x6b, 0x7d, 0x80, 0x56, 0x05, 0xb8, 0x3e, 0xa1, 0x41, + 0x97, 0xfb, 0xd4, 0x1d, 0x1a, 0xff, 0x64, 0x2f, 0xdc, 0x4f, 0x46, 0xcd, 0x55, 0x27, 0x1f, 0xb8, + 0x1e, 0x35, 0x37, 0xe7, 0x47, 0x0c, 0x77, 0x41, 0x48, 0x2a, 0x15, 0x30, 0x75, 0xca, 0xfd, 0x28, + 0x80, 0x5b, 0x1a, 0xe7, 0xb6, 0xb7, 0xbe, 0x83, 0x56, 0x02, 0x1e, 0x31, 0x75, 0x14, 0x2a, 0xca, + 0x99, 0x34, 0xca, 0xad, 0x52, 0x7b, 0xd9, 0xae, 0x25, 0xa3, 0xe6, 0xca, 0x61, 0x0e, 0x77, 0x6e, + 0xb1, 0xf4, 0x03, 0xb4, 0x46, 0x7c, 0x9f, 0x7f, 0x1e, 0x27, 0xd8, 0xff, 0x12, 0x12, 0x96, 0xfe, + 0x92, 0xb1, 0xd4, 0xd2, 0xda, 0x15, 0xdb, 0x48, 0x46, 0xcd, 0xb5, 0xbd, 0x05, 0x71, 0x67, 0xa1, + 0x4a, 0x7f, 0x83, 0xea, 0x71, 0x06, 0xd9, 0x94, 0xf5, 0x29, 0xf3, 0x0e, 0x79, 0x1f, 0x8c, 0x7f, + 0xb3, 0xa2, 0x37, 0x92, 0x51, 0xb3, 0x7e, 0x3a, 0x1b, 0xbc, 0x5e, 0x04, 0x3a, 0xf3, 0x26, 0xfa, + 0x27, 0x54, 0xcf, 0x32, 0x42, 0xff, 0x84, 0x87, 0xdc, 0xe7, 0x1e, 0x05, 0x69, 0x54, 0xb2, 0xd6, + 0xb5, 0xf3, 0xad, 0x4b, 0xbf, 0x2e, 0xed, 0xdb, 0x84, 0x35, 0x3c, 0x06, 0x1f, 0x5c, 0xc5, 0xc5, + 0x09, 0x88, 0xc0, 0x7e, 0x38, 0xe9, 0x57, 0x7d, 0x6f, 0xd6, 0xca, 0x99, 0x77, 0x6f, 0x3c, 0x47, + 0xff, 0xcd, 0x34, 0x5c, 0xaf, 0xa1, 0xd2, 0x00, 0x86, 0xd9, 0x34, 0x2f, 0x3b, 0xe9, 0x51, 0x5f, + 0x43, 0xe5, 0x98, 0xf8, 0x11, 0x8c, 0x87, 0xcf, 0x19, 0x5f, 0x9e, 0x15, 0x77, 0xb5, 0xf5, 0x1f, + 0x1a, 0xaa, 0xe5, 0xa7, 0xe7, 0x80, 0x4a, 0xa5, 0xbf, 0x9b, 0xdb, 0x09, 0x7c, 0xbf, 0x9d, 0x48, + 0xd5, 0xd9, 0x46, 0xd4, 0x26, 0x35, 0x54, 0xfe, 0x20, 0xb9, 0x7d, 0x78, 0x85, 0xca, 0x54, 0x41, + 0x20, 0x8d, 0x62, 0xf6, 0x31, 0x8f, 0xee, 0x31, 0xd3, 0xf6, 0xea, 0xc4, 0xaf, 0xfc, 0x3a, 0x55, + 0x3a, 0x63, 0x03, 0xbb, 0x7d, 0x7e, 0x65, 0x16, 0x2e, 0xae, 0xcc, 0xc2, 0xe5, 0x95, 0x59, 0xf8, + 0x9a, 0x98, 0xda, 0x79, 0x62, 0x6a, 0x17, 0x89, 0xa9, 0x5d, 0x26, 0xa6, 0xf6, 0x33, 0x31, 0xb5, + 0xef, 0xbf, 0xcc, 0xc2, 0xdb, 0x62, 0xdc, 0xf9, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x0e, 0x67, 0x74, + 0x30, 0xa1, 0x05, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto similarity index 52% rename from vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.proto rename to 
vendor/k8s.io/api/storage/v1/generated.proto index 92e18cf9e..cff642211 100644 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,13 +19,14 @@ limitations under the License. syntax = 'proto2'; -package k8s.io.kubernetes.pkg.apis.storage.v1; +package k8s.io.api.storage.v1; +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/api/storage/v1beta1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; @@ -37,7 +38,7 @@ option go_package = "v1"; // according to etcd is in ObjectMeta.Name. message StorageClass { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -48,12 +49,42 @@ message StorageClass { // create volumes of this storage class. // +optional map parameters = 3; + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with this reclaimPolicy. Defaults to Delete. + // +optional + optional string reclaimPolicy = 4; + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mount of the PVs will simply fail if one is invalid. + // +optional + repeated string mountOptions = 5; + + // AllowVolumeExpansion shows whether the storage class allow volume expand + // +optional + optional bool allowVolumeExpansion = 6; + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + optional string volumeBindingMode = 7; + + // Restrict the node topologies where volumes can be dynamically provisioned. + // Each volume plugin defines its own supported topology specifications. + // An empty TopologySelectorTerm list means there is no topology restriction. + // This field is alpha-level and is only honored by servers that enable + // the DynamicProvisioningScheduling feature. + // +optional + repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; } // StorageClassList is a collection of storage classes. 
message StorageClassList { // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/register.go b/vendor/k8s.io/api/storage/v1/register.go similarity index 79% rename from vendor/k8s.io/client-go/pkg/apis/storage/v1/register.go rename to vendor/k8s.io/api/storage/v1/register.go index 24d6bfa7d..c058add84 100644 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1/register.go +++ b/vendor/k8s.io/api/storage/v1/register.go @@ -34,11 +34,14 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &StorageClass{}, diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go new file mode 100644 index 000000000..45bfa7681 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -0,0 +1,106 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +type StorageClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Provisioner indicates the type of the provisioner. + Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with this reclaimPolicy. Defaults to Delete. 
+ // +optional + ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mount of the PVs will simply fail if one is invalid. + // +optional + MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` + + // AllowVolumeExpansion shows whether the storage class allow volume expand + // +optional + AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` + + // Restrict the node topologies where volumes can be dynamically provisioned. + // Each volume plugin defines its own supported topology specifications. + // An empty TopologySelectorTerm list means there is no topology restriction. + // This field is alpha-level and is only honored by servers that enable + // the DynamicProvisioningScheduling feature. + // +optional + AllowedTopologies []v1.TopologySelectorTerm `json:"allowedTopologies,omitempty" protobuf:"bytes,8,rep,name=allowedTopologies"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StorageClassList is a collection of storage classes. +type StorageClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of StorageClasses + Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeBindingMode indicates how PersistentVolumeClaims should be bound. +type VolumeBindingMode string + +const ( + // VolumeBindingImmediate indicates that PersistentVolumeClaims should be + // immediately provisioned and bound. This is the default mode. + VolumeBindingImmediate VolumeBindingMode = "Immediate" + + // VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims + // should not be provisioned and bound until the first Pod is created that + // references the PeristentVolumeClaim. The volume provisioning and + // binding will occur during Pod scheduing. + VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" +) diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..1d6587047 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -0,0 +1,56 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
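For orientation only: the StorageClass type vendored above gains several new optional fields in this release (ReclaimPolicy, MountOptions, AllowVolumeExpansion, VolumeBindingMode, AllowedTopologies). A minimal sketch of populating them with the vendored types follows; the object name, provisioner, and parameters are placeholder values, and the import paths assume the vendored k8s.io/api and k8s.io/apimachinery packages are on the build path.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Pointer-typed optional fields need addressable values.
	reclaim := corev1.PersistentVolumeReclaimRetain
	binding := storagev1.VolumeBindingWaitForFirstConsumer
	expand := true

	sc := storagev1.StorageClass{
		ObjectMeta:           metav1.ObjectMeta{Name: "fast-ssd"},      // placeholder name
		Provisioner:          "example.com/provisioner",                // placeholder provisioner
		Parameters:           map[string]string{"type": "ssd"},         // placeholder parameters
		ReclaimPolicy:        &reclaim,
		MountOptions:         []string{"ro"},
		AllowVolumeExpansion: &expand,
		VolumeBindingMode:    &binding,
	}

	fmt.Println(sc.Name, *sc.VolumeBindingMode)
}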
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_StorageClass = map[string]string{ + "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "provisioner": "Provisioner indicates the type of the provisioner.", + "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", + "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is alpha-level and is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. 
This field is alpha-level and is only honored by servers that enable the DynamicProvisioningScheduling feature.", +} + +func (StorageClass) SwaggerDoc() map[string]string { + return map_StorageClass +} + +var map_StorageClassList = map[string]string{ + "": "StorageClassList is a collection of storage classes.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of StorageClasses", +} + +func (StorageClassList) SwaggerDoc() map[string]string { + return map_StorageClassList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..a1050134c --- /dev/null +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -0,0 +1,131 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClass) DeepCopyInto(out *StorageClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ReclaimPolicy != nil { + in, out := &in.ReclaimPolicy, &out.ReclaimPolicy + if *in == nil { + *out = nil + } else { + *out = new(core_v1.PersistentVolumeReclaimPolicy) + **out = **in + } + } + if in.MountOptions != nil { + in, out := &in.MountOptions, &out.MountOptions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowVolumeExpansion != nil { + in, out := &in.AllowVolumeExpansion, &out.AllowVolumeExpansion + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.VolumeBindingMode != nil { + in, out := &in.VolumeBindingMode, &out.VolumeBindingMode + if *in == nil { + *out = nil + } else { + *out = new(VolumeBindingMode) + **out = **in + } + } + if in.AllowedTopologies != nil { + in, out := &in.AllowedTopologies, &out.AllowedTopologies + *out = make([]core_v1.TopologySelectorTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClass. +func (in *StorageClass) DeepCopy() *StorageClass { + if in == nil { + return nil + } + out := new(StorageClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *StorageClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassList. +func (in *StorageClassList) DeepCopy() *StorageClassList { + if in == nil { + return nil + } + out := new(StorageClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/doc.go b/vendor/k8s.io/api/storage/v1alpha1/doc.go new file mode 100644 index 000000000..aa94aff7f --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register +// +groupName=storage.k8s.io +// +k8s:openapi-gen=true +package v1alpha1 // import "k8s.io/api/storage/v1alpha1" diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go new file mode 100644 index 000000000..e1d5a3aac --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -0,0 +1,1523 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. 
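The zz_generated.deepcopy.go helpers above allocate fresh maps and pointer fields instead of sharing them with the receiver. A small illustrative sketch of that behavior, using placeholder values and the vendored storage/v1 types:

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
)

func main() {
	expand := true
	orig := &storagev1.StorageClass{
		Parameters:           map[string]string{"type": "ssd"}, // placeholder parameter
		AllowVolumeExpansion: &expand,
	}

	// DeepCopy returns an independent object: mutating the copy's map or
	// pointer fields does not leak back into the original.
	cp := orig.DeepCopy()
	cp.Parameters["type"] = "hdd"
	*cp.AllowVolumeExpansion = false

	fmt.Println(orig.Parameters["type"], *orig.AllowVolumeExpansion) // prints: ssd true
}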
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto + + It has these top-level messages: + VolumeAttachment + VolumeAttachmentList + VolumeAttachmentSource + VolumeAttachmentSpec + VolumeAttachmentStatus + VolumeError +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func init() { + proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachment") + proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentList") + proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSource") + proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec") + proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus") + proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1alpha1.VolumeError") +} +func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := 
m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PersistentVolumeName != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i += copy(dAtA[i:], *m.PersistentVolumeName) + } + return i, nil +} + +func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i += copy(dAtA[i:], m.Attacher) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) + n5, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + return i, nil +} + +func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.AttachmentMetadata) > 0 { + keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) + for k := range m.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + for _, k := range keysForAttachmentMetadata { + dAtA[i] = 0x12 + i++ + v := m.AttachmentMetadata[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.AttachError != nil { + dAtA[i] = 0x1a + i++ 
+ i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) + n6, err := m.AttachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.DetachError != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) + n7, err := m.DetachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *VolumeError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) + n8, err := m.Time.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *VolumeAttachment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachmentSource) Size() (n int) { + var l int + _ = l + if m.PersistentVolumeName != nil { + l = len(*m.PersistentVolumeName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeAttachmentSpec) Size() (n int) { + var l int + _ = l + l = len(m.Attacher) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentStatus) Size() (n int) { + var l int + _ = l + n += 2 + if len(m.AttachmentMetadata) > 0 { + for k, v := range m.AttachmentMetadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AttachError != nil { + l = m.AttachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DetachError != nil { + l = m.DetachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeError) Size() (n int) { + var l int + _ = l + l = m.Time.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func 
sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *VolumeAttachment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSource{`, + `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSpec{`, + `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentStatus) String() string { + if this == nil { + return "nil" + } + keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) + for k := range this.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeError{`, + `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", 
pv) +} +func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentList: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttachment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PersistentVolumeName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attacher", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attacher = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Attached", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Attached = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.AttachmentMetadata == nil { + m.AttachmentMetadata = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.AttachmentMetadata[mapkey] = mapvalue + } else { + var mapvalue string + m.AttachmentMetadata[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AttachError == nil { + m.AttachError = &VolumeError{} + } + if err := m.AttachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 
4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DetachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DetachError == nil { + m.DetachError = &VolumeError{} + } + if err := m.DetachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 745 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x3d, 0x6f, 0xdb, 0x46, + 0x18, 0xc7, 0x45, 0x49, 0xb6, 0xe5, 0x53, 0x5f, 0x8c, 0x83, 0xd0, 0x0a, 0x2a, 0x40, 0x19, 0x9a, + 0xdc, 0xa2, 0x3e, 0x56, 0x76, 0x51, 0x18, 0xdd, 0x44, 0xd8, 0x43, 0x51, 0xcb, 0x2d, 0xe8, 0xa2, + 0x43, 0xdb, 0xa1, 0x27, 0xf2, 0x31, 0x45, 0x4b, 0x7c, 0xc1, 0xdd, 0x51, 0x88, 0xb7, 0x4c, 0x99, + 0xb3, 0xe5, 0x1b, 0xe4, 0xb3, 0x68, 0x8b, 0x47, 0x4f, 0x42, 0xcc, 0x7c, 0x8b, 0x2c, 0x09, 0x78, + 0x3c, 0x89, 0xb2, 0x29, 0x25, 0xb6, 0x37, 0x3e, 0xcf, 0x3d, 0xff, 0xdf, 0xf3, 0x76, 0x47, 0x74, + 0x3c, 0x3a, 0xe2, 0xc4, 0x0b, 0x8d, 0x51, 0x3c, 0x00, 0x16, 0x80, 0x00, 0x6e, 0x4c, 0x20, 0x70, + 0x42, 0x66, 0xa8, 0x03, 0x1a, 0x79, 0x06, 0x17, 0x21, 0xa3, 0x2e, 0x18, 0x93, 0x2e, 0x1d, 0x47, + 0x43, 0xda, 0x35, 0x5c, 0x08, 0x80, 0x51, 0x01, 0x0e, 0x89, 0x58, 0x28, 0x42, 0xfc, 0x5d, 0x16, + 0x4c, 0x68, 0xe4, 0x11, 0x15, 0x4c, 0xe6, 0xc1, 0xad, 0x7d, 0xd7, 0x13, 0xc3, 0x78, 0x40, 0xec, + 0xd0, 0x37, 0xdc, 0xd0, 0x0d, 0x0d, 0xa9, 0x19, 0xc4, 0x17, 0xd2, 0x92, 0x86, 0xfc, 0xca, 0x58, + 0xad, 0x7e, 0x9e, 0x18, 0x9e, 0x09, 0x08, 0xb8, 0x17, 0x06, 0x7c, 0x9f, 0x46, 0x1e, 0x07, 0x36, + 0x01, 0x66, 0x44, 0x23, 0x37, 0x3d, 0xe3, 0x77, 0x03, 0x8c, 0x49, 0x77, 0x00, 0xa2, 0x58, 0x5a, + 0xeb, 0xe7, 0x1c, 0xe7, 0x53, 0x7b, 0xe8, 0x05, 0xc0, 0xae, 0x72, 0x86, 0x0f, 0x82, 0x1a, 0x93, + 0xa2, 0xca, 0x58, 0xa7, 0x62, 0x71, 0x20, 0x3c, 0x1f, 0x0a, 0x82, 0x5f, 0x3e, 0x27, 0xe0, 0xf6, + 0x10, 0x7c, 0x5a, 0xd0, 0x1d, 0xae, 0xd3, 0xc5, 0xc2, 0x1b, 0x1b, 0x5e, 0x20, 0xb8, 0x60, 0xf7, + 0x45, 0x9d, 0xd7, 0x65, 0xb4, 0xf3, 0x77, 0x38, 0x8e, 0x7d, 0xe8, 0x09, 0x41, 0xed, 0xa1, 
0x0f, + 0x81, 0xc0, 0xff, 0xa3, 0x5a, 0xda, 0x8d, 0x43, 0x05, 0x6d, 0x6a, 0xbb, 0xda, 0x5e, 0xfd, 0xe0, + 0x27, 0x92, 0xaf, 0x65, 0x01, 0x27, 0xd1, 0xc8, 0x4d, 0x1d, 0x9c, 0xa4, 0xd1, 0x64, 0xd2, 0x25, + 0x7f, 0x0c, 0x2e, 0xc1, 0x16, 0x7d, 0x10, 0xd4, 0xc4, 0xd3, 0x59, 0xbb, 0x94, 0xcc, 0xda, 0x28, + 0xf7, 0x59, 0x0b, 0x2a, 0x3e, 0x47, 0x55, 0x1e, 0x81, 0xdd, 0x2c, 0x4b, 0x7a, 0x97, 0x7c, 0x62, + 0xe9, 0xe4, 0x7e, 0x79, 0xe7, 0x11, 0xd8, 0xe6, 0x17, 0x0a, 0x5f, 0x4d, 0x2d, 0x4b, 0xc2, 0xf0, + 0xbf, 0x68, 0x93, 0x0b, 0x2a, 0x62, 0xde, 0xac, 0x48, 0xec, 0xe1, 0xe3, 0xb0, 0x52, 0x6a, 0x7e, + 0xa5, 0xc0, 0x9b, 0x99, 0x6d, 0x29, 0x64, 0x67, 0xaa, 0xa1, 0xc6, 0x7d, 0xc9, 0xa9, 0xc7, 0x05, + 0xfe, 0xaf, 0x30, 0x2c, 0xf2, 0xb0, 0x61, 0xa5, 0x6a, 0x39, 0xaa, 0x1d, 0x95, 0xb2, 0x36, 0xf7, + 0x2c, 0x0d, 0xca, 0x42, 0x1b, 0x9e, 0x00, 0x9f, 0x37, 0xcb, 0xbb, 0x95, 0xbd, 0xfa, 0xc1, 0xfe, + 0xa3, 0x5a, 0x32, 0xbf, 0x54, 0xe4, 0x8d, 0xdf, 0x52, 0x86, 0x95, 0xa1, 0x3a, 0x17, 0xe8, 0x9b, + 0x42, 0xf3, 0x61, 0xcc, 0x6c, 0xc0, 0xa7, 0xa8, 0x11, 0x01, 0xe3, 0x1e, 0x17, 0x10, 0x88, 0x2c, + 0xe6, 0x8c, 0xfa, 0x20, 0xfb, 0xda, 0x36, 0x9b, 0xc9, 0xac, 0xdd, 0xf8, 0x73, 0xc5, 0xb9, 0xb5, + 0x52, 0xd5, 0x79, 0xb3, 0x62, 0x64, 0xe9, 0xba, 0xf0, 0x8f, 0xa8, 0x46, 0xa5, 0x07, 0x98, 0x42, + 0x2f, 0x46, 0xd0, 0x53, 0x7e, 0x6b, 0x11, 0x21, 0xd7, 0x2a, 0xcb, 0x53, 0xb7, 0xe5, 0x91, 0x6b, + 0x95, 0xd2, 0xa5, 0xb5, 0x4a, 0xdb, 0x52, 0xc8, 0xb4, 0x94, 0x20, 0x74, 0xb2, 0x2e, 0x2b, 0x77, + 0x4b, 0x39, 0x53, 0x7e, 0x6b, 0x11, 0xd1, 0xf9, 0x50, 0x59, 0x31, 0x3a, 0x79, 0x3f, 0x96, 0x7a, + 0x72, 0x64, 0x4f, 0xb5, 0x42, 0x4f, 0xce, 0xa2, 0x27, 0x07, 0xbf, 0xd2, 0x10, 0xa6, 0x0b, 0x44, + 0x7f, 0x7e, 0x7f, 0xb2, 0x25, 0xff, 0xfe, 0x84, 0x7b, 0x4b, 0x7a, 0x05, 0xda, 0x49, 0x20, 0xd8, + 0x95, 0xd9, 0x52, 0x55, 0xe0, 0x62, 0x80, 0xb5, 0xa2, 0x04, 0x7c, 0x89, 0xea, 0x99, 0xf7, 0x84, + 0xb1, 0x90, 0xa9, 0x97, 0xb4, 0xf7, 0x80, 0x8a, 0x64, 0xbc, 0xa9, 0x27, 0xb3, 0x76, 0xbd, 0x97, + 0x03, 0xde, 0xcf, 0xda, 0xf5, 0xa5, 0x73, 0x6b, 0x19, 0x9e, 0xe6, 0x72, 0x20, 0xcf, 0x55, 0x7d, + 0x4a, 0xae, 0x63, 0x58, 0x9f, 0x6b, 0x09, 0xde, 0x3a, 0x41, 0xdf, 0xae, 0x19, 0x11, 0xde, 0x41, + 0x95, 0x11, 0x5c, 0x65, 0x37, 0xd1, 0x4a, 0x3f, 0x71, 0x03, 0x6d, 0x4c, 0xe8, 0x38, 0xce, 0x6e, + 0xdc, 0xb6, 0x95, 0x19, 0xbf, 0x96, 0x8f, 0xb4, 0xce, 0x0b, 0x0d, 0x2d, 0xe7, 0xc0, 0xa7, 0xa8, + 0x9a, 0xfe, 0x93, 0xd5, 0xcb, 0xff, 0xe1, 0x61, 0x2f, 0xff, 0x2f, 0xcf, 0x87, 0xfc, 0x0f, 0x96, + 0x5a, 0x96, 0xa4, 0xe0, 0xef, 0xd1, 0x96, 0x0f, 0x9c, 0x53, 0x57, 0x65, 0x36, 0xbf, 0x56, 0x41, + 0x5b, 0xfd, 0xcc, 0x6d, 0xcd, 0xcf, 0x4d, 0x32, 0xbd, 0xd5, 0x4b, 0xd7, 0xb7, 0x7a, 0xe9, 0xe6, + 0x56, 0x2f, 0x3d, 0x4f, 0x74, 0x6d, 0x9a, 0xe8, 0xda, 0x75, 0xa2, 0x6b, 0x37, 0x89, 0xae, 0xbd, + 0x4d, 0x74, 0xed, 0xe5, 0x3b, 0xbd, 0xf4, 0x4f, 0x6d, 0x3e, 0xb8, 0x8f, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x68, 0x82, 0x7b, 0x73, 0x9e, 0x07, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto new file mode 100644 index 000000000..2f792a06c --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -0,0 +1,128 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.storage.v1alpha1; + +import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +message VolumeAttachment { + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + optional VolumeAttachmentSpec spec = 2; + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeAttachmentStatus status = 3; +} + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +message VolumeAttachmentList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of VolumeAttachments + repeated VolumeAttachment items = 2; +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +message VolumeAttachmentSource { + // Name of the persistent volume to attach. + // +optional + optional string persistentVolumeName = 1; +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +message VolumeAttachmentSpec { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + optional string attacher = 1; + + // Source represents the volume that should be attached. + optional VolumeAttachmentSource source = 2; + + // The node that the volume should be attached to. + optional string nodeName = 3; +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +message VolumeAttachmentStatus { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + optional bool attached = 1; + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. 
+ // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + map attachmentMetadata = 2; + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError attachError = 3; + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError detachError = 4; +} + +// VolumeError captures an error encountered during a volume operation. +message VolumeError { + // Time the error was encountered. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. + // +optional + optional string message = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/register.go b/vendor/k8s.io/api/storage/v1alpha1/register.go similarity index 91% rename from vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/register.go rename to vendor/k8s.io/api/storage/v1alpha1/register.go index 45afb50ca..7b81ee49c 100644 --- a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/register.go +++ b/vendor/k8s.io/api/storage/v1alpha1/register.go @@ -23,7 +23,7 @@ import ( ) // GroupName is the group name use in this package -const GroupName = "settings.k8s.io" +const GroupName = "storage.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} @@ -38,12 +38,13 @@ var ( AddToScheme = SchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &PodPreset{}, - &PodPresetList{}, + &VolumeAttachment{}, + &VolumeAttachmentList{}, ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go new file mode 100644 index 000000000..964bb5f7b --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -0,0 +1,126 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +type VolumeAttachment struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +type VolumeAttachmentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of VolumeAttachments + Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +type VolumeAttachmentSpec struct { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` + + // Source represents the volume that should be attached. + Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` + + // The node that the volume should be attached to. + NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +type VolumeAttachmentSource struct { + // Name of the persistent volume to attach. + // +optional + PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` + + // Placeholder for *VolumeSource to accommodate inline volumes in pods. +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +type VolumeAttachmentStatus struct { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` + + // The last error encountered during detach operation, if any. 
+ // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"` +} + +// VolumeError captures an error encountered during a volume operation. +type VolumeError struct { + // Time the error was encountered. + // +optional + Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..32d7dcc52 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_VolumeAttachment = map[string]string{ + "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachment) SwaggerDoc() map[string]string { + return map_VolumeAttachment +} + +var map_VolumeAttachmentList = map[string]string{ + "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of VolumeAttachments", +} + +func (VolumeAttachmentList) SwaggerDoc() map[string]string { + return map_VolumeAttachmentList +} + +var map_VolumeAttachmentSource = map[string]string{ + "": "VolumeAttachmentSource represents a volume that should be attached. 
Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "persistentVolumeName": "Name of the persistent volume to attach.", +} + +func (VolumeAttachmentSource) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSource +} + +var map_VolumeAttachmentSpec = map[string]string{ + "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", + "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "Source represents the volume that should be attached.", + "nodeName": "The node that the volume should be attached to.", +} + +func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSpec +} + +var map_VolumeAttachmentStatus = map[string]string{ + "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", + "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { + return map_VolumeAttachmentStatus +} + +var map_VolumeError = map[string]string{ + "": "VolumeError captures an error encountered during a volume operation.", + "time": "Time the error was encountered.", + "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", +} + +func (VolumeError) SwaggerDoc() map[string]string { + return map_VolumeError +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..d1a53755b --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,186 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. +func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { + if in == nil { + return nil + } + out := new(VolumeAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. +func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { + if in == nil { + return nil + } + out := new(VolumeAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { + *out = *in + if in.PersistentVolumeName != nil { + in, out := &in.PersistentVolumeName, &out.PersistentVolumeName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. +func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { + if in == nil { + return nil + } + out := new(VolumeAttachmentSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. +func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { + if in == nil { + return nil + } + out := new(VolumeAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { + *out = *in + if in.AttachmentMetadata != nil { + in, out := &in.AttachmentMetadata, &out.AttachmentMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AttachError != nil { + in, out := &in.AttachError, &out.AttachError + if *in == nil { + *out = nil + } else { + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + } + if in.DetachError != nil { + in, out := &in.DetachError, &out.DetachError + if *in == nil { + *out = nil + } else { + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. +func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { + if in == nil { + return nil + } + out := new(VolumeAttachmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeError) DeepCopyInto(out *VolumeError) { + *out = *in + in.Time.DeepCopyInto(&out.Time) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. +func (in *VolumeError) DeepCopy() *VolumeError { + if in == nil { + return nil + } + out := new(VolumeError) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/doc.go b/vendor/k8s.io/api/storage/v1beta1/doc.go similarity index 84% rename from vendor/k8s.io/client-go/pkg/apis/storage/doc.go rename to vendor/k8s.io/api/storage/v1beta1/doc.go index ef6d2a9ba..8957a4cf2 100644 --- a/vendor/k8s.io/client-go/pkg/apis/storage/doc.go +++ b/vendor/k8s.io/api/storage/v1beta1/doc.go @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:deepcopy-gen=package // +groupName=storage.k8s.io -package storage +// +k8s:openapi-gen=true +package v1beta1 // import "k8s.io/api/storage/v1beta1" diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go new file mode 100644 index 000000000..058702593 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -0,0 +1,2263 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto + + It has these top-level messages: + StorageClass + StorageClassList + VolumeAttachment + VolumeAttachmentList + VolumeAttachmentSource + VolumeAttachmentSpec + VolumeAttachmentStatus + VolumeError +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func init() { + proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") + proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") + proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment") + proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentList") + proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSource") + proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec") + proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") + 
proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") +} +func (m *StorageClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) + i += copy(dAtA[i:], m.Provisioner) + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for _, k := range keysForParameters { + dAtA[i] = 0x1a + i++ + v := m.Parameters[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.ReclaimPolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i += copy(dAtA[i:], *m.ReclaimPolicy) + } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.AllowVolumeExpansion != nil { + dAtA[i] = 0x30 + i++ + if *m.AllowVolumeExpansion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.VolumeBindingMode != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i += copy(dAtA[i:], *m.VolumeBindingMode) + } + if len(m.AllowedTopologies) > 0 { + for _, msg := range m.AllowedTopologies { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StorageClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + 
n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n5, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PersistentVolumeName != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i += copy(dAtA[i:], *m.PersistentVolumeName) + } + return i, nil +} + +func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i += copy(dAtA[i:], m.Attacher) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) + n7, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + return i, nil +} + +func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.AttachmentMetadata) > 0 { + keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) + for k := range m.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + for _, k := range keysForAttachmentMetadata { + dAtA[i] = 0x12 + i++ + v := m.AttachmentMetadata[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.AttachError != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) + n8, err := m.AttachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.DetachError != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) + n9, err := m.DetachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + +func (m *VolumeError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) + n10, err := m.Time.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *StorageClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Provisioner) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ReclaimPolicy != nil { + l = len(*m.ReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AllowVolumeExpansion != nil { + n += 2 + } + if m.VolumeBindingMode != nil { + l = len(*m.VolumeBindingMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AllowedTopologies) > 0 { + for _, e := range m.AllowedTopologies { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentList) Size() (n 
int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachmentSource) Size() (n int) { + var l int + _ = l + if m.PersistentVolumeName != nil { + l = len(*m.PersistentVolumeName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeAttachmentSpec) Size() (n int) { + var l int + _ = l + l = len(m.Attacher) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentStatus) Size() (n int) { + var l int + _ = l + n += 2 + if len(m.AttachmentMetadata) > 0 { + for k, v := range m.AttachmentMetadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AttachError != nil { + l = m.AttachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DetachError != nil { + l = m.DetachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeError) Size() (n int) { + var l int + _ = l + l = m.Time.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *StorageClass) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&StorageClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, + `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, + `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, + `AllowedTopologies:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedTopologies), "TopologySelectorTerm", "k8s_io_api_core_v1.TopologySelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageClassList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachment) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&VolumeAttachment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSource{`, + `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSpec{`, + `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentStatus) String() string { + if this == nil { + return "nil" + } + keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) + for k := range this.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeError{`, + `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StorageClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provisioner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Parameters[mapkey] = mapvalue + } else { + var mapvalue string + m.Parameters[mapkey] = mapvalue + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) + m.ReclaimPolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := VolumeBindingMode(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedTopologies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) + if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
VolumeAttachment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttachment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PersistentVolumeName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Attacher", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attacher = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attached", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Attached = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.AttachmentMetadata == nil { + m.AttachmentMetadata = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.AttachmentMetadata[mapkey] = mapvalue + } else { + var mapvalue string + m.AttachmentMetadata[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AttachError == nil { + m.AttachError = &VolumeError{} + } + if err := m.AttachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DetachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DetachError == nil { + m.DetachError = &VolumeError{} + } + if err := 
m.DetachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 1022 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xce, 0xc6, 0xf9, 0x70, 0xc6, 0x09, 0x4d, 0x96, 0x08, 0x8c, 0x0f, 0x76, 0xe4, 0x0b, 0xa6, + 0x6a, 0x76, 0x9b, 0x50, 0x50, 0x84, 0xc4, 0xc1, 0x5b, 0x72, 0x00, 0xc5, 0x6d, 0x98, 0x44, 0x15, + 0xaa, 0x38, 0x30, 0xd9, 0x7d, 0xeb, 0x0c, 0xde, 0xdd, 0x59, 0x66, 0xc6, 0xa6, 0xb9, 0x71, 0xe2, + 0x8c, 0x38, 0xf0, 0x0b, 0xf8, 0x1f, 0x1c, 0xc9, 0x09, 0xf5, 0xd8, 0x93, 0x45, 0xcc, 0xbf, 0x88, + 0x38, 0xa0, 0x99, 0x9d, 0x78, 0xd7, 0x5e, 0x9b, 0x36, 0x3d, 0xf4, 0xe6, 0xf7, 0xe3, 0x79, 0xde, + 0xef, 0x59, 0xa3, 0x87, 0xbd, 0x03, 0xe1, 0x50, 0xe6, 0xf6, 0xfa, 0x67, 0xc0, 0x63, 0x90, 0x20, + 0xdc, 0x01, 0xc4, 0x01, 0xe3, 0xae, 0x31, 0x90, 0x84, 0xba, 0x42, 0x32, 0x4e, 0xba, 0xe0, 0x0e, + 0xf6, 0xce, 0x40, 0x92, 0x3d, 0xb7, 0x0b, 0x31, 0x70, 0x22, 0x21, 0x70, 0x12, 0xce, 0x24, 0xb3, + 0x6b, 0xa9, 0xaf, 0x43, 0x12, 0xea, 0x18, 0x5f, 0xc7, 0xf8, 0xd6, 0x76, 0xbb, 0x54, 0x9e, 0xf7, + 0xcf, 0x1c, 0x9f, 0x45, 0x6e, 0x97, 0x75, 0x99, 0xab, 0x21, 0x67, 0xfd, 0x67, 0x5a, 0xd2, 0x82, + 0xfe, 0x95, 0x52, 0xd5, 0x9a, 0xb9, 0xb0, 0x3e, 0xe3, 0x2a, 0xe6, 0x74, 0xb8, 0x5a, 0x27, 0xf3, + 0x81, 0xe7, 0x12, 0x62, 0x41, 0x59, 0x2c, 0x76, 0x49, 0x42, 0x05, 0xf0, 0x01, 0x70, 0x37, 0xe9, + 0x75, 0x95, 0x4d, 0x4c, 0x3a, 0xcc, 0xcb, 0xbe, 0xf6, 0x20, 0xa3, 0x8b, 0x88, 0x7f, 0x4e, 0x63, + 0xe0, 0x17, 0x19, 0x47, 0x04, 0x92, 0xcc, 0x4a, 0xc2, 0x9d, 0x87, 0xe2, 0xfd, 0x58, 0xd2, 0x08, + 0x0a, 0x80, 0x4f, 0x5f, 0x05, 0x10, 0xfe, 0x39, 0x44, 0xa4, 0x80, 0xfb, 0x78, 0x1e, 0xae, 0x2f, + 0x69, 0xe8, 0xd2, 0x58, 0x0a, 0xc9, 0xa7, 0x41, 0xcd, 0x5f, 0x57, 0xd0, 0xfa, 0x49, 0x3a, 0x89, + 0x87, 0x21, 0x11, 0xc2, 0xfe, 0x0e, 0x95, 0x55, 0x25, 0x01, 0x91, 0xa4, 0x6a, 0xed, 0x58, 0xad, + 0xca, 0xfe, 0x7d, 0x27, 0x9b, 0xda, 0x98, 0xd8, 0x49, 0x7a, 0x5d, 0xa5, 0x10, 0x8e, 0xf2, 0x76, + 0x06, 0x7b, 0xce, 0xe3, 0xb3, 0xef, 0xc1, 0x97, 0x1d, 0x90, 0xc4, 0xb3, 0x2f, 0x87, 0x8d, 0x85, + 0xd1, 0xb0, 0x81, 0x32, 0x1d, 0x1e, 0xb3, 0xda, 0x9f, 0xa0, 0x4a, 0xc2, 0xd9, 0x80, 0xaa, 0x66, + 0x03, 0xaf, 0x2e, 0xee, 0x58, 0xad, 0x35, 0xef, 0x5d, 0x03, 0xa9, 0x1c, 0x67, 0x26, 0x9c, 0xf7, + 0xb3, 0x43, 0x84, 0x12, 0xc2, 0x49, 0x04, 0x12, 0xb8, 0xa8, 0x96, 0x76, 0x4a, 0xad, 0xca, 0xfe, 
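
As a standalone sketch (not part of the vendored files above), the hand-rolled decoding loops in the generated Unmarshal and skipGenerated functions all reduce to the same base-128 varint and field-tag logic. The snippet below re-implements just that piece under no external dependencies, so the byte values in main are illustrative only.

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads a base-128 varint the same way the generated code does:
// seven payload bits per byte, least-significant group first, with the high
// bit of each byte acting as a continuation flag.
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if n >= len(b) {
			return 0, 0, errors.New("unexpected end of input")
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 { // continuation bit clear: last byte of the varint
			return v, n, nil
		}
	}
}

func main() {
	// A field tag is itself a varint: (fieldNumber << 3) | wireType.
	// 0x12 decodes to field 2, wire type 2 (length-delimited).
	tag, n, _ := decodeVarint([]byte{0x12})
	fmt.Println(tag>>3, tag&0x7, n) // 2 2 1

	// The value 300 is encoded as 0xAC 0x02.
	v, _, _ := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v) // 300
}

The generated code inlines this loop at every field for speed, but the decoding rule is identical in each case.
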
+ 0x81, 0x33, 0x7f, 0xa1, 0x9c, 0x7c, 0x59, 0xce, 0xf1, 0x18, 0x7a, 0x18, 0x4b, 0x7e, 0x91, 0xa5, + 0x98, 0x19, 0x70, 0x8e, 0xdf, 0xee, 0xa1, 0x0d, 0x0e, 0x7e, 0x48, 0x68, 0x74, 0xcc, 0x42, 0xea, + 0x5f, 0x54, 0x97, 0x74, 0x9a, 0x87, 0xa3, 0x61, 0x63, 0x03, 0xe7, 0x0d, 0xd7, 0xc3, 0xc6, 0xfd, + 0xe2, 0x2a, 0x3a, 0xc7, 0xc0, 0x05, 0x15, 0x12, 0x62, 0xf9, 0x84, 0x85, 0xfd, 0x08, 0x26, 0x30, + 0x78, 0x92, 0xdb, 0x7e, 0x80, 0xd6, 0x23, 0xd6, 0x8f, 0xe5, 0xe3, 0x44, 0xaa, 0x05, 0xac, 0x2e, + 0xef, 0x94, 0x5a, 0x6b, 0xde, 0xe6, 0x68, 0xd8, 0x58, 0xef, 0xe4, 0xf4, 0x78, 0xc2, 0xcb, 0x3e, + 0x42, 0xdb, 0x24, 0x0c, 0xd9, 0x8f, 0x69, 0x80, 0xc3, 0xe7, 0x09, 0xd1, 0xfb, 0x5b, 0x5d, 0xd9, + 0xb1, 0x5a, 0x65, 0xaf, 0x3a, 0x1a, 0x36, 0xb6, 0xdb, 0x33, 0xec, 0x78, 0x26, 0xca, 0xfe, 0x06, + 0x6d, 0x0d, 0xb4, 0xca, 0xa3, 0x71, 0x40, 0xe3, 0x6e, 0x87, 0x05, 0x50, 0x5d, 0xd5, 0x45, 0xdf, + 0x1d, 0x0d, 0x1b, 0x5b, 0x4f, 0xa6, 0x8d, 0xd7, 0xb3, 0x94, 0xb8, 0x48, 0x62, 0xff, 0x80, 0xb6, + 0x74, 0x44, 0x08, 0x4e, 0x59, 0xc2, 0x42, 0xd6, 0xa5, 0x20, 0xaa, 0x65, 0x3d, 0xbf, 0x56, 0x7e, + 0x7e, 0xaa, 0x75, 0x6a, 0x91, 0x8c, 0xd7, 0xc5, 0x09, 0x84, 0xe0, 0x4b, 0xc6, 0x4f, 0x81, 0x47, + 0xde, 0x07, 0x66, 0x5e, 0x5b, 0xed, 0x69, 0x2a, 0x5c, 0x64, 0xaf, 0x7d, 0x8e, 0xee, 0x4c, 0x0d, + 0xdc, 0xde, 0x44, 0xa5, 0x1e, 0x5c, 0xe8, 0x95, 0x5e, 0xc3, 0xea, 0xa7, 0xbd, 0x8d, 0x96, 0x07, + 0x24, 0xec, 0x43, 0xba, 0x81, 0x38, 0x15, 0x3e, 0x5b, 0x3c, 0xb0, 0x9a, 0x7f, 0x58, 0x68, 0x33, + 0xbf, 0x3d, 0x47, 0x54, 0x48, 0xfb, 0xdb, 0xc2, 0x61, 0x38, 0xaf, 0x77, 0x18, 0x0a, 0xad, 0xcf, + 0x62, 0xd3, 0xd4, 0x50, 0xbe, 0xd1, 0xe4, 0x8e, 0xa2, 0x83, 0x96, 0xa9, 0x84, 0x48, 0x54, 0x17, + 0x8b, 0x8d, 0xf9, 0xbf, 0xc5, 0xf6, 0x36, 0x0c, 0xe9, 0xf2, 0x97, 0x0a, 0x8e, 0x53, 0x96, 0xe6, + 0xef, 0x8b, 0x68, 0x33, 0x1d, 0x4e, 0x5b, 0x4a, 0xe2, 0x9f, 0x47, 0x10, 0xcb, 0xb7, 0x70, 0xda, + 0x18, 0x2d, 0x89, 0x04, 0x7c, 0xdd, 0xd1, 0x49, 0xf6, 0x42, 0x11, 0xd3, 0xd9, 0x9d, 0x24, 0xe0, + 0x7b, 0xeb, 0x86, 0x7d, 0x49, 0x49, 0x58, 0x73, 0xd9, 0x4f, 0xd1, 0x8a, 0x90, 0x44, 0xf6, 0xd5, + 0xcd, 0x2b, 0xd6, 0xfd, 0x5b, 0xb1, 0x6a, 0xa4, 0xf7, 0x8e, 0xe1, 0x5d, 0x49, 0x65, 0x6c, 0x18, + 0x9b, 0x7f, 0x5a, 0x68, 0x7b, 0x1a, 0xf2, 0x16, 0x86, 0xfd, 0xf5, 0xe4, 0xb0, 0xef, 0xdd, 0xa6, + 0xa2, 0x39, 0x03, 0x7f, 0x86, 0xde, 0x2b, 0xd4, 0xce, 0xfa, 0xdc, 0x07, 0xf5, 0x4c, 0x24, 0x53, + 0x8f, 0xd1, 0x23, 0x12, 0x41, 0x7a, 0x09, 0xe9, 0x33, 0x71, 0x3c, 0xc3, 0x8e, 0x67, 0xa2, 0x9a, + 0x7f, 0xcd, 0xe8, 0x98, 0x1a, 0x96, 0x7d, 0x0f, 0x95, 0x89, 0xd6, 0x00, 0x37, 0xd4, 0xe3, 0x0e, + 0xb4, 0x8d, 0x1e, 0x8f, 0x3d, 0xf4, 0x50, 0x75, 0x7a, 0x66, 0x55, 0x6e, 0x37, 0x54, 0x8d, 0xcc, + 0x0d, 0x55, 0xcb, 0xd8, 0x30, 0xaa, 0x4c, 0x62, 0x16, 0xa4, 0x45, 0x96, 0x26, 0x33, 0x79, 0x64, + 0xf4, 0x78, 0xec, 0xd1, 0xfc, 0xb7, 0x34, 0xa3, 0x73, 0x7a, 0x3b, 0x72, 0x25, 0x05, 0xba, 0xa4, + 0x72, 0xa1, 0xa4, 0x60, 0x5c, 0x52, 0x60, 0xff, 0x66, 0x21, 0x9b, 0x8c, 0x29, 0x3a, 0x37, 0xdb, + 0x93, 0x8e, 0xf8, 0xab, 0xdb, 0x2f, 0xad, 0xd3, 0x2e, 0x90, 0xa5, 0x9f, 0xae, 0x9a, 0x49, 0xc2, + 0x2e, 0x3a, 0xe0, 0x19, 0x19, 0xd8, 0x14, 0x55, 0x52, 0xed, 0x21, 0xe7, 0x8c, 0x9b, 0x2b, 0xfa, + 0xf0, 0xd5, 0x09, 0x69, 0x77, 0xaf, 0xae, 0x3e, 0xca, 0xed, 0x0c, 0x7f, 0x3d, 0x6c, 0x54, 0x72, + 0x76, 0x9c, 0xe7, 0x56, 0xa1, 0x02, 0xc8, 0x42, 0x2d, 0xbd, 0x41, 0xa8, 0x2f, 0x60, 0x7e, 0xa8, + 0x1c, 0x77, 0xed, 0x10, 0xbd, 0x3f, 0xa7, 0x41, 0xb7, 0x7a, 0xea, 0x7f, 0xb6, 0x50, 0x3e, 0x86, + 0x7d, 0x84, 0x96, 0xd4, 0x7f, 0x2c, 0x73, 0xf4, 0x77, 0x5f, 0xef, 0xe8, 0x4f, 0x69, 0x04, 0xd9, + 0xdb, 0xa5, 0x24, 0xac, 
0x59, 0xec, 0x8f, 0xd0, 0x6a, 0x04, 0x42, 0x90, 0xae, 0x89, 0xec, 0xdd, + 0x31, 0x4e, 0xab, 0x9d, 0x54, 0x8d, 0x6f, 0xec, 0xde, 0xee, 0xe5, 0x55, 0x7d, 0xe1, 0xc5, 0x55, + 0x7d, 0xe1, 0xe5, 0x55, 0x7d, 0xe1, 0xa7, 0x51, 0xdd, 0xba, 0x1c, 0xd5, 0xad, 0x17, 0xa3, 0xba, + 0xf5, 0x72, 0x54, 0xb7, 0xfe, 0x1e, 0xd5, 0xad, 0x5f, 0xfe, 0xa9, 0x2f, 0x3c, 0x5d, 0x35, 0x7d, + 0xfb, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xd8, 0xa9, 0x81, 0xd5, 0x8f, 0x0b, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto new file mode 100644 index 000000000..32032b1a3 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -0,0 +1,190 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.storage.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +message StorageClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Provisioner indicates the type of the provisioner. + optional string provisioner = 2; + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + map parameters = 3; + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with this reclaimPolicy. Defaults to Delete. + // +optional + optional string reclaimPolicy = 4; + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mount of the PVs will simply fail if one is invalid. + // +optional + repeated string mountOptions = 5; + + // AllowVolumeExpansion shows whether the storage class allow volume expand + // +optional + optional bool allowVolumeExpansion = 6; + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. 
+ // +optional + optional string volumeBindingMode = 7; + + // Restrict the node topologies where volumes can be dynamically provisioned. + // Each volume plugin defines its own supported topology specifications. + // An empty TopologySelectorTerm list means there is no topology restriction. + // This field is alpha-level and is only honored by servers that enable + // the DynamicProvisioningScheduling feature. + // +optional + repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; +} + +// StorageClassList is a collection of storage classes. +message StorageClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of StorageClasses + repeated StorageClass items = 2; +} + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +message VolumeAttachment { + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + optional VolumeAttachmentSpec spec = 2; + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeAttachmentStatus status = 3; +} + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +message VolumeAttachmentList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of VolumeAttachments + repeated VolumeAttachment items = 2; +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +message VolumeAttachmentSource { + // Name of the persistent volume to attach. + // +optional + optional string persistentVolumeName = 1; +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +message VolumeAttachmentSpec { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + optional string attacher = 1; + + // Source represents the volume that should be attached. + optional VolumeAttachmentSource source = 2; + + // The node that the volume should be attached to. + optional string nodeName = 3; +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +message VolumeAttachmentStatus { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + optional bool attached = 1; + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. 
+ // +optional + map attachmentMetadata = 2; + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError attachError = 3; + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError detachError = 4; +} + +// VolumeError captures an error encountered during a volume operation. +message VolumeError { + // Time the error was encountered. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. + // +optional + optional string message = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/register.go b/vendor/k8s.io/api/storage/v1beta1/register.go similarity index 77% rename from vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/register.go rename to vendor/k8s.io/api/storage/v1beta1/register.go index 70087f379..06b0f3d52 100644 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/register.go +++ b/vendor/k8s.io/api/storage/v1beta1/register.go @@ -34,15 +34,21 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &StorageClass{}, &StorageClassList{}, + + &VolumeAttachment{}, + &VolumeAttachmentList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go new file mode 100644 index 000000000..7ec1e908f --- /dev/null +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -0,0 +1,213 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. 
+type StorageClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Provisioner indicates the type of the provisioner. + Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with this reclaimPolicy. Defaults to Delete. + // +optional + ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mount of the PVs will simply fail if one is invalid. + // +optional + MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` + + // AllowVolumeExpansion shows whether the storage class allow volume expand + // +optional + AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` + + // Restrict the node topologies where volumes can be dynamically provisioned. + // Each volume plugin defines its own supported topology specifications. + // An empty TopologySelectorTerm list means there is no topology restriction. + // This field is alpha-level and is only honored by servers that enable + // the DynamicProvisioningScheduling feature. + // +optional + AllowedTopologies []v1.TopologySelectorTerm `json:"allowedTopologies,omitempty" protobuf:"bytes,8,rep,name=allowedTopologies"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StorageClassList is a collection of storage classes. +type StorageClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of StorageClasses + Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeBindingMode indicates how PersistentVolumeClaims should be bound. +type VolumeBindingMode string + +const ( + // VolumeBindingImmediate indicates that PersistentVolumeClaims should be + // immediately provisioned and bound. This is the default mode. + VolumeBindingImmediate VolumeBindingMode = "Immediate" + + // VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims + // should not be provisioned and bound until the first Pod is created that + // references the PeristentVolumeClaim. The volume provisioning and + // binding will occur during Pod scheduing. 
+ VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +type VolumeAttachment struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +type VolumeAttachmentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of VolumeAttachments + Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +type VolumeAttachmentSpec struct { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` + + // Source represents the volume that should be attached. + Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` + + // The node that the volume should be attached to. + NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +type VolumeAttachmentSource struct { + // Name of the persistent volume to attach. + // +optional + PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` + + // Placeholder for *VolumeSource to accommodate inline volumes in pods. +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +type VolumeAttachmentStatus struct { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. 
+ // +optional + AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"` +} + +// VolumeError captures an error encountered during a volume operation. +type VolumeError struct { + // Time the error was encountered. + // +optional + Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` +} diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..423e7f271 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,119 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_StorageClass = map[string]string{ + "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "provisioner": "Provisioner indicates the type of the provisioner.", + "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. 
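
The types.go file introduced above defines the new storage/v1beta1 API surface (StorageClass, VolumeAttachment and friends). As a rough usage sketch outside this patch, a client could construct a VolumeAttachment with these types as shown below; the attacher, node and PersistentVolume names are hypothetical.

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pvName := "pv-0001" // hypothetical PersistentVolume to attach

	va := storagev1beta1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "example-attachment"},
		Spec: storagev1beta1.VolumeAttachmentSpec{
			// Attacher is the volume driver name, as returned by GetPluginName().
			Attacher: "csi.example.com",
			Source: storagev1beta1.VolumeAttachmentSource{
				// Exactly one member of the source may be set; today that is
				// the persistent volume name.
				PersistentVolumeName: &pvName,
			},
			NodeName: "node-1",
		},
	}

	fmt.Printf("attach %s to %s via %s\n",
		*va.Spec.Source.PersistentVolumeName, va.Spec.NodeName, va.Spec.Attacher)
}

The Status field is deliberately left empty here: per the field comments, it is populated only by the entity performing the attach or detach, i.e. the external-attacher.
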
Defaults to Delete.", + "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is alpha-level and is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is alpha-level and is only honored by servers that enable the DynamicProvisioningScheduling feature.", +} + +func (StorageClass) SwaggerDoc() map[string]string { + return map_StorageClass +} + +var map_StorageClassList = map[string]string{ + "": "StorageClassList is a collection of storage classes.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of StorageClasses", +} + +func (StorageClassList) SwaggerDoc() map[string]string { + return map_StorageClassList +} + +var map_VolumeAttachment = map[string]string{ + "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachment) SwaggerDoc() map[string]string { + return map_VolumeAttachment +} + +var map_VolumeAttachmentList = map[string]string{ + "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of VolumeAttachments", +} + +func (VolumeAttachmentList) SwaggerDoc() map[string]string { + return map_VolumeAttachmentList +} + +var map_VolumeAttachmentSource = map[string]string{ + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "persistentVolumeName": "Name of the persistent volume to attach.", +} + +func (VolumeAttachmentSource) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSource +} + +var map_VolumeAttachmentSpec = map[string]string{ + "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", + "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. 
This is the name returned by GetPluginName().", + "source": "Source represents the volume that should be attached.", + "nodeName": "The node that the volume should be attached to.", +} + +func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSpec +} + +var map_VolumeAttachmentStatus = map[string]string{ + "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", + "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { + return map_VolumeAttachmentStatus +} + +var map_VolumeError = map[string]string{ + "": "VolumeError captures an error encountered during a volume operation.", + "time": "Time the error was encountered.", + "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", +} + +func (VolumeError) SwaggerDoc() map[string]string { + return map_VolumeError +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..7c7c8fde5 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,292 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageClass) DeepCopyInto(out *StorageClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ReclaimPolicy != nil { + in, out := &in.ReclaimPolicy, &out.ReclaimPolicy + if *in == nil { + *out = nil + } else { + *out = new(v1.PersistentVolumeReclaimPolicy) + **out = **in + } + } + if in.MountOptions != nil { + in, out := &in.MountOptions, &out.MountOptions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowVolumeExpansion != nil { + in, out := &in.AllowVolumeExpansion, &out.AllowVolumeExpansion + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.VolumeBindingMode != nil { + in, out := &in.VolumeBindingMode, &out.VolumeBindingMode + if *in == nil { + *out = nil + } else { + *out = new(VolumeBindingMode) + **out = **in + } + } + if in.AllowedTopologies != nil { + in, out := &in.AllowedTopologies, &out.AllowedTopologies + *out = make([]v1.TopologySelectorTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClass. +func (in *StorageClass) DeepCopy() *StorageClass { + if in == nil { + return nil + } + out := new(StorageClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassList. +func (in *StorageClassList) DeepCopy() *StorageClassList { + if in == nil { + return nil + } + out := new(StorageClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. +func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { + if in == nil { + return nil + } + out := new(VolumeAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VolumeAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. +func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { + if in == nil { + return nil + } + out := new(VolumeAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { + *out = *in + if in.PersistentVolumeName != nil { + in, out := &in.PersistentVolumeName, &out.PersistentVolumeName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. +func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { + if in == nil { + return nil + } + out := new(VolumeAttachmentSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. +func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { + if in == nil { + return nil + } + out := new(VolumeAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { + *out = *in + if in.AttachmentMetadata != nil { + in, out := &in.AttachmentMetadata, &out.AttachmentMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AttachError != nil { + in, out := &in.AttachError, &out.AttachError + if *in == nil { + *out = nil + } else { + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + } + if in.DetachError != nil { + in, out := &in.DetachError, &out.DetachError + if *in == nil { + *out = nil + } else { + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. +func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { + if in == nil { + return nil + } + out := new(VolumeAttachmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
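
The generated deepcopy helpers above give every object an independent copy, including its maps and pointer fields. A small sketch (not part of the patch) of what that guarantees in practice:

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
)

func main() {
	orig := &storagev1beta1.StorageClass{
		Provisioner: "kubernetes.io/no-provisioner",
		Parameters:  map[string]string{"type": "local"},
	}

	cp := orig.DeepCopy()
	cp.Parameters["type"] = "ssd" // the Parameters map was copied, not shared

	fmt.Println(orig.Parameters["type"], cp.Parameters["type"]) // local ssd
}

DeepCopyObject additionally returns the copy as a runtime.Object, which is what lets the scheme and client machinery clone objects without knowing their concrete types.
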
in must be non-nil. +func (in *VolumeError) DeepCopyInto(out *VolumeError) { + *out = *in + in.Time.DeepCopyInto(&out.Time) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. +func (in *VolumeError) DeepCopy() *VolumeError { + if in == nil { + return nil + } + out := new(VolumeError) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS index ae828ad25..dc6a4c724 100755 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -14,14 +14,11 @@ reviewers: - erictune - saad-ali - janetkuo -- timstclair +- tallclair - eparis -- timothysc - dims -- spxtr - hongchaodeng - krousey -- satnam6502 - cjcullen - david-mcmahon - goltermann diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index 560c889b9..bcc032df9 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -28,14 +28,10 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" ) -// HTTP Status codes not in the golang http package. const ( - StatusUnprocessableEntity = 422 - StatusTooManyRequests = 429 - // StatusServerTimeout is an indication that a transient server error has - // occurred and the client *should* retry, with an optional Retry-After - // header to specify the back off window. - StatusServerTimeout = 504 + // StatusTooManyRequests means the server experienced too many requests within a + // given window and that the client must wait to perform the action again. + StatusTooManyRequests = 429 ) // StatusError is an error intended for consumption by a REST API server; it can also be @@ -138,6 +134,14 @@ func NewUnauthorized(reason string) *StatusError { // NewForbidden returns an error indicating the requested action was forbidden func NewForbidden(qualifiedResource schema.GroupResource, name string, err error) *StatusError { + var message string + if qualifiedResource.Empty() { + message = fmt.Sprintf("forbidden: %v", err) + } else if name == "" { + message = fmt.Sprintf("%s is forbidden: %v", qualifiedResource.String(), err) + } else { + message = fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err) + } return &StatusError{metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusForbidden, @@ -147,7 +151,7 @@ func NewForbidden(qualifiedResource schema.GroupResource, name string, err error Kind: qualifiedResource.Resource, Name: name, }, - Message: fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err), + Message: message, }} } @@ -176,6 +180,17 @@ func NewGone(message string) *StatusError { }} } +// NewResourceExpired creates an error that indicates that the requested resource content has expired from +// the server (usually due to a resourceVersion that is too old). +func NewResourceExpired(message string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusGone, + Reason: metav1.StatusReasonExpired, + Message: message, + }} +} + // NewInvalid returns an error indicating the item is invalid and cannot be processed. 
func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorList) *StatusError { causes := make([]metav1.StatusCause, 0, len(errs)) @@ -189,7 +204,7 @@ func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorLis } return &StatusError{metav1.Status{ Status: metav1.StatusFailure, - Code: StatusUnprocessableEntity, // RFC 4918: StatusUnprocessableEntity + Code: http.StatusUnprocessableEntity, Reason: metav1.StatusReasonInvalid, Details: &metav1.StatusDetails{ Group: qualifiedKind.Group, @@ -211,6 +226,21 @@ func NewBadRequest(reason string) *StatusError { }} } +// NewTooManyRequests creates an error that indicates that the client must try again later because +// the specified endpoint is not accepting requests. More specific details should be provided +// if client should know why the failure was limited4. +func NewTooManyRequests(message string, retryAfterSeconds int) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusTooManyRequests, + Reason: metav1.StatusReasonTooManyRequests, + Message: message, + Details: &metav1.StatusDetails{ + RetryAfterSeconds: int32(retryAfterSeconds), + }, + }} +} + // NewServiceUnavailable creates an error that indicates that the requested service is unavailable. func NewServiceUnavailable(reason string) *StatusError { return &StatusError{metav1.Status{ @@ -276,7 +306,7 @@ func NewInternalError(err error) *StatusError { func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { return &StatusError{metav1.Status{ Status: metav1.StatusFailure, - Code: StatusServerTimeout, + Code: http.StatusGatewayTimeout, Reason: metav1.StatusReasonTimeout, Message: fmt.Sprintf("Timeout: %s", message), Details: &metav1.StatusDetails{ @@ -285,6 +315,18 @@ func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { }} } +// NewTooManyRequestsError returns an error indicating that the request was rejected because +// the server has received too many requests. Client should wait and retry. But if the request +// is perishable, then the client should not retry the request. +func NewTooManyRequestsError(message string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: StatusTooManyRequests, + Reason: metav1.StatusReasonTooManyRequests, + Message: fmt.Sprintf("Too many requests: %s", message), + }} +} + // NewGenericServerResponse returns a new error for server responses that are not in a recognizable form. func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError { reason := metav1.StatusReasonUnknown @@ -310,17 +352,28 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr reason = metav1.StatusReasonForbidden // the server message has details about who is trying to perform what action. Keep its message. 
message = serverMessage + case http.StatusNotAcceptable: + reason = metav1.StatusReasonNotAcceptable + // the server message has details about what types are acceptable + message = serverMessage + case http.StatusUnsupportedMediaType: + reason = metav1.StatusReasonUnsupportedMediaType + // the server message has details about what types are acceptable + message = serverMessage case http.StatusMethodNotAllowed: reason = metav1.StatusReasonMethodNotAllowed message = "the server does not allow this method on the requested resource" - case StatusUnprocessableEntity: + case http.StatusUnprocessableEntity: reason = metav1.StatusReasonInvalid message = "the server rejected our request due to an error in our request" - case StatusServerTimeout: - reason = metav1.StatusReasonServerTimeout - message = "the server cannot complete the requested operation at this time, try again later" - case StatusTooManyRequests: + case http.StatusServiceUnavailable: + reason = metav1.StatusReasonServiceUnavailable + message = "the server is currently unable to handle the request" + case http.StatusGatewayTimeout: reason = metav1.StatusReasonTimeout + message = "the server was unable to return a response in the time allotted, but may still be processing the request" + case http.StatusTooManyRequests: + reason = metav1.StatusReasonTooManyRequests message = "the server has received too many requests and has asked us to try again later" default: if code >= 500 { @@ -363,71 +416,99 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr // IsNotFound returns true if the specified error was created by NewNotFound. func IsNotFound(err error) bool { - return reasonForError(err) == metav1.StatusReasonNotFound + return ReasonForError(err) == metav1.StatusReasonNotFound } // IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists. func IsAlreadyExists(err error) bool { - return reasonForError(err) == metav1.StatusReasonAlreadyExists + return ReasonForError(err) == metav1.StatusReasonAlreadyExists } // IsConflict determines if the err is an error which indicates the provided update conflicts. func IsConflict(err error) bool { - return reasonForError(err) == metav1.StatusReasonConflict + return ReasonForError(err) == metav1.StatusReasonConflict } // IsInvalid determines if the err is an error which indicates the provided resource is not valid. func IsInvalid(err error) bool { - return reasonForError(err) == metav1.StatusReasonInvalid + return ReasonForError(err) == metav1.StatusReasonInvalid +} + +// IsGone is true if the error indicates the requested resource is no longer available. +func IsGone(err error) bool { + return ReasonForError(err) == metav1.StatusReasonGone +} + +// IsResourceExpired is true if the error indicates the resource has expired and the current action is +// no longer possible. 
+func IsResourceExpired(err error) bool { + return ReasonForError(err) == metav1.StatusReasonExpired +} + +// IsNotAcceptable determines if err is an error which indicates that the request failed due to an invalid Accept header +func IsNotAcceptable(err error) bool { + return ReasonForError(err) == metav1.StatusReasonNotAcceptable +} + +// IsUnsupportedMediaType determines if err is an error which indicates that the request failed due to an invalid Content-Type header +func IsUnsupportedMediaType(err error) bool { + return ReasonForError(err) == metav1.StatusReasonUnsupportedMediaType } // IsMethodNotSupported determines if the err is an error which indicates the provided action could not // be performed because it is not supported by the server. func IsMethodNotSupported(err error) bool { - return reasonForError(err) == metav1.StatusReasonMethodNotAllowed + return ReasonForError(err) == metav1.StatusReasonMethodNotAllowed +} + +// IsServiceUnavailable is true if the error indicates the underlying service is no longer available. +func IsServiceUnavailable(err error) bool { + return ReasonForError(err) == metav1.StatusReasonServiceUnavailable } // IsBadRequest determines if err is an error which indicates that the request is invalid. func IsBadRequest(err error) bool { - return reasonForError(err) == metav1.StatusReasonBadRequest + return ReasonForError(err) == metav1.StatusReasonBadRequest } // IsUnauthorized determines if err is an error which indicates that the request is unauthorized and // requires authentication by the user. func IsUnauthorized(err error) bool { - return reasonForError(err) == metav1.StatusReasonUnauthorized + return ReasonForError(err) == metav1.StatusReasonUnauthorized } // IsForbidden determines if err is an error which indicates that the request is forbidden and cannot // be completed as requested. func IsForbidden(err error) bool { - return reasonForError(err) == metav1.StatusReasonForbidden + return ReasonForError(err) == metav1.StatusReasonForbidden } // IsTimeout determines if err is an error which indicates that request times out due to long // processing. func IsTimeout(err error) bool { - return reasonForError(err) == metav1.StatusReasonTimeout + return ReasonForError(err) == metav1.StatusReasonTimeout } // IsServerTimeout determines if err is an error which indicates that the request needs to be retried // by the client. func IsServerTimeout(err error) bool { - return reasonForError(err) == metav1.StatusReasonServerTimeout + return ReasonForError(err) == metav1.StatusReasonServerTimeout } // IsInternalError determines if err is an error which indicates an internal server error. func IsInternalError(err error) bool { - return reasonForError(err) == metav1.StatusReasonInternalError + return ReasonForError(err) == metav1.StatusReasonInternalError } // IsTooManyRequests determines if err is an error which indicates that there are too many requests // that the server cannot handle. 
-// TODO: update IsTooManyRequests() when the TooManyRequests(429) error returned from the API server has a non-empty Reason field func IsTooManyRequests(err error) bool { + if ReasonForError(err) == metav1.StatusReasonTooManyRequests { + return true + } switch t := err.(type) { case APIStatus: - return t.Status().Code == StatusTooManyRequests + return t.Status().Code == http.StatusTooManyRequests } return false } @@ -455,13 +536,20 @@ func IsUnexpectedObjectError(err error) bool { } // SuggestsClientDelay returns true if this error suggests a client delay as well as the -// suggested seconds to wait, or false if the error does not imply a wait. +// suggested seconds to wait, or false if the error does not imply a wait. It does not +// address whether the error *should* be retried, since some errors (like a 3xx) may +// request delay without retry. func SuggestsClientDelay(err error) (int, bool) { switch t := err.(type) { case APIStatus: if t.Status().Details != nil { switch t.Status().Reason { - case metav1.StatusReasonServerTimeout, metav1.StatusReasonTimeout: + // this StatusReason explicitly requests the caller to delay the action + case metav1.StatusReasonServerTimeout: + return int(t.Status().Details.RetryAfterSeconds), true + } + // If the client requests that we retry after a certain number of seconds + if t.Status().Details.RetryAfterSeconds > 0 { return int(t.Status().Details.RetryAfterSeconds), true } } @@ -469,7 +557,8 @@ func SuggestsClientDelay(err error) (int, bool) { return 0, false } -func reasonForError(err error) metav1.StatusReason { +// ReasonForError returns the HTTP status for a particular error. +func ReasonForError(err error) metav1.StatusReason { switch t := err.(type) { case APIStatus: return t.Status().Reason diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS index 6044c031e..5f729ffe6 100755 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS @@ -10,7 +10,6 @@ reviewers: - liggitt - nikhiljindal - gmarek -- kargakis - janetkuo - ncdc - eparis diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/default.go b/vendor/k8s.io/apimachinery/pkg/api/meta/default.go deleted file mode 100644 index 5ea906a2a..000000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/default.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "strings" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" -) - -// NewDefaultRESTMapperFromScheme instantiates a DefaultRESTMapper based on types registered in the given scheme. 
-func NewDefaultRESTMapperFromScheme(defaultGroupVersions []schema.GroupVersion, interfacesFunc VersionInterfacesFunc, - importPathPrefix string, ignoredKinds, rootScoped sets.String, scheme *runtime.Scheme) *DefaultRESTMapper { - - mapper := NewDefaultRESTMapper(defaultGroupVersions, interfacesFunc) - // enumerate all supported versions, get the kinds, and register with the mapper how to address - // our resources. - for _, gv := range defaultGroupVersions { - for kind, oType := range scheme.KnownTypes(gv) { - gvk := gv.WithKind(kind) - // TODO: Remove import path check. - // We check the import path because we currently stuff both "api" and "extensions" objects - // into the same group within Scheme since Scheme has no notion of groups yet. - if !strings.Contains(oType.PkgPath(), importPathPrefix) || ignoredKinds.Has(kind) { - continue - } - scope := RESTScopeNamespace - if rootScoped.Has(kind) { - scope = RESTScopeRoot - } - mapper.Add(gvk, scope) - } - } - return mapper -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go index 1503bd6d8..cbf5d0263 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go @@ -20,6 +20,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" ) // AmbiguousResourceError is returned if the RESTMapper finds multiple matches for a resource @@ -85,11 +86,26 @@ func (e *NoResourceMatchError) Error() string { // NoKindMatchError is returned if the RESTMapper can't find any match for a kind type NoKindMatchError struct { - PartialKind schema.GroupVersionKind + // GroupKind is the API group and kind that was searched + GroupKind schema.GroupKind + // SearchedVersions is the optional list of versions the search was restricted to + SearchedVersions []string } func (e *NoKindMatchError) Error() string { - return fmt.Sprintf("no matches for %v", e.PartialKind) + searchedVersions := sets.NewString() + for _, v := range e.SearchedVersions { + searchedVersions.Insert(schema.GroupVersion{Group: e.GroupKind.Group, Version: v}.String()) + } + + switch len(searchedVersions) { + case 0: + return fmt.Sprintf("no matches for kind %q in group %q", e.GroupKind.Kind, e.GroupKind.Group) + case 1: + return fmt.Sprintf("no matches for kind %q in version %q", e.GroupKind.Kind, searchedVersions.List()[0]) + default: + return fmt.Sprintf("no matches for kind %q in versions %q", e.GroupKind.Kind, searchedVersions.List()) + } } func IsNoMatchError(err error) bool { diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index 930441fa6..c70b3d2b6 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -67,6 +67,9 @@ func GetItemsPtr(list runtime.Object) (interface{}, error) { // EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates // the loop. func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { + if unstructured, ok := obj.(runtime.Unstructured); ok { + return unstructured.EachListItem(fn) + } // TODO: Change to an interface call? 
itemsPtr, err := GetItemsPtr(obj) if err != nil { @@ -175,6 +178,9 @@ func SetList(list runtime.Object, objects []runtime.Object) error { slice := reflect.MakeSlice(items.Type(), len(objects), len(objects)) for i := range objects { dest := slice.Index(i) + if dest.Type() == reflect.TypeOf(runtime.RawExtension{}) { + dest = dest.FieldByName("Object") + } // check to see if you're directly assignable if reflect.TypeOf(objects[i]).AssignableTo(dest.Type()) { diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go index 524b1ebd8..42eac3af0 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go @@ -23,12 +23,6 @@ import ( "k8s.io/apimachinery/pkg/types" ) -// VersionInterfaces contains the interfaces one should use for dealing with types of a particular version. -type VersionInterfaces struct { - runtime.ObjectConvertor - MetadataAccessor -} - type ListMetaAccessor interface { GetListMeta() List } @@ -36,7 +30,7 @@ type ListMetaAccessor interface { // List lets you work with list metadata from any of the versioned or // internal API objects. Attempting to set or retrieve a field on an object that does // not support that field will be a no-op and return a default value. -type List metav1.List +type List metav1.ListInterface // Type exposes the type and APIVersion of versioned or internal API objects. type Type metav1.Type @@ -75,6 +69,9 @@ type MetadataAccessor interface { Annotations(obj runtime.Object) (map[string]string, error) SetAnnotations(obj runtime.Object, annotations map[string]string) error + Continue(obj runtime.Object) (string, error) + SetContinue(obj runtime.Object, c string) error + runtime.ResourceVersioner } @@ -89,28 +86,19 @@ const ( type RESTScope interface { // Name of the scope Name() RESTScopeName - // ParamName is the optional name of the parameter that should be inserted in the resource url - // If empty, no param will be inserted - ParamName() string - // ArgumentName is the optional name that should be used for the variable holding the value. - ArgumentName() string - // ParamDescription is the optional description to use to document the parameter in api documentation - ParamDescription() string } // RESTMapping contains the information needed to deal with objects of a specific // resource and kind in a RESTful manner. type RESTMapping struct { - // Resource is a string representing the name of this resource as a REST client would see it - Resource string + // Resource is the GroupVersionResource (location) for this endpoint + Resource schema.GroupVersionResource + // GroupVersionKind is the GroupVersionKind (data format) to submit to this endpoint GroupVersionKind schema.GroupVersionKind // Scope contains the information needed to deal with REST Resources that are in a resource hierarchy Scope RESTScope - - runtime.ObjectConvertor - MetadataAccessor } // RESTMapper allows clients to map resources to kind, and map kind and version @@ -142,6 +130,5 @@ type RESTMapper interface { // the provided version(s). 
RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) - AliasesForResource(resource string) ([]string, bool) ResourceSingularizer(resource string) (singular string, err error) } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go new file mode 100644 index 000000000..431a0a635 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go @@ -0,0 +1,104 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// lazyObject defers loading the mapper and typer until necessary. +type lazyObject struct { + loader func() (RESTMapper, error) + + lock sync.Mutex + loaded bool + err error + mapper RESTMapper +} + +// NewLazyObjectLoader handles unrecoverable errors when creating a RESTMapper / ObjectTyper by +// returning those initialization errors when the interface methods are invoked. This defers the +// initialization and any server calls until a client actually needs to perform the action. +func NewLazyRESTMapperLoader(fn func() (RESTMapper, error)) RESTMapper { + obj := &lazyObject{loader: fn} + return obj +} + +// init lazily loads the mapper and typer, returning an error if initialization has failed. +func (o *lazyObject) init() error { + o.lock.Lock() + defer o.lock.Unlock() + if o.loaded { + return o.err + } + o.mapper, o.err = o.loader() + o.loaded = true + return o.err +} + +var _ RESTMapper = &lazyObject{} + +func (o *lazyObject) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + if err := o.init(); err != nil { + return schema.GroupVersionKind{}, err + } + return o.mapper.KindFor(resource) +} + +func (o *lazyObject) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + if err := o.init(); err != nil { + return []schema.GroupVersionKind{}, err + } + return o.mapper.KindsFor(resource) +} + +func (o *lazyObject) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + if err := o.init(); err != nil { + return schema.GroupVersionResource{}, err + } + return o.mapper.ResourceFor(input) +} + +func (o *lazyObject) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + if err := o.init(); err != nil { + return []schema.GroupVersionResource{}, err + } + return o.mapper.ResourcesFor(input) +} + +func (o *lazyObject) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { + if err := o.init(); err != nil { + return nil, err + } + return o.mapper.RESTMapping(gk, versions...) +} + +func (o *lazyObject) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + if err := o.init(); err != nil { + return nil, err + } + return o.mapper.RESTMappings(gk, versions...) 
+} + +func (o *lazyObject) ResourceSingularizer(resource string) (singular string, err error) { + if err := o.init(); err != nil { + return "", err + } + return o.mapper.ResourceSingularizer(resource) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 7492324ed..1c2a83cfa 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -23,6 +23,7 @@ import ( "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -33,16 +34,47 @@ import ( // interfaces. var errNotList = fmt.Errorf("object does not implement the List interfaces") +var errNotCommon = fmt.Errorf("object does not implement the common interface for accessing the SelfLink") + +// CommonAccessor returns a Common interface for the provided object or an error if the object does +// not provide List. +func CommonAccessor(obj interface{}) (metav1.Common, error) { + switch t := obj.(type) { + case List: + return t, nil + case metav1.ListInterface: + return t, nil + case ListMetaAccessor: + if m := t.GetListMeta(); m != nil { + return m, nil + } + return nil, errNotCommon + case metav1.ListMetaAccessor: + if m := t.GetListMeta(); m != nil { + return m, nil + } + return nil, errNotCommon + case metav1.Object: + return t, nil + case metav1.ObjectMetaAccessor: + if m := t.GetObjectMeta(); m != nil { + return m, nil + } + return nil, errNotCommon + default: + return nil, errNotCommon + } +} + // ListAccessor returns a List interface for the provided object or an error if the object does // not provide List. -// IMPORTANT: Objects are a superset of lists, so all Objects return List metadata. Do not use this -// check to determine whether an object *is* a List. -// TODO: return bool instead of error +// IMPORTANT: Objects are NOT a superset of lists. Do not use this check to determine whether an +// object *is* a List. func ListAccessor(obj interface{}) (List, error) { switch t := obj.(type) { case List: return t, nil - case metav1.List: + case metav1.ListInterface: return t, nil case ListMetaAccessor: if m := t.GetListMeta(); m != nil { @@ -54,13 +86,6 @@ func ListAccessor(obj interface{}) (List, error) { return m, nil } return nil, errNotList - case metav1.Object: - return t, nil - case metav1.ObjectMetaAccessor: - if m := t.GetObjectMeta(); m != nil { - return m, nil - } - return nil, errNotList default: return nil, errNotList } @@ -74,7 +99,6 @@ var errNotObject = fmt.Errorf("object does not implement the Object interfaces") // obj must be a pointer to an API type. An error is returned if the minimum // required fields are missing. Fields that are not required return the default // value and are a no-op if set. -// TODO: return bool instead of error func Accessor(obj interface{}) (metav1.Object, error) { switch t := obj.(type) { case metav1.Object: @@ -84,13 +108,41 @@ func Accessor(obj interface{}) (metav1.Object, error) { return m, nil } return nil, errNotObject - case List, metav1.List, ListMetaAccessor, metav1.ListMetaAccessor: - return nil, errNotObject default: return nil, errNotObject } } +// AsPartialObjectMetadata takes the metav1 interface and returns a partial object. +// TODO: consider making this solely a conversion action. 
+func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata { + switch t := m.(type) { + case *metav1.ObjectMeta: + return &metav1beta1.PartialObjectMetadata{ObjectMeta: *t} + default: + return &metav1beta1.PartialObjectMetadata{ + ObjectMeta: metav1.ObjectMeta{ + Name: m.GetName(), + GenerateName: m.GetGenerateName(), + Namespace: m.GetNamespace(), + SelfLink: m.GetSelfLink(), + UID: m.GetUID(), + ResourceVersion: m.GetResourceVersion(), + Generation: m.GetGeneration(), + CreationTimestamp: m.GetCreationTimestamp(), + DeletionTimestamp: m.GetDeletionTimestamp(), + DeletionGracePeriodSeconds: m.GetDeletionGracePeriodSeconds(), + Labels: m.GetLabels(), + Annotations: m.GetAnnotations(), + OwnerReferences: m.GetOwnerReferences(), + Finalizers: m.GetFinalizers(), + ClusterName: m.GetClusterName(), + Initializers: m.GetInitializers(), + }, + } + } +} + // TypeAccessor returns an interface that allows retrieving and modifying the APIVersion // and Kind of an in-memory internal object. // TODO: this interface is used to test code that does not have ObjectMeta or ListMeta @@ -245,7 +297,7 @@ func (resourceAccessor) SetUID(obj runtime.Object, uid types.UID) error { } func (resourceAccessor) SelfLink(obj runtime.Object) (string, error) { - accessor, err := ListAccessor(obj) + accessor, err := CommonAccessor(obj) if err != nil { return "", err } @@ -253,7 +305,7 @@ func (resourceAccessor) SelfLink(obj runtime.Object) (string, error) { } func (resourceAccessor) SetSelfLink(obj runtime.Object, selfLink string) error { - accessor, err := ListAccessor(obj) + accessor, err := CommonAccessor(obj) if err != nil { return err } @@ -296,7 +348,7 @@ func (resourceAccessor) SetAnnotations(obj runtime.Object, annotations map[strin } func (resourceAccessor) ResourceVersion(obj runtime.Object) (string, error) { - accessor, err := ListAccessor(obj) + accessor, err := CommonAccessor(obj) if err != nil { return "", err } @@ -304,7 +356,7 @@ func (resourceAccessor) ResourceVersion(obj runtime.Object) (string, error) { } func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) error { - accessor, err := ListAccessor(obj) + accessor, err := CommonAccessor(obj) if err != nil { return err } @@ -312,6 +364,23 @@ func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) e return nil } +func (resourceAccessor) Continue(obj runtime.Object) (string, error) { + accessor, err := ListAccessor(obj) + if err != nil { + return "", err + } + return accessor.GetContinue(), nil +} + +func (resourceAccessor) SetContinue(obj runtime.Object, version string) error { + accessor, err := ListAccessor(obj) + if err != nil { + return err + } + accessor.SetContinue(version) + return nil +} + // extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object. func extractFromOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go index d32b0dbf7..6b01bf197 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go @@ -22,7 +22,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" ) // MultiRESTMapper is a wrapper for multiple RESTMappers. 
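With ListAccessor narrowed to true list objects and Continue/SetContinue added to the metadata accessors, callers read pagination tokens through the list interface rather than through object metadata. Below is a minimal sketch of that pattern, assuming the vendored k8s.io/api/core/v1 package; the continueToken helper is illustrative, not part of this patch's APIs.

package example

import (
	corev1 "k8s.io/api/core/v1"

	"k8s.io/apimachinery/pkg/api/meta"
)

// continueToken extracts the pagination token from a list object via the
// list accessor. ListAccessor now rejects plain Objects, so the error must
// be checked instead of assuming every object exposes list metadata.
func continueToken(list *corev1.PodList) (string, error) {
	acc, err := meta.ListAccessor(list)
	if err != nil {
		return "", err
	}
	return acc.GetContinue(), nil
}

A caller would pass the token back as ListOptions.Continue on the next request until it comes back empty.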
@@ -180,7 +179,7 @@ func (m MultiRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (* if len(errors) > 0 { return nil, utilerrors.NewAggregate(errors) } - return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} } // RESTMappings returns all possible RESTMappings for the provided group kind, or an error @@ -205,27 +204,7 @@ func (m MultiRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ( return nil, utilerrors.NewAggregate(errors) } if len(allMappings) == 0 { - return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} } return allMappings, nil } - -// AliasesForResource finds the first alias response for the provided mappers. -func (m MultiRESTMapper) AliasesForResource(alias string) ([]string, bool) { - seenAliases := sets.NewString() - allAliases := []string{} - handled := false - - for _, t := range m { - if currAliases, currOk := t.AliasesForResource(alias); currOk { - for _, currAlias := range currAliases { - if !seenAliases.Has(currAlias) { - allAliases = append(allAliases, currAlias) - seenAliases.Insert(currAlias) - } - } - handled = true - } - } - return allAliases, handled -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go index 91203d11f..fa11c580f 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go @@ -54,12 +54,12 @@ func (m PriorityRESTMapper) String() string { // ResourceFor finds all resources, then passes them through the ResourcePriority patterns to find a single matching hit. func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionResource, error) { - originalGVRs, err := m.Delegate.ResourcesFor(partiallySpecifiedResource) - if err != nil { - return schema.GroupVersionResource{}, err + originalGVRs, originalErr := m.Delegate.ResourcesFor(partiallySpecifiedResource) + if originalErr != nil && len(originalGVRs) == 0 { + return schema.GroupVersionResource{}, originalErr } if len(originalGVRs) == 1 { - return originalGVRs[0], nil + return originalGVRs[0], originalErr } remainingGVRs := append([]schema.GroupVersionResource{}, originalGVRs...) @@ -77,7 +77,7 @@ func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupV continue case 1: // one match, return - return matchedGVRs[0], nil + return matchedGVRs[0], originalErr default: // more than one match, use the matched hits as the list moving to the next pattern. // this way you can have a series of selection criteria @@ -90,12 +90,12 @@ func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupV // KindFor finds all kinds, then passes them through the KindPriority patterns to find a single matching hit. func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - originalGVKs, err := m.Delegate.KindsFor(partiallySpecifiedResource) - if err != nil { - return schema.GroupVersionKind{}, err + originalGVKs, originalErr := m.Delegate.KindsFor(partiallySpecifiedResource) + if originalErr != nil && len(originalGVKs) == 0 { + return schema.GroupVersionKind{}, originalErr } if len(originalGVKs) == 1 { - return originalGVKs[0], nil + return originalGVKs[0], originalErr } remainingGVKs := append([]schema.GroupVersionKind{}, originalGVKs...) 
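Because NoKindMatchError now carries the searched GroupKind and versions, mapper callers can distinguish an unserved kind from other failures and surface the richer message. A minimal sketch under the assumption of an already-constructed RESTMapper; mappingFor and the wrapping message are illustrative.

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// mappingFor resolves a RESTMapping and separates "kind not served by the
// cluster" from other mapper failures using IsNoMatchError.
func mappingFor(mapper meta.RESTMapper, gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
	m, err := mapper.RESTMapping(gk, versions...)
	if meta.IsNoMatchError(err) {
		// The error text now names the group, kind and searched versions,
		// e.g. `no matches for kind "Deployment" in group "apps"`.
		return nil, fmt.Errorf("server does not serve %s: %v", gk, err)
	}
	if err != nil {
		return nil, err
	}
	return m, nil
}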
@@ -113,7 +113,7 @@ func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource schema.GroupVersi continue case 1: // one match, return - return matchedGVKs[0], nil + return matchedGVKs[0], originalErr default: // more than one match, use the matched hits as the list moving to the next pattern. // this way you can have a series of selection criteria @@ -153,9 +153,9 @@ func kindMatches(pattern schema.GroupVersionKind, kind schema.GroupVersionKind) } func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (mapping *RESTMapping, err error) { - mappings, err := m.Delegate.RESTMappings(gk) - if err != nil { - return nil, err + mappings, originalErr := m.Delegate.RESTMappings(gk, versions...) + if originalErr != nil && len(mappings) == 0 { + return nil, originalErr } // any versions the user provides take priority @@ -187,7 +187,7 @@ func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) continue case 1: // one match, return - return matching[0], nil + return matching[0], originalErr default: // more than one match, use the matched hits as the list moving to the next pattern. // this way you can have a series of selection criteria @@ -195,7 +195,7 @@ func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) } } if len(remaining) == 1 { - return remaining[0], nil + return remaining[0], originalErr } var kinds []schema.GroupVersionKind @@ -209,10 +209,6 @@ func (m PriorityRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string return m.Delegate.RESTMappings(gk, versions...) } -func (m PriorityRESTMapper) AliasesForResource(alias string) (aliases []string, ok bool) { - return m.Delegate.AliasesForResource(alias) -} - func (m PriorityRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { return m.Delegate.ResourceSingularizer(resource) } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go index bf4c77a82..41b60d731 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go @@ -28,30 +28,15 @@ import ( // Implements RESTScope interface type restScope struct { - name RESTScopeName - paramName string - argumentName string - paramDescription string + name RESTScopeName } func (r *restScope) Name() RESTScopeName { return r.name } -func (r *restScope) ParamName() string { - return r.paramName -} -func (r *restScope) ArgumentName() string { - return r.argumentName -} -func (r *restScope) ParamDescription() string { - return r.paramDescription -} var RESTScopeNamespace = &restScope{ - name: RESTScopeNameNamespace, - paramName: "namespaces", - argumentName: "namespace", - paramDescription: "object name and auth scope, such as for teams and projects", + name: RESTScopeNameNamespace, } var RESTScopeRoot = &restScope{ @@ -77,11 +62,6 @@ type DefaultRESTMapper struct { kindToScope map[schema.GroupVersionKind]RESTScope singularToPlural map[schema.GroupVersionResource]schema.GroupVersionResource pluralToSingular map[schema.GroupVersionResource]schema.GroupVersionResource - - interfacesFunc VersionInterfacesFunc - - // aliasToResource is used for mapping aliases to resources - aliasToResource map[string][]string } func (m *DefaultRESTMapper) String() string { @@ -90,22 +70,17 @@ func (m *DefaultRESTMapper) String() string { var _ RESTMapper = &DefaultRESTMapper{} -// VersionInterfacesFunc returns the appropriate typer, and metadata accessor for a -// given api 
version, or an error if no such api version exists. -type VersionInterfacesFunc func(version schema.GroupVersion) (*VersionInterfaces, error) - // NewDefaultRESTMapper initializes a mapping between Kind and APIVersion // to a resource name and back based on the objects in a runtime.Scheme // and the Kubernetes API conventions. Takes a group name, a priority list of the versions // to search when an object has no default version (set empty to return an error), // and a function that retrieves the correct metadata for a given version. -func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion, f VersionInterfacesFunc) *DefaultRESTMapper { +func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion) *DefaultRESTMapper { resourceToKind := make(map[schema.GroupVersionResource]schema.GroupVersionKind) kindToPluralResource := make(map[schema.GroupVersionKind]schema.GroupVersionResource) kindToScope := make(map[schema.GroupVersionKind]RESTScope) singularToPlural := make(map[schema.GroupVersionResource]schema.GroupVersionResource) pluralToSingular := make(map[schema.GroupVersionResource]schema.GroupVersionResource) - aliasToResource := make(map[string][]string) // TODO: verify name mappings work correctly when versions differ return &DefaultRESTMapper{ @@ -115,14 +90,15 @@ func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion, f VersionI defaultGroupVersions: defaultGroupVersions, singularToPlural: singularToPlural, pluralToSingular: pluralToSingular, - aliasToResource: aliasToResource, - interfacesFunc: f, } } func (m *DefaultRESTMapper) Add(kind schema.GroupVersionKind, scope RESTScope) { - plural, singular := KindToResource(kind) + plural, singular := UnsafeGuessKindToResource(kind) + m.AddSpecific(kind, plural, singular, scope) +} +func (m *DefaultRESTMapper) AddSpecific(kind schema.GroupVersionKind, plural, singular schema.GroupVersionResource, scope RESTScope) { m.singularToPlural[singular] = plural m.pluralToSingular[plural] = singular @@ -141,10 +117,10 @@ var unpluralizedSuffixes = []string{ "endpoints", } -// KindToResource converts Kind to a resource name. +// UnsafeGuessKindToResource converts Kind to a resource name. // Broken. This method only "sort of" works when used outside of this package. It assumes that Kinds and Resources match // and they aren't guaranteed to do so. 
-func KindToResource(kind schema.GroupVersionKind) ( /*plural*/ schema.GroupVersionResource /*singular*/, schema.GroupVersionResource) { +func UnsafeGuessKindToResource(kind schema.GroupVersionKind) ( /*plural*/ schema.GroupVersionResource /*singular*/, schema.GroupVersionResource) { kindName := kind.Kind if len(kindName) == 0 { return schema.GroupVersionResource{}, schema.GroupVersionResource{} @@ -474,7 +450,7 @@ func (m *DefaultRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) return nil, err } if len(mappings) == 0 { - return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} } // since we rely on RESTMappings method // take the first match and return to the caller @@ -512,7 +488,7 @@ func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string } if len(potentialGVK) == 0 { - return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} } for _, gvk := range potentialGVK { @@ -528,18 +504,10 @@ func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion(), gvk.Kind) } - interfaces, err := m.interfacesFunc(gvk.GroupVersion()) - if err != nil { - return nil, fmt.Errorf("the provided version %q has no relevant versions: %v", gvk.GroupVersion().String(), err) - } - mappings = append(mappings, &RESTMapping{ - Resource: res.Resource, + Resource: res, GroupVersionKind: gvk, Scope: scope, - - ObjectConvertor: interfaces.ObjectConvertor, - MetadataAccessor: interfaces.MetadataAccessor, }) } @@ -548,19 +516,3 @@ func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string } return mappings, nil } - -// AddResourceAlias maps aliases to resources -func (m *DefaultRESTMapper) AddResourceAlias(alias string, resources ...string) { - if len(resources) == 0 { - return - } - m.aliasToResource[alias] = resources -} - -// AliasesForResource returns whether a resource has an alias or not -func (m *DefaultRESTMapper) AliasesForResource(alias string) ([]string, bool) { - if res, ok := m.aliasToResource[alias]; ok { - return res, true - } - return nil, false -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go b/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go deleted file mode 100644 index 3ebf24815..000000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// InterfacesForUnstructured returns VersionInterfaces suitable for -// dealing with unstructured.Unstructured objects. 
-func InterfacesForUnstructured(schema.GroupVersion) (*VersionInterfaces, error) { - return &VersionInterfaces{ - ObjectConvertor: &unstructured.UnstructuredObjectConverter{}, - MetadataAccessor: NewAccessor(), - }, nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS index b905e57f0..c430067f3 100755 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -7,9 +7,8 @@ reviewers: - mikedanese - saad-ali - janetkuo -- timstclair +- tallclair - eparis -- timothysc - jbeda - xiang90 - mbohlool diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 87fb5f580..083c82256 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -40,7 +40,9 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *Quantity) Reset() { *m = Quantity{} } func (*Quantity) ProtoMessage() {} @@ -50,22 +52,26 @@ func init() { proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity") } -var fileDescriptorGenerated = []byte{ - // 253 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x8f, 0xb1, 0x4a, 0x03, 0x41, - 0x10, 0x86, 0x77, 0x1b, 0x89, 0x57, 0x06, 0x11, 0x49, 0xb1, 0x17, 0xc4, 0x42, 0x04, 0x77, 0x0a, - 0x9b, 0x60, 0x69, 0x6f, 0xa1, 0xa5, 0xdd, 0xdd, 0x65, 0xdc, 0x2c, 0x67, 0x76, 0x8f, 0xd9, 0x59, - 0x21, 0x5d, 0x4a, 0xcb, 0x94, 0x96, 0xb9, 0xb7, 0x49, 0x99, 0xd2, 0xc2, 0xc2, 0x3b, 0x5f, 0x44, - 0x72, 0xc9, 0x81, 0x08, 0x76, 0xf3, 0xfd, 0xc3, 0x37, 0xfc, 0x93, 0xdc, 0x97, 0x93, 0xa0, 0xad, - 0x87, 0x32, 0xe6, 0x48, 0x0e, 0x19, 0x03, 0xbc, 0xa2, 0x9b, 0x7a, 0x82, 0xc3, 0x22, 0xab, 0xec, - 0x3c, 0x2b, 0x66, 0xd6, 0x21, 0x2d, 0xa0, 0x2a, 0xcd, 0x2e, 0x00, 0xc2, 0xe0, 0x23, 0x15, 0x08, - 0x06, 0x1d, 0x52, 0xc6, 0x38, 0xd5, 0x15, 0x79, 0xf6, 0xc3, 0x8b, 0xbd, 0xa5, 0x7f, 0x5b, 0xba, - 0x2a, 0xcd, 0x2e, 0xd0, 0xbd, 0x35, 0xba, 0x36, 0x96, 0x67, 0x31, 0xd7, 0x85, 0x9f, 0x83, 0xf1, - 0xc6, 0x43, 0x27, 0xe7, 0xf1, 0xb9, 0xa3, 0x0e, 0xba, 0x69, 0x7f, 0x74, 0x74, 0xf3, 0x5f, 0x95, - 0xc8, 0xf6, 0x05, 0xac, 0xe3, 0xc0, 0xf4, 0xb7, 0xc9, 0xf9, 0x24, 0x19, 0x3c, 0xc4, 0xcc, 0xb1, - 0xe5, 0xc5, 0xf0, 0x34, 0x39, 0x0a, 0x4c, 0xd6, 0x99, 0x33, 0x39, 0x96, 0x97, 0xc7, 0x8f, 0x07, - 0xba, 0x3d, 0x79, 0x5f, 0xa7, 0xe2, 0xad, 0x4e, 0xc5, 0xaa, 0x4e, 0xc5, 0xba, 0x4e, 0xc5, 0xf2, - 0x73, 0x2c, 0xee, 0xae, 0x36, 0x8d, 0x12, 0xdb, 0x46, 0x89, 0x8f, 0x46, 0x89, 0x65, 0xab, 0xe4, - 0xa6, 0x55, 0x72, 0xdb, 0x2a, 0xf9, 0xd5, 0x2a, 0xb9, 0xfa, 0x56, 0xe2, 0x69, 0xd0, 0x7f, 0xf2, - 0x13, 0x00, 0x00, 0xff, 0xff, 0xdf, 0x3c, 0xf3, 0xc9, 0x3f, 0x01, 0x00, 0x00, +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptorGenerated) +} + +var 
fileDescriptorGenerated = []byte{ + // 255 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x8f, 0xa1, 0x4e, 0x03, 0x41, + 0x10, 0x86, 0x77, 0x0d, 0x29, 0x95, 0x0d, 0x21, 0xa4, 0x62, 0xaf, 0x21, 0x08, 0x0c, 0x3b, 0x02, + 0xd3, 0x20, 0xf1, 0x08, 0x90, 0xb8, 0xbb, 0xeb, 0xb0, 0xdd, 0x1c, 0xdd, 0xbd, 0xcc, 0xce, 0x92, + 0xd4, 0x55, 0x22, 0x2b, 0x91, 0xbd, 0xb7, 0xa9, 0xac, 0xac, 0x40, 0x70, 0xcb, 0x8b, 0x90, 0x5e, + 0xdb, 0x84, 0x90, 0xe0, 0xe6, 0xfb, 0x27, 0xdf, 0xe4, 0x9f, 0xfe, 0x43, 0x35, 0x0e, 0xda, 0x7a, + 0xa8, 0x62, 0x81, 0xe4, 0x90, 0x31, 0xc0, 0x1b, 0xba, 0x89, 0x27, 0x38, 0x2c, 0xf2, 0xda, 0xce, + 0xf2, 0x72, 0x6a, 0x1d, 0xd2, 0x1c, 0xea, 0xca, 0xec, 0x02, 0x20, 0x0c, 0x3e, 0x52, 0x89, 0x60, + 0xd0, 0x21, 0xe5, 0x8c, 0x13, 0x5d, 0x93, 0x67, 0x3f, 0xb8, 0xda, 0x5b, 0xfa, 0xb7, 0xa5, 0xeb, + 0xca, 0xec, 0x02, 0x7d, 0xb4, 0x86, 0x37, 0xc6, 0xf2, 0x34, 0x16, 0xba, 0xf4, 0x33, 0x30, 0xde, + 0x78, 0xe8, 0xe4, 0x22, 0xbe, 0x74, 0xd4, 0x41, 0x37, 0xed, 0x8f, 0x0e, 0x6f, 0xff, 0xab, 0x12, + 0xd9, 0xbe, 0x82, 0x75, 0x1c, 0x98, 0xfe, 0x36, 0xb9, 0x1c, 0xf7, 0x7b, 0x8f, 0x31, 0x77, 0x6c, + 0x79, 0x3e, 0x38, 0xef, 0x9f, 0x04, 0x26, 0xeb, 0xcc, 0x85, 0x1c, 0xc9, 0xeb, 0xd3, 0xa7, 0x03, + 0xdd, 0x9d, 0x7d, 0xac, 0x32, 0xf1, 0xde, 0x64, 0x62, 0xd9, 0x64, 0x62, 0xd5, 0x64, 0x62, 0xf1, + 0x39, 0x12, 0xf7, 0x7a, 0xdd, 0x2a, 0xb1, 0x69, 0x95, 0xd8, 0xb6, 0x4a, 0x2c, 0x92, 0x92, 0xeb, + 0xa4, 0xe4, 0x26, 0x29, 0xb9, 0x4d, 0x4a, 0x7e, 0x25, 0x25, 0x97, 0xdf, 0x4a, 0x3c, 0xf7, 0x8e, + 0xdf, 0xfc, 0x04, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x5e, 0xda, 0xf9, 0x43, 0x01, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index 608299da4..31a46a6d3 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -87,6 +87,7 @@ option go_package = "resource"; // +protobuf.embed=string // +protobuf.options.marshal=false // +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen=true // +k8s:openapi-gen=true message Quantity { optional string string = 1; diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go index 887ac74c9..72d3880c0 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go @@ -247,19 +247,6 @@ func pow10Int64(b int64) int64 { } } -// powInt64 raises a to the bth power. Is not overflow aware. -func powInt64(a, b int64) int64 { - p := int64(1) - for b > 0 { - if b&1 != 0 { - p *= a - } - b >>= 1 - a *= a - } - return p -} - // negativeScaleInt64 returns the result of dividing base by scale * 10 and the remainder, or // false if no such division is possible. Dividing by negative scales is undefined. 
func divideByScaleInt64(base int64, scale Scale) (result, remainder int64, exact bool) { diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 3a9560882..c3cd13960 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -25,11 +25,7 @@ import ( "strconv" "strings" - flag "github.com/spf13/pflag" - - "github.com/go-openapi/spec" inf "gopkg.in/inf.v0" - "k8s.io/apimachinery/pkg/openapi" ) // Quantity is a fixed-point representation of a number. @@ -93,6 +89,7 @@ import ( // +protobuf.embed=string // +protobuf.options.marshal=false // +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen=true // +k8s:openapi-gen=true type Quantity struct { // i is the quantity in int64 scaled form, if d.Dec == nil @@ -398,24 +395,22 @@ func (q Quantity) DeepCopy() Quantity { return q } -// OpenAPIDefinition returns openAPI definition for this type. -func (_ Quantity) OpenAPIDefinition() openapi.OpenAPIDefinition { - return openapi.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - } -} +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Quantity) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ Quantity) OpenAPISchemaFormat() string { return "" } // CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity). // // Note about BinarySI: // * If q.Format is set to BinarySI and q.Amount represents a non-zero value between // -1 and +1, it will be emitted as if q.Format were DecimalSI. -// * Otherwise, if q.Format is set to BinarySI, frational parts of q.Amount will be +// * Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be // rounded up. (1.1i becomes 2i.) func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) { if q.IsZero() { @@ -750,43 +745,3 @@ func (q *Quantity) Copy() *Quantity { Format: q.Format, } } - -// qFlag is a helper type for the Flag function -type qFlag struct { - dest *Quantity -} - -// Sets the value of the internal Quantity. (used by flag & pflag) -func (qf qFlag) Set(val string) error { - q, err := ParseQuantity(val) - if err != nil { - return err - } - // This copy is OK because q will not be referenced again. - *qf.dest = q - return nil -} - -// Converts the value of the internal Quantity to a string. (used by flag & pflag) -func (qf qFlag) String() string { - return qf.dest.String() -} - -// States the type of flag this is (Quantity). (used by pflag) -func (qf qFlag) Type() string { - return "quantity" -} - -// QuantityFlag is a helper that makes a quantity flag (using standard flag package). -// Will panic if defaultValue is not a valid quantity. -func QuantityFlag(flagName, defaultValue, description string) *Quantity { - q := MustParse(defaultValue) - flag.Var(NewQuantityFlagValue(&q), flagName, description) - return &q -} - -// NewQuantityFlagValue returns an object that can be used to back a flag, -// pointing at the given Quantity variable. 
-func NewQuantityFlagValue(q *Quantity) flag.Value { - return qFlag{q} -} diff --git a/vendor/k8s.io/client-go/pkg/util/umask_windows.go b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go similarity index 62% rename from vendor/k8s.io/client-go/pkg/util/umask_windows.go rename to vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go index 7a1ba1538..ab4740790 100644 --- a/vendor/k8s.io/client-go/pkg/util/umask_windows.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ -// +build windows +// +build !ignore_autogenerated /* -Copyright 2014 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,12 +16,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +// Code generated by deepcopy-gen. DO NOT EDIT. -import ( - "errors" -) +package resource -func Umask(mask int) (int, error) { - return 0, errors.New("platform and architecture is not supported") +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Quantity) DeepCopyInto(out *Quantity) { + *out = in.DeepCopy() + return } diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go deleted file mode 100644 index 2c8568c1f..000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package announced contains tools for announcing API group factories. This is -// distinct from registration (in the 'registered' package) in that it's safe -// to announce every possible group linked in, but only groups requested at -// runtime should be registered. This package contains both a registry, and -// factory code (which was formerly copy-pasta in every install package). -package announced - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" -) - -// APIGroupFactoryRegistry allows for groups and versions to announce themselves, -// which simply makes them available and doesn't take other actions. Later, -// users of the registry can select which groups and versions they'd actually -// like to register with an APIRegistrationManager. -// -// (Right now APIRegistrationManager has separate 'registration' and 'enabled' -// concepts-- APIGroupFactory is going to take over the former function; -// they will overlap untill the refactoring is finished.) -// -// The key is the group name. After initialization, this should be treated as -// read-only. It is implemented as a map from group name to group factory, and -// it is safe to use this knowledge to manually pick out groups to register -// (e.g., for testing). 
-type APIGroupFactoryRegistry map[string]*GroupMetaFactory - -func (gar APIGroupFactoryRegistry) group(groupName string) *GroupMetaFactory { - gmf, ok := gar[groupName] - if !ok { - gmf = &GroupMetaFactory{VersionArgs: map[string]*GroupVersionFactoryArgs{}} - gar[groupName] = gmf - } - return gmf -} - -// AnnounceGroupVersion adds the particular arguments for this group version to the group factory. -func (gar APIGroupFactoryRegistry) AnnounceGroupVersion(gvf *GroupVersionFactoryArgs) error { - gmf := gar.group(gvf.GroupName) - if _, ok := gmf.VersionArgs[gvf.VersionName]; ok { - return fmt.Errorf("version %q in group %q has already been announced", gvf.VersionName, gvf.GroupName) - } - gmf.VersionArgs[gvf.VersionName] = gvf - return nil -} - -// AnnounceGroup adds the group-wide arguments to the group factory. -func (gar APIGroupFactoryRegistry) AnnounceGroup(args *GroupMetaFactoryArgs) error { - gmf := gar.group(args.GroupName) - if gmf.GroupArgs != nil { - return fmt.Errorf("group %q has already been announced", args.GroupName) - } - gmf.GroupArgs = args - return nil -} - -// RegisterAndEnableAll throws every factory at the specified API registration -// manager, and lets it decide which to register. (If you want to do this a la -// cart, you may look through gar itself-- it's just a map.) -func (gar APIGroupFactoryRegistry) RegisterAndEnableAll(m *registered.APIRegistrationManager, scheme *runtime.Scheme) error { - for groupName, gmf := range gar { - if err := gmf.Register(m); err != nil { - return fmt.Errorf("error registering %v: %v", groupName, err) - } - if err := gmf.Enable(m, scheme); err != nil { - return fmt.Errorf("error enabling %v: %v", groupName, err) - } - } - return nil -} - -// AnnouncePreconstructedFactory announces a factory which you've manually assembled. -// You may call this instead of calling AnnounceGroup and AnnounceGroupVersion. -func (gar APIGroupFactoryRegistry) AnnouncePreconstructedFactory(gmf *GroupMetaFactory) error { - name := gmf.GroupArgs.GroupName - if _, exists := gar[name]; exists { - return fmt.Errorf("the group %q has already been announced.", name) - } - gar[name] = gmf - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go deleted file mode 100644 index afb03295d..000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go +++ /dev/null @@ -1,252 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package announced - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apimachinery" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" -) - -type SchemeFunc func(*runtime.Scheme) error -type VersionToSchemeFunc map[string]SchemeFunc - -// GroupVersionFactoryArgs contains all the per-version parts of a GroupMetaFactory. -type GroupVersionFactoryArgs struct { - GroupName string - VersionName string - - AddToScheme SchemeFunc -} - -// GroupMetaFactoryArgs contains the group-level args of a GroupMetaFactory. -type GroupMetaFactoryArgs struct { - // GroupName is the name of the API-Group - // - // example: 'servicecatalog.k8s.io' - GroupName string - VersionPreferenceOrder []string - // ImportPrefix is the base go package of the API-Group - // - // example: 'k8s.io/kubernetes/pkg/apis/autoscaling' - ImportPrefix string - // RootScopedKinds are resources that are not namespaced. - RootScopedKinds sets.String // nil is allowed - IgnoredKinds sets.String // nil is allowed - - // May be nil if there are no internal objects. - AddInternalObjectsToScheme SchemeFunc -} - -// NewGroupMetaFactory builds the args for you. This is for if you're -// constructing a factory all at once and not using the registry. -func NewGroupMetaFactory(groupArgs *GroupMetaFactoryArgs, versions VersionToSchemeFunc) *GroupMetaFactory { - gmf := &GroupMetaFactory{ - GroupArgs: groupArgs, - VersionArgs: map[string]*GroupVersionFactoryArgs{}, - } - for v, f := range versions { - gmf.VersionArgs[v] = &GroupVersionFactoryArgs{ - GroupName: groupArgs.GroupName, - VersionName: v, - AddToScheme: f, - } - } - return gmf -} - -// Announce adds this Group factory to the global factory registry. It should -// only be called if you constructed the GroupMetaFactory yourself via -// NewGroupMetadFactory. -// Note that this will panic on an error, since it's expected that you'll be -// calling this at initialization time and any error is a result of a -// programmer importing the wrong set of packages. If this assumption doesn't -// work for you, just call DefaultGroupFactoryRegistry.AnnouncePreconstructedFactory -// yourself. -func (gmf *GroupMetaFactory) Announce(groupFactoryRegistry APIGroupFactoryRegistry) *GroupMetaFactory { - if err := groupFactoryRegistry.AnnouncePreconstructedFactory(gmf); err != nil { - panic(err) - } - return gmf -} - -// GroupMetaFactory has the logic for actually assembling and registering a group. -// -// There are two ways of obtaining one of these. -// 1. You can announce your group and versions separately, and then let the -// GroupFactoryRegistry assemble this object for you. (This allows group and -// versions to be imported separately, without referencing each other, to -// keep import trees small.) -// 2. You can call NewGroupMetaFactory(), which is mostly a drop-in replacement -// for the old, bad way of doing things. You can then call .Announce() to -// announce your constructed factory to any code that would like to do -// things the new, better way. -// -// Note that GroupMetaFactory actually does construct GroupMeta objects, but -// currently it does so in a way that's very entangled with an -// APIRegistrationManager. It's a TODO item to cleanly separate that interface. 
-type GroupMetaFactory struct { - GroupArgs *GroupMetaFactoryArgs - // map of version name to version factory - VersionArgs map[string]*GroupVersionFactoryArgs - - // assembled by Register() - prioritizedVersionList []schema.GroupVersion -} - -// Register constructs the finalized prioritized version list and sanity checks -// the announced group & versions. Then it calls register. -func (gmf *GroupMetaFactory) Register(m *registered.APIRegistrationManager) error { - if gmf.GroupArgs == nil { - return fmt.Errorf("partially announced groups are not allowed, only got versions: %#v", gmf.VersionArgs) - } - if len(gmf.VersionArgs) == 0 { - return fmt.Errorf("group %v announced but no versions announced", gmf.GroupArgs.GroupName) - } - - pvSet := sets.NewString(gmf.GroupArgs.VersionPreferenceOrder...) - if pvSet.Len() != len(gmf.GroupArgs.VersionPreferenceOrder) { - return fmt.Errorf("preference order for group %v has duplicates: %v", gmf.GroupArgs.GroupName, gmf.GroupArgs.VersionPreferenceOrder) - } - prioritizedVersions := []schema.GroupVersion{} - for _, v := range gmf.GroupArgs.VersionPreferenceOrder { - prioritizedVersions = append( - prioritizedVersions, - schema.GroupVersion{ - Group: gmf.GroupArgs.GroupName, - Version: v, - }, - ) - } - - // Go through versions that weren't explicitly prioritized. - unprioritizedVersions := []schema.GroupVersion{} - for _, v := range gmf.VersionArgs { - if v.GroupName != gmf.GroupArgs.GroupName { - return fmt.Errorf("found %v/%v in group %v?", v.GroupName, v.VersionName, gmf.GroupArgs.GroupName) - } - if pvSet.Has(v.VersionName) { - pvSet.Delete(v.VersionName) - continue - } - unprioritizedVersions = append(unprioritizedVersions, schema.GroupVersion{Group: v.GroupName, Version: v.VersionName}) - } - if len(unprioritizedVersions) > 1 { - glog.Warningf("group %v has multiple unprioritized versions: %#v. They will have an arbitrary preference order!", gmf.GroupArgs.GroupName, unprioritizedVersions) - } - if pvSet.Len() != 0 { - return fmt.Errorf("group %v has versions in the priority list that were never announced: %s", gmf.GroupArgs.GroupName, pvSet) - } - prioritizedVersions = append(prioritizedVersions, unprioritizedVersions...) - m.RegisterVersions(prioritizedVersions) - gmf.prioritizedVersionList = prioritizedVersions - return nil -} - -func (gmf *GroupMetaFactory) newRESTMapper(scheme *runtime.Scheme, externalVersions []schema.GroupVersion, groupMeta *apimachinery.GroupMeta) meta.RESTMapper { - // the list of kinds that are scoped at the root of the api hierarchy - // if a kind is not enumerated here, it is assumed to have a namespace scope - rootScoped := sets.NewString() - if gmf.GroupArgs.RootScopedKinds != nil { - rootScoped = gmf.GroupArgs.RootScopedKinds - } - ignoredKinds := sets.NewString() - if gmf.GroupArgs.IgnoredKinds != nil { - ignoredKinds = gmf.GroupArgs.IgnoredKinds - } - - return meta.NewDefaultRESTMapperFromScheme( - externalVersions, - groupMeta.InterfacesFor, - gmf.GroupArgs.ImportPrefix, - ignoredKinds, - rootScoped, - scheme, - ) -} - -// Enable enables group versions that are allowed, adds methods to the scheme, etc. 
-func (gmf *GroupMetaFactory) Enable(m *registered.APIRegistrationManager, scheme *runtime.Scheme) error { - externalVersions := []schema.GroupVersion{} - for _, v := range gmf.prioritizedVersionList { - if !m.IsAllowedVersion(v) { - continue - } - externalVersions = append(externalVersions, v) - if err := m.EnableVersions(v); err != nil { - return err - } - gmf.VersionArgs[v.Version].AddToScheme(scheme) - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", gmf.GroupArgs.GroupName) - return nil - } - - if gmf.GroupArgs.AddInternalObjectsToScheme != nil { - gmf.GroupArgs.AddInternalObjectsToScheme(scheme) - } - - preferredExternalVersion := externalVersions[0] - accessor := meta.NewAccessor() - - groupMeta := &apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - SelfLinker: runtime.SelfLinker(accessor), - } - for _, v := range externalVersions { - gvf := gmf.VersionArgs[v.Version] - if err := groupMeta.AddVersionInterfaces( - schema.GroupVersion{Group: gvf.GroupName, Version: gvf.VersionName}, - &meta.VersionInterfaces{ - ObjectConvertor: scheme, - MetadataAccessor: accessor, - }, - ); err != nil { - return err - } - } - groupMeta.InterfacesFor = groupMeta.DefaultInterfacesFor - groupMeta.RESTMapper = gmf.newRESTMapper(scheme, externalVersions, groupMeta) - - if err := m.RegisterGroup(*groupMeta); err != nil { - return err - } - return nil -} - -// RegisterAndEnable is provided only to allow this code to get added in multiple steps. -// It's really bad that this is called in init() methods, but supporting this -// temporarily lets us do the change incrementally. -func (gmf *GroupMetaFactory) RegisterAndEnable(registry *registered.APIRegistrationManager, scheme *runtime.Scheme) error { - if err := gmf.Register(registry); err != nil { - return err - } - if err := gmf.Enable(registry, scheme); err != nil { - return err - } - - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go deleted file mode 100644 index b238454b2..000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package apimachinery contains the generic API machinery code that -// is common to both server and clients. -// This package should never import specific API objects. -package apimachinery // import "k8s.io/apimachinery/pkg/apimachinery" diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go deleted file mode 100644 index f2e32c88c..000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go +++ /dev/null @@ -1,376 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package to keep track of API Versions that can be registered and are enabled in api.Scheme. -package registered - -import ( - "fmt" - "sort" - "strings" - - "github.com/golang/glog" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apimachinery" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" -) - -// APIRegistrationManager provides the concept of what API groups are enabled. -// -// TODO: currently, it also provides a "registered" concept. But it's wrong to -// have both concepts in the same object. Therefore the "announced" package is -// going to take over the registered concept. After all the install packages -// are switched to using the announce package instead of this package, then we -// can combine the registered/enabled concepts in this object. Simplifying this -// isn't easy right now because there are so many callers of this package. -type APIRegistrationManager struct { - // registeredGroupVersions stores all API group versions for which RegisterGroup is called. - registeredVersions map[schema.GroupVersion]struct{} - - // thirdPartyGroupVersions are API versions which are dynamically - // registered (and unregistered) via API calls to the apiserver - thirdPartyGroupVersions []schema.GroupVersion - - // enabledVersions represents all enabled API versions. It should be a - // subset of registeredVersions. Please call EnableVersions() to add - // enabled versions. - enabledVersions map[schema.GroupVersion]struct{} - - // map of group meta for all groups. - groupMetaMap map[string]*apimachinery.GroupMeta - - // envRequestedVersions represents the versions requested via the - // KUBE_API_VERSIONS environment variable. The install package of each group - // checks this list before add their versions to the latest package and - // Scheme. This list is small and order matters, so represent as a slice - envRequestedVersions []schema.GroupVersion -} - -// NewAPIRegistrationManager constructs a new manager. The argument ought to be -// the value of the KUBE_API_VERSIONS env var, or a value of this which you -// wish to test. 
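A minimal sketch of driving that constructor from the environment variable, assuming a helper name (newManagerFromEnv) that is not part of this patch; registered.NewOrDie does the same but panics on a malformed value.

package example

import (
	"os"

	"k8s.io/apimachinery/pkg/apimachinery/registered"
)

// Sketch only: building the (now removed) APIRegistrationManager from
// KUBE_API_VERSIONS, as the comment above suggests.
func newManagerFromEnv() (*registered.APIRegistrationManager, error) {
	// An empty value disables filtering: every registered version is allowed.
	return registered.NewAPIRegistrationManager(os.Getenv("KUBE_API_VERSIONS"))
}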
-func NewAPIRegistrationManager(kubeAPIVersions string) (*APIRegistrationManager, error) { - m := &APIRegistrationManager{ - registeredVersions: map[schema.GroupVersion]struct{}{}, - thirdPartyGroupVersions: []schema.GroupVersion{}, - enabledVersions: map[schema.GroupVersion]struct{}{}, - groupMetaMap: map[string]*apimachinery.GroupMeta{}, - envRequestedVersions: []schema.GroupVersion{}, - } - - if len(kubeAPIVersions) != 0 { - for _, version := range strings.Split(kubeAPIVersions, ",") { - gv, err := schema.ParseGroupVersion(version) - if err != nil { - return nil, fmt.Errorf("invalid api version: %s in KUBE_API_VERSIONS: %s.", - version, kubeAPIVersions) - } - m.envRequestedVersions = append(m.envRequestedVersions, gv) - } - } - return m, nil -} - -func NewOrDie(kubeAPIVersions string) *APIRegistrationManager { - m, err := NewAPIRegistrationManager(kubeAPIVersions) - if err != nil { - glog.Fatalf("Could not construct version manager: %v (KUBE_API_VERSIONS=%q)", err, kubeAPIVersions) - } - return m -} - -// RegisterVersions adds the given group versions to the list of registered group versions. -func (m *APIRegistrationManager) RegisterVersions(availableVersions []schema.GroupVersion) { - for _, v := range availableVersions { - m.registeredVersions[v] = struct{}{} - } -} - -// RegisterGroup adds the given group to the list of registered groups. -func (m *APIRegistrationManager) RegisterGroup(groupMeta apimachinery.GroupMeta) error { - groupName := groupMeta.GroupVersion.Group - if _, found := m.groupMetaMap[groupName]; found { - return fmt.Errorf("group %q is already registered in groupsMap: %v", groupName, m.groupMetaMap) - } - m.groupMetaMap[groupName] = &groupMeta - return nil -} - -// EnableVersions adds the versions for the given group to the list of enabled versions. -// Note that the caller should call RegisterGroup before calling this method. -// The caller of this function is responsible to add the versions to scheme and RESTMapper. -func (m *APIRegistrationManager) EnableVersions(versions ...schema.GroupVersion) error { - var unregisteredVersions []schema.GroupVersion - for _, v := range versions { - if _, found := m.registeredVersions[v]; !found { - unregisteredVersions = append(unregisteredVersions, v) - } - m.enabledVersions[v] = struct{}{} - } - if len(unregisteredVersions) != 0 { - return fmt.Errorf("Please register versions before enabling them: %v", unregisteredVersions) - } - return nil -} - -// IsAllowedVersion returns if the version is allowed by the KUBE_API_VERSIONS -// environment variable. If the environment variable is empty, then it always -// returns true. -func (m *APIRegistrationManager) IsAllowedVersion(v schema.GroupVersion) bool { - if len(m.envRequestedVersions) == 0 { - return true - } - for _, envGV := range m.envRequestedVersions { - if v == envGV { - return true - } - } - return false -} - -// IsEnabledVersion returns if a version is enabled. -func (m *APIRegistrationManager) IsEnabledVersion(v schema.GroupVersion) bool { - _, found := m.enabledVersions[v] - return found -} - -// EnabledVersions returns all enabled versions. 
Groups are randomly ordered, but versions within groups -// are priority order from best to worst -func (m *APIRegistrationManager) EnabledVersions() []schema.GroupVersion { - ret := []schema.GroupVersion{} - for _, groupMeta := range m.groupMetaMap { - for _, version := range groupMeta.GroupVersions { - if m.IsEnabledVersion(version) { - ret = append(ret, version) - } - } - } - return ret -} - -// EnabledVersionsForGroup returns all enabled versions for a group in order of best to worst -func (m *APIRegistrationManager) EnabledVersionsForGroup(group string) []schema.GroupVersion { - groupMeta, ok := m.groupMetaMap[group] - if !ok { - return []schema.GroupVersion{} - } - - ret := []schema.GroupVersion{} - for _, version := range groupMeta.GroupVersions { - if m.IsEnabledVersion(version) { - ret = append(ret, version) - } - } - return ret -} - -// Group returns the metadata of a group if the group is registered, otherwise -// an error is returned. -func (m *APIRegistrationManager) Group(group string) (*apimachinery.GroupMeta, error) { - groupMeta, found := m.groupMetaMap[group] - if !found { - return nil, fmt.Errorf("group %v has not been registered", group) - } - groupMetaCopy := *groupMeta - return &groupMetaCopy, nil -} - -// IsRegistered takes a string and determines if it's one of the registered groups -func (m *APIRegistrationManager) IsRegistered(group string) bool { - _, found := m.groupMetaMap[group] - return found -} - -// IsRegisteredVersion returns if a version is registered. -func (m *APIRegistrationManager) IsRegisteredVersion(v schema.GroupVersion) bool { - _, found := m.registeredVersions[v] - return found -} - -// RegisteredGroupVersions returns all registered group versions. -func (m *APIRegistrationManager) RegisteredGroupVersions() []schema.GroupVersion { - ret := []schema.GroupVersion{} - for groupVersion := range m.registeredVersions { - ret = append(ret, groupVersion) - } - return ret -} - -// IsThirdPartyAPIGroupVersion returns true if the api version is a user-registered group/version. -func (m *APIRegistrationManager) IsThirdPartyAPIGroupVersion(gv schema.GroupVersion) bool { - for ix := range m.thirdPartyGroupVersions { - if m.thirdPartyGroupVersions[ix] == gv { - return true - } - } - return false -} - -// AddThirdPartyAPIGroupVersions sets the list of third party versions, -// registers them in the API machinery and enables them. -// Skips GroupVersions that are already registered. -// Returns the list of GroupVersions that were skipped. -func (m *APIRegistrationManager) AddThirdPartyAPIGroupVersions(gvs ...schema.GroupVersion) []schema.GroupVersion { - filteredGVs := []schema.GroupVersion{} - skippedGVs := []schema.GroupVersion{} - for ix := range gvs { - if !m.IsRegisteredVersion(gvs[ix]) { - filteredGVs = append(filteredGVs, gvs[ix]) - } else { - glog.V(3).Infof("Skipping %s, because its already registered", gvs[ix].String()) - skippedGVs = append(skippedGVs, gvs[ix]) - } - } - if len(filteredGVs) == 0 { - return skippedGVs - } - m.RegisterVersions(filteredGVs) - m.EnableVersions(filteredGVs...) - m.thirdPartyGroupVersions = append(m.thirdPartyGroupVersions, filteredGVs...) 
- - return skippedGVs -} - -// InterfacesFor is a union meta.VersionInterfacesFunc func for all registered types -func (m *APIRegistrationManager) InterfacesFor(version schema.GroupVersion) (*meta.VersionInterfaces, error) { - groupMeta, err := m.Group(version.Group) - if err != nil { - return nil, err - } - return groupMeta.InterfacesFor(version) -} - -// TODO: This is an expedient function, because we don't check if a Group is -// supported throughout the code base. We will abandon this function and -// checking the error returned by the Group() function. -func (m *APIRegistrationManager) GroupOrDie(group string) *apimachinery.GroupMeta { - groupMeta, found := m.groupMetaMap[group] - if !found { - if group == "" { - panic("The legacy v1 API is not registered.") - } else { - panic(fmt.Sprintf("Group %s is not registered.", group)) - } - } - groupMetaCopy := *groupMeta - return &groupMetaCopy -} - -// RESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order: -// 1. if KUBE_API_VERSIONS is specified, then KUBE_API_VERSIONS in order, OR -// 1. legacy kube group preferred version, extensions preferred version, metrics perferred version, legacy -// kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version, -// all other groups alphabetical. -func (m *APIRegistrationManager) RESTMapper(versionPatterns ...schema.GroupVersion) meta.RESTMapper { - unionMapper := meta.MultiRESTMapper{} - unionedGroups := sets.NewString() - for enabledVersion := range m.enabledVersions { - if !unionedGroups.Has(enabledVersion.Group) { - unionedGroups.Insert(enabledVersion.Group) - groupMeta := m.groupMetaMap[enabledVersion.Group] - unionMapper = append(unionMapper, groupMeta.RESTMapper) - } - } - - if len(versionPatterns) != 0 { - resourcePriority := []schema.GroupVersionResource{} - kindPriority := []schema.GroupVersionKind{} - for _, versionPriority := range versionPatterns { - resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) - } - - return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} - } - - if len(m.envRequestedVersions) != 0 { - resourcePriority := []schema.GroupVersionResource{} - kindPriority := []schema.GroupVersionKind{} - - for _, versionPriority := range m.envRequestedVersions { - resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) - } - - return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} - } - - prioritizedGroups := []string{"", "extensions", "metrics"} - resourcePriority, kindPriority := m.prioritiesForGroups(prioritizedGroups...) - - prioritizedGroupsSet := sets.NewString(prioritizedGroups...) - remainingGroups := sets.String{} - for enabledVersion := range m.enabledVersions { - if !prioritizedGroupsSet.Has(enabledVersion.Group) { - remainingGroups.Insert(enabledVersion.Group) - } - } - - remainingResourcePriority, remainingKindPriority := m.prioritiesForGroups(remainingGroups.List()...) - resourcePriority = append(resourcePriority, remainingResourcePriority...) - kindPriority = append(kindPriority, remainingKindPriority...) 
- - return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} -} - -// prioritiesForGroups returns the resource and kind priorities for a PriorityRESTMapper, preferring the preferred version of each group first, -// then any non-preferred version of the group second. -func (m *APIRegistrationManager) prioritiesForGroups(groups ...string) ([]schema.GroupVersionResource, []schema.GroupVersionKind) { - resourcePriority := []schema.GroupVersionResource{} - kindPriority := []schema.GroupVersionKind{} - - for _, group := range groups { - availableVersions := m.EnabledVersionsForGroup(group) - if len(availableVersions) > 0 { - resourcePriority = append(resourcePriority, availableVersions[0].WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, availableVersions[0].WithKind(meta.AnyKind)) - } - } - for _, group := range groups { - resourcePriority = append(resourcePriority, schema.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource}) - kindPriority = append(kindPriority, schema.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind}) - } - - return resourcePriority, kindPriority -} - -// AllPreferredGroupVersions returns the preferred versions of all registered -// groups in the form of "group1/version1,group2/version2,..." -func (m *APIRegistrationManager) AllPreferredGroupVersions() string { - if len(m.groupMetaMap) == 0 { - return "" - } - var defaults []string - for _, groupMeta := range m.groupMetaMap { - defaults = append(defaults, groupMeta.GroupVersion.String()) - } - sort.Strings(defaults) - return strings.Join(defaults, ",") -} - -// ValidateEnvRequestedVersions returns a list of versions that are requested in -// the KUBE_API_VERSIONS environment variable, but not enabled. -func (m *APIRegistrationManager) ValidateEnvRequestedVersions() []schema.GroupVersion { - var missingVersions []schema.GroupVersion - for _, v := range m.envRequestedVersions { - if _, found := m.enabledVersions[v]; !found { - missingVersions = append(missingVersions, v) - } - } - return missingVersions -} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go deleted file mode 100644 index 213e34bc0..000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apimachinery - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupMeta stores the metadata of a group. -type GroupMeta struct { - // GroupVersion represents the preferred version of the group. - GroupVersion schema.GroupVersion - - // GroupVersions is Group + all versions in that group. - GroupVersions []schema.GroupVersion - - // SelfLinker can set or get the SelfLink field of all API types. 
- // TODO: when versioning changes, make this part of each API definition. - // TODO(lavalamp): Combine SelfLinker & ResourceVersioner interfaces, force all uses - // to go through the InterfacesFor method below. - SelfLinker runtime.SelfLinker - - // RESTMapper provides the default mapping between REST paths and the objects declared in api.Scheme and all known - // versions. - RESTMapper meta.RESTMapper - - // InterfacesFor returns the default Codec and ResourceVersioner for a given version - // string, or an error if the version is not known. - // TODO: make this stop being a func pointer and always use the default - // function provided below once every place that populates this field has been changed. - InterfacesFor func(version schema.GroupVersion) (*meta.VersionInterfaces, error) - - // InterfacesByVersion stores the per-version interfaces. - InterfacesByVersion map[schema.GroupVersion]*meta.VersionInterfaces -} - -// DefaultInterfacesFor returns the default Codec and ResourceVersioner for a given version -// string, or an error if the version is not known. -// TODO: Remove the "Default" prefix. -func (gm *GroupMeta) DefaultInterfacesFor(version schema.GroupVersion) (*meta.VersionInterfaces, error) { - if v, ok := gm.InterfacesByVersion[version]; ok { - return v, nil - } - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, gm.GroupVersions) -} - -// AddVersionInterfaces adds the given version to the group. Only call during -// init, after that GroupMeta objects should be immutable. Not thread safe. -// (If you use this, be sure to set .InterfacesFor = .DefaultInterfacesFor) -// TODO: remove the "Interfaces" suffix and make this also maintain the -// .GroupVersions member. -func (gm *GroupMeta) AddVersionInterfaces(version schema.GroupVersion, interfaces *meta.VersionInterfaces) error { - if e, a := gm.GroupVersion.Group, version.Group; a != e { - return fmt.Errorf("got a version in group %v, but am in group %v", a, e) - } - if gm.InterfacesByVersion == nil { - gm.InterfacesByVersion = make(map[schema.GroupVersion]*meta.VersionInterfaces) - } - gm.InterfacesByVersion[version] = interfaces - - // TODO: refactor to make the below error not possible, this function - // should *set* GroupVersions rather than depend on it. - for _, v := range gm.GroupVersions { - if v == version { - return nil - } - } - return fmt.Errorf("added a version interface without the corresponding version %v being in the list %#v", version, gm.GroupVersions) -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go new file mode 100644 index 000000000..1ea8c137b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go @@ -0,0 +1,77 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internalversion + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *metav1.ListOptions, s conversion.Scope) error { + if err := metav1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + if err := metav1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + out.IncludeUninitialized = in.IncludeUninitialized + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = in.TimeoutSeconds + out.Watch = in.Watch + out.Limit = in.Limit + out.Continue = in.Continue + return nil +} + +func Convert_v1_ListOptions_To_internalversion_ListOptions(in *metav1.ListOptions, out *ListOptions, s conversion.Scope) error { + if err := metav1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + if err := metav1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + out.IncludeUninitialized = in.IncludeUninitialized + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = in.TimeoutSeconds + out.Watch = in.Watch + out.Limit = in.Limit + out.Continue = in.Continue + return nil +} + +func Convert_map_to_v1_LabelSelector(in *map[string]string, out *metav1.LabelSelector, s conversion.Scope) error { + if in == nil { + return nil + } + out = new(metav1.LabelSelector) + for labelKey, labelValue := range *in { + metav1.AddLabelToSelector(out, labelKey, labelValue) + } + return nil +} + +func Convert_v1_LabelSelector_to_map(in *metav1.LabelSelector, out *map[string]string, s conversion.Scope) error { + var err error + *out, err = metav1.LabelSelectorAsMap(in) + if err != nil { + err = field.Invalid(field.NewPath("labelSelector"), *in, fmt.Sprintf("cannot convert to old selector: %v", err)) + } + return err +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go new file mode 100644 index 000000000..1e85c5c43 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apimachinery/pkg/apis/meta/v1 + +package internalversion diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go new file mode 100644 index 000000000..4bde90b3f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go @@ -0,0 +1,105 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// GroupName is the group name for this API. +const GroupName = "meta.k8s.io" + +// Scheme is the registry for any type that adheres to the meta API spec. +var scheme = runtime.NewScheme() + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Codecs provides access to encoding and decoding for the scheme. +var Codecs = serializer.NewCodecFactory(scheme) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// ParameterCodec handles versioning of objects that are converted to query parameters. +var ParameterCodec = runtime.NewParameterCodec(scheme) + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// addToGroupVersion registers common meta types into schemas. +func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) error { + if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { + return err + } + scheme.AddConversionFuncs( + metav1.Convert_string_To_labels_Selector, + metav1.Convert_labels_Selector_To_string, + + metav1.Convert_string_To_fields_Selector, + metav1.Convert_fields_Selector_To_string, + + Convert_map_to_v1_LabelSelector, + Convert_v1_LabelSelector_to_map, + + Convert_internalversion_ListOptions_To_v1_ListOptions, + Convert_v1_ListOptions_To_internalversion_ListOptions, + ) + // ListOptions is the only options struct which needs conversion (it exposes labels and fields + // as selectors for convenience). The other types have only a single representation today. 
+ scheme.AddKnownTypes(SchemeGroupVersion, + &ListOptions{}, + &metav1.GetOptions{}, + &metav1.ExportOptions{}, + &metav1.DeleteOptions{}, + ) + scheme.AddKnownTypes(SchemeGroupVersion, + &metav1beta1.Table{}, + &metav1beta1.TableOptions{}, + &metav1beta1.PartialObjectMetadata{}, + &metav1beta1.PartialObjectMetadataList{}, + ) + scheme.AddKnownTypes(metav1beta1.SchemeGroupVersion, + &metav1beta1.Table{}, + &metav1beta1.TableOptions{}, + &metav1beta1.PartialObjectMetadata{}, + &metav1beta1.PartialObjectMetadataList{}, + ) + // Allow delete options to be decoded across all version in this scheme (we may want to be more clever than this) + scheme.AddUnversionedTypes(SchemeGroupVersion, &metav1.DeleteOptions{}) + metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion) + return nil +} + +// Unlike other API groups, meta internal knows about all meta external versions, but keeps +// the logic for conversion private. +func init() { + if err := addToGroupVersion(scheme, SchemeGroupVersion); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go new file mode 100644 index 000000000..2f6740d36 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ListOptions is the query options to a standard REST list call. +type ListOptions struct { + metav1.TypeMeta + + // A selector based on labels + LabelSelector labels.Selector + // A selector based on fields + FieldSelector fields.Selector + // If true, partially initialized resources are included in the response. + // +optional + IncludeUninitialized bool + // If true, watch for changes to this list + Watch bool + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + ResourceVersion string + // Timeout for the list/watch call. + TimeoutSeconds *int64 + // Limit specifies the maximum number of results to return from the server. The server may + // not support this field on all resource types, but if it does and more results remain it + // will set the continue field on the returned list object. + Limit int64 + // Continue is a token returned by the server that lets a client retrieve chunks of results + // from the server by specifying limit. 
The server may reject requests for continuation tokens + // it does not recognize and will return a 410 error if the token can no longer be used because + // it has expired. + Continue string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// List holds a list of objects, which may not be known by the server. +type List struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []runtime.Object +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go new file mode 100644 index 000000000..a63b3fc2c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -0,0 +1,118 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package internalversion + +import ( + unsafe "unsafe" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_internalversion_List_To_v1_List, + Convert_v1_List_To_internalversion_List, + Convert_internalversion_ListOptions_To_v1_ListOptions, + Convert_v1_ListOptions_To_internalversion_ListOptions, + ) +} + +func autoConvert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_internalversion_List_To_v1_List is an autogenerated conversion function. +func Convert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error { + return autoConvert_internalversion_List_To_v1_List(in, out, s) +} + +func autoConvert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_List_To_internalversion_List is an autogenerated conversion function. 
+func Convert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error { + return autoConvert_v1_List_To_internalversion_List(in, out, s) +} + +func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error { + if err := v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.IncludeUninitialized = in.IncludeUninitialized + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + out.Limit = in.Limit + out.Continue = in.Continue + return nil +} + +func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error { + if err := v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.IncludeUninitialized = in.IncludeUninitialized + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + out.Limit = in.Limit + out.Continue = in.Continue + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go new file mode 100644 index 000000000..77bd9a6b4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -0,0 +1,106 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package internalversion + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *List) DeepCopyInto(out *List) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if (*in)[i] == nil { + (*out)[i] = nil + } else { + (*out)[i] = (*in)[i].DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. +func (in *List) DeepCopy() *List { + if in == nil { + return nil + } + out := new(List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ListOptions) DeepCopyInto(out *ListOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.LabelSelector == nil { + out.LabelSelector = nil + } else { + out.LabelSelector = in.LabelSelector.DeepCopySelector() + } + if in.FieldSelector == nil { + out.FieldSelector = nil + } else { + out.FieldSelector = in.FieldSelector.DeepCopySelector() + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions. +func (in *ListOptions) DeepCopy() *ListOptions { + if in == nil { + return nil + } + out := new(ListOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ListOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS index 381a52509..cdb125a0d 100755 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -12,12 +12,10 @@ reviewers: - davidopp - sttts - quinton-hoole -- kargakis - luxas - janetkuo - justinsb - ncdc -- timothysc - soltysh - dims - madhusudancs diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go new file mode 100644 index 000000000..042cd5b9c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// IsControlledBy checks if the object has a controllerRef set to the given owner +func IsControlledBy(obj Object, owner Object) bool { + ref := GetControllerOf(obj) + if ref == nil { + return false + } + return ref.UID == owner.GetUID() +} + +// GetControllerOf returns a pointer to a copy of the controllerRef if controllee has a controller +func GetControllerOf(controllee Object) *OwnerReference { + for _, ref := range controllee.GetOwnerReferences() { + if ref.Controller != nil && *ref.Controller { + return &ref + } + } + return nil +} + +// NewControllerRef creates an OwnerReference pointing to the given owner. 
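A minimal usage sketch for the controller-reference helpers added in controller_ref.go, assuming the familiar apps/v1 ReplicaSet and core/v1 Pod types purely as examples of metav1.Object implementations.

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Sketch only: stamping and reading back a controllerRef with the new helpers.
func adoptPod(rs *appsv1.ReplicaSet, pod *corev1.Pod) {
	// Mark rs as the pod's controller (Controller and BlockOwnerDeletion are set to true).
	ref := metav1.NewControllerRef(rs, appsv1.SchemeGroupVersion.WithKind("ReplicaSet"))
	pod.OwnerReferences = append(pod.OwnerReferences, *ref)

	// GetControllerOf / IsControlledBy read that reference back.
	if metav1.IsControlledBy(pod, rs) {
		// the pod now reports rs as its controller
	}
}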
+func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference { + blockOwnerDeletion := true + isController := true + return &OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + Name: owner.GetName(), + UID: owner.GetUID(), + BlockOwnerDeletion: &blockOwnerDeletion, + Controller: &isController, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index b9a8fd02d..c36fc6556 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -38,6 +38,10 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_intstr_IntOrString_To_intstr_IntOrString, Convert_unversioned_Time_To_unversioned_Time, + Convert_unversioned_MicroTime_To_unversioned_MicroTime, + + Convert_Pointer_v1_Duration_To_v1_Duration, + Convert_v1_Duration_To_Pointer_v1_Duration, Convert_Slice_string_To_unversioned_Time, @@ -61,6 +65,9 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_Pointer_int32_To_int32, Convert_int32_To_Pointer_int32, + Convert_Pointer_int64_To_int64, + Convert_int64_To_Pointer_int64, + Convert_Pointer_float64_To_float64, Convert_float64_To_Pointer_float64, @@ -68,6 +75,8 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_unversioned_LabelSelector_to_map, Convert_Slice_string_To_Slice_int32, + + Convert_Slice_string_To_v1_DeletionPropagation, ) } @@ -101,6 +110,21 @@ func Convert_int32_To_Pointer_int32(in *int32, out **int32, s conversion.Scope) return nil } +func Convert_Pointer_int64_To_int64(in **int64, out *int64, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = int64(**in) + return nil +} + +func Convert_int64_To_Pointer_int64(in *int64, out **int64, s conversion.Scope) error { + temp := int64(*in) + *out = &temp + return nil +} + func Convert_Pointer_int64_To_int(in **int64, out *int, s conversion.Scope) error { if *in == nil { *out = 0 @@ -181,6 +205,27 @@ func Convert_unversioned_Time_To_unversioned_Time(in *Time, out *Time, s convers return nil } +func Convert_Pointer_v1_Duration_To_v1_Duration(in **Duration, out *Duration, s conversion.Scope) error { + if *in == nil { + *out = Duration{} // zero duration + return nil + } + *out = **in // copy + return nil +} + +func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s conversion.Scope) error { + temp := *in //copy + *out = &temp + return nil +} + +func Convert_unversioned_MicroTime_To_unversioned_MicroTime(in *MicroTime, out *MicroTime, s conversion.Scope) error { + // Cannot deep copy these, because time.Time has unexported fields. 
+ *out = *in + return nil +} + // Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value func Convert_Slice_string_To_unversioned_Time(input *[]string, out *Time, s conversion.Scope) error { str := "" @@ -234,7 +279,6 @@ func Convert_map_to_unversioned_LabelSelector(in *map[string]string, out *LabelS if in == nil { return nil } - out = new(LabelSelector) for labelKey, labelValue := range *in { AddLabelToSelector(out, labelKey, labelValue) } @@ -262,3 +306,13 @@ func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversio } return nil } + +// Convert_Slice_string_To_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy +func Convert_Slice_string_To_v1_DeletionPropagation(input *[]string, out *DeletionPropagation, s conversion.Scope) error { + if len(*input) > 0 { + *out = DeletionPropagation((*input)[0]) + } else { + *out = "" + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go index 52273240f..61f201cdf 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -19,4 +19,4 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=meta.k8s.io -package v1 +package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go index fea458dfb..2eaabf079 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go @@ -31,7 +31,10 @@ type Duration struct { // UnmarshalJSON implements the json.Unmarshaller interface. func (d *Duration) UnmarshalJSON(b []byte) error { var str string - json.Unmarshal(b, &str) + err := json.Unmarshal(b, &str) + if err != nil { + return err + } pd, err := time.ParseDuration(str) if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 72282474d..febace500 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -40,12 +40,17 @@ limitations under the License. GroupVersionForDiscovery GroupVersionKind GroupVersionResource + Initializer + Initializers LabelSelector LabelSelectorRequirement + List ListMeta ListOptions + MicroTime ObjectMeta OwnerReference + Patch Preconditions RootPaths ServerAddressByClientCIDR @@ -64,12 +69,15 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + import time "time" import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + import strings "strings" import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" import io "io" @@ -77,10 +85,13 @@ import io "io" var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +var _ = time.Kitchen // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. 
-const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *APIGroup) Reset() { *m = APIGroup{} } func (*APIGroup) ProtoMessage() {} @@ -144,77 +155,97 @@ func (m *GroupVersionResource) Reset() { *m = GroupVersionRes func (*GroupVersionResource) ProtoMessage() {} func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *Initializer) Reset() { *m = Initializer{} } +func (*Initializer) ProtoMessage() {} +func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *Initializers) Reset() { *m = Initializers{} } +func (*Initializers) ProtoMessage() {} +func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + func (m *LabelSelector) Reset() { *m = LabelSelector{} } func (*LabelSelector) ProtoMessage() {} -func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } func (*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{16} + return fileDescriptorGenerated, []int{18} } +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + func (m *ListMeta) Reset() { *m = ListMeta{} } func (*ListMeta) ProtoMessage() {} -func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *ListOptions) Reset() { *m = ListOptions{} } func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } + +func (m *MicroTime) Reset() { *m = MicroTime{} } +func (*MicroTime) ProtoMessage() {} +func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *OwnerReference) Reset() { *m = OwnerReference{} } func (*OwnerReference) ProtoMessage() {} -func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } + +func (m *Patch) Reset() { *m = Patch{} } +func (*Patch) ProtoMessage() {} +func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *RootPaths) Reset() { *m = RootPaths{} } func (*RootPaths) ProtoMessage() {} -func (*RootPaths) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{23} + return fileDescriptorGenerated, []int{28} } func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *StatusCause) Reset() { *m = StatusCause{} } func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *StatusDetails) Reset() { *m = StatusDetails{} } func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *Time) Reset() { *m = Time{} } func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *Timestamp) Reset() { *m = Timestamp{} } func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *Verbs) Reset() { *m = Verbs{} } func (*Verbs) ProtoMessage() {} -func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } func (m *WatchEvent) Reset() { *m = WatchEvent{} } func (*WatchEvent) ProtoMessage() {} -func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func init() { proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") @@ -232,12 +263,17 @@ func init() { proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery") proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind") proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource") + proto.RegisterType((*Initializer)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializer") + proto.RegisterType((*Initializers)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializers") proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector") proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement") + proto.RegisterType((*List)(nil), 
"k8s.io.apimachinery.pkg.apis.meta.v1.List") proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions") + proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") + proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch") proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths") proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR") @@ -250,51 +286,51 @@ func init() { proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs") proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent") } -func (m *APIGroup) Marshal() (data []byte, err error) { +func (m *APIGroup) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *APIGroup) MarshalTo(data []byte) (int, error) { +func (m *APIGroup) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) if len(m.Versions) > 0 { for _, msg := range m.Versions { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.PreferredVersion.Size())) - n1, err := m.PreferredVersion.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.PreferredVersion.Size())) + n1, err := m.PreferredVersion.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 if len(m.ServerAddressByClientCIDRs) > 0 { for _, msg := range m.ServerAddressByClientCIDRs { - data[i] = 0x22 + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -304,27 +340,27 @@ func (m *APIGroup) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *APIGroupList) Marshal() (data []byte, err error) { +func (m *APIGroupList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *APIGroupList) MarshalTo(data []byte) (int, error) { +func (m *APIGroupList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Groups) > 0 { for _, msg := range m.Groups { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := 
msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -334,42 +370,42 @@ func (m *APIGroupList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *APIResource) Marshal() (data []byte, err error) { +func (m *APIResource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *APIResource) MarshalTo(data []byte) (int, error) { +func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 i++ if m.Namespaced { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) if m.Verbs != nil { - data[i] = 0x22 + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(m.Verbs.Size())) - n2, err := m.Verbs.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Verbs.Size())) + n2, err := m.Verbs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -377,47 +413,74 @@ func (m *APIResource) MarshalTo(data []byte) (int, error) { } if len(m.ShortNames) > 0 { for _, s := range m.ShortNames { - data[i] = 0x2a + dAtA[i] = 0x2a i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) + i += copy(dAtA[i:], m.SingularName) + if len(m.Categories) > 0 { + for _, s := range m.Categories { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) return i, nil } -func (m *APIResourceList) Marshal() (data []byte, err error) { +func (m *APIResourceList) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *APIResourceList) MarshalTo(data []byte) (int, error) { +func (m *APIResourceList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.GroupVersion))) - i += copy(data[i:], m.GroupVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i += copy(dAtA[i:], m.GroupVersion) if len(m.APIResources) > 0 { for _, msg := range m.APIResources { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -427,42 +490,42 @@ func (m *APIResourceList) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *APIVersions) Marshal() (data []byte, err error) { +func (m *APIVersions) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *APIVersions) MarshalTo(data []byte) (int, error) { +func (m *APIVersions) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Versions) > 0 { for _, s := range m.Versions { - data[i] = 0xa + dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if len(m.ServerAddressByClientCIDRs) > 0 { for _, msg := range m.ServerAddressByClientCIDRs { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -472,334 +535,409 @@ func (m *APIVersions) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *DeleteOptions) Marshal() (data []byte, err error) { +func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *DeleteOptions) MarshalTo(data []byte) (int, error) { +func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.GracePeriodSeconds != nil { - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(*m.GracePeriodSeconds)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) } if m.Preconditions != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(m.Preconditions.Size())) - n3, err := m.Preconditions.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Preconditions.Size())) + n3, err := m.Preconditions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n3 } if m.OrphanDependents != nil { - data[i] = 0x18 + dAtA[i] = 0x18 i++ if *m.OrphanDependents { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ } if m.PropagationPolicy != nil { - data[i] = 0x22 + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(len(*m.PropagationPolicy))) - i += copy(data[i:], *m.PropagationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) + i += copy(dAtA[i:], *m.PropagationPolicy) } return i, nil } -func (m *Duration) Marshal() (data []byte, err error) { +func (m *Duration) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Duration) MarshalTo(data []byte) (int, error) { +func (m *Duration) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = 
encodeVarintGenerated(data, i, uint64(m.Duration)) + i = encodeVarintGenerated(dAtA, i, uint64(m.Duration)) return i, nil } -func (m *ExportOptions) Marshal() (data []byte, err error) { +func (m *ExportOptions) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ExportOptions) MarshalTo(data []byte) (int, error) { +func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0x8 i++ if m.Export { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ - data[i] = 0x10 + dAtA[i] = 0x10 i++ if m.Exact { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ return i, nil } -func (m *GetOptions) Marshal() (data []byte, err error) { +func (m *GetOptions) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *GetOptions) MarshalTo(data []byte) (int, error) { +func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x10 + i++ + if m.IncludeUninitialized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) - i += copy(data[i:], m.ResourceVersion) return i, nil } -func (m *GroupKind) Marshal() (data []byte, err error) { +func (m *GroupKind) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *GroupKind) MarshalTo(data []byte) (int, error) { +func (m *GroupKind) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Group))) - i += copy(data[i:], m.Group) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) return i, nil } -func (m *GroupResource) Marshal() (data []byte, err error) { +func (m *GroupResource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *GroupResource) MarshalTo(data []byte) (int, error) { +func (m *GroupResource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Group))) - i += copy(data[i:], m.Group) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) - i += copy(data[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Resource))) + i += copy(dAtA[i:], m.Resource) return i, nil } -func (m *GroupVersion) Marshal() (data []byte, err error) { +func (m *GroupVersion) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *GroupVersion) MarshalTo(data []byte) (int, error) { +func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Group))) - i += copy(data[i:], m.Group) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Version))) - i += copy(data[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) return i, nil } -func (m *GroupVersionForDiscovery) Marshal() (data []byte, err error) { +func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *GroupVersionForDiscovery) MarshalTo(data []byte) (int, error) { +func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.GroupVersion))) - i += copy(data[i:], m.GroupVersion) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i += copy(dAtA[i:], m.GroupVersion) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Version))) - i += copy(data[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) return i, nil } -func (m *GroupVersionKind) Marshal() (data []byte, err error) { +func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *GroupVersionKind) MarshalTo(data []byte) (int, error) { +func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Group))) - i += copy(data[i:], m.Group) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Version))) - i += copy(data[i:], m.Version) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) return i, nil } -func (m *GroupVersionResource) Marshal() (data []byte, err error) { +func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return 
data[:n], nil + return dAtA[:n], nil } -func (m *GroupVersionResource) MarshalTo(data []byte) (int, error) { +func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Group))) - i += copy(data[i:], m.Group) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Version))) - i += copy(data[i:], m.Version) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) - i += copy(data[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i += copy(dAtA[i:], m.Resource) return i, nil } -func (m *LabelSelector) Marshal() (data []byte, err error) { +func (m *Initializer) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *LabelSelector) MarshalTo(data []byte) (int, error) { +func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} + +func (m *Initializers) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Initializers) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Pending) > 0 { + for _, msg := range m.Pending { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Result != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) + n4, err := m.Result.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *LabelSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.MatchLabels) > 0 { + keysForMatchLabels := make([]string, 0, len(m.MatchLabels)) for k := range m.MatchLabels { - data[i] = 0xa + keysForMatchLabels = append(keysForMatchLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) + for _, k := range keysForMatchLabels { + dAtA[i] = 0xa i++ - v := m.MatchLabels[k] + v := m.MatchLabels[string(k)] mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += 
copy(dAtA[i:], v) } } if len(m.MatchExpressions) > 0 { for _, msg := range m.MatchExpressions { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -809,221 +947,288 @@ func (m *LabelSelector) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) { +func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) { +func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) - i += copy(data[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i += copy(dAtA[i:], m.Operator) if len(m.Values) > 0 { for _, s := range m.Values { - data[i] = 0x1a + dAtA[i] = 0x1a i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } return i, nil } -func (m *ListMeta) Marshal() (data []byte, err error) { +func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ListMeta) MarshalTo(data []byte) (int, error) { +func (m *List) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SelfLink))) - i += copy(data[i:], m.SelfLink) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) - i += copy(data[i:], m.ResourceVersion) - return i, nil -} - -func (m *ListOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ListOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.LabelSelector))) - i += copy(data[i:], m.LabelSelector) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FieldSelector))) - i += copy(data[i:], m.FieldSelector) - data[i] = 0x18 - i++ - if m.Watch { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) - i += copy(data[i:], m.ResourceVersion) - if m.TimeoutSeconds != nil { - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.TimeoutSeconds)) - } - return i, nil -} - -func (m *ObjectMeta) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, 
err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ObjectMeta) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.GenerateName))) - i += copy(data[i:], m.GenerateName) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) - i += copy(data[i:], m.Namespace) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SelfLink))) - i += copy(data[i:], m.SelfLink) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.UID))) - i += copy(data[i:], m.UID) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) - i += copy(data[i:], m.ResourceVersion) - data[i] = 0x38 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Generation)) - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CreationTimestamp.Size())) - n4, err := m.CreationTimestamp.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 - if m.DeletionTimestamp != nil { - data[i] = 0x4a + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ListMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i += copy(dAtA[i:], m.SelfLink) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i += copy(dAtA[i:], m.Continue) + return i, nil +} + +func (m *ListOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) + i += copy(dAtA[i:], m.LabelSelector) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) + i += copy(dAtA[i:], m.FieldSelector) + dAtA[i] = 0x18 + i++ + if m.Watch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + if m.TimeoutSeconds != nil { + dAtA[i] = 0x28 i++ - i = encodeVarintGenerated(data, i, uint64(m.DeletionTimestamp.Size())) - n5, err := m.DeletionTimestamp.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + } + dAtA[i] = 0x30 + i++ + if m.IncludeUninitialized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) + dAtA[i] = 0x42 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i += copy(dAtA[i:], m.Continue) + return i, nil +} + +func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) + i += copy(dAtA[i:], m.GenerateName) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i += copy(dAtA[i:], m.SelfLink) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) + n6, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if m.DeletionTimestamp != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) + n7, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n7 } if m.DeletionGracePeriodSeconds != nil { - data[i] = 0x50 + dAtA[i] = 0x50 i++ - i = encodeVarintGenerated(data, i, uint64(*m.DeletionGracePeriodSeconds)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) } if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) for k := range m.Labels { - data[i] = 0x5a + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for _, k := range keysForLabels { + dAtA[i] = 0x5a i++ - v := m.Labels[k] + v := m.Labels[string(k)] mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) for k := range m.Annotations { - data[i] = 0x62 + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for _, k := range keysForAnnotations { + dAtA[i] = 0x62 i++ - v := m.Annotations[k] + v := m.Annotations[string(k)] mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 + i = 
encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } if len(m.OwnerReferences) > 0 { for _, msg := range m.OwnerReferences { - data[i] = 0x6a + dAtA[i] = 0x6a i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -1032,427 +1237,461 @@ func (m *ObjectMeta) MarshalTo(data []byte) (int, error) { } if len(m.Finalizers) > 0 { for _, s := range m.Finalizers { - data[i] = 0x72 + dAtA[i] = 0x72 i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } - data[i] = 0x7a + dAtA[i] = 0x7a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ClusterName))) - i += copy(data[i:], m.ClusterName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) + i += copy(dAtA[i:], m.ClusterName) + if m.Initializers != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) + n8, err := m.Initializers.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } return i, nil } -func (m *OwnerReference) Marshal() (data []byte, err error) { +func (m *OwnerReference) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *OwnerReference) MarshalTo(data []byte) (int, error) { +func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.UID))) - i += copy(data[i:], m.UID) - data[i] = 0x2a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) if m.Controller != nil { - data[i] = 0x30 + dAtA[i] = 0x30 i++ if *m.Controller { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ } if m.BlockOwnerDeletion != nil { - data[i] = 0x38 + dAtA[i] = 0x38 i++ if *m.BlockOwnerDeletion { - data[i] = 1 + dAtA[i] = 1 } else { - data[i] = 0 + dAtA[i] = 0 } i++ } return i, nil } -func (m *Preconditions) Marshal() (data []byte, err error) { +func (m *Patch) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m 
*Preconditions) MarshalTo(data []byte) (int, error) { +func (m *Patch) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *Preconditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.UID != nil { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(*m.UID))) - i += copy(data[i:], *m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i += copy(dAtA[i:], *m.UID) } return i, nil } -func (m *RootPaths) Marshal() (data []byte, err error) { +func (m *RootPaths) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *RootPaths) MarshalTo(data []byte) (int, error) { +func (m *RootPaths) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Paths) > 0 { for _, s := range m.Paths { - data[i] = 0xa + dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } return i, nil } -func (m *ServerAddressByClientCIDR) Marshal() (data []byte, err error) { +func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ServerAddressByClientCIDR) MarshalTo(data []byte) (int, error) { +func (m *ServerAddressByClientCIDR) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ClientCIDR))) - i += copy(data[i:], m.ClientCIDR) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) + i += copy(dAtA[i:], m.ClientCIDR) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ServerAddress))) - i += copy(data[i:], m.ServerAddress) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServerAddress))) + i += copy(dAtA[i:], m.ServerAddress) return i, nil } -func (m *Status) Marshal() (data []byte, err error) { +func (m *Status) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Status) MarshalTo(data []byte) (int, error) { +func (m *Status) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n9, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 - data[i] = 0x12 + i += n9 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + 
i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) if m.Details != nil { - data[i] = 0x2a + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(data, i, uint64(m.Details.Size())) - n7, err := m.Details.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) + n10, err := m.Details.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n10 } - data[i] = 0x30 + dAtA[i] = 0x30 i++ - i = encodeVarintGenerated(data, i, uint64(m.Code)) + i = encodeVarintGenerated(dAtA, i, uint64(m.Code)) return i, nil } -func (m *StatusCause) Marshal() (data []byte, err error) { +func (m *StatusCause) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *StatusCause) MarshalTo(data []byte) (int, error) { +func (m *StatusCause) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Field))) - i += copy(data[i:], m.Field) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Field))) + i += copy(dAtA[i:], m.Field) return i, nil } -func (m *StatusDetails) Marshal() (data []byte, err error) { +func (m *StatusDetails) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *StatusDetails) MarshalTo(data []byte) (int, error) { +func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Group))) - i += copy(data[i:], m.Group) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) if len(m.Causes) > 0 { for _, msg := range m.Causes { - data[i] = 0x22 + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, 
err } i += n } } - data[i] = 0x28 + dAtA[i] = 0x28 i++ - i = encodeVarintGenerated(data, i, uint64(m.RetryAfterSeconds)) + i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) return i, nil } -func (m *Timestamp) Marshal() (data []byte, err error) { +func (m *Timestamp) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Timestamp) MarshalTo(data []byte) (int, error) { +func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(m.Seconds)) - data[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(m.Nanos)) + i = encodeVarintGenerated(dAtA, i, uint64(m.Nanos)) return i, nil } -func (m *TypeMeta) Marshal() (data []byte, err error) { +func (m *TypeMeta) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *TypeMeta) MarshalTo(data []byte) (int, error) { +func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) return i, nil } -func (m Verbs) Marshal() (data []byte, err error) { +func (m Verbs) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m Verbs) MarshalTo(data []byte) (int, error) { +func (m Verbs) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m) > 0 { for _, s := range m { - data[i] = 0xa + dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } return i, nil } -func (m *WatchEvent) Marshal() (data []byte, err error) { +func (m *WatchEvent) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *WatchEvent) MarshalTo(data []byte) (int, error) { +func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 i++ 
- i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) - n8, err := m.Object.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) + n11, err := m.Object.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n11 return i, nil } -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintGenerated(data []byte, offset int, v uint64) int { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } func (m *APIGroup) Size() (n int) { @@ -1507,6 +1746,18 @@ func (m *APIResource) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + l = len(m.SingularName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Categories) > 0 { + for _, s := range m.Categories { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1582,6 +1833,7 @@ func (m *GetOptions) Size() (n int) { _ = l l = len(m.ResourceVersion) n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } @@ -1649,6 +1901,30 @@ func (m *GroupVersionResource) Size() (n int) { return n } +func (m *Initializer) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Initializers) Size() (n int) { + var l int + _ = l + if len(m.Pending) > 0 { + for _, e := range m.Pending { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *LabelSelector) Size() (n int) { var l int _ = l @@ -1685,6 +1961,20 @@ func (m *LabelSelectorRequirement) Size() (n int) { return n } +func (m *List) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ListMeta) Size() (n int) { var l int _ = l @@ -1692,6 +1982,8 @@ func (m *ListMeta) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.ResourceVersion) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Continue) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1708,6 +2000,10 @@ func (m *ListOptions) Size() (n int) { if 
m.TimeoutSeconds != nil { n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) } + n += 2 + n += 1 + sovGenerated(uint64(m.Limit)) + l = len(m.Continue) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1766,6 +2062,10 @@ func (m *ObjectMeta) Size() (n int) { } l = len(m.ClusterName) n += 1 + l + sovGenerated(uint64(l)) + if m.Initializers != nil { + l = m.Initializers.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -1789,6 +2089,12 @@ func (m *OwnerReference) Size() (n int) { return n } +func (m *Patch) Size() (n int) { + var l int + _ = l + return n +} + func (m *Preconditions) Size() (n int) { var l int _ = l @@ -1868,6 +2174,8 @@ func (m *StatusDetails) Size() (n int) { } } n += 1 + sovGenerated(uint64(m.RetryAfterSeconds)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1957,6 +2265,10 @@ func (this *APIResource) String() string { `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, `Verbs:` + strings.Replace(fmt.Sprintf("%v", this.Verbs), "Verbs", "Verbs", 1) + `,`, `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, + `SingularName:` + fmt.Sprintf("%v", this.SingularName) + `,`, + `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, `}`, }, "") return s @@ -2012,6 +2324,7 @@ func (this *GetOptions) String() string { } s := strings.Join([]string{`&GetOptions{`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, `}`, }, "") return s @@ -2027,6 +2340,27 @@ func (this *GroupVersionForDiscovery) String() string { }, "") return s } +func (this *Initializer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Initializer{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Initializers) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Initializers{`, + `Pending:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Pending), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, + `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "Status", 1) + `,`, + `}`, + }, "") + return s +} func (this *LabelSelector) String() string { if this == nil { return "nil" @@ -2060,6 +2394,17 @@ func (this *LabelSelectorRequirement) String() string { }, "") return s } +func (this *List) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&List{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ListMeta) String() string { if this == nil { return "nil" @@ -2067,6 +2412,7 @@ func (this *ListMeta) String() string { s := strings.Join([]string{`&ListMeta{`, `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, `}`, }, "") return s @@ -2081,6 +2427,9 @@ func (this *ListOptions) String() string { `Watch:` + fmt.Sprintf("%v", this.Watch) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `IncludeUninitialized:` + 
fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, `}`, }, "") return s @@ -2125,6 +2474,7 @@ func (this *ObjectMeta) String() string { `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + `,`, `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, + `Initializers:` + strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializers", "Initializers", 1) + `,`, `}`, }, "") return s @@ -2144,6 +2494,15 @@ func (this *OwnerReference) String() string { }, "") return s } +func (this *Patch) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Patch{`, + `}`, + }, "") + return s +} func (this *Preconditions) String() string { if this == nil { return "nil" @@ -2212,6 +2571,7 @@ func (this *StatusDetails) String() string { `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, `Causes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Causes), "StatusCause", "StatusCause", 1), `&`, ``, 1) + `,`, `RetryAfterSeconds:` + fmt.Sprintf("%v", this.RetryAfterSeconds) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `}`, }, "") return s @@ -2257,8 +2617,8 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *APIGroup) Unmarshal(data []byte) error { - l := len(data) +func (m *APIGroup) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2270,7 +2630,7 @@ func (m *APIGroup) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2298,7 +2658,7 @@ func (m *APIGroup) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2313,7 +2673,7 @@ func (m *APIGroup) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2327,7 +2687,7 @@ func (m *APIGroup) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2342,7 +2702,7 @@ func (m *APIGroup) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Versions = append(m.Versions, GroupVersionForDiscovery{}) - if err := m.Versions[len(m.Versions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2358,7 +2718,7 @@ func (m *APIGroup) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2372,7 +2732,7 @@ func (m *APIGroup) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.PreferredVersion.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.PreferredVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2388,7 +2748,7 @@ func (m *APIGroup) Unmarshal(data []byte) error { if iNdEx >= l { return 
io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2403,13 +2763,13 @@ func (m *APIGroup) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) - if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2428,8 +2788,8 @@ func (m *APIGroup) Unmarshal(data []byte) error { } return nil } -func (m *APIGroupList) Unmarshal(data []byte) error { - l := len(data) +func (m *APIGroupList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2441,7 +2801,7 @@ func (m *APIGroupList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2469,7 +2829,7 @@ func (m *APIGroupList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2484,13 +2844,13 @@ func (m *APIGroupList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Groups = append(m.Groups, APIGroup{}) - if err := m.Groups[len(m.Groups)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2509,8 +2869,8 @@ func (m *APIGroupList) Unmarshal(data []byte) error { } return nil } -func (m *APIResource) Unmarshal(data []byte) error { - l := len(data) +func (m *APIResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2522,7 +2882,7 @@ func (m *APIResource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2550,7 +2910,7 @@ func (m *APIResource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2565,7 +2925,7 @@ func (m *APIResource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { @@ -2579,7 +2939,7 @@ func (m *APIResource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2599,7 +2959,7 @@ func (m *APIResource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2614,7 +2974,7 @@ func (m *APIResource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(data[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = 
postIndex case 4: if wireType != 2 { @@ -2628,7 +2988,7 @@ func (m *APIResource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2645,7 +3005,7 @@ func (m *APIResource) Unmarshal(data []byte) error { if m.Verbs == nil { m.Verbs = Verbs{} } - if err := m.Verbs.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Verbs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2661,7 +3021,7 @@ func (m *APIResource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2676,11 +3036,127 @@ func (m *APIResource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ShortNames = append(m.ShortNames, string(data[iNdEx:postIndex])) + m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SingularName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SingularName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Categories", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2699,8 +3175,8 @@ func (m *APIResource) Unmarshal(data []byte) error { } return nil } -func (m *APIResourceList) Unmarshal(data []byte) error { - l := len(data) +func (m *APIResourceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2712,7 +3188,7 @@ func (m *APIResourceList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2740,7 +3216,7 @@ func (m *APIResourceList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2755,7 +3231,7 @@ func (m *APIResourceList) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.GroupVersion = string(data[iNdEx:postIndex]) + m.GroupVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2769,7 +3245,7 @@ func (m *APIResourceList) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2784,13 +3260,13 @@ func (m *APIResourceList) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.APIResources = append(m.APIResources, APIResource{}) - if err := m.APIResources[len(m.APIResources)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.APIResources[len(m.APIResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2809,8 +3285,8 @@ func (m *APIResourceList) Unmarshal(data []byte) error { } return nil } -func (m *APIVersions) Unmarshal(data []byte) error { - l := len(data) +func (m *APIVersions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2822,7 +3298,7 @@ func (m *APIVersions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2850,7 +3326,7 @@ func (m *APIVersions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2865,7 +3341,7 @@ func (m *APIVersions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Versions = append(m.Versions, string(data[iNdEx:postIndex])) + m.Versions = append(m.Versions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { @@ -2879,7 +3355,7 @@ func (m *APIVersions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2894,13 +3370,13 @@ func (m *APIVersions) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) - if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := 
m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -2919,8 +3395,8 @@ func (m *APIVersions) Unmarshal(data []byte) error { } return nil } -func (m *DeleteOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *DeleteOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -2932,7 +3408,7 @@ func (m *DeleteOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2960,7 +3436,7 @@ func (m *DeleteOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -2980,7 +3456,7 @@ func (m *DeleteOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2997,7 +3473,7 @@ func (m *DeleteOptions) Unmarshal(data []byte) error { if m.Preconditions == nil { m.Preconditions = &Preconditions{} } - if err := m.Preconditions.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Preconditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3013,7 +3489,7 @@ func (m *DeleteOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3034,7 +3510,7 @@ func (m *DeleteOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3049,12 +3525,12 @@ func (m *DeleteOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := DeletionPropagation(data[iNdEx:postIndex]) + s := DeletionPropagation(dAtA[iNdEx:postIndex]) m.PropagationPolicy = &s iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3073,8 +3549,8 @@ func (m *DeleteOptions) Unmarshal(data []byte) error { } return nil } -func (m *Duration) Unmarshal(data []byte) error { - l := len(data) +func (m *Duration) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3086,7 +3562,7 @@ func (m *Duration) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3114,7 +3590,7 @@ func (m *Duration) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Duration |= (time.Duration(b) & 0x7F) << shift if b < 0x80 { @@ -3123,7 +3599,7 @@ func (m *Duration) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3142,8 +3618,8 @@ func (m *Duration) Unmarshal(data []byte) error { } return nil } -func (m *ExportOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *ExportOptions) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3155,7 +3631,7 @@ func (m *ExportOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3183,7 +3659,7 @@ func (m *ExportOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3203,7 +3679,7 @@ func (m *ExportOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -3213,7 +3689,7 @@ func (m *ExportOptions) Unmarshal(data []byte) error { m.Exact = bool(v != 0) default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3232,8 +3708,8 @@ func (m *ExportOptions) Unmarshal(data []byte) error { } return nil } -func (m *GetOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *GetOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3245,7 +3721,7 @@ func (m *GetOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3273,7 +3749,7 @@ func (m *GetOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3288,11 +3764,31 @@ func (m *GetOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(data[iNdEx:postIndex]) + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeUninitialized = bool(v != 0) default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3311,8 +3807,8 @@ func (m *GetOptions) Unmarshal(data []byte) error { } return nil } -func (m *GroupKind) Unmarshal(data []byte) error { - l := len(data) +func (m *GroupKind) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3324,7 +3820,7 @@ func (m *GroupKind) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3352,7 +3848,7 @@ func (m *GroupKind) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3367,7 +3863,7 @@ func (m *GroupKind) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Group = string(data[iNdEx:postIndex]) + m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -3381,7 +3877,7 @@ func (m *GroupKind) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] 
+ b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3396,11 +3892,11 @@ func (m *GroupKind) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(data[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3419,8 +3915,8 @@ func (m *GroupKind) Unmarshal(data []byte) error { } return nil } -func (m *GroupResource) Unmarshal(data []byte) error { - l := len(data) +func (m *GroupResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3432,7 +3928,7 @@ func (m *GroupResource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3460,7 +3956,7 @@ func (m *GroupResource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3475,7 +3971,7 @@ func (m *GroupResource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Group = string(data[iNdEx:postIndex]) + m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -3489,7 +3985,7 @@ func (m *GroupResource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3504,11 +4000,11 @@ func (m *GroupResource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Resource = string(data[iNdEx:postIndex]) + m.Resource = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3527,8 +4023,8 @@ func (m *GroupResource) Unmarshal(data []byte) error { } return nil } -func (m *GroupVersion) Unmarshal(data []byte) error { - l := len(data) +func (m *GroupVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3540,7 +4036,7 @@ func (m *GroupVersion) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3568,7 +4064,7 @@ func (m *GroupVersion) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3583,7 +4079,7 @@ func (m *GroupVersion) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Group = string(data[iNdEx:postIndex]) + m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -3597,7 +4093,7 @@ func (m *GroupVersion) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3612,11 +4108,11 @@ func (m *GroupVersion) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(data[iNdEx:postIndex]) + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := 
skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3635,8 +4131,8 @@ func (m *GroupVersion) Unmarshal(data []byte) error { } return nil } -func (m *GroupVersionForDiscovery) Unmarshal(data []byte) error { - l := len(data) +func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3648,7 +4144,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3676,7 +4172,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3691,7 +4187,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.GroupVersion = string(data[iNdEx:postIndex]) + m.GroupVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -3705,7 +4201,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3720,11 +4216,11 @@ func (m *GroupVersionForDiscovery) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(data[iNdEx:postIndex]) + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3743,8 +4239,8 @@ func (m *GroupVersionForDiscovery) Unmarshal(data []byte) error { } return nil } -func (m *GroupVersionKind) Unmarshal(data []byte) error { - l := len(data) +func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3756,7 +4252,7 @@ func (m *GroupVersionKind) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3784,7 +4280,7 @@ func (m *GroupVersionKind) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3799,7 +4295,7 @@ func (m *GroupVersionKind) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Group = string(data[iNdEx:postIndex]) + m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -3813,7 +4309,7 @@ func (m *GroupVersionKind) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3828,7 +4324,7 @@ func (m *GroupVersionKind) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(data[iNdEx:postIndex]) + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -3842,7 +4338,7 @@ func (m *GroupVersionKind) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3857,11 +4353,11 @@ func (m *GroupVersionKind) Unmarshal(data []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - m.Kind = string(data[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -3880,8 +4376,8 @@ func (m *GroupVersionKind) Unmarshal(data []byte) error { } return nil } -func (m *GroupVersionResource) Unmarshal(data []byte) error { - l := len(data) +func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -3893,7 +4389,7 @@ func (m *GroupVersionResource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3921,7 +4417,7 @@ func (m *GroupVersionResource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3936,7 +4432,7 @@ func (m *GroupVersionResource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Group = string(data[iNdEx:postIndex]) + m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -3950,7 +4446,7 @@ func (m *GroupVersionResource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3965,7 +4461,7 @@ func (m *GroupVersionResource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(data[iNdEx:postIndex]) + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -3979,7 +4475,7 @@ func (m *GroupVersionResource) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -3994,11 +4490,11 @@ func (m *GroupVersionResource) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Resource = string(data[iNdEx:postIndex]) + m.Resource = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -4017,8 +4513,8 @@ func (m *GroupVersionResource) Unmarshal(data []byte) error { } return nil } -func (m *LabelSelector) Unmarshal(data []byte) error { - l := len(data) +func (m *Initializer) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -4030,7 +4526,200 @@ func (m *LabelSelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Initializer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) 
& 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Initializers) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Initializers: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Initializers: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pending", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pending = append(m.Pending, Initializer{}) + if err := m.Pending[len(m.Pending)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &Status{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4058,7 +4747,7 @@ func (m *LabelSelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 
0x80 { @@ -4080,7 +4769,7 @@ func (m *LabelSelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4095,7 +4784,7 @@ func (m *LabelSelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4110,52 +4799,57 @@ func (m *LabelSelector) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue if m.MatchLabels == nil { m.MatchLabels = make(map[string]string) } - m.MatchLabels[mapkey] = mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.MatchLabels[mapkey] = mapvalue + } else { + var mapvalue string + m.MatchLabels[mapkey] = mapvalue + } iNdEx = postIndex case 2: if wireType != 2 { @@ -4169,7 +4863,7 @@ func (m *LabelSelector) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -4184,13 +4878,13 @@ func (m *LabelSelector) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) - if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := 
skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -4209,8 +4903,8 @@ func (m *LabelSelector) Unmarshal(data []byte) error { } return nil } -func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { - l := len(data) +func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -4222,7 +4916,7 @@ func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4250,7 +4944,7 @@ func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4265,7 +4959,7 @@ func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(data[iNdEx:postIndex]) + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -4279,7 +4973,7 @@ func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4294,7 +4988,7 @@ func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Operator = LabelSelectorOperator(data[iNdEx:postIndex]) + m.Operator = LabelSelectorOperator(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -4308,7 +5002,7 @@ func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4323,11 +5017,11 @@ func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Values = append(m.Values, string(data[iNdEx:postIndex])) + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -4346,8 +5040,8 @@ func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { } return nil } -func (m *ListMeta) Unmarshal(data []byte) error { - l := len(data) +func (m *List) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -4359,7 +5053,118 @@ func (m *ListMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: List: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex 
> l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4387,7 +5192,7 @@ func (m *ListMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4402,7 +5207,7 @@ func (m *ListMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SelfLink = string(data[iNdEx:postIndex]) + m.SelfLink = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -4416,7 +5221,7 @@ func (m *ListMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4431,11 +5236,40 @@ func (m *ListMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(data[iNdEx:postIndex]) + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Continue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -4454,8 +5288,8 @@ func (m *ListMeta) Unmarshal(data []byte) error { } return nil } -func (m *ListOptions) Unmarshal(data []byte) error { - l := len(data) +func (m *ListOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -4467,7 +5301,7 @@ 
func (m *ListOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4495,7 +5329,7 @@ func (m *ListOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4510,7 +5344,7 @@ func (m *ListOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.LabelSelector = string(data[iNdEx:postIndex]) + m.LabelSelector = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -4524,7 +5358,7 @@ func (m *ListOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4539,7 +5373,7 @@ func (m *ListOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FieldSelector = string(data[iNdEx:postIndex]) + m.FieldSelector = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -4553,7 +5387,7 @@ func (m *ListOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -4573,7 +5407,7 @@ func (m *ListOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4588,7 +5422,7 @@ func (m *ListOptions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(data[iNdEx:postIndex]) + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 0 { @@ -4602,7 +5436,7 @@ func (m *ListOptions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -4610,9 +5444,77 @@ func (m *ListOptions) Unmarshal(data []byte) error { } } m.TimeoutSeconds = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeUninitialized = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Continue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -4631,8 +5533,8 @@ func (m *ListOptions) Unmarshal(data []byte) error { } return nil } -func (m *ObjectMeta) Unmarshal(data []byte) error { - l := len(data) +func (m *ObjectMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -4644,7 +5546,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4672,7 +5574,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4687,7 +5589,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -4701,7 +5603,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4716,7 +5618,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.GenerateName = string(data[iNdEx:postIndex]) + m.GenerateName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -4730,7 +5632,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4745,7 +5647,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(data[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -4759,7 +5661,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4774,7 +5676,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SelfLink = string(data[iNdEx:postIndex]) + m.SelfLink = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -4788,7 +5690,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4803,7 +5705,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.UID = k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { @@ -4817,7 +5719,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4832,7 +5734,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(data[iNdEx:postIndex]) + m.ResourceVersion = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 0 { @@ -4846,7 +5748,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Generation |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -4865,7 +5767,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -4879,7 +5781,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.CreationTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.CreationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4895,7 +5797,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -4912,7 +5814,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if m.DeletionTimestamp == nil { m.DeletionTimestamp = &Time{} } - if err := m.DeletionTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.DeletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4928,7 +5830,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -4948,7 +5850,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -4970,7 +5872,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -4985,7 +5887,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5000,52 +5902,57 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue if m.Labels == nil { m.Labels = make(map[string]string) } - m.Labels[mapkey] = mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } iNdEx = postIndex case 12: if wireType != 2 { @@ -5059,7 +5966,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5081,7 +5988,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5096,7 +6003,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5111,52 +6018,57 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey := string(data[iNdEx:postStringIndexmapkey]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue if m.Annotations == nil { m.Annotations = make(map[string]string) } - m.Annotations[mapkey] = mapvalue + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := 
int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Annotations[mapkey] = mapvalue + } else { + var mapvalue string + m.Annotations[mapkey] = mapvalue + } iNdEx = postIndex case 13: if wireType != 2 { @@ -5170,7 +6082,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5185,7 +6097,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) - if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5201,7 +6113,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5216,7 +6128,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Finalizers = append(m.Finalizers, string(data[iNdEx:postIndex])) + m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 15: if wireType != 2 { @@ -5230,7 +6142,7 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5245,11 +6157,44 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterName = string(data[iNdEx:postIndex]) + m.ClusterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Initializers == nil { + m.Initializers = &Initializers{} + } + if err := m.Initializers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -5268,8 +6213,8 @@ func (m *ObjectMeta) Unmarshal(data []byte) error { } return nil } -func (m *OwnerReference) Unmarshal(data []byte) error { - l := len(data) +func (m *OwnerReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -5281,7 +6226,7 @@ func (m *OwnerReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5309,7 +6254,7 @@ func (m *OwnerReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] 
+ b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5324,7 +6269,7 @@ func (m *OwnerReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(data[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -5338,7 +6283,7 @@ func (m *OwnerReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5353,7 +6298,7 @@ func (m *OwnerReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -5367,7 +6312,7 @@ func (m *OwnerReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5382,7 +6327,7 @@ func (m *OwnerReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.UID = k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -5396,7 +6341,7 @@ func (m *OwnerReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5411,7 +6356,7 @@ func (m *OwnerReference) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(data[iNdEx:postIndex]) + m.APIVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 0 { @@ -5425,7 +6370,7 @@ func (m *OwnerReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5446,7 +6391,7 @@ func (m *OwnerReference) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5457,7 +6402,7 @@ func (m *OwnerReference) Unmarshal(data []byte) error { m.BlockOwnerDeletion = &b default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -5476,8 +6421,8 @@ func (m *OwnerReference) Unmarshal(data []byte) error { } return nil } -func (m *Preconditions) Unmarshal(data []byte) error { - l := len(data) +func (m *Patch) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -5489,7 +6434,57 @@ func (m *Preconditions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Patch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Patch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + 
} + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Preconditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5517,7 +6512,7 @@ func (m *Preconditions) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5532,12 +6527,12 @@ func (m *Preconditions) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) m.UID = &s iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -5556,8 +6551,8 @@ func (m *Preconditions) Unmarshal(data []byte) error { } return nil } -func (m *RootPaths) Unmarshal(data []byte) error { - l := len(data) +func (m *RootPaths) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -5569,7 +6564,7 @@ func (m *RootPaths) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5597,7 +6592,7 @@ func (m *RootPaths) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5612,11 +6607,11 @@ func (m *RootPaths) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Paths = append(m.Paths, string(data[iNdEx:postIndex])) + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -5635,8 +6630,8 @@ func (m *RootPaths) Unmarshal(data []byte) error { } return nil } -func (m *ServerAddressByClientCIDR) Unmarshal(data []byte) error { - l := len(data) +func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -5648,7 +6643,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5676,7 +6671,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5691,7 +6686,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ClientCIDR = string(data[iNdEx:postIndex]) + m.ClientCIDR = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -5705,7 +6700,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5720,11 +6715,11 @@ func (m *ServerAddressByClientCIDR) 
Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ServerAddress = string(data[iNdEx:postIndex]) + m.ServerAddress = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -5743,8 +6738,8 @@ func (m *ServerAddressByClientCIDR) Unmarshal(data []byte) error { } return nil } -func (m *Status) Unmarshal(data []byte) error { - l := len(data) +func (m *Status) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -5756,7 +6751,7 @@ func (m *Status) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5784,7 +6779,7 @@ func (m *Status) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5798,7 +6793,7 @@ func (m *Status) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5814,7 +6809,7 @@ func (m *Status) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5829,7 +6824,7 @@ func (m *Status) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = string(data[iNdEx:postIndex]) + m.Status = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -5843,7 +6838,7 @@ func (m *Status) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5858,7 +6853,7 @@ func (m *Status) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -5872,7 +6867,7 @@ func (m *Status) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -5887,7 +6882,7 @@ func (m *Status) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = StatusReason(data[iNdEx:postIndex]) + m.Reason = StatusReason(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { @@ -5901,7 +6896,7 @@ func (m *Status) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -5918,7 +6913,7 @@ func (m *Status) Unmarshal(data []byte) error { if m.Details == nil { m.Details = &StatusDetails{} } - if err := m.Details.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Details.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5934,7 +6929,7 @@ func (m *Status) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Code |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -5943,7 +6938,7 @@ func (m *Status) Unmarshal(data []byte) error { } default: iNdEx = preIndex 
- skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -5962,8 +6957,8 @@ func (m *Status) Unmarshal(data []byte) error { } return nil } -func (m *StatusCause) Unmarshal(data []byte) error { - l := len(data) +func (m *StatusCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -5975,7 +6970,7 @@ func (m *StatusCause) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6003,7 +6998,7 @@ func (m *StatusCause) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6018,7 +7013,7 @@ func (m *StatusCause) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = CauseType(data[iNdEx:postIndex]) + m.Type = CauseType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -6032,7 +7027,7 @@ func (m *StatusCause) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6047,7 +7042,7 @@ func (m *StatusCause) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -6061,7 +7056,7 @@ func (m *StatusCause) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6076,11 +7071,11 @@ func (m *StatusCause) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Field = string(data[iNdEx:postIndex]) + m.Field = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -6099,8 +7094,8 @@ func (m *StatusCause) Unmarshal(data []byte) error { } return nil } -func (m *StatusDetails) Unmarshal(data []byte) error { - l := len(data) +func (m *StatusDetails) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -6112,7 +7107,7 @@ func (m *StatusDetails) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6140,7 +7135,7 @@ func (m *StatusDetails) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6155,7 +7150,7 @@ func (m *StatusDetails) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(data[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -6169,7 +7164,7 @@ func (m *StatusDetails) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6184,7 +7179,7 @@ func (m *StatusDetails) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Group = string(data[iNdEx:postIndex]) + m.Group = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -6198,7 +7193,7 @@ func (m *StatusDetails) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6213,7 +7208,7 @@ func (m *StatusDetails) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(data[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -6227,7 +7222,7 @@ func (m *StatusDetails) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -6242,7 +7237,7 @@ func (m *StatusDetails) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } m.Causes = append(m.Causes, StatusCause{}) - if err := m.Causes[len(m.Causes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Causes[len(m.Causes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6258,16 +7253,45 @@ func (m *StatusDetails) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.RetryAfterSeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -6286,8 +7310,8 @@ func (m *StatusDetails) Unmarshal(data []byte) error { } return nil } -func (m *Timestamp) Unmarshal(data []byte) error { - l := len(data) +func (m *Timestamp) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -6299,7 +7323,7 @@ func (m *Timestamp) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6327,7 +7351,7 @@ func (m *Timestamp) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Seconds |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -6346,7 +7370,7 @@ func (m *Timestamp) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Nanos |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -6355,7 +7379,7 @@ func (m *Timestamp) Unmarshal(data []byte) error { } default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -6374,8 +7398,8 @@ func (m *Timestamp) Unmarshal(data []byte) error { } return nil } -func (m *TypeMeta) Unmarshal(data []byte) error { - l := len(data) +func (m *TypeMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { 
preIndex := iNdEx @@ -6387,7 +7411,7 @@ func (m *TypeMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6415,7 +7439,7 @@ func (m *TypeMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6430,7 +7454,7 @@ func (m *TypeMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(data[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -6444,7 +7468,7 @@ func (m *TypeMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6459,11 +7483,11 @@ func (m *TypeMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(data[iNdEx:postIndex]) + m.APIVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -6482,8 +7506,8 @@ func (m *TypeMeta) Unmarshal(data []byte) error { } return nil } -func (m *Verbs) Unmarshal(data []byte) error { - l := len(data) +func (m *Verbs) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -6495,7 +7519,7 @@ func (m *Verbs) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6523,7 +7547,7 @@ func (m *Verbs) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6538,11 +7562,11 @@ func (m *Verbs) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - *m = append(*m, string(data[iNdEx:postIndex])) + *m = append(*m, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -6561,8 +7585,8 @@ func (m *Verbs) Unmarshal(data []byte) error { } return nil } -func (m *WatchEvent) Unmarshal(data []byte) error { - l := len(data) +func (m *WatchEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -6574,7 +7598,7 @@ func (m *WatchEvent) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6602,7 +7626,7 @@ func (m *WatchEvent) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6617,7 +7641,7 @@ func (m *WatchEvent) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(data[iNdEx:postIndex]) + m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -6631,7 +7655,7 @@ func (m *WatchEvent) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -6645,13 +7669,13 @@ 
func (m *WatchEvent) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -6670,8 +7694,8 @@ func (m *WatchEvent) Unmarshal(data []byte) error { } return nil } -func skipGenerated(data []byte) (n int, err error) { - l := len(data) +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -6682,7 +7706,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6700,7 +7724,7 @@ func skipGenerated(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -6717,7 +7741,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -6740,7 +7764,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -6751,7 +7775,7 @@ func skipGenerated(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipGenerated(data[start:]) + next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } @@ -6775,141 +7799,163 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -var fileDescriptorGenerated = []byte{ - // 2160 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x19, 0xcd, 0x6f, 0x23, 0x57, - 0x3d, 0x63, 0xc7, 0x5e, 0xfb, 0xe7, 0x38, 0x1f, 0xaf, 0x59, 0x70, 0x23, 0x61, 0xa7, 0xd3, 0x0a, - 0xa5, 0xb0, 0xb5, 0x49, 0x0a, 0xd5, 0xb2, 0xc0, 0x42, 0x26, 0xce, 0x46, 0x51, 0x37, 0x9b, 0xe8, - 0xa5, 0xbb, 0x88, 0x65, 0x85, 0x98, 0xcc, 0xbc, 0x38, 0x43, 0xc6, 0x33, 0xc3, 0x7b, 0x63, 0x6f, - 0x4c, 0x0f, 0x54, 0x2a, 0x48, 0x1c, 0x10, 0xda, 0x23, 0x07, 0x84, 0xba, 0x82, 0x1b, 0x37, 0xfe, - 0x06, 0x24, 0xf6, 0x58, 0x89, 0x0b, 0x07, 0x64, 0xb1, 0xee, 0x81, 0x23, 0xf7, 0x9c, 0xd0, 0x7b, - 0xf3, 0xe6, 0xcb, 0x8e, 0x9b, 0x31, 0xed, 0xa1, 0xa7, 0x78, 0x7e, 0xdf, 0xef, 0xf7, 0xfd, 0x5e, - 0xe0, 0xe0, 0xfc, 0x36, 0x6b, 0x5a, 0x6e, 0xeb, 0xbc, 0x77, 0x42, 0xa8, 0x43, 0x7c, 0xc2, 0x5a, - 0x7d, 0xe2, 0x98, 0x2e, 0x6d, 0x49, 0x84, 0xee, 0x59, 0x5d, 0xdd, 0x38, 0xb3, 0x1c, 0x42, 0x07, - 0x2d, 0xef, 0xbc, 0xc3, 0x01, 0xac, 0xd5, 0x25, 0xbe, 0xde, 0xea, 0x6f, 0xb6, 0x3a, 0xc4, 0x21, - 0x54, 0xf7, 0x89, 0xd9, 0xf4, 0xa8, 0xeb, 0xbb, 0xe8, 0x8d, 0x80, 0xab, 0x99, 0xe4, 0x6a, 0x7a, - 0xe7, 0x1d, 0x0e, 0x60, 0x4d, 0xce, 0xd5, 0xec, 0x6f, 0xae, 0xbd, 0xd5, 0xb1, 0xfc, 0xb3, 0xde, - 0x49, 0xd3, 0x70, 0xbb, 0xad, 0x8e, 0xdb, 0x71, 0x5b, 0x82, 0xf9, 0xa4, 0x77, 0x2a, 0xbe, 0xc4, - 0x87, 0xf8, 0x15, 0x08, 0x5d, 0x9b, 0x6a, 0x0a, 0xed, 0x39, 0xbe, 0xd5, 0x25, 0xe3, 0x56, 0xac, - 0xbd, 0x73, 0x1d, 0x03, 0x33, 0xce, 0x48, 0x57, 0x9f, 0xe0, 0x7b, 0x7b, 0x1a, 0x5f, 0xcf, 0xb7, - 0xec, 0x96, 0xe5, 0xf8, 0xcc, 0xa7, 0xe3, 0x4c, 0xea, 0xdf, 0xf3, 0x50, 0xda, 0x3e, 0xda, 0xdf, - 0xa3, 0x6e, 0xcf, 0x43, 0xeb, 0x30, 0xef, 
0xe8, 0x5d, 0x52, 0x53, 0xd6, 0x95, 0x8d, 0xb2, 0xb6, - 0xf0, 0x62, 0xd8, 0x98, 0x1b, 0x0d, 0x1b, 0xf3, 0x0f, 0xf4, 0x2e, 0xc1, 0x02, 0x83, 0x6c, 0x28, - 0xf5, 0x09, 0x65, 0x96, 0xeb, 0xb0, 0x5a, 0x6e, 0x3d, 0xbf, 0x51, 0xd9, 0xba, 0xdb, 0xcc, 0xe2, - 0xb4, 0xa6, 0x50, 0xf0, 0x28, 0x60, 0xbd, 0xe7, 0xd2, 0xb6, 0xc5, 0x0c, 0xb7, 0x4f, 0xe8, 0x40, - 0x5b, 0x96, 0x5a, 0x4a, 0x12, 0xc9, 0x70, 0xa4, 0x01, 0xfd, 0x4a, 0x81, 0x65, 0x8f, 0x92, 0x53, - 0x42, 0x29, 0x31, 0x25, 0xbe, 0x96, 0x5f, 0x57, 0x3e, 0x07, 0xb5, 0x35, 0xa9, 0x76, 0xf9, 0x68, - 0x4c, 0x3e, 0x9e, 0xd0, 0x88, 0xfe, 0xa4, 0xc0, 0x1a, 0x23, 0xb4, 0x4f, 0xe8, 0xb6, 0x69, 0x52, - 0xc2, 0x98, 0x36, 0xd8, 0xb1, 0x2d, 0xe2, 0xf8, 0x3b, 0xfb, 0x6d, 0xcc, 0x6a, 0xf3, 0xc2, 0x0f, - 0xdf, 0xcf, 0x66, 0xd0, 0xf1, 0x34, 0x39, 0x9a, 0x2a, 0x2d, 0x5a, 0x9b, 0x4a, 0xc2, 0xf0, 0xa7, - 0x98, 0xa1, 0x9e, 0xc2, 0x42, 0x18, 0xc8, 0xfb, 0x16, 0xf3, 0xd1, 0x23, 0x28, 0x76, 0xf8, 0x07, - 0xab, 0x29, 0xc2, 0xc0, 0x66, 0x36, 0x03, 0x43, 0x19, 0xda, 0xa2, 0xb4, 0xa7, 0x28, 0x3e, 0x19, - 0x96, 0xd2, 0xd4, 0x0f, 0x73, 0x50, 0xd9, 0x3e, 0xda, 0xc7, 0x84, 0xb9, 0x3d, 0x6a, 0x90, 0x0c, - 0x49, 0xb3, 0x05, 0xc0, 0xff, 0x32, 0x4f, 0x37, 0x88, 0x59, 0xcb, 0xad, 0x2b, 0x1b, 0x25, 0x0d, - 0x49, 0x3a, 0x78, 0x10, 0x61, 0x70, 0x82, 0x8a, 0x4b, 0x3d, 0xb7, 0x1c, 0x53, 0x44, 0x3b, 0x21, - 0xf5, 0x5d, 0xcb, 0x31, 0xb1, 0xc0, 0xa0, 0xfb, 0x50, 0xe8, 0x13, 0x7a, 0xc2, 0xfd, 0xcf, 0x13, - 0xe2, 0xeb, 0xd9, 0x8e, 0xf7, 0x88, 0xb3, 0x68, 0xe5, 0xd1, 0xb0, 0x51, 0x10, 0x3f, 0x71, 0x20, - 0x04, 0x35, 0x01, 0xd8, 0x99, 0x4b, 0x7d, 0x61, 0x4e, 0xad, 0xb0, 0x9e, 0xdf, 0x28, 0x6b, 0x8b, - 0xdc, 0xbe, 0xe3, 0x08, 0x8a, 0x13, 0x14, 0xea, 0x5f, 0x15, 0x58, 0x4a, 0x78, 0x41, 0x78, 0xfc, - 0x36, 0x2c, 0x74, 0x12, 0xf9, 0x26, 0x3d, 0xb2, 0x2a, 0x6d, 0x5f, 0x48, 0xe6, 0x22, 0x4e, 0x51, - 0x22, 0x02, 0x65, 0x2a, 0x25, 0x85, 0x75, 0xb5, 0x99, 0x39, 0x5c, 0xa1, 0x0d, 0xb1, 0xa6, 0x04, - 0x90, 0xe1, 0x58, 0xb2, 0xfa, 0x1f, 0x45, 0x84, 0x2e, 0xac, 0x34, 0xb4, 0x91, 0xa8, 0x66, 0x45, - 0x1c, 0x79, 0x61, 0x4a, 0x25, 0x5e, 0x53, 0x02, 0xb9, 0x2f, 0x44, 0x09, 0xdc, 0x29, 0xfd, 0xfe, - 0xa3, 0xc6, 0xdc, 0x07, 0xff, 0x5a, 0x9f, 0x53, 0x3f, 0xc9, 0x41, 0xb5, 0x4d, 0x6c, 0xe2, 0x93, - 0x43, 0xcf, 0x17, 0x27, 0xb8, 0x07, 0xa8, 0x43, 0x75, 0x83, 0x1c, 0x11, 0x6a, 0xb9, 0xe6, 0x31, - 0x31, 0x5c, 0xc7, 0x64, 0x22, 0x44, 0x79, 0xed, 0x4b, 0xa3, 0x61, 0x03, 0xed, 0x4d, 0x60, 0xf1, - 0x15, 0x1c, 0xc8, 0x86, 0xaa, 0x47, 0xc5, 0x6f, 0xcb, 0x97, 0x6d, 0x90, 0xa7, 0xdf, 0xdb, 0xd9, - 0xce, 0x7e, 0x94, 0x64, 0xd5, 0x56, 0x46, 0xc3, 0x46, 0x35, 0x05, 0xc2, 0x69, 0xe1, 0xe8, 0x07, - 0xb0, 0xec, 0x52, 0xef, 0x4c, 0x77, 0xda, 0xc4, 0x23, 0x8e, 0x49, 0x1c, 0x9f, 0x89, 0x92, 0x28, - 0x69, 0xab, 0xbc, 0x79, 0x1d, 0x8e, 0xe1, 0xf0, 0x04, 0x35, 0x7a, 0x0c, 0x2b, 0x1e, 0x75, 0x3d, - 0xbd, 0xa3, 0x73, 0x89, 0x47, 0xae, 0x6d, 0x19, 0x03, 0x51, 0x32, 0x65, 0xed, 0xd6, 0x68, 0xd8, - 0x58, 0x39, 0x1a, 0x47, 0x5e, 0x0e, 0x1b, 0xaf, 0x08, 0xd7, 0x71, 0x48, 0x8c, 0xc4, 0x93, 0x62, - 0xd4, 0x7d, 0x28, 0xb5, 0x7b, 0x54, 0x40, 0xd0, 0xf7, 0xa0, 0x64, 0xca, 0xdf, 0xd2, 0xab, 0xaf, - 0x85, 0x9d, 0x3d, 0xa4, 0xb9, 0x1c, 0x36, 0xaa, 0x7c, 0x80, 0x35, 0x43, 0x00, 0x8e, 0x58, 0xd4, - 0x27, 0x50, 0xdd, 0xbd, 0xf0, 0x5c, 0xea, 0x87, 0xf1, 0xfa, 0x2a, 0x14, 0x89, 0x00, 0x08, 0x69, - 0xa5, 0xb8, 0x1d, 0x05, 0x64, 0x58, 0x62, 0xd1, 0xeb, 0x50, 0x20, 0x17, 0xba, 0xe1, 0xcb, 0xbe, - 0x52, 0x95, 0x64, 0x85, 0x5d, 0x0e, 0xc4, 0x01, 0x4e, 0x3d, 0x04, 0xd8, 0x23, 0x91, 0xe8, 0x6d, - 0x58, 0x0a, 0x6b, 0x22, 0x5d, 0xaa, 0x5f, 0x96, 0xcc, 0x4b, 0x38, 
0x8d, 0xc6, 0xe3, 0xf4, 0xea, - 0x13, 0x28, 0x8b, 0x72, 0xe6, 0xfd, 0x88, 0x9b, 0x20, 0xaa, 0x59, 0x4a, 0x89, 0x4c, 0x10, 0x14, - 0x38, 0xc0, 0x45, 0x0d, 0x2d, 0x37, 0xad, 0xa1, 0x25, 0xb2, 0xd7, 0x86, 0x6a, 0xc0, 0x1b, 0xf6, - 0xd8, 0x4c, 0x1a, 0x6e, 0x41, 0x29, 0x34, 0x53, 0x6a, 0x89, 0x66, 0x6b, 0x28, 0x08, 0x47, 0x14, - 0x09, 0x6d, 0x67, 0x90, 0x6a, 0x4d, 0xd9, 0x94, 0xbd, 0x09, 0x37, 0x64, 0x73, 0x90, 0xba, 0x96, - 0x24, 0xd9, 0x8d, 0xd0, 0x67, 0x21, 0x3e, 0xa1, 0xe9, 0x97, 0x50, 0x9b, 0x36, 0x90, 0x3f, 0x43, - 0xf3, 0xcc, 0x6e, 0x8a, 0xfa, 0x3b, 0x05, 0x96, 0x93, 0x92, 0xb2, 0x87, 0x2f, 0xbb, 0x92, 0xeb, - 0x47, 0x57, 0xc2, 0x23, 0x7f, 0x54, 0x60, 0x35, 0x75, 0xb4, 0x99, 0x22, 0x3e, 0x83, 0x51, 0xc9, - 0xe4, 0xc8, 0xcf, 0x90, 0x1c, 0xff, 0xc8, 0x41, 0xf5, 0xbe, 0x7e, 0x42, 0xec, 0x63, 0x62, 0x13, - 0xc3, 0x77, 0x29, 0x7a, 0x1f, 0x2a, 0x5d, 0xdd, 0x37, 0xce, 0x04, 0x34, 0x5c, 0x2e, 0xda, 0xd9, - 0xda, 0x5f, 0x4a, 0x52, 0xf3, 0x20, 0x16, 0xb3, 0xeb, 0xf8, 0x74, 0xa0, 0xbd, 0x22, 0x4d, 0xaa, - 0x24, 0x30, 0x38, 0xa9, 0x4d, 0x6c, 0x84, 0xe2, 0x7b, 0xf7, 0xc2, 0xe3, 0xfd, 0x7f, 0xf6, 0x45, - 0x34, 0x65, 0x02, 0x26, 0x3f, 0xef, 0x59, 0x94, 0x74, 0x89, 0xe3, 0xc7, 0x1b, 0xe1, 0xc1, 0x98, - 0x7c, 0x3c, 0xa1, 0x71, 0xed, 0x2e, 0x2c, 0x8f, 0x1b, 0x8f, 0x96, 0x21, 0x7f, 0x4e, 0x06, 0x41, - 0xbc, 0x30, 0xff, 0x89, 0x56, 0xa1, 0xd0, 0xd7, 0xed, 0x9e, 0xac, 0x46, 0x1c, 0x7c, 0xdc, 0xc9, - 0xdd, 0x56, 0xd4, 0x3f, 0x2b, 0x50, 0x9b, 0x66, 0x08, 0xfa, 0x4a, 0x42, 0x90, 0x56, 0x91, 0x56, - 0xe5, 0xdf, 0x25, 0x83, 0x40, 0xea, 0x2e, 0x94, 0x5c, 0x8f, 0xef, 0xf0, 0x2e, 0x95, 0x51, 0x7f, - 0x33, 0x8c, 0xe4, 0xa1, 0x84, 0x5f, 0x0e, 0x1b, 0x37, 0x53, 0xe2, 0x43, 0x04, 0x8e, 0x58, 0x91, - 0x0a, 0x45, 0x61, 0x0f, 0x9f, 0x27, 0x7c, 0xf2, 0x03, 0xef, 0xad, 0x8f, 0x04, 0x04, 0x4b, 0x8c, - 0xfa, 0x3e, 0x94, 0xf8, 0x62, 0x73, 0x40, 0x7c, 0x9d, 0x27, 0x10, 0x23, 0xf6, 0xe9, 0x7d, 0xcb, - 0x39, 0x97, 0xa6, 0x45, 0x09, 0x74, 0x2c, 0xe1, 0x38, 0xa2, 0xb8, 0xaa, 0xc5, 0xe6, 0x66, 0x6c, - 0xb1, 0x7f, 0xc9, 0x41, 0x85, 0x6b, 0x0f, 0xbb, 0xf6, 0x77, 0xa0, 0x6a, 0x27, 0xcf, 0x24, 0xad, - 0xb8, 0x29, 0x05, 0xa6, 0xb3, 0x14, 0xa7, 0x69, 0x39, 0xf3, 0xa9, 0x45, 0x6c, 0x33, 0x62, 0xce, - 0xa5, 0x99, 0xef, 0x25, 0x91, 0x38, 0x4d, 0xcb, 0x6b, 0xf1, 0x29, 0x8f, 0xb6, 0x9c, 0xbc, 0x51, - 0x2d, 0xfe, 0x90, 0x03, 0x71, 0x80, 0xbb, 0xea, 0xc4, 0xf3, 0xb3, 0x9d, 0x18, 0xdd, 0x81, 0x45, - 0x3e, 0x1e, 0xdd, 0x9e, 0x1f, 0xae, 0x27, 0x05, 0x31, 0x48, 0xd1, 0x68, 0xd8, 0x58, 0x7c, 0x2f, - 0x85, 0xc1, 0x63, 0x94, 0xea, 0x87, 0x00, 0x70, 0x78, 0xf2, 0x33, 0x62, 0x04, 0xd1, 0xba, 0x7e, - 0x29, 0xe7, 0xfd, 0x56, 0xde, 0x05, 0x39, 0x54, 0x3a, 0x24, 0xee, 0xb7, 0x09, 0x1c, 0x4e, 0x51, - 0xa2, 0x16, 0x94, 0xa3, 0x45, 0x5d, 0xf6, 0x92, 0x15, 0xc9, 0x56, 0x8e, 0xb6, 0x79, 0x1c, 0xd3, - 0xa4, 0x52, 0x67, 0xfe, 0xda, 0xd4, 0xd1, 0x20, 0xdf, 0xb3, 0x4c, 0x71, 0xf4, 0xb2, 0xf6, 0x8d, - 0x30, 0xfd, 0x1f, 0xee, 0xb7, 0x2f, 0x87, 0x8d, 0xd7, 0xa6, 0x5d, 0x71, 0xfd, 0x81, 0x47, 0x58, - 0xf3, 0xe1, 0x7e, 0x1b, 0x73, 0xe6, 0xab, 0x82, 0x51, 0x9c, 0x31, 0x18, 0x5b, 0x00, 0xf2, 0xd4, - 0x9c, 0xfb, 0x46, 0x10, 0x88, 0xf0, 0xd2, 0xb2, 0x17, 0x61, 0x70, 0x82, 0x0a, 0x31, 0x58, 0x31, - 0x28, 0x11, 0xbf, 0x79, 0xb8, 0x98, 0xaf, 0x77, 0xbd, 0x5a, 0x49, 0xec, 0x87, 0x5f, 0xcb, 0xd6, - 0x9d, 0x38, 0x9b, 0xf6, 0xaa, 0x54, 0xb3, 0xb2, 0x33, 0x2e, 0x0c, 0x4f, 0xca, 0x47, 0x2e, 0xac, - 0x98, 0x72, 0x5d, 0x8b, 0x95, 0x96, 0x67, 0x56, 0x7a, 0x93, 0x2b, 0x6c, 0x8f, 0x0b, 0xc2, 0x93, - 0xb2, 0xd1, 0x4f, 0x60, 0x2d, 0x04, 0x4e, 0xee, 0xcc, 0x35, 0x10, 0x9e, 0xaa, 0xf3, 0x2d, 
0xbe, - 0x3d, 0x95, 0x0a, 0x7f, 0x8a, 0x04, 0x64, 0x42, 0xd1, 0x0e, 0x66, 0x4b, 0x45, 0x34, 0xf6, 0xef, - 0x66, 0x3b, 0x45, 0x9c, 0xfd, 0xcd, 0xe4, 0x4c, 0x89, 0xf6, 0x46, 0x39, 0x4e, 0xa4, 0x6c, 0x74, - 0x01, 0x15, 0xdd, 0x71, 0x5c, 0x5f, 0x0f, 0xb6, 0xf8, 0x05, 0xa1, 0x6a, 0x7b, 0x66, 0x55, 0xdb, - 0xb1, 0x8c, 0xb1, 0x19, 0x96, 0xc0, 0xe0, 0xa4, 0x2a, 0xf4, 0x14, 0x96, 0xdc, 0xa7, 0x0e, 0xa1, - 0x98, 0x9c, 0x12, 0x4a, 0x1c, 0x7e, 0xe5, 0xab, 0x0a, 0xed, 0xdf, 0xcc, 0xa8, 0x3d, 0xc5, 0x1c, - 0xa7, 0x74, 0x1a, 0xce, 0xf0, 0xb8, 0x16, 0x7e, 0xc7, 0x3d, 0xb5, 0x1c, 0xdd, 0xb6, 0x7e, 0x41, - 0x28, 0xab, 0x2d, 0xc6, 0x77, 0xdc, 0x7b, 0x11, 0x14, 0x27, 0x28, 0xd0, 0xb7, 0xa0, 0x62, 0xd8, - 0x3d, 0xe6, 0x13, 0x2a, 0x3a, 0xc4, 0x92, 0xa8, 0xa0, 0xe8, 0x7c, 0x3b, 0x31, 0x0a, 0x27, 0xe9, - 0xd6, 0xbe, 0x0d, 0x95, 0xff, 0x73, 0x2e, 0xf2, 0xb9, 0x3a, 0xee, 0xd0, 0x99, 0xe6, 0xea, 0xdf, - 0x72, 0xb0, 0x98, 0x76, 0x43, 0xb4, 0x8d, 0x29, 0x53, 0x1f, 0x12, 0xc2, 0x5e, 0x99, 0x9f, 0xda, - 0x2b, 0x65, 0x4b, 0x9a, 0xff, 0x2c, 0x2d, 0x69, 0x0b, 0x40, 0xf7, 0xac, 0xb0, 0x1b, 0x05, 0xdd, - 0x2d, 0xea, 0x27, 0xf1, 0xa5, 0x1c, 0x27, 0xa8, 0x78, 0xc0, 0x0c, 0xd7, 0xf1, 0xa9, 0x6b, 0xdb, - 0x84, 0x8a, 0x0e, 0x56, 0x0a, 0x02, 0xb6, 0x13, 0x41, 0x71, 0x82, 0x82, 0xdf, 0x71, 0x4f, 0x6c, - 0xd7, 0x38, 0x17, 0x2e, 0x08, 0xab, 0x4f, 0xf4, 0xae, 0x52, 0x70, 0xc7, 0xd5, 0x26, 0xb0, 0xf8, - 0x0a, 0x0e, 0xf5, 0x10, 0xd2, 0xb7, 0x52, 0x74, 0x37, 0x70, 0x80, 0x12, 0x5d, 0x1b, 0x67, 0x3b, - 0xbc, 0x7a, 0x0b, 0xca, 0xd8, 0x75, 0xfd, 0x23, 0xdd, 0x3f, 0x63, 0xa8, 0x01, 0x05, 0x8f, 0xff, - 0x90, 0x4f, 0x0e, 0xe2, 0x2d, 0x46, 0x60, 0x70, 0x00, 0x57, 0x7f, 0xab, 0xc0, 0xab, 0x53, 0x5f, - 0x00, 0xb8, 0x23, 0x8d, 0xe8, 0x4b, 0x9a, 0x14, 0x39, 0x32, 0xa6, 0xc3, 0x09, 0x2a, 0x3e, 0xfe, - 0x53, 0xcf, 0x06, 0xe3, 0xe3, 0x3f, 0xa5, 0x0d, 0xa7, 0x69, 0xd5, 0xff, 0xe6, 0xa0, 0x78, 0xec, - 0xeb, 0x7e, 0x8f, 0xa1, 0x27, 0x50, 0xe2, 0x55, 0x68, 0xea, 0xbe, 0x2e, 0x34, 0x67, 0x7e, 0x55, - 0x0b, 0xd7, 0xa8, 0x78, 0xf2, 0x85, 0x10, 0x1c, 0x49, 0xe4, 0x57, 0x5e, 0x26, 0xf4, 0x48, 0xf3, - 0xa2, 0xd6, 0x15, 0x68, 0xc7, 0x12, 0xcb, 0xd7, 0xfe, 0x2e, 0x61, 0x4c, 0xef, 0x84, 0x39, 0x1b, - 0xad, 0xfd, 0x07, 0x01, 0x18, 0x87, 0x78, 0xf4, 0x0e, 0x14, 0x29, 0xd1, 0x59, 0xb4, 0x8c, 0xd4, - 0x43, 0x91, 0x58, 0x40, 0x2f, 0x87, 0x8d, 0x05, 0x29, 0x5c, 0x7c, 0x63, 0x49, 0x8d, 0x1e, 0xc3, - 0x0d, 0x93, 0xf8, 0xba, 0x65, 0x07, 0x3b, 0x48, 0xe6, 0xf7, 0x8d, 0x40, 0x58, 0x3b, 0x60, 0xd5, - 0x2a, 0xdc, 0x26, 0xf9, 0x81, 0x43, 0x81, 0xbc, 0xde, 0x0c, 0xd7, 0x24, 0x22, 0x9f, 0x0b, 0x71, - 0xbd, 0xed, 0xb8, 0x26, 0xc1, 0x02, 0xa3, 0x3e, 0x53, 0xa0, 0x12, 0x48, 0xda, 0xd1, 0x7b, 0x8c, - 0xa0, 0xcd, 0xe8, 0x14, 0x41, 0xb8, 0xc3, 0x01, 0x39, 0xff, 0xde, 0xc0, 0x23, 0x97, 0xc3, 0x46, - 0x59, 0x90, 0xf1, 0x8f, 0xe8, 0x00, 0x09, 0x1f, 0xe5, 0xae, 0xf1, 0xd1, 0xeb, 0x50, 0x10, 0xfb, - 0x9e, 0x74, 0x66, 0xb4, 0xde, 0x89, 0x9d, 0x10, 0x07, 0x38, 0xf5, 0x0f, 0x39, 0xa8, 0xa6, 0x0e, - 0x97, 0x61, 0xc5, 0x8a, 0xee, 0x70, 0xb9, 0x0c, 0xef, 0x02, 0xd3, 0x1f, 0x3a, 0x7f, 0x04, 0x45, - 0x83, 0x9f, 0x2f, 0x7c, 0x69, 0xde, 0x9c, 0x25, 0x14, 0xc2, 0x33, 0x71, 0x26, 0x89, 0x4f, 0x86, - 0xa5, 0x40, 0xb4, 0x07, 0x2b, 0x94, 0xf8, 0x74, 0xb0, 0x7d, 0xea, 0x13, 0x9a, 0x5c, 0x3a, 0x0b, - 0xf1, 0x12, 0x82, 0xc7, 0x09, 0xf0, 0x24, 0x8f, 0x6a, 0xc3, 0x3c, 0x5f, 0x10, 0xb8, 0xdb, 0x59, - 0xea, 0x69, 0x2d, 0x72, 0x7b, 0xc8, 0x1c, 0xe2, 0xb9, 0x77, 0x1c, 0xdd, 0x71, 0x83, 0x64, 0x2f, - 0xc4, 0xde, 0x79, 0xc0, 0x81, 0x38, 0xc0, 0xdd, 0x59, 0xe5, 0x17, 0xd1, 0xdf, 0x3c, 0x6f, 0xcc, - 0x3d, 0x7b, 0xde, 
0x98, 0xfb, 0xe8, 0xb9, 0xbc, 0x94, 0xfe, 0x18, 0xca, 0xf1, 0x3a, 0xf2, 0x39, - 0xab, 0x54, 0x7f, 0x0a, 0x25, 0x9e, 0x49, 0xe1, 0x1a, 0x7d, 0xcd, 0xf0, 0x48, 0xb7, 0xf5, 0x5c, - 0x96, 0xb6, 0xae, 0x6e, 0x41, 0xf0, 0xf6, 0xcc, 0x3b, 0xa1, 0xe5, 0x93, 0x6e, 0xaa, 0x13, 0xee, - 0x73, 0x00, 0x0e, 0xe0, 0x89, 0x7b, 0xf8, 0xaf, 0x15, 0x00, 0x71, 0xdf, 0xd8, 0xed, 0xf3, 0x3b, - 0xe2, 0x3a, 0xcc, 0xf3, 0x16, 0x3b, 0x6e, 0x98, 0x28, 0x01, 0x81, 0x41, 0x0f, 0xa1, 0xe8, 0x8a, - 0x35, 0x45, 0x3e, 0x50, 0xbe, 0x35, 0x35, 0x6b, 0xe4, 0xbf, 0x95, 0x9a, 0x58, 0x7f, 0xba, 0x7b, - 0xe1, 0x13, 0x87, 0xdb, 0x18, 0x67, 0x4c, 0xb0, 0xeb, 0x60, 0x29, 0x4c, 0x7b, 0xe3, 0xc5, 0xcb, - 0xfa, 0xdc, 0xc7, 0x2f, 0xeb, 0x73, 0xff, 0x7c, 0x59, 0x9f, 0xfb, 0x60, 0x54, 0x57, 0x5e, 0x8c, - 0xea, 0xca, 0xc7, 0xa3, 0xba, 0xf2, 0xef, 0x51, 0x5d, 0x79, 0xf6, 0x49, 0x7d, 0xee, 0x71, 0xae, - 0xbf, 0xf9, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb1, 0xe8, 0xc1, 0x2f, 0x98, 0x1b, 0x00, 0x00, +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 2435 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4d, 0x6c, 0x23, 0x49, + 0x15, 0x4e, 0xdb, 0xb1, 0x63, 0x3f, 0xc7, 0xf9, 0xa9, 0xcd, 0x80, 0x37, 0x02, 0x3b, 0xdb, 0x8b, + 0x56, 0x59, 0x98, 0xb5, 0x49, 0x16, 0x56, 0xc3, 0x00, 0x03, 0xe9, 0x38, 0x33, 0x8a, 0x76, 0x32, + 0x63, 0x55, 0x76, 0x06, 0x31, 0x8c, 0x10, 0x9d, 0x76, 0xc5, 0x69, 0xd2, 0xee, 0xf6, 0x56, 0x95, + 0x33, 0x09, 0x1c, 0xd8, 0x03, 0x48, 0x1c, 0x10, 0x9a, 0x23, 0x27, 0xb4, 0x23, 0xb8, 0x70, 0xe5, + 0xc4, 0x05, 0x4e, 0x48, 0xcc, 0x71, 0x24, 0x2e, 0x7b, 0x40, 0xd6, 0x8e, 0xf7, 0xc0, 0x09, 0x71, + 0xcf, 0x09, 0x55, 0x75, 0xf5, 0x9f, 0x1d, 0x4f, 0xda, 0x3b, 0x0b, 0xe2, 0x14, 0xf7, 0xfb, 0xf9, + 0xde, 0xab, 0x57, 0xaf, 0x5e, 0xbd, 0x7a, 0x81, 0xbd, 0xe3, 0x6b, 0xac, 0x6e, 0x7b, 0x8d, 0xe3, + 0xfe, 0x01, 0xa1, 0x2e, 0xe1, 0x84, 0x35, 0x4e, 0x88, 0xdb, 0xf6, 0x68, 0x43, 0x31, 0xcc, 0x9e, + 0xdd, 0x35, 0xad, 0x23, 0xdb, 0x25, 0xf4, 0xac, 0xd1, 0x3b, 0xee, 0x08, 0x02, 0x6b, 0x74, 0x09, + 0x37, 0x1b, 0x27, 0x1b, 0x8d, 0x0e, 0x71, 0x09, 0x35, 0x39, 0x69, 0xd7, 0x7b, 0xd4, 0xe3, 0x1e, + 0xfa, 0x92, 0xaf, 0x55, 0x8f, 0x6b, 0xd5, 0x7b, 0xc7, 0x1d, 0x41, 0x60, 0x75, 0xa1, 0x55, 0x3f, + 0xd9, 0x58, 0x7d, 0xab, 0x63, 0xf3, 0xa3, 0xfe, 0x41, 0xdd, 0xf2, 0xba, 0x8d, 0x8e, 0xd7, 0xf1, + 0x1a, 0x52, 0xf9, 0xa0, 0x7f, 0x28, 0xbf, 0xe4, 0x87, 0xfc, 0xe5, 0x83, 0xae, 0x4e, 0x74, 0x85, + 0xf6, 0x5d, 0x6e, 0x77, 0xc9, 0xa8, 0x17, 0xab, 0xef, 0x5c, 0xa6, 0xc0, 0xac, 0x23, 0xd2, 0x35, + 0xc7, 0xf4, 0xde, 0x9e, 0xa4, 0xd7, 0xe7, 0xb6, 0xd3, 0xb0, 0x5d, 0xce, 0x38, 0x1d, 0x55, 0xd2, + 0xff, 0x96, 0x85, 0xc2, 0x56, 0x6b, 0xf7, 0x16, 0xf5, 0xfa, 0x3d, 0xb4, 0x06, 0xb3, 0xae, 0xd9, + 0x25, 0x15, 0x6d, 0x4d, 0x5b, 0x2f, 0x1a, 0xf3, 0x4f, 0x07, 0xb5, 0x99, 0xe1, 0xa0, 0x36, 0x7b, + 0xc7, 0xec, 0x12, 0x2c, 0x39, 0xc8, 0x81, 0xc2, 0x09, 0xa1, 0xcc, 0xf6, 0x5c, 0x56, 0xc9, 0xac, + 0x65, 0xd7, 0x4b, 0x9b, 0x37, 0xea, 0x69, 0x82, 0x56, 0x97, 0x06, 0xee, 0xfb, 0xaa, 0x37, 0x3d, + 0xda, 0xb4, 0x99, 0xe5, 0x9d, 0x10, 0x7a, 0x66, 0x2c, 0x29, 0x2b, 0x05, 0xc5, 0x64, 0x38, 0xb4, + 0x80, 0x7e, 0xae, 0xc1, 0x52, 0x8f, 0x92, 0x43, 0x42, 0x29, 0x69, 0x2b, 0x7e, 0x25, 0xbb, 0xa6, + 0x7d, 0x06, 0x66, 0x2b, 0xca, 0xec, 0x52, 0x6b, 0x04, 0x1f, 0x8f, 0x59, 0x44, 0xbf, 0xd3, 0x60, + 0x95, 0x11, 0x7a, 0x42, 0xe8, 0x56, 0xbb, 0x4d, 0x09, 0x63, 0xc6, 0xd9, 0xb6, 0x63, 0x13, 0x97, + 0x6f, 
0xef, 0x36, 0x31, 0xab, 0xcc, 0xca, 0x38, 0x7c, 0x27, 0x9d, 0x43, 0xfb, 0x93, 0x70, 0x0c, + 0x5d, 0x79, 0xb4, 0x3a, 0x51, 0x84, 0xe1, 0x17, 0xb8, 0xa1, 0x1f, 0xc2, 0x7c, 0xb0, 0x91, 0xb7, + 0x6d, 0xc6, 0xd1, 0x7d, 0xc8, 0x77, 0xc4, 0x07, 0xab, 0x68, 0xd2, 0xc1, 0x7a, 0x3a, 0x07, 0x03, + 0x0c, 0x63, 0x41, 0xf9, 0x93, 0x97, 0x9f, 0x0c, 0x2b, 0x34, 0xfd, 0xcf, 0x59, 0x28, 0x6d, 0xb5, + 0x76, 0x31, 0x61, 0x5e, 0x9f, 0x5a, 0x24, 0x45, 0xd2, 0x6c, 0x02, 0x88, 0xbf, 0xac, 0x67, 0x5a, + 0xa4, 0x5d, 0xc9, 0xac, 0x69, 0xeb, 0x05, 0x03, 0x29, 0x39, 0xb8, 0x13, 0x72, 0x70, 0x4c, 0x4a, + 0xa0, 0x1e, 0xdb, 0x6e, 0x5b, 0xee, 0x76, 0x0c, 0xf5, 0x5d, 0xdb, 0x6d, 0x63, 0xc9, 0x41, 0xb7, + 0x21, 0x77, 0x42, 0xe8, 0x81, 0x88, 0xbf, 0x48, 0x88, 0xaf, 0xa4, 0x5b, 0xde, 0x7d, 0xa1, 0x62, + 0x14, 0x87, 0x83, 0x5a, 0x4e, 0xfe, 0xc4, 0x3e, 0x08, 0xaa, 0x03, 0xb0, 0x23, 0x8f, 0x72, 0xe9, + 0x4e, 0x25, 0xb7, 0x96, 0x5d, 0x2f, 0x1a, 0x0b, 0xc2, 0xbf, 0xfd, 0x90, 0x8a, 0x63, 0x12, 0xe8, + 0x1a, 0xcc, 0x33, 0xdb, 0xed, 0xf4, 0x1d, 0x93, 0x0a, 0x42, 0x25, 0x2f, 0xfd, 0x5c, 0x51, 0x7e, + 0xce, 0xef, 0xc7, 0x78, 0x38, 0x21, 0x29, 0x2c, 0x59, 0x26, 0x27, 0x1d, 0x8f, 0xda, 0x84, 0x55, + 0xe6, 0x22, 0x4b, 0xdb, 0x21, 0x15, 0xc7, 0x24, 0xd0, 0xeb, 0x90, 0x93, 0x91, 0xaf, 0x14, 0xa4, + 0x89, 0xb2, 0x32, 0x91, 0x93, 0xdb, 0x82, 0x7d, 0x1e, 0x7a, 0x13, 0xe6, 0xd4, 0xa9, 0xa9, 0x14, + 0xa5, 0xd8, 0xa2, 0x12, 0x9b, 0x0b, 0xd2, 0x3a, 0xe0, 0xeb, 0x7f, 0xd4, 0x60, 0x31, 0xb6, 0x7f, + 0x32, 0x57, 0xae, 0xc1, 0x7c, 0x27, 0x76, 0x52, 0xd4, 0x5e, 0x86, 0xab, 0x89, 0x9f, 0x22, 0x9c, + 0x90, 0x44, 0x04, 0x8a, 0x54, 0x21, 0x05, 0x15, 0x61, 0x23, 0x75, 0xa2, 0x05, 0x3e, 0x44, 0x96, + 0x62, 0x44, 0x86, 0x23, 0x64, 0xfd, 0x9f, 0x9a, 0x4c, 0xba, 0xa0, 0x46, 0xa0, 0xf5, 0x58, 0x1d, + 0xd2, 0x64, 0x08, 0xe7, 0x27, 0xd4, 0x90, 0x4b, 0x0e, 0x6f, 0xe6, 0xff, 0xe2, 0xf0, 0x5e, 0x2f, + 0xfc, 0xe6, 0xc3, 0xda, 0xcc, 0x07, 0xff, 0x58, 0x9b, 0xd1, 0x3f, 0xc9, 0x40, 0xb9, 0x49, 0x1c, + 0xc2, 0xc9, 0xdd, 0x1e, 0x97, 0x2b, 0xb8, 0x09, 0xa8, 0x43, 0x4d, 0x8b, 0xb4, 0x08, 0xb5, 0xbd, + 0xf6, 0x3e, 0xb1, 0x3c, 0xb7, 0xcd, 0xe4, 0x16, 0x65, 0x8d, 0xcf, 0x0d, 0x07, 0x35, 0x74, 0x6b, + 0x8c, 0x8b, 0x2f, 0xd0, 0x40, 0x0e, 0x94, 0x7b, 0x54, 0xfe, 0xb6, 0xb9, 0x2a, 0xe0, 0xe2, 0xe0, + 0xbc, 0x9d, 0x6e, 0xed, 0xad, 0xb8, 0xaa, 0xb1, 0x3c, 0x1c, 0xd4, 0xca, 0x09, 0x12, 0x4e, 0x82, + 0xa3, 0xef, 0xc2, 0x92, 0x47, 0x7b, 0x47, 0xa6, 0xdb, 0x24, 0x3d, 0xe2, 0xb6, 0x89, 0xcb, 0x99, + 0x3c, 0xcc, 0x05, 0x63, 0x45, 0x94, 0xdd, 0xbb, 0x23, 0x3c, 0x3c, 0x26, 0x8d, 0x1e, 0xc0, 0x72, + 0x8f, 0x7a, 0x3d, 0xb3, 0x63, 0x0a, 0xc4, 0x96, 0xe7, 0xd8, 0xd6, 0x99, 0x3c, 0xec, 0x45, 0xe3, + 0xea, 0x70, 0x50, 0x5b, 0x6e, 0x8d, 0x32, 0xcf, 0x07, 0xb5, 0x57, 0x64, 0xe8, 0x04, 0x25, 0x62, + 0xe2, 0x71, 0x18, 0x7d, 0x17, 0x0a, 0xcd, 0x3e, 0x95, 0x14, 0xf4, 0x6d, 0x28, 0xb4, 0xd5, 0x6f, + 0x15, 0xd5, 0xd7, 0x82, 0x3b, 0x29, 0x90, 0x39, 0x1f, 0xd4, 0xca, 0xe2, 0xea, 0xad, 0x07, 0x04, + 0x1c, 0xaa, 0xe8, 0x0f, 0xa1, 0xbc, 0x73, 0xda, 0xf3, 0x28, 0x0f, 0xf6, 0xeb, 0x0d, 0xc8, 0x13, + 0x49, 0x90, 0x68, 0x85, 0xa8, 0x90, 0xfa, 0x62, 0x58, 0x71, 0xc5, 0xc1, 0x26, 0xa7, 0xa6, 0xc5, + 0x55, 0x45, 0x0c, 0x0f, 0xf6, 0x8e, 0x20, 0x62, 0x9f, 0xa7, 0x3f, 0xd1, 0x00, 0x6e, 0x91, 0x10, + 0x7b, 0x0b, 0x16, 0x83, 0x43, 0x91, 0x3c, 0xab, 0x9f, 0x57, 0xda, 0x8b, 0x38, 0xc9, 0xc6, 0xa3, + 0xf2, 0xa8, 0x05, 0x2b, 0xb6, 0x6b, 0x39, 0xfd, 0x36, 0xb9, 0xe7, 0xda, 0xae, 0xcd, 0x6d, 0xd3, + 0xb1, 0x7f, 0x12, 0xd6, 0xe5, 0x2f, 0x28, 0x9c, 0x95, 0xdd, 0x0b, 0x64, 0xf0, 0x85, 0x9a, 0xfa, + 0x43, 0x28, 0xca, 0x0a, 0x21, 
0x8a, 0x73, 0x54, 0xae, 0xb4, 0x17, 0x94, 0xab, 0xa0, 0xba, 0x67, + 0x26, 0x55, 0xf7, 0xd8, 0x81, 0x70, 0xa0, 0xec, 0xeb, 0x06, 0x17, 0x4e, 0x2a, 0x0b, 0x57, 0xa1, + 0x10, 0x2c, 0x5c, 0x59, 0x09, 0x1b, 0x8d, 0x00, 0x08, 0x87, 0x12, 0x31, 0x6b, 0x47, 0x90, 0xa8, + 0x76, 0xe9, 0x8c, 0xc5, 0xaa, 0x6f, 0xe6, 0xc5, 0xd5, 0x37, 0x66, 0xe9, 0x67, 0x50, 0x99, 0xd4, + 0x9d, 0xbc, 0x44, 0x3d, 0x4e, 0xef, 0x8a, 0xfe, 0x6b, 0x0d, 0x96, 0xe2, 0x48, 0xe9, 0xb7, 0x2f, + 0xbd, 0x91, 0xcb, 0xef, 0xf1, 0x58, 0x44, 0x7e, 0xab, 0xc1, 0x4a, 0x62, 0x69, 0x53, 0xed, 0xf8, + 0x14, 0x4e, 0xc5, 0x93, 0x23, 0x3b, 0x45, 0x72, 0x34, 0xa0, 0xb4, 0x1b, 0xe6, 0x3d, 0xbd, 0xbc, + 0xf3, 0xd1, 0xff, 0xa2, 0xc1, 0x7c, 0x4c, 0x83, 0xa1, 0x87, 0x30, 0x27, 0xea, 0x9b, 0xed, 0x76, + 0x54, 0x57, 0x96, 0xf2, 0xb2, 0x8c, 0x81, 0x44, 0xeb, 0x6a, 0xf9, 0x48, 0x38, 0x80, 0x44, 0x2d, + 0xc8, 0x53, 0xc2, 0xfa, 0x0e, 0x57, 0xa5, 0xfd, 0x6a, 0xca, 0x6b, 0x8d, 0x9b, 0xbc, 0xcf, 0x0c, + 0x10, 0x35, 0x0a, 0x4b, 0x7d, 0xac, 0x70, 0xf4, 0xbf, 0x67, 0xa0, 0x7c, 0xdb, 0x3c, 0x20, 0xce, + 0x3e, 0x71, 0x88, 0xc5, 0x3d, 0x8a, 0x7e, 0x0a, 0xa5, 0xae, 0xc9, 0xad, 0x23, 0x49, 0x0d, 0x7a, + 0xcb, 0x66, 0x3a, 0x43, 0x09, 0xa4, 0xfa, 0x5e, 0x04, 0xb3, 0xe3, 0x72, 0x7a, 0x66, 0xbc, 0xa2, + 0x16, 0x56, 0x8a, 0x71, 0x70, 0xdc, 0x9a, 0x7c, 0x10, 0xc8, 0xef, 0x9d, 0xd3, 0x9e, 0xb8, 0x44, + 0xa7, 0x7f, 0x87, 0x24, 0x5c, 0xc0, 0xe4, 0xfd, 0xbe, 0x4d, 0x49, 0x97, 0xb8, 0x3c, 0x7a, 0x10, + 0xec, 0x8d, 0xe0, 0xe3, 0x31, 0x8b, 0xab, 0x37, 0x60, 0x69, 0xd4, 0x79, 0xb4, 0x04, 0xd9, 0x63, + 0x72, 0xe6, 0xe7, 0x02, 0x16, 0x3f, 0xd1, 0x0a, 0xe4, 0x4e, 0x4c, 0xa7, 0xaf, 0xea, 0x0f, 0xf6, + 0x3f, 0xae, 0x67, 0xae, 0x69, 0xfa, 0xef, 0x35, 0xa8, 0x4c, 0x72, 0x04, 0x7d, 0x31, 0x06, 0x64, + 0x94, 0x94, 0x57, 0xd9, 0x77, 0xc9, 0x99, 0x8f, 0xba, 0x03, 0x05, 0xaf, 0x27, 0x9e, 0x70, 0x1e, + 0x55, 0x79, 0xfe, 0x66, 0x90, 0xbb, 0x77, 0x15, 0xfd, 0x7c, 0x50, 0xbb, 0x92, 0x80, 0x0f, 0x18, + 0x38, 0x54, 0x45, 0x3a, 0xe4, 0xa5, 0x3f, 0xe2, 0x52, 0x16, 0xed, 0x93, 0xdc, 0xfc, 0xfb, 0x92, + 0x82, 0x15, 0x47, 0xff, 0x93, 0x06, 0xb3, 0xb2, 0x3d, 0x7c, 0x08, 0x05, 0x11, 0xbf, 0xb6, 0xc9, + 0x4d, 0xe9, 0x57, 0xea, 0xc7, 0x84, 0xd0, 0xde, 0x23, 0xdc, 0x8c, 0xce, 0x57, 0x40, 0xc1, 0x21, + 0x22, 0xc2, 0x90, 0xb3, 0x39, 0xe9, 0x06, 0x1b, 0xf9, 0xd6, 0x44, 0x68, 0xf5, 0xfe, 0xad, 0x63, + 0xf3, 0xd1, 0xce, 0x29, 0x27, 0xae, 0xd8, 0x8c, 0xa8, 0x18, 0xec, 0x0a, 0x0c, 0xec, 0x43, 0xe9, + 0x7f, 0xd0, 0x20, 0x34, 0x25, 0x8e, 0x3b, 0x23, 0xce, 0xe1, 0x6d, 0xdb, 0x3d, 0x56, 0x61, 0x0d, + 0xdd, 0xd9, 0x57, 0x74, 0x1c, 0x4a, 0x5c, 0x74, 0xc5, 0x66, 0xa6, 0xbc, 0x62, 0xaf, 0x42, 0xc1, + 0xf2, 0x5c, 0x6e, 0xbb, 0xfd, 0xb1, 0xfa, 0xb2, 0xad, 0xe8, 0x38, 0x94, 0xd0, 0x9f, 0x65, 0xa1, + 0x24, 0x7c, 0x0d, 0xee, 0xf8, 0x6f, 0x42, 0xd9, 0x89, 0xef, 0x9e, 0xf2, 0xf9, 0x8a, 0x82, 0x48, + 0x9e, 0x47, 0x9c, 0x94, 0x15, 0xca, 0x87, 0x36, 0x71, 0xda, 0xa1, 0x72, 0x26, 0xa9, 0x7c, 0x33, + 0xce, 0xc4, 0x49, 0x59, 0x51, 0x67, 0x1f, 0x89, 0xbc, 0x56, 0x8d, 0x5a, 0x18, 0xda, 0xef, 0x09, + 0x22, 0xf6, 0x79, 0x17, 0xc5, 0x67, 0x76, 0xca, 0xf8, 0x5c, 0x87, 0x05, 0xb1, 0x91, 0x5e, 0x9f, + 0x07, 0xdd, 0x6c, 0x4e, 0xf6, 0x5d, 0x68, 0x38, 0xa8, 0x2d, 0xbc, 0x97, 0xe0, 0xe0, 0x11, 0xc9, + 0x89, 0xed, 0x4b, 0xfe, 0xd3, 0xb6, 0x2f, 0x62, 0xd5, 0x8e, 0xdd, 0xb5, 0x79, 0x65, 0x4e, 0x3a, + 0x11, 0xae, 0xfa, 0xb6, 0x20, 0x62, 0x9f, 0x97, 0xd8, 0xd2, 0xc2, 0xa5, 0x5b, 0xfa, 0x3e, 0x14, + 0xf7, 0x6c, 0x8b, 0x7a, 0x62, 0x2d, 0xe2, 0x62, 0x62, 0x89, 0xa6, 0x3d, 0x2c, 0xe0, 0xc1, 0x1a, + 0x03, 0xbe, 0x70, 0xc5, 0x35, 0x5d, 0xcf, 0x6f, 0xcd, 
0x73, 0x91, 0x2b, 0x77, 0x04, 0x11, 0xfb, + 0xbc, 0xeb, 0x2b, 0xe2, 0x3e, 0xfa, 0xe5, 0x93, 0xda, 0xcc, 0xe3, 0x27, 0xb5, 0x99, 0x0f, 0x9f, + 0xa8, 0xbb, 0xe9, 0x5f, 0x00, 0x70, 0xf7, 0xe0, 0xc7, 0xc4, 0xf2, 0x73, 0xfe, 0xf2, 0x57, 0xb9, + 0xe8, 0x31, 0xd4, 0x30, 0x48, 0xbe, 0x60, 0x33, 0x23, 0x3d, 0x46, 0x8c, 0x87, 0x13, 0x92, 0xa8, + 0x01, 0xc5, 0xf0, 0xa5, 0xae, 0xf2, 0x7b, 0x59, 0xa9, 0x15, 0xc3, 0xe7, 0x3c, 0x8e, 0x64, 0x12, + 0x07, 0x70, 0xf6, 0xd2, 0x03, 0x68, 0x40, 0xb6, 0x6f, 0xb7, 0x65, 0x4a, 0x14, 0x8d, 0xaf, 0x06, + 0x05, 0xf0, 0xde, 0x6e, 0xf3, 0x7c, 0x50, 0x7b, 0x6d, 0xd2, 0x8c, 0x8b, 0x9f, 0xf5, 0x08, 0xab, + 0xdf, 0xdb, 0x6d, 0x62, 0xa1, 0x7c, 0x51, 0x92, 0xe6, 0xa7, 0x4c, 0xd2, 0x4d, 0x00, 0xb5, 0x6a, + 0xa1, 0xed, 0xe7, 0x46, 0x38, 0xb5, 0xb8, 0x15, 0x72, 0x70, 0x4c, 0x0a, 0x31, 0x58, 0xb6, 0x28, + 0x91, 0xbf, 0xc5, 0xd6, 0x33, 0x6e, 0x76, 0xfd, 0x77, 0x7b, 0x69, 0xf3, 0xcb, 0xe9, 0x2a, 0xa6, + 0x50, 0x33, 0x5e, 0x55, 0x66, 0x96, 0xb7, 0x47, 0xc1, 0xf0, 0x38, 0x3e, 0xf2, 0x60, 0xb9, 0xad, + 0x5e, 0x3d, 0x91, 0xd1, 0xe2, 0xd4, 0x46, 0xaf, 0x08, 0x83, 0xcd, 0x51, 0x20, 0x3c, 0x8e, 0x8d, + 0x7e, 0x08, 0xab, 0x01, 0x71, 0xfc, 0xe9, 0x59, 0x01, 0x19, 0xa9, 0xaa, 0x78, 0x0c, 0x37, 0x27, + 0x4a, 0xe1, 0x17, 0x20, 0xa0, 0x36, 0xe4, 0x1d, 0xbf, 0xbb, 0x28, 0xc9, 0x1b, 0xe1, 0x5b, 0xe9, + 0x56, 0x11, 0x65, 0x7f, 0x3d, 0xde, 0x55, 0x84, 0xcf, 0x2f, 0xd5, 0x50, 0x28, 0x6c, 0x74, 0x0a, + 0x25, 0xd3, 0x75, 0x3d, 0x6e, 0xfa, 0x8f, 0xe1, 0x79, 0x69, 0x6a, 0x6b, 0x6a, 0x53, 0x5b, 0x11, + 0xc6, 0x48, 0x17, 0x13, 0xe3, 0xe0, 0xb8, 0x29, 0xf4, 0x08, 0x16, 0xbd, 0x47, 0x2e, 0xa1, 0x98, + 0x1c, 0x12, 0x4a, 0x5c, 0x8b, 0xb0, 0x4a, 0x59, 0x5a, 0xff, 0x5a, 0x4a, 0xeb, 0x09, 0xe5, 0x28, + 0xa5, 0x93, 0x74, 0x86, 0x47, 0xad, 0xa0, 0x3a, 0xc0, 0xa1, 0xed, 0xaa, 0x5e, 0xb4, 0xb2, 0x10, + 0x8d, 0x9e, 0x6e, 0x86, 0x54, 0x1c, 0x93, 0x40, 0x5f, 0x87, 0x92, 0xe5, 0xf4, 0x19, 0x27, 0xfe, + 0x8c, 0x6b, 0x51, 0x9e, 0xa0, 0x70, 0x7d, 0xdb, 0x11, 0x0b, 0xc7, 0xe5, 0xd0, 0x11, 0xcc, 0xdb, + 0xb1, 0xa6, 0xb7, 0xb2, 0x24, 0x73, 0x71, 0x73, 0xea, 0x4e, 0x97, 0x19, 0x4b, 0xa2, 0x12, 0xc5, + 0x29, 0x38, 0x81, 0xbc, 0xfa, 0x0d, 0x28, 0x7d, 0xca, 0x1e, 0x4c, 0xf4, 0x70, 0xa3, 0x5b, 0x37, + 0x55, 0x0f, 0xf7, 0xd7, 0x0c, 0x2c, 0x24, 0x03, 0x1e, 0xbe, 0x75, 0xb4, 0x89, 0x33, 0xcb, 0xa0, + 0x2a, 0x67, 0x27, 0x56, 0x65, 0x55, 0xfc, 0x66, 0x5f, 0xa6, 0xf8, 0x6d, 0x02, 0x98, 0x3d, 0x3b, + 0xa8, 0x7b, 0x7e, 0x1d, 0x0d, 0x2b, 0x57, 0x34, 0x45, 0xc3, 0x31, 0x29, 0x39, 0x95, 0xf4, 0x5c, + 0x4e, 0x3d, 0xc7, 0x21, 0x54, 0x5d, 0xa6, 0xfe, 0x54, 0x32, 0xa4, 0xe2, 0x98, 0x04, 0xba, 0x09, + 0xe8, 0xc0, 0xf1, 0xac, 0x63, 0x19, 0x82, 0xe0, 0x9c, 0xcb, 0x2a, 0x59, 0xf0, 0x87, 0x52, 0xc6, + 0x18, 0x17, 0x5f, 0xa0, 0xa1, 0xcf, 0x41, 0xae, 0x25, 0xda, 0x0a, 0xfd, 0x2e, 0x24, 0xe7, 0x49, + 0xe8, 0x86, 0x1f, 0x09, 0x2d, 0x1c, 0xf8, 0x4c, 0x17, 0x05, 0xfd, 0x2a, 0x14, 0xb1, 0xe7, 0xf1, + 0x96, 0xc9, 0x8f, 0x18, 0xaa, 0x41, 0xae, 0x27, 0x7e, 0xa8, 0x61, 0xa1, 0x9c, 0xff, 0x4a, 0x0e, + 0xf6, 0xe9, 0xfa, 0xaf, 0x34, 0x78, 0x75, 0xe2, 0xec, 0x4e, 0x44, 0xd4, 0x0a, 0xbf, 0x94, 0x4b, + 0x61, 0x44, 0x23, 0x39, 0x1c, 0x93, 0x12, 0x9d, 0x58, 0x62, 0xe0, 0x37, 0xda, 0x89, 0x25, 0xac, + 0xe1, 0xa4, 0xac, 0xfe, 0xef, 0x0c, 0xe4, 0xfd, 0x67, 0xd9, 0x7f, 0xb9, 0xf9, 0x7e, 0x03, 0xf2, + 0x4c, 0xda, 0x51, 0xee, 0x85, 0xd5, 0xd2, 0xb7, 0x8e, 0x15, 0x57, 0x34, 0x31, 0x5d, 0xc2, 0x98, + 0xd9, 0x09, 0x92, 0x37, 0x6c, 0x62, 0xf6, 0x7c, 0x32, 0x0e, 0xf8, 0xe8, 0x1d, 0xf1, 0x0a, 0x35, + 0x59, 0xd8, 0x17, 0x56, 0x03, 0x48, 0x2c, 0xa9, 0xe7, 0x83, 0xda, 0xbc, 0x02, 
0x97, 0xdf, 0x58, + 0x49, 0xa3, 0x07, 0x30, 0xd7, 0x26, 0xdc, 0xb4, 0x1d, 0xbf, 0x1d, 0x4c, 0x3d, 0x99, 0xf4, 0xc1, + 0x9a, 0xbe, 0xaa, 0x51, 0x12, 0x3e, 0xa9, 0x0f, 0x1c, 0x00, 0x8a, 0x83, 0x67, 0x79, 0x6d, 0x7f, + 0x4c, 0x9f, 0x8b, 0x0e, 0xde, 0xb6, 0xd7, 0x26, 0x58, 0x72, 0xf4, 0xc7, 0x1a, 0x94, 0x7c, 0xa4, + 0x6d, 0xb3, 0xcf, 0x08, 0xda, 0x08, 0x57, 0xe1, 0x6f, 0x77, 0x70, 0x27, 0xcf, 0xbe, 0x77, 0xd6, + 0x23, 0xe7, 0x83, 0x5a, 0x51, 0x8a, 0x89, 0x8f, 0x70, 0x01, 0xb1, 0x18, 0x65, 0x2e, 0x89, 0xd1, + 0xeb, 0x90, 0x93, 0xad, 0xb7, 0x0a, 0x66, 0xd8, 0xe8, 0xc9, 0xf6, 0x1c, 0xfb, 0x3c, 0xfd, 0xe3, + 0x0c, 0x94, 0x13, 0x8b, 0x4b, 0xd1, 0xd5, 0x85, 0xa3, 0x92, 0x4c, 0x8a, 0xf1, 0xdb, 0xe4, 0x7f, + 0xae, 0x7c, 0x1f, 0xf2, 0x96, 0x58, 0x5f, 0xf0, 0xdf, 0xad, 0x8d, 0x69, 0xb6, 0x42, 0x46, 0x26, + 0xca, 0x24, 0xf9, 0xc9, 0xb0, 0x02, 0x44, 0xb7, 0x60, 0x99, 0x12, 0x4e, 0xcf, 0xb6, 0x0e, 0x39, + 0xa1, 0xf1, 0xfe, 0x3f, 0x17, 0xf5, 0x3d, 0x78, 0x54, 0x00, 0x8f, 0xeb, 0x04, 0xa5, 0x32, 0xff, + 0x12, 0xa5, 0x52, 0x77, 0x60, 0xf6, 0x7f, 0xd8, 0xa3, 0xff, 0x00, 0x8a, 0x51, 0x17, 0xf5, 0x19, + 0x9b, 0xd4, 0x7f, 0x04, 0x05, 0x91, 0x8d, 0x41, 0xf7, 0x7f, 0xc9, 0x4d, 0x94, 0xbc, 0x23, 0x32, + 0x69, 0xee, 0x08, 0x7d, 0x13, 0xfc, 0xff, 0x99, 0x89, 0x6a, 0xea, 0xbf, 0xd8, 0x63, 0xd5, 0x34, + 0xfe, 0xfc, 0x8e, 0x8d, 0xcc, 0x7e, 0xa1, 0x01, 0xc8, 0xe7, 0xe3, 0xce, 0x09, 0x71, 0xb9, 0x70, + 0x4c, 0xec, 0xc0, 0xa8, 0x63, 0xf2, 0x18, 0x49, 0x0e, 0xba, 0x07, 0x79, 0x4f, 0x76, 0x57, 0x6a, + 0x86, 0x35, 0xe5, 0x38, 0x20, 0xcc, 0x3a, 0xbf, 0x45, 0xc3, 0x0a, 0xcc, 0x58, 0x7f, 0xfa, 0xbc, + 0x3a, 0xf3, 0xec, 0x79, 0x75, 0xe6, 0xa3, 0xe7, 0xd5, 0x99, 0x0f, 0x86, 0x55, 0xed, 0xe9, 0xb0, + 0xaa, 0x3d, 0x1b, 0x56, 0xb5, 0x8f, 0x86, 0x55, 0xed, 0xe3, 0x61, 0x55, 0x7b, 0xfc, 0x49, 0x75, + 0xe6, 0x41, 0xe6, 0x64, 0xe3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x6c, 0xc5, 0x28, 0xb2, 0x54, + 0x20, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 9d42018ec..4baf44f3d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -49,6 +49,7 @@ message APIGroup { // The server returns only those CIDRs that it thinks that the client can match. // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + // +optional repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4; } @@ -61,12 +62,25 @@ message APIGroupList { // APIResource specifies the name of a resource and whether it is namespaced. message APIResource { - // name is the name of the resource. + // name is the plural name of the resource. optional string name = 1; + // singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // The singularName is more correct for reporting status on a single item and both singular and plural are allowed + // from the kubectl CLI interface. + optional string singularName = 6; + // namespaced indicates if a resource is namespaced or not. 
optional bool namespaced = 2; + // group is the preferred group of the resource. Empty implies the group of the containing resource list. + // For subresources, this may have a different value, for example: Scale". + optional string group = 8; + + // version is the preferred version of the resource. Empty implies the version of the containing resource list + // For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)". + optional string version = 9; + // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') optional string kind = 3; @@ -76,6 +90,9 @@ message APIResource { // shortNames is a list of suggested short names of the resource. repeated string shortNames = 5; + + // categories is a list of the grouped resources this resource belongs to (e.g. 'all') + repeated string categories = 7; } // APIResourceList is a list of APIResource, it is used to expose the name of the @@ -93,6 +110,7 @@ message APIResourceList { // discover the API at /api, which is the root path of the legacy v1 API. // // +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message APIVersions { // versions are the api versions that are available. repeated string versions = 1; @@ -132,6 +150,10 @@ message DeleteOptions { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional optional string propagationPolicy = 4; } @@ -159,6 +181,10 @@ message GetOptions { // - if it's 0, then we simply return what we currently have in cache, no guarantee; // - if set to non zero, then the result is at least as fresh as given rv. optional string resourceVersion = 1; + + // If true, partially initialized resources are included in the response. + // +optional + optional bool includeUninitialized = 2; } // GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying @@ -225,6 +251,27 @@ message GroupVersionResource { optional string resource = 3; } +// Initializer is information about an initializer that has not yet completed. +message Initializer { + // name of the process that is responsible for initializing this object. + optional string name = 1; +} + +// Initializers tracks the progress of initialization. +message Initializers { + // Pending is a list of initializers that must execute in order before this object is visible. + // When the last pending initializer is removed, and no failing result is set, the initializers + // struct will be set to nil and the object is considered as initialized and visible to all + // clients. + // +patchMergeKey=name + // +patchStrategy=merge + repeated Initializer pending = 1; + + // If result is set with the Failure field, the object will be persisted to storage and then deleted, + // ensuring that other clients can observe the deletion. + optional Status result = 2; +} + // A label selector is a label query over a set of resources. The result of matchLabels and // matchExpressions are ANDed. An empty label selector matches all objects. A null // label selector matches no objects. 
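The label selector semantics documented in the context above (matchLabels and matchExpressions are ANDed together) can be exercised through helpers that ship with k8s.io/apimachinery. The sketch below is illustrative only and is not taken from the vendored sources; it assumes a hand-built selector and uses metav1.LabelSelectorAsSelector to turn the API type into an evaluatable labels.Selector.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// matchLabels and matchExpressions are combined with AND.
	sel := &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "prometheus"},
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "tier", Operator: metav1.LabelSelectorOpIn, Values: []string{"monitoring"}},
		},
	}

	// Convert the API type into a labels.Selector that can match label sets.
	s, err := metav1.LabelSelectorAsSelector(sel)
	if err != nil {
		panic(err)
	}

	// Both requirements must hold for a label set to match.
	fmt.Println(s.Matches(labels.Set{"app": "prometheus", "tier": "monitoring"})) // true
	fmt.Println(s.Matches(labels.Set{"app": "prometheus"}))                       // false
}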
@@ -244,10 +291,12 @@ message LabelSelector { // relates the key and values. message LabelSelectorRequirement { // key is the label key that the selector applies to. + // +patchMergeKey=key + // +patchStrategy=merge optional string key = 1; // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. + // Valid operators are In, NotIn, Exists and DoesNotExist. optional string operator = 2; // values is an array of string values. If the operator is In or NotIn, @@ -258,10 +307,21 @@ message LabelSelectorRequirement { repeated string values = 3; } +// List holds a list of objects, which may not be known by the server. +message List { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional ListMeta metadata = 1; + + // List of objects + repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; +} + // ListMeta describes metadata that synthetic resources must have, including lists and // various status objects. A resource may have only one of {ObjectMeta, ListMeta}. message ListMeta { - // SelfLink is a URL representing this object. + // selfLink is a URL representing this object. // Populated by the system. // Read-only. // +optional @@ -272,9 +332,17 @@ message ListMeta { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 2; + + // continue may be set if the user set a limit on the number of items returned, and indicates that + // the server has more data available. The value is opaque and may be used to issue another request + // to the endpoint that served this list to retrieve the next set of available objects. Continuing a + // list may not be possible if the server configuration has changed or more than a few minutes have + // passed. The resourceVersion field returned when using this continue value will be identical to + // the value in the first response. + optional string continue = 3; } // ListOptions is the query options to a standard REST list call. @@ -289,6 +357,10 @@ message ListOptions { // +optional optional string fieldSelector = 2; + // If true, partially initialized resources are included in the response. + // +optional + optional bool includeUninitialized = 6; + // Watch for changes to the described resources and return them as a stream of // add, update, and remove notifications. Specify resourceVersion. // +optional @@ -304,8 +376,55 @@ message ListOptions { optional string resourceVersion = 4; // Timeout for the list/watch call. + // This limits the duration of the call, regardless of any activity or inactivity. // +optional optional int64 timeoutSeconds = 5; + + // limit is a maximum number of responses to return for a list call. If more items exist, the + // server will set the `continue` field on the list metadata to a value that can be used with the + // same initial query to retrieve the next set of results. 
Setting a limit may return fewer than + // the requested amount of items (up to zero items) in the event all requested objects are + // filtered out and clients should only use the presence of the continue field to determine whether + // more results are available. Servers may choose not to support the limit argument and will return + // all of the available results. If limit is specified and the continue field is empty, clients may + // assume that no more results are available. This field is not supported if watch is true. + // + // The server guarantees that the objects returned when using continue will be identical to issuing + // a single list call without a limit - that is, no objects created, modified, or deleted after the + // first request is issued will be included in any subsequent continued requests. This is sometimes + // referred to as a consistent snapshot, and ensures that a client that is using limit to receive + // smaller chunks of a very large result can ensure they see all possible objects. If objects are + // updated during a chunked list the version of the object that was present at the time the first list + // result was calculated is returned. + optional int64 limit = 7; + + // The continue option should be set when retrieving more results from the server. Since this value + // is server defined, clients may only use the continue value from a previous query result with + // identical query parameters (except for the value of continue) and the server may reject a continue + // value it does not recognize. If the specified continue value is no longer valid whether due to + // expiration (generally five to fifteen minutes) or a configuration change on the server the server + // will respond with a 410 ResourceExpired error indicating the client must restart their list without + // the continue field. This field is not supported when watch is true. Clients may start a watch from + // the last resourceVersion value returned by the server and not miss any modifications. + optional string continue = 8; +} + +// MicroTime is version of Time with microsecond level precision. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +message MicroTime { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; } // ObjectMeta is metadata that all persisted resources must have, which includes all objects @@ -334,7 +453,7 @@ message ObjectMeta { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency // +optional optional string generateName = 2; @@ -374,7 +493,7 @@ message ObjectMeta { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 6; @@ -390,27 +509,28 @@ message ObjectMeta { // Populated by the system. // Read-only. // Null for lists. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional Time creationTimestamp = 8; // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This // field is set by the server when a graceful deletion is requested by the user, and is not // directly settable by a client. The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field. Once set, - // this value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard - // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the - // API. In the presence of network partitions, this object may still exist after this - // timestamp, until an administrator or automated process can determine the resource is - // fully terminated. + // from resource lists, and not reachable by name) after the time in this field, once the + // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. + // Once the deletionTimestamp is set, this value may not be unset or be set further into the + // future, although it may be shortened or the resource may be deleted prior to this time. + // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react + // by sending a graceful termination signal to the containers in the pod. After that 30 seconds, + // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, + // remove the pod from the API. In the presence of network partitions, this object may still + // exist after this timestamp, until an administrator or automated process can determine the + // resource is fully terminated. // If not set, graceful deletion of the object has not been requested. // // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional Time deletionTimestamp = 9; @@ -440,13 +560,27 @@ message ObjectMeta { // then an entry in this list will point to this controller, with the controller field set to true. // There cannot be more than one managing controller. // +optional + // +patchMergeKey=uid + // +patchStrategy=merge repeated OwnerReference ownerReferences = 13; + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. If nil or empty, + // this object has been completely initialized. 
Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. Once it is empty, it may not be modified further + // by any user. + optional Initializers initializers = 16; + // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries // in this list can only be removed. // +optional + // +patchStrategy=merge repeated string finalizers = 14; // The name of the cluster which the object belongs to. @@ -464,7 +598,7 @@ message OwnerReference { optional string apiVersion = 5; // Kind of the referent. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds optional string kind = 1; // Name of the referent. @@ -489,6 +623,10 @@ message OwnerReference { optional bool blockOwnerDeletion = 7; } +// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. +message Patch { +} + // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. message Preconditions { // Specifies the target UID. @@ -516,13 +654,13 @@ message ServerAddressByClientCIDR { // Status is a return value for calls that don't return other objects. message Status { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional ListMeta metadata = 1; // Status of the operation. // One of: "Success" or "Failure". - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional string status = 2; @@ -593,16 +731,24 @@ message StatusDetails { // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional string kind = 3; + // UID of the resource. + // (when there is a single resource which can be described). + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + optional string uid = 6; + // The Causes array includes more details associated with the StatusReason // failure. Not all StatusReasons may provide detailed causes. // +optional repeated StatusCause causes = 4; - // If specified, the time in seconds before the operation should be retried. + // If specified, the time in seconds before the operation should be retried. Some errors may indicate + // the client must take an alternate action - for those errors this field may indicate how long to wait + // before taking the alternate action. 
// +optional optional int32 retryAfterSeconds = 5; } @@ -616,7 +762,7 @@ message StatusDetails { // +protobuf.options.(gogoproto.goproto_stringer)=false message Time { // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. optional int64 seconds = 1; @@ -632,7 +778,7 @@ message Time { // that matches Time. Do not use in Go structs. message Timestamp { // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. optional int64 seconds = 1; @@ -646,19 +792,21 @@ message Timestamp { // TypeMeta describes an individual object in an API response or request // with strings representing the type of the object and its API schema version. // Structures that are versioned or persisted should inline TypeMeta. +// +// +k8s:deepcopy-gen=false message TypeMeta { // Kind is a string value representing the REST resource this object represents. // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional string kind = 1; // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources // +optional optional string apiVersion = 2; } @@ -676,6 +824,8 @@ message Verbs { // Event represents a single event to a watched resource. 
// // +protobuf=true +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message WatchEvent { optional string type = 1; diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go index 8b6fdef5a..bd4c6d9b5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go @@ -29,8 +29,8 @@ import ( // // +protobuf.options.(gogoproto.goproto_stringer)=false type GroupResource struct { - Group string `protobuf:"bytes,1,opt,name=group"` - Resource string `protobuf:"bytes,2,opt,name=resource"` + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"` } func (gr *GroupResource) String() string { @@ -45,9 +45,9 @@ func (gr *GroupResource) String() string { // // +protobuf.options.(gogoproto.goproto_stringer)=false type GroupVersionResource struct { - Group string `protobuf:"bytes,1,opt,name=group"` - Version string `protobuf:"bytes,2,opt,name=version"` - Resource string `protobuf:"bytes,3,opt,name=resource"` + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` + Resource string `json:"resource" protobuf:"bytes,3,opt,name=resource"` } func (gvr *GroupVersionResource) String() string { @@ -59,8 +59,8 @@ func (gvr *GroupVersionResource) String() string { // // +protobuf.options.(gogoproto.goproto_stringer)=false type GroupKind struct { - Group string `protobuf:"bytes,1,opt,name=group"` - Kind string `protobuf:"bytes,2,opt,name=kind"` + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` } func (gk *GroupKind) String() string { @@ -75,9 +75,9 @@ func (gk *GroupKind) String() string { // // +protobuf.options.(gogoproto.goproto_stringer)=false type GroupVersionKind struct { - Group string `protobuf:"bytes,1,opt,name=group"` - Version string `protobuf:"bytes,2,opt,name=version"` - Kind string `protobuf:"bytes,3,opt,name=kind"` + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` + Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"` } func (gvk GroupVersionKind) String() string { @@ -88,8 +88,8 @@ func (gvk GroupVersionKind) String() string { // // +protobuf.options.(gogoproto.goproto_stringer)=false type GroupVersion struct { - Group string `protobuf:"bytes,1,opt,name=group"` - Version string `protobuf:"bytes,2,opt,name=version"` + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` } // Empty returns true if group and version are empty diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index b62dd9ee0..d845d7b0f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -228,7 +228,7 @@ func NewUIDPreconditions(uid string) *Preconditions { } // HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values. 
-func HasObjectMetaSystemFieldValues(meta *ObjectMeta) bool { - return !meta.CreationTimestamp.Time.IsZero() || - len(meta.UID) != 0 +func HasObjectMetaSystemFieldValues(meta Object) bool { + return !meta.GetCreationTimestamp().Time.IsZero() || + len(meta.GetUID()) != 0 } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go index 8b4c0423e..9b45145da 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go @@ -25,34 +25,14 @@ func CloneSelectorAndAddLabel(selector *LabelSelector, labelKey, labelValue stri } // Clone. - newSelector := new(LabelSelector) + newSelector := selector.DeepCopy() - // TODO(madhusudancs): Check if you can use deepCopy_extensions_LabelSelector here. - newSelector.MatchLabels = make(map[string]string) - if selector.MatchLabels != nil { - for key, val := range selector.MatchLabels { - newSelector.MatchLabels[key] = val - } + if newSelector.MatchLabels == nil { + newSelector.MatchLabels = make(map[string]string) } + newSelector.MatchLabels[labelKey] = labelValue - if selector.MatchExpressions != nil { - newMExps := make([]LabelSelectorRequirement, len(selector.MatchExpressions)) - for i, me := range selector.MatchExpressions { - newMExps[i].Key = me.Key - newMExps[i].Operator = me.Operator - if me.Values != nil { - newMExps[i].Values = make([]string, len(me.Values)) - copy(newMExps[i].Values, me.Values) - } else { - newMExps[i].Values = nil - } - } - newSelector.MatchExpressions = newMExps - } else { - newSelector.MatchExpressions = nil - } - return newSelector } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go index 108e34f0f..c13fe4af8 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -17,39 +17,10 @@ limitations under the License. package v1 import ( - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" ) -// ObjectMetaFor returns a pointer to a provided object's ObjectMeta. -// TODO: allow runtime.Unknown to extract this object -// TODO: Remove this function and use meta.ObjectMetaAccessor() instead. -func ObjectMetaFor(obj runtime.Object) (*ObjectMeta, error) { - v, err := conversion.EnforcePtr(obj) - if err != nil { - return nil, err - } - var meta *ObjectMeta - err = runtime.FieldPtr(v, "ObjectMeta", &meta) - return meta, err -} - -// ListMetaFor returns a pointer to a provided object's ListMeta, -// or an error if the object does not have that pointer. -// TODO: allow runtime.Unknown to extract this object -// TODO: Remove this function and use meta.ObjectMetaAccessor() instead. 
-func ListMetaFor(obj runtime.Object) (*ListMeta, error) { - v, err := conversion.EnforcePtr(obj) - if err != nil { - return nil, err - } - var meta *ListMeta - err = runtime.FieldPtr(v, "ListMeta", &meta) - return meta, err -} - // TODO: move this, Object, List, and Type to a different package type ObjectMetaAccessor interface { GetObjectMeta() Object @@ -70,16 +41,22 @@ type Object interface { SetUID(uid types.UID) GetResourceVersion() string SetResourceVersion(version string) + GetGeneration() int64 + SetGeneration(generation int64) GetSelfLink() string SetSelfLink(selfLink string) GetCreationTimestamp() Time SetCreationTimestamp(timestamp Time) GetDeletionTimestamp() *Time SetDeletionTimestamp(timestamp *Time) + GetDeletionGracePeriodSeconds() *int64 + SetDeletionGracePeriodSeconds(*int64) GetLabels() map[string]string SetLabels(labels map[string]string) GetAnnotations() map[string]string SetAnnotations(annotations map[string]string) + GetInitializers() *Initializers + SetInitializers(initializers *Initializers) GetFinalizers() []string SetFinalizers(finalizers []string) GetOwnerReferences() []OwnerReference @@ -90,20 +67,33 @@ type Object interface { // ListMetaAccessor retrieves the list interface from an object type ListMetaAccessor interface { - GetListMeta() List + GetListMeta() ListInterface } -// List lets you work with list metadata from any of the versioned or +// Common lets you work with core metadata from any of the versioned or // internal API objects. Attempting to set or retrieve a field on an object that does // not support that field will be a no-op and return a default value. // TODO: move this, and TypeMeta and ListMeta, to a different package -type List interface { +type Common interface { GetResourceVersion() string SetResourceVersion(version string) GetSelfLink() string SetSelfLink(selfLink string) } +// ListInterface lets you work with list metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field will be a no-op and return a default value. +// TODO: move this, and TypeMeta and ListMeta, to a different package +type ListInterface interface { + GetResourceVersion() string + SetResourceVersion(version string) + GetSelfLink() string + SetSelfLink(selfLink string) + GetContinue() string + SetContinue(c string) +} + // Type exposes the type and APIVersion of versioned or internal API objects. 
// TODO: move this, and TypeMeta and ListMeta, to a different package type Type interface { @@ -117,6 +107,8 @@ func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceV func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink } func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } +func (meta *ListMeta) GetContinue() string { return meta.Continue } +func (meta *ListMeta) SetContinue(c string) { meta.Continue = c } func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } @@ -130,7 +122,7 @@ func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } -func (obj *ListMeta) GetListMeta() List { return obj } +func (obj *ListMeta) GetListMeta() ListInterface { return obj } func (obj *ObjectMeta) GetObjectMeta() Object { return obj } @@ -146,6 +138,8 @@ func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ObjectMeta) GetGeneration() int64 { return meta.Generation } +func (meta *ObjectMeta) SetGeneration(generation int64) { meta.Generation = generation } func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } func (meta *ObjectMeta) GetCreationTimestamp() Time { return meta.CreationTimestamp } @@ -156,14 +150,23 @@ func (meta *ObjectMeta) GetDeletionTimestamp() *Time { return meta.DeletionTimes func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *Time) { meta.DeletionTimestamp = deletionTimestamp } +func (meta *ObjectMeta) GetDeletionGracePeriodSeconds() *int64 { return meta.DeletionGracePeriodSeconds } +func (meta *ObjectMeta) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) { + meta.DeletionGracePeriodSeconds = deletionGracePeriodSeconds +} func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } +func (meta *ObjectMeta) GetInitializers() *Initializers { return meta.Initializers } +func (meta *ObjectMeta) SetInitializers(initializers *Initializers) { meta.Initializers = initializers } func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { + if meta.OwnerReferences == nil { + return nil + } ret := make([]OwnerReference, len(meta.OwnerReferences)) for i := 0; i < len(meta.OwnerReferences); i++ { ret[i].Kind = meta.OwnerReferences[i].Kind @@ -183,6 +186,10 @@ func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { } func (meta *ObjectMeta) SetOwnerReferences(references []OwnerReference) { + if references == nil { + meta.OwnerReferences = nil + return + } newReferences := make([]OwnerReference, len(references)) for i := 0; i < len(references); i++ { newReferences[i].Kind = references[i].Kind diff --git 
a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go new file mode 100644 index 000000000..6f6c5111b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -0,0 +1,183 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "time" + + "github.com/google/gofuzz" +) + +const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" + +// MicroTime is a version of Time with microsecond level precision. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +type MicroTime struct { + time.Time `protobuf:"-"` +} + +// DeepCopyInto creates a deep-copy of the MicroTime value. The underlying time.Time +// type is effectively immutable in the time API, so it is safe to +// copy-by-assign, despite the presence of (unexported) Pointer fields. +func (t *MicroTime) DeepCopyInto(out *MicroTime) { + *out = *t +} + +// String returns the representation of the time. +func (t MicroTime) String() string { + return t.Time.String() +} + +// NewMicroTime returns a wrapped instance of the provided time +func NewMicroTime(time time.Time) MicroTime { + return MicroTime{time} +} + +// DateMicro returns the MicroTime corresponding to the supplied parameters +// by wrapping time.Date. +func DateMicro(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) MicroTime { + return MicroTime{time.Date(year, month, day, hour, min, sec, nsec, loc)} +} + +// NowMicro returns the current local time. +func NowMicro() MicroTime { + return MicroTime{time.Now()} +} + +// IsZero returns true if the value is nil or time is zero. +func (t *MicroTime) IsZero() bool { + if t == nil { + return true + } + return t.Time.IsZero() +} + +// Before reports whether the time instant t is before u. +func (t *MicroTime) Before(u *MicroTime) bool { + return t.Time.Before(u.Time) +} + +// Equal reports whether the time instant t is equal to u. +func (t *MicroTime) Equal(u *MicroTime) bool { + return t.Time.Equal(u.Time) +} + +// BeforeTime reports whether the time instant t is before second-level precision u. +func (t *MicroTime) BeforeTime(u *Time) bool { + return t.Time.Before(u.Time) +} + +// EqualTime reports whether the time instant t is equal to second-level precision u. +func (t *MicroTime) EqualTime(u *Time) bool { + return t.Time.Equal(u.Time) +} + +// UnixMicro returns the local time corresponding to the given Unix time +// by wrapping time.Unix. +func UnixMicro(sec int64, nsec int64) MicroTime { + return MicroTime{time.Unix(sec, nsec)} +} + +// UnmarshalJSON implements the json.Unmarshaller interface.
+func (t *MicroTime) UnmarshalJSON(b []byte) error { + if len(b) == 4 && string(b) == "null" { + t.Time = time.Time{} + return nil + } + + var str string + err := json.Unmarshal(b, &str) + if err != nil { + return err + } + + pt, err := time.Parse(RFC3339Micro, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// UnmarshalQueryParameter converts from a URL query parameter value to an object +func (t *MicroTime) UnmarshalQueryParameter(str string) error { + if len(str) == 0 { + t.Time = time.Time{} + return nil + } + // Tolerate requests from older clients that used JSON serialization to build query params + if len(str) == 4 && str == "null" { + t.Time = time.Time{} + return nil + } + + pt, err := time.Parse(RFC3339Micro, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (t MicroTime) MarshalJSON() ([]byte, error) { + if t.IsZero() { + // Encode unset/nil objects as JSON's "null". + return []byte("null"), nil + } + + return json.Marshal(t.UTC().Format(RFC3339Micro)) +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ MicroTime) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ MicroTime) OpenAPISchemaFormat() string { return "date-time" } + +// MarshalQueryParameter converts to a URL query parameter value +func (t MicroTime) MarshalQueryParameter() (string, error) { + if t.IsZero() { + // Encode unset/nil objects as an empty string + return "", nil + } + + return t.UTC().Format(RFC3339Micro), nil +} + +// Fuzz satisfies fuzz.Interface. +func (t *MicroTime) Fuzz(c fuzz.Continue) { + if t == nil { + return + } + // Allow for about 1000 years of randomness. Accurate to a tenth of + // micro second. Leave off nanoseconds because JSON doesn't + // represent them so they can't round-trip properly. + t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000)) +} + +var _ fuzz.Interface = &MicroTime{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go new file mode 100644 index 000000000..14841be51 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" +) + +// Timestamp is declared in time_proto.go + +// ProtoMicroTime returns the MicroTime as a new Timestamp value. +func (m *MicroTime) ProtoMicroTime() *Timestamp { + if m == nil { + return &Timestamp{} + } + return &Timestamp{ + Seconds: m.Time.Unix(), + Nanos: int32(m.Time.Nanosecond()), + } +} + +// Size implements the protobuf marshalling interface.
+func (m *MicroTime) Size() (n int) { + if m == nil || m.Time.IsZero() { + return 0 + } + return m.ProtoMicroTime().Size() +} + +// Unmarshal implements the protobuf marshalling interface. +func (m *MicroTime) Unmarshal(data []byte) error { + if len(data) == 0 { + m.Time = time.Time{} + return nil + } + p := Timestamp{} + if err := p.Unmarshal(data); err != nil { + return err + } + m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local() + return nil +} + +// Marshal implements the protobuf marshalling interface. +func (m *MicroTime) Marshal() (data []byte, err error) { + if m == nil || m.Time.IsZero() { + return nil, nil + } + return m.ProtoMicroTime().Marshal() +} + +// MarshalTo implements the protobuf marshalling interface. +func (m *MicroTime) MarshalTo(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoMicroTime().MarshalTo(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go index 8645d1abc..b300d3701 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -27,6 +27,10 @@ const GroupName = "meta.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} +// Unversioned is group version for unversioned API objects +// TODO: this should be v1 probably +var Unversioned = schema.GroupVersion{Group: "", Version: "v1"} + // WatchEventKind is name reserved for serializing watch events. const WatchEventKind = "WatchEvent" @@ -56,8 +60,16 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) Convert_versioned_Event_to_versioned_InternalEvent, ) + // Register Unversioned types under their own special group + scheme.AddUnversionedTypes(Unversioned, + &Status{}, + &APIVersions{}, + &APIGroupList{}, + &APIGroup{}, + &APIResourceList{}, + ) + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. - scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) AddConversionFuncs(scheme) RegisterDefaults(scheme) } @@ -77,6 +89,5 @@ func init() { ) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. - scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) RegisterDefaults(scheme) } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index a1e01f344..efff656e1 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -20,9 +20,6 @@ import ( "encoding/json" "time" - "k8s.io/apimachinery/pkg/openapi" - - "github.com/go-openapi/spec" "github.com/google/gofuzz" ) @@ -37,11 +34,11 @@ type Time struct { time.Time `protobuf:"-"` } -// DeepCopy returns a deep-copy of the Time value. The underlying time.Time +// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time // type is effectively immutable in the time API, so it is safe to // copy-by-assign, despite the presence of (unexported) Pointer fields. -func (t Time) DeepCopy() Time { - return t +func (t *Time) DeepCopyInto(out *Time) { + *out = *t } // String returns the representation of the time. @@ -74,13 +71,19 @@ func (t *Time) IsZero() bool { } // Before reports whether the time instant t is before u.
-func (t Time) Before(u Time) bool { +func (t *Time) Before(u *Time) bool { return t.Time.Before(u.Time) } // Equal reports whether the time instant t is equal to u. -func (t Time) Equal(u Time) bool { - return t.Time.Equal(u.Time) +func (t *Time) Equal(u *Time) bool { + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false } // Unix returns the local time corresponding to the given Unix time @@ -103,7 +106,10 @@ func (t *Time) UnmarshalJSON(b []byte) error { } var str string - json.Unmarshal(b, &str) + err := json.Unmarshal(b, &str) + if err != nil { + return err + } pt, err := time.Parse(time.RFC3339, str) if err != nil { @@ -145,16 +151,15 @@ func (t Time) MarshalJSON() ([]byte, error) { return json.Marshal(t.UTC().Format(time.RFC3339)) } -func (_ Time) OpenAPIDefinition() openapi.OpenAPIDefinition { - return openapi.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "date-time", - }, - }, - } -} +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Time) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ Time) OpenAPISchemaFormat() string { return "date-time" } // MarshalQueryParameter converts to a URL query parameter value func (t Time) MarshalQueryParameter() (string, error) { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go index aea28e410..ed72186b4 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go @@ -25,7 +25,7 @@ import ( // that matches Time. Do not use in Go structs. type Timestamp struct { // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. Seconds int64 `json:"seconds" protobuf:"varint,1,opt,name=seconds"` // Non-negative fractions of a second at nanosecond resolution. Negative @@ -42,7 +42,10 @@ func (m *Time) ProtoTime() *Timestamp { } return &Timestamp{ Seconds: m.Time.Unix(), - Nanos: int32(m.Time.Nanosecond()), + // leaving this here for the record. our JSON only handled seconds, so this results in writes by + // protobuf clients storing values that aren't read by json clients, which results in unexpected + // field mutation, which fails various validation and equality code. + // Nanos: int32(m.Time.Nanosecond()), } } @@ -64,7 +67,11 @@ func (m *Time) Unmarshal(data []byte) error { if err := p.Unmarshal(data); err != nil { return err } - m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local() + // leaving this here for the record. our JSON only handled seconds, so this results in writes by + // protobuf clients storing values that aren't read by json clients, which results in unexpected + // field mutation, which fails various validation and equality code. 
+ // m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local() + m.Time = time.Unix(p.Seconds, int64(0)).Local() return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index c6cf1fab8..e93df1846 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package unversioned contains API types that are common to all versions. +// Package v1 contains API types that are common to all versions. // // The package contains two categories of types: // - external (serialized) types that lack their own version (e.g TypeMeta) @@ -29,25 +29,28 @@ import ( "fmt" "strings" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ) // TypeMeta describes an individual object in an API response or request // with strings representing the type of the object and its API schema version. // Structures that are versioned or persisted should inline TypeMeta. +// +// +k8s:deepcopy-gen=false type TypeMeta struct { // Kind is a string value representing the REST resource this object represents. // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` } @@ -55,7 +58,7 @@ type TypeMeta struct { // ListMeta describes metadata that synthetic resources must have, including lists and // various status objects. A resource may have only one of {ObjectMeta, ListMeta}. type ListMeta struct { - // SelfLink is a URL representing this object. + // selfLink is a URL representing this object. // Populated by the system. // Read-only. // +optional @@ -66,9 +69,17 @@ type ListMeta struct { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` + + // continue may be set if the user set a limit on the number of items returned, and indicates that + // the server has more data available. The value is opaque and may be used to issue another request + // to the endpoint that served this list to retrieve the next set of available objects. Continuing a + // list may not be possible if the server configuration has changed or more than a few minutes have + // passed. 
The resourceVersion field returned when using this continue value will be identical to + // the value in the first response. + Continue string `json:"continue,omitempty" protobuf:"bytes,3,opt,name=continue"` } // These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here @@ -103,7 +114,7 @@ type ObjectMeta struct { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency // +optional GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` @@ -143,7 +154,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` @@ -159,27 +170,28 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Null for lists. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This // field is set by the server when a graceful deletion is requested by the user, and is not // directly settable by a client. The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field. Once set, - // this value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard - // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the - // API. In the presence of network partitions, this object may still exist after this - // timestamp, until an administrator or automated process can determine the resource is - // fully terminated. + // from resource lists, and not reachable by name) after the time in this field, once the + // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. + // Once the deletionTimestamp is set, this value may not be unset or be set further into the + // future, although it may be shortened or the resource may be deleted prior to this time. + // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react + // by sending a graceful termination signal to the containers in the pod. After that 30 seconds, + // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, + // remove the pod from the API. 
In the presence of network partitions, this object may still + // exist after this timestamp, until an administrator or automated process can determine the + // resource is fully terminated. // If not set, graceful deletion of the object has not been requested. // // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` @@ -209,13 +221,27 @@ type ObjectMeta struct { // then an entry in this list will point to this controller, with the controller field set to true. // There cannot be more than one managing controller. // +optional + // +patchMergeKey=uid + // +patchStrategy=merge OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. If nil or empty, + // this object has been completely initialized. Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. Once it is empty, it may not be modified further + // by any user. + Initializers *Initializers `json:"initializers,omitempty" protobuf:"bytes,16,opt,name=initializers"` + // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries // in this list can only be removed. // +optional + // +patchStrategy=merge Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` // The name of the cluster which the object belongs to. @@ -225,6 +251,26 @@ type ObjectMeta struct { ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` } +// Initializers tracks the progress of initialization. +type Initializers struct { + // Pending is a list of initializers that must execute in order before this object is visible. + // When the last pending initializer is removed, and no failing result is set, the initializers + // struct will be set to nil and the object is considered as initialized and visible to all + // clients. + // +patchMergeKey=name + // +patchStrategy=merge + Pending []Initializer `json:"pending" protobuf:"bytes,1,rep,name=pending" patchStrategy:"merge" patchMergeKey:"name"` + // If result is set with the Failure field, the object will be persisted to storage and then deleted, + // ensuring that other clients can observe the deletion. + Result *Status `json:"result,omitempty" protobuf:"bytes,2,opt,name=result"` +} + +// Initializer is information about an initializer that has not yet completed. +type Initializer struct { + // name of the process that is responsible for initializing this object. 
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + const ( // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients NamespaceDefault string = "default" @@ -245,7 +291,7 @@ type OwnerReference struct { // API version of the referent. APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` // Kind of the referent. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent. // More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -266,6 +312,8 @@ type OwnerReference struct { BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty" protobuf:"varint,7,opt,name=blockOwnerDeletion"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ListOptions is the query options to a standard REST list call. type ListOptions struct { TypeMeta `json:",inline"` @@ -278,6 +326,9 @@ type ListOptions struct { // Defaults to everything. // +optional FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` + // If true, partially initialized resources are included in the response. + // +optional + IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,6,opt,name=includeUninitialized"` // Watch for changes to the described resources and return them as a stream of // add, update, and remove notifications. Specify resourceVersion. // +optional @@ -291,10 +342,40 @@ type ListOptions struct { // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` // Timeout for the list/watch call. + // This limits the duration of the call, regardless of any activity or inactivity. // +optional TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"` + + // limit is a maximum number of responses to return for a list call. If more items exist, the + // server will set the `continue` field on the list metadata to a value that can be used with the + // same initial query to retrieve the next set of results. Setting a limit may return fewer than + // the requested amount of items (up to zero items) in the event all requested objects are + // filtered out and clients should only use the presence of the continue field to determine whether + // more results are available. Servers may choose not to support the limit argument and will return + // all of the available results. If limit is specified and the continue field is empty, clients may + // assume that no more results are available. This field is not supported if watch is true. + // + // The server guarantees that the objects returned when using continue will be identical to issuing + // a single list call without a limit - that is, no objects created, modified, or deleted after the + // first request is issued will be included in any subsequent continued requests. This is sometimes + // referred to as a consistent snapshot, and ensures that a client that is using limit to receive + // smaller chunks of a very large result can ensure they see all possible objects. If objects are + // updated during a chunked list the version of the object that was present at the time the first list + // result was calculated is returned. 
+ Limit int64 `json:"limit,omitempty" protobuf:"varint,7,opt,name=limit"` + // The continue option should be set when retrieving more results from the server. Since this value + // is server defined, clients may only use the continue value from a previous query result with + // identical query parameters (except for the value of continue) and the server may reject a continue + // value it does not recognize. If the specified continue value is no longer valid whether due to + // expiration (generally five to fifteen minutes) or a configuration change on the server the server + // will respond with a 410 ResourceExpired error indicating the client must restart their list without + // the continue field. This field is not supported when watch is true. Clients may start a watch from + // the last resourceVersion value returned by the server and not miss any modifications. + Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ExportOptions is the query options to the standard REST get call. type ExportOptions struct { TypeMeta `json:",inline"` @@ -304,6 +385,8 @@ type ExportOptions struct { Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // GetOptions is the standard query options to the standard REST get call. type GetOptions struct { TypeMeta `json:",inline"` @@ -312,6 +395,9 @@ type GetOptions struct { // - if it's 0, then we simply return what we currently have in cache, no guarantee; // - if set to non zero, then the result is at least as fresh as given rv. ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,1,opt,name=resourceVersion"` + // If true, partially initialized resources are included in the response. + // +optional + IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,2,opt,name=includeUninitialized"` } // DeletionPropagation decides if a deletion will propagate to the dependents of @@ -332,6 +418,8 @@ const ( DeletePropagationForeground DeletionPropagation = "Foreground" ) +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // DeleteOptions may be provided when deleting an API object. type DeleteOptions struct { TypeMeta `json:",inline"` @@ -359,6 +447,10 @@ type DeleteOptions struct { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional PropagationPolicy *DeletionPropagation `json:"propagationPolicy,omitempty" protobuf:"varint,4,opt,name=propagationPolicy"` } @@ -370,17 +462,19 @@ type Preconditions struct { UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // Status is a return value for calls that don't return other objects. type Status struct { TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Status of the operation. // One of: "Success" or "Failure". - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` // A human-readable description of the status of this operation. @@ -419,14 +513,21 @@ type StatusDetails struct { Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` + // UID of the resource. + // (when there is a single resource which can be described). + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // The Causes array includes more details associated with the StatusReason // failure. Not all StatusReasons may provide detailed causes. // +optional Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"` - // If specified, the time in seconds before the operation should be retried. + // If specified, the time in seconds before the operation should be retried. Some errors may indicate + // the client must take an alternate action - for those errors this field may indicate how long to wait + // before taking the alternate action. // +optional RetryAfterSeconds int32 `json:"retryAfterSeconds,omitempty" protobuf:"varint,5,opt,name=retryAfterSeconds"` } @@ -531,6 +632,15 @@ const ( // Status code 504 StatusReasonTimeout StatusReason = "Timeout" + // StatusReasonTooManyRequests means the server experienced too many requests within a + // given window and that the client must wait to perform the action again. A client may + // always retry the request that led to this error, although the client should wait at least + // the number of seconds specified by the retryAfterSeconds field. + // Details (optional): + // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried + // Status code 429 + StatusReasonTooManyRequests StatusReason = "TooManyRequests" + // StatusReasonBadRequest means that the request itself was invalid, because the request // doesn't make any sense, for example deleting a read-only object. This is different than // StatusReasonInvalid above which indicates that the API call could possibly succeed, but the @@ -542,6 +652,18 @@ const ( // can only be created. API calls that return MethodNotAllowed can never succeed. StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" + // StatusReasonNotAcceptable means that the accept types indicated by the client were not acceptable + // to the server - for instance, attempting to receive protobuf for a resource that supports only json and yaml. + // API calls that return NotAcceptable can never succeed. 
+ // Status code 406 + StatusReasonNotAcceptable StatusReason = "NotAcceptable" + + // StatusReasonUnsupportedMediaType means that the content type sent by the client is not acceptable + // to the server - for instance, attempting to send protobuf for a resource that supports only json and yaml. + // API calls that return UnsupportedMediaType can never succeed. + // Status code 415 + StatusReasonUnsupportedMediaType StatusReason = "UnsupportedMediaType" + // StatusReasonInternalError indicates that an internal error occurred, it is unexpected // and the outcome of the call is unknown. // Details (optional): @@ -613,10 +735,25 @@ const ( CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse" ) +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// List holds a list of objects, which may not be known by the server. +type List struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of objects + Items []runtime.RawExtension `json:"items" protobuf:"bytes,2,rep,name=items"` +} + // APIVersions lists the versions that are available, to allow clients to // discover the API at /api, which is the root path of the legacy v1 API. // // +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type APIVersions struct { TypeMeta `json:",inline"` // versions are the api versions that are available. @@ -631,6 +768,8 @@ type APIVersions struct { ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // APIGroupList is a list of APIGroup, to allow clients to discover the API at // /apis. type APIGroupList struct { @@ -639,6 +778,8 @@ type APIGroupList struct { Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // APIGroup contains the name, the supported versions, and the preferred version // of a group. type APIGroup struct { @@ -658,7 +799,8 @@ type APIGroup struct { // The server returns only those CIDRs that it thinks that the client can match. // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` + // +optional + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs,omitempty" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` } // ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. @@ -682,10 +824,20 @@ type GroupVersionForDiscovery struct { // APIResource specifies the name of a resource and whether it is namespaced. type APIResource struct { - // name is the name of the resource. + // name is the plural name of the resource. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // singularName is the singular name of the resource. 
This allows clients to handle plural and singular opaquely. + // The singularName is more correct for reporting status on a single item and both singular and plural are allowed + // from the kubectl CLI interface. + SingularName string `json:"singularName" protobuf:"bytes,6,opt,name=singularName"` // namespaced indicates if a resource is namespaced or not. Namespaced bool `json:"namespaced" protobuf:"varint,2,opt,name=namespaced"` + // group is the preferred group of the resource. Empty implies the group of the containing resource list. + // For subresources, this may have a different value, for example: Scale". + Group string `json:"group,omitempty" protobuf:"bytes,8,opt,name=group"` + // version is the preferred version of the resource. Empty implies the version of the containing resource list + // For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)". + Version string `json:"version,omitempty" protobuf:"bytes,9,opt,name=version"` // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"` // verbs is a list of supported kube verbs (this includes get, list, watch, create, @@ -693,6 +845,8 @@ type APIResource struct { Verbs Verbs `json:"verbs" protobuf:"bytes,4,opt,name=verbs"` // shortNames is a list of suggested short names of the resource. ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"` + // categories is a list of the grouped resources this resource belongs to (e.g. 'all') + Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"` } // Verbs masks the value so protobuf can generate @@ -705,6 +859,8 @@ func (vs Verbs) String() string { return fmt.Sprintf("%v", []string(vs)) } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // APIResourceList is a list of APIResource, it is used to expose the name of the // resources supported in a specific group and version, and if the resource // is namespaced. @@ -769,9 +925,11 @@ type LabelSelector struct { // relates the key and values. type LabelSelectorRequirement struct { // key is the label key that the selector applies to. + // +patchMergeKey=key + // +patchStrategy=merge Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. + // Valid operators are In, NotIn, Exists and DoesNotExist. Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` // values is an array of string values. If the operator is In or NotIn, // the values array must be non-empty. If the operator is Exists or DoesNotExist, diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 950c00285..f91d8a81f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -26,7 +26,7 @@ package v1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_APIGroup = map[string]string{ "": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "name": "name is the name of the group.", @@ -49,12 +49,16 @@ func (APIGroupList) SwaggerDoc() map[string]string { } var map_APIResource = map[string]string{ - "": "APIResource specifies the name of a resource and whether it is namespaced.", - "name": "name is the name of the resource.", - "namespaced": "namespaced indicates if a resource is namespaced or not.", - "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", - "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", - "shortNames": "shortNames is a list of suggested short names of the resource.", + "": "APIResource specifies the name of a resource and whether it is namespaced.", + "name": "name is the plural name of the resource.", + "singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", + "namespaced": "namespaced indicates if a resource is namespaced or not.", + "group": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", + "version": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", + "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", + "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + "shortNames": "shortNames is a list of suggested short names of the resource.", + "categories": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", } func (APIResource) SwaggerDoc() map[string]string { @@ -86,7 +90,7 @@ var map_DeleteOptions = map[string]string{ "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. 
The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", + "propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", } func (DeleteOptions) SwaggerDoc() map[string]string { @@ -104,8 +108,9 @@ func (ExportOptions) SwaggerDoc() map[string]string { } var map_GetOptions = map[string]string{ - "": "GetOptions is the standard query options to the standard REST get call.", - "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "": "GetOptions is the standard query options to the standard REST get call.", + "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "includeUninitialized": "If true, partially initialized resources are included in the response.", } func (GetOptions) SwaggerDoc() map[string]string { @@ -122,6 +127,25 @@ func (GroupVersionForDiscovery) SwaggerDoc() map[string]string { return map_GroupVersionForDiscovery } +var map_Initializer = map[string]string{ + "": "Initializer is information about an initializer that has not yet completed.", + "name": "name of the process that is responsible for initializing this object.", +} + +func (Initializer) SwaggerDoc() map[string]string { + return map_Initializer +} + +var map_Initializers = map[string]string{ + "": "Initializers tracks the progress of initialization.", + "pending": "Pending is a list of initializers that must execute in order before this object is visible. When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients.", + "result": "If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion.", +} + +func (Initializers) SwaggerDoc() map[string]string { + return map_Initializers +} + var map_LabelSelector = map[string]string{ "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". 
The requirements are ANDed.", @@ -135,7 +159,7 @@ func (LabelSelector) SwaggerDoc() map[string]string { var map_LabelSelectorRequirement = map[string]string{ "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", "key": "key is the label key that the selector applies to.", - "operator": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist.", + "operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", } @@ -143,10 +167,21 @@ func (LabelSelectorRequirement) SwaggerDoc() map[string]string { return map_LabelSelectorRequirement } +var map_List = map[string]string{ + "": "List holds a list of objects, which may not be known by the server.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of objects", +} + +func (List) SwaggerDoc() map[string]string { + return map_List +} + var map_ListMeta = map[string]string{ "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", - "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", - "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", + "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.", + "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response.", } func (ListMeta) SwaggerDoc() map[string]string { @@ -154,12 +189,15 @@ func (ListMeta) SwaggerDoc() map[string]string { } var map_ListOptions = map[string]string{ - "": "ListOptions is the query options to a standard REST list call.", - "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "fieldSelector": "A selector to restrict the list of returned objects by their fields. 
Defaults to everything.", - "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "timeoutSeconds": "Timeout for the list/watch call.", + "": "ListOptions is the query options to a standard REST list call.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "includeUninitialized": "If true, partially initialized resources are included in the response.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", } func (ListOptions) SwaggerDoc() map[string]string { @@ -169,18 +207,19 @@ func (ListOptions) SwaggerDoc() map[string]string { var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency", "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", "uid": "UID is the unique in time and space value for this object. 
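The limit/continue documentation above describes the chunked-list protocol: a client passes a limit, and while the returned continue token is non-empty it repeats the request with that token to fetch the next chunk. A minimal sketch of that loop follows; the listPage function type and the in-memory pager are illustrative stand-ins (not part of this patch) for any List call that honours metav1.ListOptions.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// listPage is a stand-in for any List endpoint that honours Limit and
// Continue in metav1.ListOptions; it returns one chunk of names plus the
// continue token from the list metadata.
type listPage func(opts metav1.ListOptions) (names []string, continueToken string, err error)

// listAll drains a chunked list by re-issuing the query with the continue
// token until the server reports no more results.
func listAll(list listPage) ([]string, error) {
	var all []string
	opts := metav1.ListOptions{Limit: 500}
	for {
		names, cont, err := list(opts)
		if err != nil {
			return nil, err
		}
		all = append(all, names...)
		if cont == "" {
			// An empty continue token means the server has returned
			// everything that matched the query.
			return all, nil
		}
		// Resume exactly where the previous chunk ended.
		opts.Continue = cont
	}
}

func main() {
	// Hypothetical pager standing in for an API server.
	pages := [][]string{{"a", "b"}, {"c"}}
	i := 0
	names, _ := listAll(func(opts metav1.ListOptions) ([]string, string, error) {
		page := pages[i]
		i++
		if i < len(pages) {
			return page, fmt.Sprintf("chunk-%d", i), nil
		}
		return page, "", nil
	})
	fmt.Println(names) // [a b c]
}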
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", - "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. 
It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", + "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.", "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. 
If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", } @@ -192,7 +231,7 @@ func (ObjectMeta) SwaggerDoc() map[string]string { var map_OwnerReference = map[string]string{ "": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.", "apiVersion": "API version of the referent.", - "kind": "Kind of the referent. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "controller": "If true, this reference points to the managing controller.", @@ -241,8 +280,8 @@ func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { var map_Status = map[string]string{ "": "Status is a return value for calls that don't return other objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", "message": "A human-readable description of the status of this operation.", "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", "details": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", @@ -268,9 +307,10 @@ var map_StatusDetails = map[string]string{ "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", "group": "The group attribute of the resource associated with the status StatusReason.", - "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "kind": "The kind attribute of the resource associated with the status StatusReason. 
On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "uid": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", - "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried.", + "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", } func (StatusDetails) SwaggerDoc() map[string]string { @@ -279,8 +319,8 @@ func (StatusDetails) SwaggerDoc() map[string]string { var map_TypeMeta = map[string]string{ "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", - "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources", + "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", } func (TypeMeta) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go new file mode 100644 index 000000000..fc138e75a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -0,0 +1,451 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unstructured + +import ( + gojson "encoding/json" + "fmt" + "io" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" +) + +// NestedFieldCopy returns a deep copy of the value of a nested field. +// Returns false if the value is missing. +// No error is returned for a nil field. +func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err + } + return runtime.DeepCopyJSONValue(val), true, nil +} + +// NestedFieldNoCopy returns a reference to a nested field. +// Returns false if value is not found and an error if unable +// to traverse obj. +func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { + var val interface{} = obj + + for i, field := range fields { + if m, ok := val.(map[string]interface{}); ok { + val, ok = m[field] + if !ok { + return nil, false, nil + } + } else { + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields[:i+1]), val, val) + } + } + return val, true, nil +} + +// NestedString returns the string value of a nested field. +// Returns false if value is not found and an error if not a string. +func NestedString(obj map[string]interface{}, fields ...string) (string, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return "", found, err + } + s, ok := val.(string) + if !ok { + return "", false, fmt.Errorf("%v accessor error: %v is of the type %T, expected string", jsonPath(fields), val, val) + } + return s, true, nil +} + +// NestedBool returns the bool value of a nested field. +// Returns false if value is not found and an error if not a bool. +func NestedBool(obj map[string]interface{}, fields ...string) (bool, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return false, found, err + } + b, ok := val.(bool) + if !ok { + return false, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected bool", jsonPath(fields), val, val) + } + return b, true, nil +} + +// NestedFloat64 returns the float64 value of a nested field. +// Returns false if value is not found and an error if not a float64. +func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return 0, found, err + } + f, ok := val.(float64) + if !ok { + return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected float64", jsonPath(fields), val, val) + } + return f, true, nil +} + +// NestedInt64 returns the int64 value of a nested field. +// Returns false if value is not found and an error if not an int64. +func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return 0, found, err + } + i, ok := val.(int64) + if !ok { + return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected int64", jsonPath(fields), val, val) + } + return i, true, nil +} + +// NestedStringSlice returns a copy of []string value of a nested field. 
+// Returns false if value is not found and an error if not a []interface{} or contains non-string items in the slice. +func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err + } + m, ok := val.([]interface{}) + if !ok { + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val) + } + strSlice := make([]string, 0, len(m)) + for _, v := range m { + if str, ok := v.(string); ok { + strSlice = append(strSlice, str) + } else { + return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the slice: %v is of the type %T, expected string", jsonPath(fields), v, v) + } + } + return strSlice, true, nil +} + +// NestedSlice returns a deep copy of []interface{} value of a nested field. +// Returns false if value is not found and an error if not a []interface{}. +func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err + } + _, ok := val.([]interface{}) + if !ok { + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val) + } + return runtime.DeepCopyJSONValue(val).([]interface{}), true, nil +} + +// NestedStringMap returns a copy of map[string]string value of a nested field. +// Returns false if value is not found and an error if not a map[string]interface{} or contains non-string values in the map. +func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) { + m, found, err := nestedMapNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err + } + strMap := make(map[string]string, len(m)) + for k, v := range m { + if str, ok := v.(string); ok { + strMap[k] = str + } else { + return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the map: %v is of the type %T, expected string", jsonPath(fields), v, v) + } + } + return strMap, true, nil +} + +// NestedMap returns a deep copy of map[string]interface{} value of a nested field. +// Returns false if value is not found and an error if not a map[string]interface{}. +func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) { + m, found, err := nestedMapNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err + } + return runtime.DeepCopyJSON(m), true, nil +} + +// nestedMapNoCopy returns a map[string]interface{} value of a nested field. +// Returns false if value is not found and an error if not a map[string]interface{}. +func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err + } + m, ok := val.(map[string]interface{}) + if !ok { + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields), val, val) + } + return m, true, nil +} + +// SetNestedField sets the value of a nested field to a deep copy of the value provided. +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. 
+func SetNestedField(obj map[string]interface{}, value interface{}, fields ...string) error { + return setNestedFieldNoCopy(obj, runtime.DeepCopyJSONValue(value), fields...) +} + +func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields ...string) error { + m := obj + + for i, field := range fields[:len(fields)-1] { + if val, ok := m[field]; ok { + if valMap, ok := val.(map[string]interface{}); ok { + m = valMap + } else { + return fmt.Errorf("value cannot be set because %v is not a map[string]interface{}", jsonPath(fields[:i+1])) + } + } else { + newVal := make(map[string]interface{}) + m[field] = newVal + m = newVal + } + } + m[fields[len(fields)-1]] = value + return nil +} + +// SetNestedStringSlice sets the string slice value of a nested field. +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ...string) error { + m := make([]interface{}, 0, len(value)) // convert []string into []interface{} + for _, v := range value { + m = append(m, v) + } + return setNestedFieldNoCopy(obj, m, fields...) +} + +// SetNestedSlice sets the slice value of a nested field. +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedSlice(obj map[string]interface{}, value []interface{}, fields ...string) error { + return SetNestedField(obj, value, fields...) +} + +// SetNestedStringMap sets the map[string]string value of a nested field. +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fields ...string) error { + m := make(map[string]interface{}, len(value)) // convert map[string]string into map[string]interface{} + for k, v := range value { + m[k] = v + } + return setNestedFieldNoCopy(obj, m, fields...) +} + +// SetNestedMap sets the map[string]interface{} value of a nested field. +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) error { + return SetNestedField(obj, value, fields...) +} + +// RemoveNestedField removes the nested field from the obj. +func RemoveNestedField(obj map[string]interface{}, fields ...string) { + m := obj + for _, field := range fields[:len(fields)-1] { + if x, ok := m[field].(map[string]interface{}); ok { + m = x + } else { + return + } + } + delete(m, fields[len(fields)-1]) +} + +func getNestedString(obj map[string]interface{}, fields ...string) string { + val, found, err := NestedString(obj, fields...) + if !found || err != nil { + return "" + } + return val +} + +func jsonPath(fields []string) string { + return "." + strings.Join(fields, ".") +} + +func extractOwnerReference(v map[string]interface{}) metav1.OwnerReference { + // though this field is a *bool, but when decoded from JSON, it's + // unmarshalled as bool. 
+ var controllerPtr *bool + if controller, found, err := NestedBool(v, "controller"); err == nil && found { + controllerPtr = &controller + } + var blockOwnerDeletionPtr *bool + if blockOwnerDeletion, found, err := NestedBool(v, "blockOwnerDeletion"); err == nil && found { + blockOwnerDeletionPtr = &blockOwnerDeletion + } + return metav1.OwnerReference{ + Kind: getNestedString(v, "kind"), + Name: getNestedString(v, "name"), + APIVersion: getNestedString(v, "apiVersion"), + UID: types.UID(getNestedString(v, "uid")), + Controller: controllerPtr, + BlockOwnerDeletion: blockOwnerDeletionPtr, + } +} + +// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured +// type, which can be used for generic access to objects without a predefined scheme. +// TODO: move into serializer/json. +var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{} + +type unstructuredJSONScheme struct{} + +func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + var err error + if obj != nil { + err = s.decodeInto(data, obj) + } else { + obj, err = s.decode(data) + } + + if err != nil { + return nil, nil, err + } + + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, &gvk, runtime.NewMissingKindErr(string(data)) + } + + return obj, &gvk, nil +} + +func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { + switch t := obj.(type) { + case *Unstructured: + return json.NewEncoder(w).Encode(t.Object) + case *UnstructuredList: + items := make([]interface{}, 0, len(t.Items)) + for _, i := range t.Items { + items = append(items, i.Object) + } + listObj := make(map[string]interface{}, len(t.Object)+1) + for k, v := range t.Object { // Make a shallow copy + listObj[k] = v + } + listObj["items"] = items + return json.NewEncoder(w).Encode(listObj) + case *runtime.Unknown: + // TODO: Unstructured needs to deal with ContentType. + _, err := w.Write(t.Raw) + return err + default: + return json.NewEncoder(w).Encode(t) + } +} + +func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { + type detector struct { + Items gojson.RawMessage + } + var det detector + if err := json.Unmarshal(data, &det); err != nil { + return nil, err + } + + if det.Items != nil { + list := &UnstructuredList{} + err := s.decodeToList(data, list) + return list, err + } + + // No Items field, so it wasn't a list. 
+ unstruct := &Unstructured{} + err := s.decodeToUnstructured(data, unstruct) + return unstruct, err +} + +func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error { + switch x := obj.(type) { + case *Unstructured: + return s.decodeToUnstructured(data, x) + case *UnstructuredList: + return s.decodeToList(data, x) + case *runtime.VersionedObjects: + o, err := s.decode(data) + if err == nil { + x.Objects = []runtime.Object{o} + } + return err + default: + return json.Unmarshal(data, x) + } +} + +func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error { + m := make(map[string]interface{}) + if err := json.Unmarshal(data, &m); err != nil { + return err + } + + unstruct.Object = m + + return nil +} + +func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { + type decodeList struct { + Items []gojson.RawMessage + } + + var dList decodeList + if err := json.Unmarshal(data, &dList); err != nil { + return err + } + + if err := json.Unmarshal(data, &list.Object); err != nil { + return err + } + + // For typed lists, e.g., a PodList, API server doesn't set each item's + // APIVersion and Kind. We need to set it. + listAPIVersion := list.GetAPIVersion() + listKind := list.GetKind() + itemKind := strings.TrimSuffix(listKind, "List") + + delete(list.Object, "items") + list.Items = make([]Unstructured, 0, len(dList.Items)) + for _, i := range dList.Items { + unstruct := &Unstructured{} + if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil { + return err + } + // This is hacky. Set the item's Kind and APIVersion to those inferred + // from the List. + if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 { + unstruct.SetKind(itemKind) + unstruct.SetAPIVersion(listAPIVersion) + } + list.Items = append(list.Items, *unstruct) + } + return nil +} + +type JSONFallbackEncoder struct { + runtime.Encoder +} + +func (c JSONFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error { + err := c.Encoder.Encode(obj, w) + if runtime.IsNotRegisteredError(err) { + switch obj.(type) { + case *Unstructured, *UnstructuredList: + return UnstructuredJSONScheme.Encode(obj, w) + } + } + return err +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index ae20726b6..548a01e59 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -18,19 +18,14 @@ package unstructured import ( "bytes" - gojson "encoding/json" "errors" "fmt" - "io" - "strings" - - "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/json" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) // Unstructured allows objects that do not have Golang structs registered to be manipulated @@ -41,6 +36,8 @@ import ( // type if you are dealing with objects that are not in the server meta v1 schema. // // TODO: make the serialization part of this type distinct from the field accessors. 
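The helpers.go file added above introduces typed getters (NestedString, NestedStringMap, NestedFieldNoCopy, ...) and setters (SetNestedField, SetNestedStringMap, RemoveNestedField) for JSON-shaped map[string]interface{} trees. A small sketch of how they compose, using a hand-built object rather than anything from this patch:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// A JSON-shaped object, as would be produced by decoding with
	// UnstructuredJSONScheme. The content here is made up for illustration.
	obj := map[string]interface{}{
		"metadata": map[string]interface{}{
			"name":   "demo",
			"labels": map[string]interface{}{"app": "web"},
		},
	}

	// Typed getters report (value, found, error) so callers can distinguish
	// "missing" from "present but of the wrong type".
	name, found, err := unstructured.NestedString(obj, "metadata", "name")
	fmt.Println(name, found, err) // demo true <nil>

	labels, _, _ := unstructured.NestedStringMap(obj, "metadata", "labels")
	fmt.Println(labels["app"]) // web

	// SetNestedField deep-copies the value and creates intermediate maps
	// as needed; RemoveNestedField silently no-ops on a missing path.
	if err := unstructured.SetNestedField(obj, "v1", "apiVersion"); err != nil {
		panic(err)
	}
	unstructured.RemoveNestedField(obj, "metadata", "labels", "app")
}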
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true type Unstructured struct { // Object is a JSON compatible map with string, float, int, bool, []interface{}, or // map[string]interface{} @@ -50,47 +47,68 @@ type Unstructured struct { var _ metav1.Object = &Unstructured{} var _ runtime.Unstructured = &Unstructured{} -var _ runtime.Unstructured = &UnstructuredList{} -func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } -func (obj *UnstructuredList) GetObjectKind() schema.ObjectKind { return obj } - -func (obj *Unstructured) IsUnstructuredObject() {} -func (obj *UnstructuredList) IsUnstructuredObject() {} +func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } func (obj *Unstructured) IsList() bool { - if obj.Object != nil { - _, ok := obj.Object["items"] - return ok + field, ok := obj.Object["items"] + if !ok { + return false } - return false + _, ok = field.([]interface{}) + return ok +} +func (obj *Unstructured) ToList() (*UnstructuredList, error) { + if !obj.IsList() { + // return an empty list back + return &UnstructuredList{Object: obj.Object}, nil + } + + ret := &UnstructuredList{} + ret.Object = obj.Object + + err := obj.EachListItem(func(item runtime.Object) error { + castItem := item.(*Unstructured) + ret.Items = append(ret.Items, *castItem) + return nil + }) + if err != nil { + return nil, err + } + + return ret, nil +} + +func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { + field, ok := obj.Object["items"] + if !ok { + return errors.New("content is not a list") + } + items, ok := field.([]interface{}) + if !ok { + return fmt.Errorf("content is not a list: %T", field) + } + for _, item := range items { + child, ok := item.(map[string]interface{}) + if !ok { + return fmt.Errorf("items member is not an object: %T", child) + } + if err := fn(&Unstructured{Object: child}); err != nil { + return err + } + } + return nil } -func (obj *UnstructuredList) IsList() bool { return true } func (obj *Unstructured) UnstructuredContent() map[string]interface{} { if obj.Object == nil { - obj.Object = make(map[string]interface{}) + return make(map[string]interface{}) } return obj.Object } -// UnstructuredContent returns a map contain an overlay of the Items field onto -// the Object field. Items always overwrites overlay. Changing "items" in the -// returned object will affect items in the underlying Items field, but changing -// the "items" slice itself will have no effect. -// TODO: expose SetUnstructuredContent on runtime.Unstructured that allows -// items to be changed. 
-func (obj *UnstructuredList) UnstructuredContent() map[string]interface{} { - out := obj.Object - if out == nil { - out = make(map[string]interface{}) - } - items := make([]interface{}, len(obj.Items)) - for i, item := range obj.Items { - items[i] = item.Object - } - out["items"] = items - return out +func (obj *Unstructured) SetUnstructuredContent(content map[string]interface{}) { + obj.Object = content } // MarshalJSON ensures that the unstructured object produces proper @@ -108,193 +126,67 @@ func (u *Unstructured) UnmarshalJSON(b []byte) error { return err } -func getNestedField(obj map[string]interface{}, fields ...string) interface{} { - var val interface{} = obj - for _, field := range fields { - if _, ok := val.(map[string]interface{}); !ok { - return nil - } - val = val.(map[string]interface{})[field] +func (in *Unstructured) DeepCopy() *Unstructured { + if in == nil { + return nil } - return val -} - -func getNestedString(obj map[string]interface{}, fields ...string) string { - if str, ok := getNestedField(obj, fields...).(string); ok { - return str - } - return "" -} - -func getNestedSlice(obj map[string]interface{}, fields ...string) []string { - if m, ok := getNestedField(obj, fields...).([]interface{}); ok { - strSlice := make([]string, 0, len(m)) - for _, v := range m { - if str, ok := v.(string); ok { - strSlice = append(strSlice, str) - } - } - return strSlice - } - return nil -} - -func getNestedMap(obj map[string]interface{}, fields ...string) map[string]string { - if m, ok := getNestedField(obj, fields...).(map[string]interface{}); ok { - strMap := make(map[string]string, len(m)) - for k, v := range m { - if str, ok := v.(string); ok { - strMap[k] = str - } - } - return strMap - } - return nil -} - -func setNestedField(obj map[string]interface{}, value interface{}, fields ...string) { - m := obj - if len(fields) > 1 { - for _, field := range fields[0 : len(fields)-1] { - if _, ok := m[field].(map[string]interface{}); !ok { - m[field] = make(map[string]interface{}) - } - m = m[field].(map[string]interface{}) - } - } - m[fields[len(fields)-1]] = value -} - -func setNestedSlice(obj map[string]interface{}, value []string, fields ...string) { - m := make([]interface{}, 0, len(value)) - for _, v := range value { - m = append(m, v) - } - setNestedField(obj, m, fields...) -} - -func setNestedMap(obj map[string]interface{}, value map[string]string, fields ...string) { - m := make(map[string]interface{}, len(value)) - for k, v := range value { - m[k] = v - } - setNestedField(obj, m, fields...) + out := new(Unstructured) + *out = *in + out.Object = runtime.DeepCopyJSON(in.Object) + return out } func (u *Unstructured) setNestedField(value interface{}, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } - setNestedField(u.Object, value, fields...) + SetNestedField(u.Object, value, fields...) } func (u *Unstructured) setNestedSlice(value []string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } - setNestedSlice(u.Object, value, fields...) + SetNestedStringSlice(u.Object, value, fields...) } func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } - setNestedMap(u.Object, value, fields...) -} - -func extractOwnerReference(src interface{}) metav1.OwnerReference { - v := src.(map[string]interface{}) - // though this field is a *bool, but when decoded from JSON, it's - // unmarshalled as bool. 
- var controllerPtr *bool - controller, ok := (getNestedField(v, "controller")).(bool) - if !ok { - controllerPtr = nil - } else { - controllerCopy := controller - controllerPtr = &controllerCopy - } - var blockOwnerDeletionPtr *bool - blockOwnerDeletion, ok := (getNestedField(v, "blockOwnerDeletion")).(bool) - if !ok { - blockOwnerDeletionPtr = nil - } else { - blockOwnerDeletionCopy := blockOwnerDeletion - blockOwnerDeletionPtr = &blockOwnerDeletionCopy - } - return metav1.OwnerReference{ - Kind: getNestedString(v, "kind"), - Name: getNestedString(v, "name"), - APIVersion: getNestedString(v, "apiVersion"), - UID: (types.UID)(getNestedString(v, "uid")), - Controller: controllerPtr, - BlockOwnerDeletion: blockOwnerDeletionPtr, - } -} - -func setOwnerReference(src metav1.OwnerReference) map[string]interface{} { - ret := make(map[string]interface{}) - controllerPtr := src.Controller - if controllerPtr != nil { - controller := *controllerPtr - controllerPtr = &controller - } - blockOwnerDeletionPtr := src.BlockOwnerDeletion - if blockOwnerDeletionPtr != nil { - blockOwnerDeletion := *blockOwnerDeletionPtr - blockOwnerDeletionPtr = &blockOwnerDeletion - } - setNestedField(ret, src.Kind, "kind") - setNestedField(ret, src.Name, "name") - setNestedField(ret, src.APIVersion, "apiVersion") - setNestedField(ret, string(src.UID), "uid") - setNestedField(ret, controllerPtr, "controller") - setNestedField(ret, blockOwnerDeletionPtr, "blockOwnerDeletion") - return ret -} - -func getOwnerReferences(object map[string]interface{}) ([]map[string]interface{}, error) { - field := getNestedField(object, "metadata", "ownerReferences") - if field == nil { - return nil, fmt.Errorf("cannot find field metadata.ownerReferences in %v", object) - } - ownerReferences, ok := field.([]map[string]interface{}) - if ok { - return ownerReferences, nil - } - // TODO: This is hacky... - interfaces, ok := field.([]interface{}) - if !ok { - return nil, fmt.Errorf("expect metadata.ownerReferences to be a slice in %#v", object) - } - ownerReferences = make([]map[string]interface{}, 0, len(interfaces)) - for i := 0; i < len(interfaces); i++ { - r, ok := interfaces[i].(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("expect element metadata.ownerReferences to be a map[string]interface{} in %#v", object) - } - ownerReferences = append(ownerReferences, r) - } - return ownerReferences, nil + SetNestedStringMap(u.Object, value, fields...) 
} func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference { - original, err := getOwnerReferences(u.Object) - if err != nil { - glog.V(6).Info(err) + field, found, err := NestedFieldNoCopy(u.Object, "metadata", "ownerReferences") + if !found || err != nil { + return nil + } + original, ok := field.([]interface{}) + if !ok { return nil } ret := make([]metav1.OwnerReference, 0, len(original)) - for i := 0; i < len(original); i++ { - ret = append(ret, extractOwnerReference(original[i])) + for _, obj := range original { + o, ok := obj.(map[string]interface{}) + if !ok { + // expected map[string]interface{}, got something else + return nil + } + ret = append(ret, extractOwnerReference(o)) } return ret } func (u *Unstructured) SetOwnerReferences(references []metav1.OwnerReference) { - var newReferences = make([]map[string]interface{}, 0, len(references)) - for i := 0; i < len(references); i++ { - newReferences = append(newReferences, setOwnerReference(references[i])) + newReferences := make([]interface{}, 0, len(references)) + for _, reference := range references { + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&reference) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to convert Owner Reference: %v", err)) + continue + } + newReferences = append(newReferences, out) } u.setNestedField(newReferences, "metadata", "ownerReferences") } @@ -355,6 +247,18 @@ func (u *Unstructured) SetResourceVersion(version string) { u.setNestedField(version, "metadata", "resourceVersion") } +func (u *Unstructured) GetGeneration() int64 { + val, found, err := NestedInt64(u.Object, "metadata", "generation") + if !found || err != nil { + return 0 + } + return val +} + +func (u *Unstructured) SetGeneration(generation int64) { + u.setNestedField(generation, "metadata", "generation") +} + func (u *Unstructured) GetSelfLink() string { return getNestedString(u.Object, "metadata", "selfLink") } @@ -363,6 +267,14 @@ func (u *Unstructured) SetSelfLink(selfLink string) { u.setNestedField(selfLink, "metadata", "selfLink") } +func (u *Unstructured) GetContinue() string { + return getNestedString(u.Object, "metadata", "continue") +} + +func (u *Unstructured) SetContinue(c string) { + u.setNestedField(c, "metadata", "continue") +} + func (u *Unstructured) GetCreationTimestamp() metav1.Time { var timestamp metav1.Time timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp")) @@ -371,6 +283,10 @@ func (u *Unstructured) GetCreationTimestamp() metav1.Time { func (u *Unstructured) SetCreationTimestamp(timestamp metav1.Time) { ts, _ := timestamp.MarshalQueryParameter() + if len(ts) == 0 || timestamp.Time.IsZero() { + RemoveNestedField(u.Object, "metadata", "creationTimestamp") + return + } u.setNestedField(ts, "metadata", "creationTimestamp") } @@ -384,12 +300,33 @@ func (u *Unstructured) GetDeletionTimestamp() *metav1.Time { } func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) { + if timestamp == nil { + RemoveNestedField(u.Object, "metadata", "deletionTimestamp") + return + } ts, _ := timestamp.MarshalQueryParameter() u.setNestedField(ts, "metadata", "deletionTimestamp") } +func (u *Unstructured) GetDeletionGracePeriodSeconds() *int64 { + val, found, err := NestedInt64(u.Object, "metadata", "deletionGracePeriodSeconds") + if !found || err != nil { + return nil + } + return &val +} + +func (u *Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) { + if deletionGracePeriodSeconds == nil { + 
RemoveNestedField(u.Object, "metadata", "deletionGracePeriodSeconds") + return + } + u.setNestedField(*deletionGracePeriodSeconds, "metadata", "deletionGracePeriodSeconds") +} + func (u *Unstructured) GetLabels() map[string]string { - return getNestedMap(u.Object, "metadata", "labels") + m, _, _ := NestedStringMap(u.Object, "metadata", "labels") + return m } func (u *Unstructured) SetLabels(labels map[string]string) { @@ -397,7 +334,8 @@ func (u *Unstructured) SetLabels(labels map[string]string) { } func (u *Unstructured) GetAnnotations() map[string]string { - return getNestedMap(u.Object, "metadata", "annotations") + m, _, _ := NestedStringMap(u.Object, "metadata", "annotations") + return m } func (u *Unstructured) SetAnnotations(annotations map[string]string) { @@ -418,8 +356,34 @@ func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind { return gvk } +func (u *Unstructured) GetInitializers() *metav1.Initializers { + m, found, err := nestedMapNoCopy(u.Object, "metadata", "initializers") + if !found || err != nil { + return nil + } + out := &metav1.Initializers{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, out); err != nil { + utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) + return nil + } + return out +} + +func (u *Unstructured) SetInitializers(initializers *metav1.Initializers) { + if initializers == nil { + RemoveNestedField(u.Object, "metadata", "initializers") + return + } + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(initializers) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) + } + u.setNestedField(out, "metadata", "initializers") +} + func (u *Unstructured) GetFinalizers() []string { - return getNestedSlice(u.Object, "metadata", "finalizers") + val, _, _ := NestedStringSlice(u.Object, "metadata", "finalizers") + return val } func (u *Unstructured) SetFinalizers(finalizers []string) { @@ -433,257 +397,3 @@ func (u *Unstructured) GetClusterName() string { func (u *Unstructured) SetClusterName(clusterName string) { u.setNestedField(clusterName, "metadata", "clusterName") } - -// UnstructuredList allows lists that do not have Golang structs -// registered to be manipulated generically. This can be used to deal -// with the API lists from a plug-in. -type UnstructuredList struct { - Object map[string]interface{} - - // Items is a list of unstructured objects. - Items []*Unstructured `json:"items"` -} - -// MarshalJSON ensures that the unstructured list object produces proper -// JSON when passed to Go's standard JSON library. -func (u *UnstructuredList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - err := UnstructuredJSONScheme.Encode(u, &buf) - return buf.Bytes(), err -} - -// UnmarshalJSON ensures that the unstructured list object properly -// decodes JSON when passed to Go's standard JSON library. -func (u *UnstructuredList) UnmarshalJSON(b []byte) error { - _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) - return err -} - -func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } - setNestedField(u.Object, value, fields...) 
-} - -func (u *UnstructuredList) GetAPIVersion() string { - return getNestedString(u.Object, "apiVersion") -} - -func (u *UnstructuredList) SetAPIVersion(version string) { - u.setNestedField(version, "apiVersion") -} - -func (u *UnstructuredList) GetKind() string { - return getNestedString(u.Object, "kind") -} - -func (u *UnstructuredList) SetKind(kind string) { - u.setNestedField(kind, "kind") -} - -func (u *UnstructuredList) GetResourceVersion() string { - return getNestedString(u.Object, "metadata", "resourceVersion") -} - -func (u *UnstructuredList) SetResourceVersion(version string) { - u.setNestedField(version, "metadata", "resourceVersion") -} - -func (u *UnstructuredList) GetSelfLink() string { - return getNestedString(u.Object, "metadata", "selfLink") -} - -func (u *UnstructuredList) SetSelfLink(selfLink string) { - u.setNestedField(selfLink, "metadata", "selfLink") -} - -func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { - u.SetAPIVersion(gvk.GroupVersion().String()) - u.SetKind(gvk.Kind) -} - -func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind { - gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) - if err != nil { - return schema.GroupVersionKind{} - } - gvk := gv.WithKind(u.GetKind()) - return gvk -} - -// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured -// type, which can be used for generic access to objects without a predefined scheme. -// TODO: move into serializer/json. -var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{} - -type unstructuredJSONScheme struct{} - -func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - var err error - if obj != nil { - err = s.decodeInto(data, obj) - } else { - obj, err = s.decode(data) - } - - if err != nil { - return nil, nil, err - } - - gvk := obj.GetObjectKind().GroupVersionKind() - if len(gvk.Kind) == 0 { - return nil, &gvk, runtime.NewMissingKindErr(string(data)) - } - - return obj, &gvk, nil -} - -func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { - switch t := obj.(type) { - case *Unstructured: - return json.NewEncoder(w).Encode(t.Object) - case *UnstructuredList: - items := make([]map[string]interface{}, 0, len(t.Items)) - for _, i := range t.Items { - items = append(items, i.Object) - } - t.Object["items"] = items - defer func() { delete(t.Object, "items") }() - return json.NewEncoder(w).Encode(t.Object) - case *runtime.Unknown: - // TODO: Unstructured needs to deal with ContentType. - _, err := w.Write(t.Raw) - return err - default: - return json.NewEncoder(w).Encode(t) - } -} - -func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { - type detector struct { - Items gojson.RawMessage - } - var det detector - if err := json.Unmarshal(data, &det); err != nil { - return nil, err - } - - if det.Items != nil { - list := &UnstructuredList{} - err := s.decodeToList(data, list) - return list, err - } - - // No Items field, so it wasn't a list. 
- unstruct := &Unstructured{} - err := s.decodeToUnstructured(data, unstruct) - return unstruct, err -} - -func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error { - switch x := obj.(type) { - case *Unstructured: - return s.decodeToUnstructured(data, x) - case *UnstructuredList: - return s.decodeToList(data, x) - case *runtime.VersionedObjects: - o, err := s.decode(data) - if err == nil { - x.Objects = []runtime.Object{o} - } - return err - default: - return json.Unmarshal(data, x) - } -} - -func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error { - m := make(map[string]interface{}) - if err := json.Unmarshal(data, &m); err != nil { - return err - } - - unstruct.Object = m - - return nil -} - -func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { - type decodeList struct { - Items []gojson.RawMessage - } - - var dList decodeList - if err := json.Unmarshal(data, &dList); err != nil { - return err - } - - if err := json.Unmarshal(data, &list.Object); err != nil { - return err - } - - // For typed lists, e.g., a PodList, API server doesn't set each item's - // APIVersion and Kind. We need to set it. - listAPIVersion := list.GetAPIVersion() - listKind := list.GetKind() - itemKind := strings.TrimSuffix(listKind, "List") - - delete(list.Object, "items") - list.Items = nil - for _, i := range dList.Items { - unstruct := &Unstructured{} - if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil { - return err - } - // This is hacky. Set the item's Kind and APIVersion to those inferred - // from the List. - if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 { - unstruct.SetKind(itemKind) - unstruct.SetAPIVersion(listAPIVersion) - } - list.Items = append(list.Items, unstruct) - } - return nil -} - -// UnstructuredObjectConverter is an ObjectConverter for use with -// Unstructured objects. Since it has no schema or type information, -// it will only succeed for no-op conversions. This is provided as a -// sane implementation for APIs that require an object converter. -type UnstructuredObjectConverter struct{} - -func (UnstructuredObjectConverter) Convert(in, out, context interface{}) error { - unstructIn, ok := in.(*Unstructured) - if !ok { - return fmt.Errorf("input type %T in not valid for unstructured conversion", in) - } - - unstructOut, ok := out.(*Unstructured) - if !ok { - return fmt.Errorf("output type %T in not valid for unstructured conversion", out) - } - - // maybe deep copy the map? It is documented in the - // ObjectConverter interface that this function is not - // guaranteeed to not mutate the input. Or maybe set the input - // object to nil. - unstructOut.Object = unstructIn.Object - return nil -} - -func (UnstructuredObjectConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { - if kind := in.GetObjectKind().GroupVersionKind(); !kind.Empty() { - gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{kind}) - if !ok { - // TODO: should this be a typed error? 
- return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) - } - in.GetObjectKind().SetGroupVersionKind(gvk) - } - return in, nil -} - -func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { - return "", "", errors.New("unstructured cannot convert field labels") -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go new file mode 100644 index 000000000..bf3fd023f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go @@ -0,0 +1,188 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unstructured + +import ( + "bytes" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var _ runtime.Unstructured = &UnstructuredList{} +var _ metav1.ListInterface = &UnstructuredList{} + +// UnstructuredList allows lists that do not have Golang structs +// registered to be manipulated generically. This can be used to deal +// with the API lists from a plug-in. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true +type UnstructuredList struct { + Object map[string]interface{} + + // Items is a list of unstructured objects. + Items []Unstructured `json:"items"` +} + +func (u *UnstructuredList) GetObjectKind() schema.ObjectKind { return u } + +func (u *UnstructuredList) IsList() bool { return true } + +func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { + for i := range u.Items { + if err := fn(&u.Items[i]); err != nil { + return err + } + } + return nil +} + +// UnstructuredContent returns a map contain an overlay of the Items field onto +// the Object field. Items always overwrites overlay. +func (u *UnstructuredList) UnstructuredContent() map[string]interface{} { + out := make(map[string]interface{}, len(u.Object)+1) + + // shallow copy every property + for k, v := range u.Object { + out[k] = v + } + + items := make([]interface{}, len(u.Items)) + for i, item := range u.Items { + items[i] = item.UnstructuredContent() + } + out["items"] = items + return out +} + +// SetUnstructuredContent obeys the conventions of List and keeps Items and the items +// array in sync. If items is not an array of objects in the incoming map, then any +// mismatched item will be removed. 
+func (obj *UnstructuredList) SetUnstructuredContent(content map[string]interface{}) { + obj.Object = content + if content == nil { + obj.Items = nil + return + } + items, ok := obj.Object["items"].([]interface{}) + if !ok || items == nil { + items = []interface{}{} + } + unstructuredItems := make([]Unstructured, 0, len(items)) + newItems := make([]interface{}, 0, len(items)) + for _, item := range items { + o, ok := item.(map[string]interface{}) + if !ok { + continue + } + unstructuredItems = append(unstructuredItems, Unstructured{Object: o}) + newItems = append(newItems, o) + } + obj.Items = unstructuredItems + obj.Object["items"] = newItems +} + +func (u *UnstructuredList) DeepCopy() *UnstructuredList { + if u == nil { + return nil + } + out := new(UnstructuredList) + *out = *u + out.Object = runtime.DeepCopyJSON(u.Object) + out.Items = make([]Unstructured, len(u.Items)) + for i := range u.Items { + u.Items[i].DeepCopyInto(&out.Items[i]) + } + return out +} + +// MarshalJSON ensures that the unstructured list object produces proper +// JSON when passed to Go's standard JSON library. +func (u *UnstructuredList) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := UnstructuredJSONScheme.Encode(u, &buf) + return buf.Bytes(), err +} + +// UnmarshalJSON ensures that the unstructured list object properly +// decodes JSON when passed to Go's standard JSON library. +func (u *UnstructuredList) UnmarshalJSON(b []byte) error { + _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) + return err +} + +func (u *UnstructuredList) GetAPIVersion() string { + return getNestedString(u.Object, "apiVersion") +} + +func (u *UnstructuredList) SetAPIVersion(version string) { + u.setNestedField(version, "apiVersion") +} + +func (u *UnstructuredList) GetKind() string { + return getNestedString(u.Object, "kind") +} + +func (u *UnstructuredList) SetKind(kind string) { + u.setNestedField(kind, "kind") +} + +func (u *UnstructuredList) GetResourceVersion() string { + return getNestedString(u.Object, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) SetResourceVersion(version string) { + u.setNestedField(version, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) GetSelfLink() string { + return getNestedString(u.Object, "metadata", "selfLink") +} + +func (u *UnstructuredList) SetSelfLink(selfLink string) { + u.setNestedField(selfLink, "metadata", "selfLink") +} + +func (u *UnstructuredList) GetContinue() string { + return getNestedString(u.Object, "metadata", "continue") +} + +func (u *UnstructuredList) SetContinue(c string) { + u.setNestedField(c, "metadata", "continue") +} + +func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) +} + +func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind { + gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) + if err != nil { + return schema.GroupVersionKind{} + } + gvk := gv.WithKind(u.GetKind()) + return gvk +} + +func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + SetNestedField(u.Object, value, fields...) 
+} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go new file mode 100644 index 000000000..9a9f25e8f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go @@ -0,0 +1,55 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package unstructured + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Unstructured) DeepCopyInto(out *Unstructured) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Unstructured) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnstructuredList) DeepCopyInto(out *UnstructuredList) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UnstructuredList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go index a645501a1..b7ec50318 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go @@ -26,6 +26,8 @@ import ( // Event represents a single event to a watched resource. // // +protobuf=true +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type WatchEvent struct { Type string `json:"type" protobuf:"bytes,1,opt,name=type"` @@ -78,3 +80,10 @@ type InternalEvent watch.Event func (e *InternalEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } func (e *WatchEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } +func (e *InternalEvent) DeepCopyObject() runtime.Object { + if c := e.DeepCopy(); c != nil { + return c + } else { + return nil + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/well_known_labels.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/well_known_labels.go deleted file mode 100644 index bd17546a4..000000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/well_known_labels.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -const ( - // If you add a new topology domain here, also consider adding it to the set of default values - // for the scheduler's --failure-domain command-line argument. - LabelHostname = "kubernetes.io/hostname" - LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" - LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" - - LabelInstanceType = "beta.kubernetes.io/instance-type" - - LabelOS = "beta.kubernetes.io/os" - LabelArch = "beta.kubernetes.io/arch" - - // Historically fluentd was a manifest pod the was migrated to DaemonSet. - // To avoid situation during cluster upgrade when there are two instances - // of fluentd running on a node, kubelet need to mark node on which - // fluentd in not running as a manifest pod with LabelFluentdDsReady. - LabelFluentdDsReady = "alpha.kubernetes.io/fluentd-ds-ready" - - // When feature-gate for TaintBasedEvictions=true flag is enabled, - // TaintNodeNotReady would be automatically added by node controller - // when node is not ready, and removed when node becomes ready. - TaintNodeNotReady = "node.alpha.kubernetes.io/notReady" - - // When feature-gate for TaintBasedEvictions=true flag is enabled, - // TaintNodeUnreachable would be automatically added by node controller - // when node becomes unreachable (corresponding to NodeReady status ConditionUnknown) - // and removed when node becomes reachable (NodeReady status ConditionTrue). - TaintNodeUnreachable = "node.alpha.kubernetes.io/unreachable" -) - -// Role labels are applied to Nodes to mark their purpose. In particular, we -// usually want to distinguish the master, so that we can isolate privileged -// pods and operations. -// -// Originally we relied on not registering the master, on the fact that the -// master was Unschedulable, and on static manifests for master components. -// But we now do register masters in many environments, are generally moving -// away from static manifests (for better manageability), and working towards -// deprecating the unschedulable field (replacing it with taints & tolerations -// instead). -// -// Even with tainting, a label remains the easiest way of making a positive -// selection, so that pods can schedule only to master nodes for example, and -// thus installations will likely define a label for their master nodes. -// -// So that we can recognize master nodes in consequent places though (such as -// kubectl get nodes), we encourage installations to use the well-known labels. -// We define NodeLabelRole, which is the preferred form, but we will also recognize -// other forms that are known to be in widespread use (NodeLabelKubeadmAlphaRole). - -const ( - // NodeLabelRole is the preferred label applied to a Node as a hint that it has a particular purpose (defined by the value). - NodeLabelRole = "kubernetes.io/role" - - // NodeLabelKubeadmAlphaRole is a label that kubeadm applies to a Node as a hint that it has a particular purpose. - // Use of NodeLabelRole is preferred. 
- NodeLabelKubeadmAlphaRole = "kubeadm.alpha.kubernetes.io/role" - - // NodeLabelRoleMaster is the value of a NodeLabelRole or NodeLabelKubeadmAlphaRole label, indicating a master node. - // A master node typically runs kubernetes system components and will not typically run user workloads. - NodeLabelRoleMaster = "master" - - // NodeLabelRoleNode is the value of a NodeLabelRole or NodeLabelKubeadmAlphaRole label, indicating a "normal" node, - // as opposed to a RoleMaster node. - NodeLabelRoleNode = "node" -) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index b5f7360e2..98dfea095 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,539 +16,935 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by deepcopy-gen. Do not edit it manually! +// Code generated by deepcopy-gen. DO NOT EDIT. package v1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: DeepCopy_v1_APIGroup, InType: reflect.TypeOf(&APIGroup{})}, - {Fn: DeepCopy_v1_APIGroupList, InType: reflect.TypeOf(&APIGroupList{})}, - {Fn: DeepCopy_v1_APIResource, InType: reflect.TypeOf(&APIResource{})}, - {Fn: DeepCopy_v1_APIResourceList, InType: reflect.TypeOf(&APIResourceList{})}, - {Fn: DeepCopy_v1_APIVersions, InType: reflect.TypeOf(&APIVersions{})}, - {Fn: DeepCopy_v1_DeleteOptions, InType: reflect.TypeOf(&DeleteOptions{})}, - {Fn: DeepCopy_v1_Duration, InType: reflect.TypeOf(&Duration{})}, - {Fn: DeepCopy_v1_ExportOptions, InType: reflect.TypeOf(&ExportOptions{})}, - {Fn: DeepCopy_v1_GetOptions, InType: reflect.TypeOf(&GetOptions{})}, - {Fn: DeepCopy_v1_GroupKind, InType: reflect.TypeOf(&GroupKind{})}, - {Fn: DeepCopy_v1_GroupResource, InType: reflect.TypeOf(&GroupResource{})}, - {Fn: DeepCopy_v1_GroupVersion, InType: reflect.TypeOf(&GroupVersion{})}, - {Fn: DeepCopy_v1_GroupVersionForDiscovery, InType: reflect.TypeOf(&GroupVersionForDiscovery{})}, - {Fn: DeepCopy_v1_GroupVersionKind, InType: reflect.TypeOf(&GroupVersionKind{})}, - {Fn: DeepCopy_v1_GroupVersionResource, InType: reflect.TypeOf(&GroupVersionResource{})}, - {Fn: DeepCopy_v1_InternalEvent, InType: reflect.TypeOf(&InternalEvent{})}, - {Fn: DeepCopy_v1_LabelSelector, InType: reflect.TypeOf(&LabelSelector{})}, - {Fn: DeepCopy_v1_LabelSelectorRequirement, InType: reflect.TypeOf(&LabelSelectorRequirement{})}, - {Fn: DeepCopy_v1_ListMeta, InType: reflect.TypeOf(&ListMeta{})}, - {Fn: DeepCopy_v1_ListOptions, InType: reflect.TypeOf(&ListOptions{})}, - {Fn: DeepCopy_v1_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, - {Fn: DeepCopy_v1_OwnerReference, InType: reflect.TypeOf(&OwnerReference{})}, - {Fn: DeepCopy_v1_Patch, InType: reflect.TypeOf(&Patch{})}, - {Fn: DeepCopy_v1_Preconditions, InType: 
reflect.TypeOf(&Preconditions{})}, - {Fn: DeepCopy_v1_RootPaths, InType: reflect.TypeOf(&RootPaths{})}, - {Fn: DeepCopy_v1_ServerAddressByClientCIDR, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})}, - {Fn: DeepCopy_v1_Status, InType: reflect.TypeOf(&Status{})}, - {Fn: DeepCopy_v1_StatusCause, InType: reflect.TypeOf(&StatusCause{})}, - {Fn: DeepCopy_v1_StatusDetails, InType: reflect.TypeOf(&StatusDetails{})}, - {Fn: DeepCopy_v1_Time, InType: reflect.TypeOf(&Time{})}, - {Fn: DeepCopy_v1_Timestamp, InType: reflect.TypeOf(&Timestamp{})}, - {Fn: DeepCopy_v1_TypeMeta, InType: reflect.TypeOf(&TypeMeta{})}, - {Fn: DeepCopy_v1_WatchEvent, InType: reflect.TypeOf(&WatchEvent{})}, +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIGroup) DeepCopyInto(out *APIGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]GroupVersionForDiscovery, len(*in)) + copy(*out, *in) } + out.PreferredVersion = in.PreferredVersion + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + copy(*out, *in) + } + return } -func DeepCopy_v1_APIGroup(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIGroup) - out := out.(*APIGroup) - *out = *in - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]GroupVersionForDiscovery, len(*in)) - copy(*out, *in) - } - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(*in)) - copy(*out, *in) - } +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroup. +func (in *APIGroup) DeepCopy() *APIGroup { + if in == nil { return nil } + out := new(APIGroup) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_APIGroupList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIGroupList) - out := out.(*APIGroupList) - *out = *in - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]APIGroup, len(*in)) - for i := range *in { - if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { - return err - } else { - (*out)[i] = *newVal.(*APIGroup) - } - } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIGroupList) DeepCopyInto(out *APIGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]APIGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupList. 
+func (in *APIGroupList) DeepCopy() *APIGroupList { + if in == nil { return nil } + out := new(APIGroupList) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_APIResource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIResource) - out := out.(*APIResource) - *out = *in - if in.Verbs != nil { - in, out := &in.Verbs, &out.Verbs - *out = make(Verbs, len(*in)) - copy(*out, *in) - } - if in.ShortNames != nil { - in, out := &in.ShortNames, &out.ShortNames - *out = make([]string, len(*in)) - copy(*out, *in) - } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResource) DeepCopyInto(out *APIResource) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make(Verbs, len(*in)) + copy(*out, *in) + } + if in.ShortNames != nil { + in, out := &in.ShortNames, &out.ShortNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResource. +func (in *APIResource) DeepCopy() *APIResource { + if in == nil { return nil } + out := new(APIResource) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_APIResourceList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIResourceList) - out := out.(*APIResourceList) - *out = *in - if in.APIResources != nil { - in, out := &in.APIResources, &out.APIResources - *out = make([]APIResource, len(*in)) - for i := range *in { - if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { - return err - } else { - (*out)[i] = *newVal.(*APIResource) - } - } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResourceList) DeepCopyInto(out *APIResourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.APIResources != nil { + in, out := &in.APIResources, &out.APIResources + *out = make([]APIResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceList. +func (in *APIResourceList) DeepCopy() *APIResourceList { + if in == nil { return nil } + out := new(APIResourceList) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_APIVersions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIVersions) - out := out.(*APIVersions) - *out = *in - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(*in)) - copy(*out, *in) - } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIResourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *APIVersions) DeepCopyInto(out *APIVersions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersions. +func (in *APIVersions) DeepCopy() *APIVersions { + if in == nil { return nil } + out := new(APIVersions) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_DeleteOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeleteOptions) - out := out.(*DeleteOptions) - *out = *in - if in.GracePeriodSeconds != nil { - in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIVersions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + if *in == nil { + *out = nil + } else { *out = new(int64) **out = **in } - if in.Preconditions != nil { - in, out := &in.Preconditions, &out.Preconditions - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*Preconditions) - } + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + if *in == nil { + *out = nil + } else { + *out = new(Preconditions) + (*in).DeepCopyInto(*out) } - if in.OrphanDependents != nil { - in, out := &in.OrphanDependents, &out.OrphanDependents + } + if in.OrphanDependents != nil { + in, out := &in.OrphanDependents, &out.OrphanDependents + if *in == nil { + *out = nil + } else { *out = new(bool) **out = **in } - if in.PropagationPolicy != nil { - in, out := &in.PropagationPolicy, &out.PropagationPolicy + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + if *in == nil { + *out = nil + } else { *out = new(DeletionPropagation) **out = **in } - return nil } + return } -func DeepCopy_v1_Duration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Duration) - out := out.(*Duration) - *out = *in +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteOptions. +func (in *DeleteOptions) DeepCopy() *DeleteOptions { + if in == nil { return nil } + out := new(DeleteOptions) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_ExportOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ExportOptions) - out := out.(*ExportOptions) - *out = *in +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeleteOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Duration) DeepCopyInto(out *Duration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Duration. +func (in *Duration) DeepCopy() *Duration { + if in == nil { return nil } + out := new(Duration) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_GetOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GetOptions) - out := out.(*GetOptions) - *out = *in +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportOptions) DeepCopyInto(out *ExportOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportOptions. +func (in *ExportOptions) DeepCopy() *ExportOptions { + if in == nil { return nil } + out := new(ExportOptions) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_GroupKind(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupKind) - out := out.(*GroupKind) - *out = *in +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExportOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GetOptions) DeepCopyInto(out *GetOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GetOptions. +func (in *GetOptions) DeepCopy() *GetOptions { + if in == nil { return nil } + out := new(GetOptions) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_GroupResource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupResource) - out := out.(*GroupResource) - *out = *in +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GetOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupKind) DeepCopyInto(out *GroupKind) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupKind. +func (in *GroupKind) DeepCopy() *GroupKind { + if in == nil { return nil } + out := new(GroupKind) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_GroupVersion(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupVersion) - out := out.(*GroupVersion) - *out = *in +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupResource) DeepCopyInto(out *GroupResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResource. 
+func (in *GroupResource) DeepCopy() *GroupResource { + if in == nil { return nil } + out := new(GroupResource) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_GroupVersionForDiscovery(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupVersionForDiscovery) - out := out.(*GroupVersionForDiscovery) - *out = *in +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersion) DeepCopyInto(out *GroupVersion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersion. +func (in *GroupVersion) DeepCopy() *GroupVersion { + if in == nil { return nil } + out := new(GroupVersion) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_GroupVersionKind(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupVersionKind) - out := out.(*GroupVersionKind) - *out = *in +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionForDiscovery) DeepCopyInto(out *GroupVersionForDiscovery) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionForDiscovery. +func (in *GroupVersionForDiscovery) DeepCopy() *GroupVersionForDiscovery { + if in == nil { return nil } + out := new(GroupVersionForDiscovery) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_GroupVersionResource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupVersionResource) - out := out.(*GroupVersionResource) - *out = *in +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionKind) DeepCopyInto(out *GroupVersionKind) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionKind. +func (in *GroupVersionKind) DeepCopy() *GroupVersionKind { + if in == nil { return nil } + out := new(GroupVersionKind) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_InternalEvent(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*InternalEvent) - out := out.(*InternalEvent) - *out = *in - // in.Object is kind 'Interface' - if in.Object != nil { - if newVal, err := c.DeepCopy(&in.Object); err != nil { - return err - } else { - out.Object = *newVal.(*runtime.Object) - } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionResource) DeepCopyInto(out *GroupVersionResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionResource. +func (in *GroupVersionResource) DeepCopy() *GroupVersionResource { + if in == nil { + return nil + } + out := new(GroupVersionResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Initializer) DeepCopyInto(out *Initializer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. 
+func (in *Initializer) DeepCopy() *Initializer { + if in == nil { + return nil + } + out := new(Initializer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Initializers) DeepCopyInto(out *Initializers) { + *out = *in + if in.Pending != nil { + in, out := &in.Pending, &out.Pending + *out = make([]Initializer, len(*in)) + copy(*out, *in) + } + if in.Result != nil { + in, out := &in.Result, &out.Result + if *in == nil { + *out = nil + } else { + *out = new(Status) + (*in).DeepCopyInto(*out) } - return nil } + return } -func DeepCopy_v1_LabelSelector(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LabelSelector) - out := out.(*LabelSelector) - *out = *in - if in.MatchLabels != nil { - in, out := &in.MatchLabels, &out.MatchLabels - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializers. +func (in *Initializers) DeepCopy() *Initializers { + if in == nil { + return nil + } + out := new(Initializers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InternalEvent) DeepCopyInto(out *InternalEvent) { + *out = *in + if in.Object == nil { + out.Object = nil + } else { + out.Object = in.Object.DeepCopyObject() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalEvent. +func (in *InternalEvent) DeepCopy() *InternalEvent { + if in == nil { + return nil + } + out := new(InternalEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelSelector) DeepCopyInto(out *LabelSelector) { + *out = *in + if in.MatchLabels != nil { + in, out := &in.MatchLabels, &out.MatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val } - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]LabelSelectorRequirement, len(*in)) - for i := range *in { - if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { - return err - } else { - (*out)[i] = *newVal.(*LabelSelectorRequirement) - } - } + } + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } - return nil } + return } -func DeepCopy_v1_LabelSelectorRequirement(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LabelSelectorRequirement) - out := out.(*LabelSelectorRequirement) - *out = *in - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelector. +func (in *LabelSelector) DeepCopy() *LabelSelector { + if in == nil { + return nil + } + out := new(LabelSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LabelSelectorRequirement) DeepCopyInto(out *LabelSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelectorRequirement. +func (in *LabelSelectorRequirement) DeepCopy() *LabelSelectorRequirement { + if in == nil { + return nil + } + out := new(LabelSelectorRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *List) DeepCopyInto(out *List) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } - return nil } + return } -func DeepCopy_v1_ListMeta(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ListMeta) - out := out.(*ListMeta) - *out = *in +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. +func (in *List) DeepCopy() *List { + if in == nil { return nil } + out := new(List) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_ListOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ListOptions) - out := out.(*ListOptions) - *out = *in - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListMeta) DeepCopyInto(out *ListMeta) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListMeta. +func (in *ListMeta) DeepCopy() *ListMeta { + if in == nil { + return nil + } + out := new(ListMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListOptions) DeepCopyInto(out *ListOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + if *in == nil { + *out = nil + } else { *out = new(int64) **out = **in } - return nil } + return } -func DeepCopy_v1_ObjectMeta(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ObjectMeta) - out := out.(*ObjectMeta) - *out = *in - out.CreationTimestamp = in.CreationTimestamp.DeepCopy() - if in.DeletionTimestamp != nil { - in, out := &in.DeletionTimestamp, &out.DeletionTimestamp - *out = new(Time) - **out = (*in).DeepCopy() +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions. +func (in *ListOptions) DeepCopy() *ListOptions { + if in == nil { + return nil + } + out := new(ListOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ListOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicroTime. +func (in *MicroTime) DeepCopy() *MicroTime { + if in == nil { + return nil + } + out := new(MicroTime) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { + *out = *in + in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp) + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() } - if in.DeletionGracePeriodSeconds != nil { - in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + if *in == nil { + *out = nil + } else { *out = new(int64) **out = **in } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - if in.OwnerReferences != nil { - in, out := &in.OwnerReferences, &out.OwnerReferences - *out = make([]OwnerReference, len(*in)) - for i := range *in { - if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { - return err - } else { - (*out)[i] = *newVal.(*OwnerReference) - } - } - } - if in.Finalizers != nil { - in, out := &in.Finalizers, &out.Finalizers - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]OwnerReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Initializers != nil { + in, out := &in.Initializers, &out.Initializers + if *in == nil { + *out = nil + } else { + *out = new(Initializers) + (*in).DeepCopyInto(*out) + } + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return } -func DeepCopy_v1_OwnerReference(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*OwnerReference) - out := out.(*OwnerReference) - *out = *in - if in.Controller != nil { - in, out := &in.Controller, &out.Controller +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta. +func (in *ObjectMeta) DeepCopy() *ObjectMeta { + if in == nil { + return nil + } + out := new(ObjectMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OwnerReference) DeepCopyInto(out *OwnerReference) { + *out = *in + if in.Controller != nil { + in, out := &in.Controller, &out.Controller + if *in == nil { + *out = nil + } else { *out = new(bool) **out = **in } - if in.BlockOwnerDeletion != nil { - in, out := &in.BlockOwnerDeletion, &out.BlockOwnerDeletion + } + if in.BlockOwnerDeletion != nil { + in, out := &in.BlockOwnerDeletion, &out.BlockOwnerDeletion + if *in == nil { + *out = nil + } else { *out = new(bool) **out = **in } - return nil } + return } -func DeepCopy_v1_Patch(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Patch) - out := out.(*Patch) - *out = *in +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerReference. +func (in *OwnerReference) DeepCopy() *OwnerReference { + if in == nil { return nil } + out := new(OwnerReference) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Preconditions) - out := out.(*Preconditions) - *out = *in - if in.UID != nil { - in, out := &in.UID, &out.UID +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Patch) DeepCopyInto(out *Patch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Patch. +func (in *Patch) DeepCopy() *Patch { + if in == nil { + return nil + } + out := new(Patch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Preconditions) DeepCopyInto(out *Preconditions) { + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + if *in == nil { + *out = nil + } else { *out = new(types.UID) **out = **in } - return nil } + return } -func DeepCopy_v1_RootPaths(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RootPaths) - out := out.(*RootPaths) - *out = *in - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]string, len(*in)) - copy(*out, *in) - } +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions. +func (in *Preconditions) DeepCopy() *Preconditions { + if in == nil { return nil } + out := new(Preconditions) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_ServerAddressByClientCIDR(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServerAddressByClientCIDR) - out := out.(*ServerAddressByClientCIDR) - *out = *in +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootPaths) DeepCopyInto(out *RootPaths) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootPaths. 
+func (in *RootPaths) DeepCopy() *RootPaths { + if in == nil { return nil } + out := new(RootPaths) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_Status(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Status) - out := out.(*Status) - *out = *in - if in.Details != nil { - in, out := &in.Details, &out.Details - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*StatusDetails) - } - } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerAddressByClientCIDR) DeepCopyInto(out *ServerAddressByClientCIDR) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerAddressByClientCIDR. +func (in *ServerAddressByClientCIDR) DeepCopy() *ServerAddressByClientCIDR { + if in == nil { return nil } + out := new(ServerAddressByClientCIDR) + in.DeepCopyInto(out) + return out } -func DeepCopy_v1_StatusCause(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StatusCause) - out := out.(*StatusCause) - *out = *in - return nil - } -} - -func DeepCopy_v1_StatusDetails(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StatusDetails) - out := out.(*StatusDetails) - *out = *in - if in.Causes != nil { - in, out := &in.Causes, &out.Causes - *out = make([]StatusCause, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_Time(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Time) - out := out.(*Time) - *out = in.DeepCopy() - return nil - } -} - -func DeepCopy_v1_Timestamp(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Timestamp) - out := out.(*Timestamp) - *out = *in - return nil - } -} - -func DeepCopy_v1_TypeMeta(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TypeMeta) - out := out.(*TypeMeta) - *out = *in - return nil - } -} - -func DeepCopy_v1_WatchEvent(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*WatchEvent) - out := out.(*WatchEvent) - *out = *in - if newVal, err := c.DeepCopy(&in.Object); err != nil { - return err +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Status) DeepCopyInto(out *Status) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Details != nil { + in, out := &in.Details, &out.Details + if *in == nil { + *out = nil } else { - out.Object = *newVal.(*runtime.RawExtension) + *out = new(StatusDetails) + (*in).DeepCopyInto(*out) } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status. +func (in *Status) DeepCopy() *Status { + if in == nil { return nil } + out := new(Status) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Status) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusCause) DeepCopyInto(out *StatusCause) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusCause. 
+func (in *StatusCause) DeepCopy() *StatusCause { + if in == nil { + return nil + } + out := new(StatusCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusDetails) DeepCopyInto(out *StatusDetails) { + *out = *in + if in.Causes != nil { + in, out := &in.Causes, &out.Causes + *out = make([]StatusCause, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusDetails. +func (in *StatusDetails) DeepCopy() *StatusDetails { + if in == nil { + return nil + } + out := new(StatusDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. +func (in *Time) DeepCopy() *Time { + if in == nil { + return nil + } + out := new(Time) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Timestamp) DeepCopyInto(out *Timestamp) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Timestamp. +func (in *Timestamp) DeepCopy() *Timestamp { + if in == nil { + return nil + } + out := new(Timestamp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Verbs) DeepCopyInto(out *Verbs) { + { + in := &in + *out = make(Verbs, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Verbs. +func (in Verbs) DeepCopy() Verbs { + if in == nil { + return nil + } + out := new(Verbs) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WatchEvent) DeepCopyInto(out *WatchEvent) { + *out = *in + in.Object.DeepCopyInto(&out.Object) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchEvent. +func (in *WatchEvent) DeepCopy() *WatchEvent { + if in == nil { + return nil + } + out := new(WatchEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WatchEvent) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go index 6df448eb9..cce2e603a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by defaulter-gen. Do not edit it manually! +// Code generated by defaulter-gen. DO NOT EDIT. 
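Note: the hunks above replace the reflection-driven DeepCopy_v1_* helpers with typed, generated DeepCopy/DeepCopyInto/DeepCopyObject methods. A minimal usage sketch, assuming the conventional metav1 import alias (the helper below is illustrative and not taken from the vendored files):

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
    )

    // copyStatus returns both a typed copy and an interface copy of a Status,
    // using the generated methods instead of conversion.Cloner.
    func copyStatus(in *metav1.Status) (*metav1.Status, runtime.Object) {
        typed := in.DeepCopy()         // typed copy, safe to mutate independently
        generic := in.DeepCopyObject() // copy via runtime.Object, for callers that only know the interface
        return typed, generic
    }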
package v1 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go new file mode 100644 index 000000000..f3e5e4c98 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import "k8s.io/apimachinery/pkg/conversion" + +// Convert_Slice_string_To_v1beta1_IncludeObjectPolicy allows converting a URL query parameter value +func Convert_Slice_string_To_v1beta1_IncludeObjectPolicy(input *[]string, out *IncludeObjectPolicy, s conversion.Scope) error { + if len(*input) > 0 { + *out = IncludeObjectPolicy((*input)[0]) + } + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go similarity index 56% rename from vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/defaults.go rename to vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go index cd6a29d33..3b2bedd92 100644 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/defaults.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go @@ -18,14 +18,27 @@ package v1beta1 import "k8s.io/apimachinery/pkg/runtime" -func addDefaultingFuncs(scheme *runtime.Scheme) error { - RegisterDefaults(scheme) - return scheme.AddDefaultingFuncs( - SetDefaults_CertificateSigningRequestSpec, - ) -} -func SetDefaults_CertificateSigningRequestSpec(obj *CertificateSigningRequestSpec) { - if obj.Usages == nil { - obj.Usages = []KeyUsage{UsageDigitalSignature, UsageKeyEncipherment} +func (in *TableRow) DeepCopy() *TableRow { + if in == nil { + return nil } + + out := new(TableRow) + + if in.Cells != nil { + out.Cells = make([]interface{}, len(in.Cells)) + for i := range in.Cells { + out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i]) + } + } + + if in.Conditions != nil { + out.Conditions = make([]TableRowCondition, len(in.Conditions)) + for i := range in.Conditions { + in.Conditions[i].DeepCopyInto(&out.Conditions[i]) + } + } + + in.Object.DeepCopyInto(&out.Object) + return out } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go new file mode 100644 index 000000000..dc461cc29 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=meta.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go new file mode 100644 index 000000000..4e427b3b3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -0,0 +1,633 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto + + It has these top-level messages: + PartialObjectMetadata + PartialObjectMetadataList + TableOptions +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } +func (*PartialObjectMetadata) ProtoMessage() {} +func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } +func (*PartialObjectMetadataList) ProtoMessage() {} +func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *TableOptions) Reset() { *m = TableOptions{} } +func (*TableOptions) ProtoMessage() {} +func (*TableOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadata") + proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList") + proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.TableOptions") +} +func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + return i, nil +} + +func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TableOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) + i += copy(dAtA[i:], m.IncludeObject) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PartialObjectMetadata) Size() (n int) { + var l int + _ = l + l = 
m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PartialObjectMetadataList) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TableOptions) Size() (n int) { + var l int + _ = l + l = len(m.IncludeObject) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PartialObjectMetadata) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PartialObjectMetadata{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PartialObjectMetadataList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PartialObjectMetadataList{`, + `Items:` + strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "PartialObjectMetadata", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TableOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TableOptions{`, + `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &PartialObjectMetadata{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TableOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + 
if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 391 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xbd, 0x6e, 0xd4, 0x40, + 0x10, 0xc7, 0xbd, 0x42, 0x11, 0x64, 0x43, 0x1a, 0x23, 0xa4, 0x70, 0xc5, 0x3a, 0xba, 0x2a, 0x48, + 0x64, 0x97, 0x04, 0x84, 0x28, 0x91, 0xbb, 0x48, 0xa0, 0x44, 0x16, 0x15, 0x15, 0x6b, 0x7b, 0xf0, + 0x2d, 0xb6, 0x77, 0xad, 0xdd, 0x71, 0xa4, 0x6b, 0x10, 0x8f, 0xc0, 0x63, 0x5d, 0x99, 0x32, 0x95, + 0xc5, 0x99, 0xb7, 0xa0, 0x42, 0xfe, 0x10, 0xf9, 0xb8, 0x3b, 0xe5, 0xba, 0x99, 0xff, 0xe8, 0xf7, + 0xf3, 0x8c, 0x97, 0x46, 0xf9, 0x7b, 0xc7, 0x95, 0x11, 0x79, 0x1d, 0x83, 0xd5, 0x80, 0xe0, 0xc4, + 0x25, 0xe8, 0xd4, 0x58, 0x31, 0x0e, 0x64, 0xa5, 0x4a, 0x99, 0xcc, 0x94, 0x06, 0x3b, 0x17, 0x55, + 0x9e, 0x75, 0x81, 0x13, 0x25, 0xa0, 0x14, 0x97, 0x27, 0x31, 0xa0, 0x3c, 0x11, 0x19, 0x68, 0xb0, + 0x12, 0x21, 0xe5, 0x95, 0x35, 0x68, 0xfc, 0x97, 0x03, 0xca, 0x6f, 0xa3, 0xbc, 0xca, 0xb3, 0x2e, + 0x70, 0xbc, 0x43, 0xf9, 0x88, 0x4e, 0x8e, 0x33, 0x85, 0xb3, 0x3a, 0xe6, 0x89, 0x29, 0x45, 0x66, + 0x32, 0x23, 0x7a, 0x43, 0x5c, 0x7f, 0xeb, 0xbb, 0xbe, 0xe9, 0xab, 0xc1, 0x3c, 0x79, 0xbb, 0xcd, + 0x52, 0xf7, 0xf7, 0x99, 0x6c, 0x3c, 0xc5, 0xd6, 0x1a, 0x55, 0x09, 0x2b, 0xc0, 0xbb, 0x87, 0x00, + 0x97, 0xcc, 0xa0, 0x94, 0x2b, 0xdc, 0x9b, 0x4d, 0x5c, 0x8d, 0xaa, 0x10, 0x4a, 0xa3, 0x43, 0x7b, + 0x1f, 0x9a, 0xce, 0xe9, 0xf3, 0x0b, 0x69, 0x51, 0xc9, 0xe2, 0x3c, 0xfe, 0x0e, 0x09, 0x7e, 0x02, + 0x94, 0xa9, 0x44, 0xe9, 0x7f, 0xa5, 0x4f, 0xca, 0xb1, 0x3e, 0x20, 0x87, 0xe4, 0x68, 0xef, 0xf4, + 0x35, 0xdf, 0xe6, 0xcf, 0xf2, 0x1b, 0x4f, 0xe8, 0x2f, 0x9a, 0xc0, 0x6b, 0x9b, 0x80, 0xde, 0x64, + 0xd1, 0x7f, 0xeb, 0xf4, 0x07, 0x7d, 0xb1, 0xf6, 0xd3, 0x1f, 0x95, 0x43, 0x5f, 0xd2, 0x1d, 0x85, + 0x50, 0xba, 0x03, 0x72, 0xf8, 
0xe8, 0x68, 0xef, 0xf4, 0x03, 0xdf, 0xfa, 0x55, 0xf9, 0x5a, 0x69, + 0xb8, 0xdb, 0x36, 0xc1, 0xce, 0x59, 0xa7, 0x8c, 0x06, 0xf3, 0x34, 0xa6, 0x4f, 0x3f, 0xcb, 0xb8, + 0x80, 0xf3, 0x0a, 0x95, 0xd1, 0xce, 0x8f, 0xe8, 0xbe, 0xd2, 0x49, 0x51, 0xa7, 0x30, 0xa0, 0xfd, + 0xd9, 0xbb, 0xe1, 0xab, 0xf1, 0x88, 0xfd, 0xb3, 0xdb, 0xc3, 0xbf, 0x4d, 0xf0, 0xec, 0x4e, 0x70, + 0x61, 0x0a, 0x95, 0xcc, 0xa3, 0xbb, 0x8a, 0xf0, 0x78, 0xb1, 0x64, 0xde, 0xd5, 0x92, 0x79, 0xd7, + 0x4b, 0xe6, 0xfd, 0x6c, 0x19, 0x59, 0xb4, 0x8c, 0x5c, 0xb5, 0x8c, 0x5c, 0xb7, 0x8c, 0xfc, 0x6e, + 0x19, 0xf9, 0xf5, 0x87, 0x79, 0x5f, 0x1e, 0x8f, 0xab, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x73, + 0xdf, 0x3a, 0x0c, 0x10, 0x03, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto new file mode 100644 index 000000000..472902ad3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.apis.meta.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadata { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadataList { + // items contains each of the included items. + repeated PartialObjectMetadata items = 1; +} + +// TableOptions are used when a Table is requested by the caller. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message TableOptions { + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1beta1 of the meta.k8s.io API group. 
+ optional string includeObject = 1; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go similarity index 54% rename from vendor/k8s.io/client-go/pkg/apis/authorization/register.go rename to vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go index 5693885e4..d13254b41 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,39 +14,44 @@ See the License for the specific language governing permissions and limitations under the License. */ -package authorization +package v1beta1 import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -// GroupName is the group name use in this package -const GroupName = "authorization.k8s.io" +// GroupName is the group name for this API. +const GroupName = "meta.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} // Kind takes an unqualified kind and returns a Group qualified GroupKind func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} +// scheme is the registry for the common types that adhere to the meta v1beta1 API spec. +var scheme = runtime.NewScheme() -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) +// ParameterCodec knows about query parameters used with the meta v1beta1 API spec. +var ParameterCodec = runtime.NewParameterCodec(scheme) -func addKnownTypes(scheme *runtime.Scheme) error { +func init() { scheme.AddKnownTypes(SchemeGroupVersion, - &SelfSubjectAccessReview{}, - &SubjectAccessReview{}, - &LocalSubjectAccessReview{}, + &Table{}, + &TableOptions{}, + &PartialObjectMetadata{}, + &PartialObjectMetadataList{}, ) - return nil + + if err := scheme.AddConversionFuncs( + Convert_Slice_string_To_v1beta1_IncludeObjectPolicy, + ); err != nil { + panic(err) + } + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + //scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go new file mode 100644 index 000000000..344c533e1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package v1beta1 is alpha objects from meta that will be introduced. +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf +// generation to support a meta type that can accept any valid JSON. + +// Table is a tabular representation of a set of API resources. The server transforms the +// object into a set of preferred columns for quickly reviewing the objects. +// +protobuf=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type Table struct { + v1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + v1.ListMeta `json:"metadata,omitempty"` + + // columnDefinitions describes each column in the returned items array. The number of cells per row + // will always match the number of column definitions. + ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` + // rows is the list of items in the table. + Rows []TableRow `json:"rows"` +} + +// TableColumnDefinition contains information about a column returned in the Table. +// +protobuf=false +type TableColumnDefinition struct { + // name is a human readable name for the column. + Name string `json:"name"` + // type is an OpenAPI type definition for this column. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Type string `json:"type"` + // format is an optional OpenAPI type definition for this column. The 'name' format is applied + // to the primary identifier column to assist in clients identifying column is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Format string `json:"format"` + // description is a human readable description of this column. + Description string `json:"description"` + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a higher priority. + Priority int32 `json:"priority"` +} + +// TableRow is an individual row in a table. +// +protobuf=false +type TableRow struct { + // cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple + // maps, or lists, or null. See the type field of the column definition for a more detailed description. + Cells []interface{} `json:"cells"` + // conditions describe additional status of a row that are relevant for a human user. + // +optional + Conditions []TableRowCondition `json:"conditions,omitempty"` + // This field contains the requested additional information about each object based on the includeObject + // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the + // default serialization of the object for the current API version, and if "Metadata" (the default) will + // contain the object metadata. Check the returned kind and apiVersion of the object before parsing. + // +optional + Object runtime.RawExtension `json:"object,omitempty"` +} + +// TableRowCondition allows a row to be marked with additional information. +// +protobuf=false +type TableRowCondition struct { + // Type of row condition. 
+ Type RowConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status"` + // (brief) machine readable reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +type RowConditionType string + +// These are valid conditions of a row. This list is not exhaustive and new conditions may be +// included by other resources. +const ( + // RowCompleted means the underlying resource has reached completion and may be given less + // visual priority than other resources. + RowCompleted RowConditionType = "Completed" +) + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// IncludeObjectPolicy controls which portion of the object is returned with a Table. +type IncludeObjectPolicy string + +const ( + // IncludeNone returns no object. + IncludeNone IncludeObjectPolicy = "None" + // IncludeMetadata serializes the object containing only its metadata field. + IncludeMetadata IncludeObjectPolicy = "Metadata" + // IncludeObject contains the full object. + IncludeObject IncludeObjectPolicy = "Object" +) + +// TableOptions are used when a Table is requested by the caller. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type TableOptions struct { + v1.TypeMeta `json:",inline"` + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1beta1 of the meta.k8s.io API group. + IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` +} + +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadata struct { + v1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadataList struct { + v1.TypeMeta `json:",inline"` + + // items contains each of the included items. 
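Note: the IncludeObjectPolicy values and the TableOptions type above are what the ?includeObject= query parameter decodes into, via the ParameterCodec and the Convert_Slice_string_To_v1beta1_IncludeObjectPolicy func registered in register.go. A hedged sketch of that round trip (the helper name is illustrative, not part of the vendored code):

    import (
        "net/url"

        metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
    )

    // decodeTableOptions decodes raw query parameters such as includeObject=None
    // into a typed TableOptions using the v1beta1 ParameterCodec.
    func decodeTableOptions(query url.Values) (*metav1beta1.TableOptions, error) {
        opts := &metav1beta1.TableOptions{}
        if err := metav1beta1.ParameterCodec.DecodeParameters(query, metav1beta1.SchemeGroupVersion, opts); err != nil {
            return nil, err
        }
        // For includeObject=None, opts.IncludeObject should equal metav1beta1.IncludeNone.
        return opts, nil
    }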
+ Items []*PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..7394535d9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,104 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_PartialObjectMetadata = map[string]string{ + "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", +} + +func (PartialObjectMetadata) SwaggerDoc() map[string]string { + return map_PartialObjectMetadata +} + +var map_PartialObjectMetadataList = map[string]string{ + "": "PartialObjectMetadataList contains a list of objects containing only their metadata", + "items": "items contains each of the included items.", +} + +func (PartialObjectMetadataList) SwaggerDoc() map[string]string { + return map_PartialObjectMetadataList +} + +var map_Table = map[string]string{ + "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", + "rows": "rows is the list of items in the table.", +} + +func (Table) SwaggerDoc() map[string]string { + return map_Table +} + +var map_TableColumnDefinition = map[string]string{ + "": "TableColumnDefinition contains information about a column returned in the Table.", + "name": "name is a human readable name for the column.", + "type": "type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "format": "format is an optional OpenAPI type definition for this column. 
The 'name' format is applied to the primary identifier column to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "description": "description is a human readable description of this column.", + "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.", +} + +func (TableColumnDefinition) SwaggerDoc() map[string]string { + return map_TableColumnDefinition +} + +var map_TableOptions = map[string]string{ + "": "TableOptions are used when a Table is requested by the caller.", + "includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", +} + +func (TableOptions) SwaggerDoc() map[string]string { + return map_TableOptions +} + +var map_TableRow = map[string]string{ + "": "TableRow is an individual row in a table.", + "cells": "cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple maps, or lists, or null. See the type field of the column definition for a more detailed description.", + "conditions": "conditions describe additional status of a row that are relevant for a human user.", + "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing.", +} + +func (TableRow) SwaggerDoc() map[string]string { + return map_TableRow +} + +var map_TableRowCondition = map[string]string{ + "": "TableRowCondition allows a row to be marked with additional information.", + "type": "Type of row condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "reason": "(brief) machine readable reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (TableRowCondition) SwaggerDoc() map[string]string { + return map_TableRowCondition +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..2e79a131f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,190 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata. +func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata { + if in == nil { + return nil + } + out := new(PartialObjectMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*PartialObjectMetadata, len(*in)) + for i := range *in { + if (*in)[i] == nil { + (*out)[i] = nil + } else { + (*out)[i] = new(PartialObjectMetadata) + (*in)[i].DeepCopyInto((*out)[i]) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList. +func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList { + if in == nil { + return nil + } + out := new(PartialObjectMetadataList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Table) DeepCopyInto(out *Table) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.ColumnDefinitions != nil { + in, out := &in.ColumnDefinitions, &out.ColumnDefinitions + *out = make([]TableColumnDefinition, len(*in)) + copy(*out, *in) + } + if in.Rows != nil { + in, out := &in.Rows, &out.Rows + *out = make([]TableRow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. +func (in *Table) DeepCopy() *Table { + if in == nil { + return nil + } + out := new(Table) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Table) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition. 
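Note: because TableRow.Cells is []interface{}, its copy goes through the hand-written TableRow.DeepCopy in deepcopy.go (built on runtime.DeepCopyJSONValue); the generated TableRow.DeepCopyInto further below simply delegates to it. A small sketch of the effect, with illustrative cell values:

    import metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"

    // cloneRow shows that nested JSON-style values in Cells are copied, not shared.
    // Only JSON-compatible values (string, int64, float64, bool, maps, slices, nil)
    // are expected in Cells, hence the explicit int64.
    func cloneRow() (orig, copied metav1beta1.TableRow) {
        orig = metav1beta1.TableRow{
            Cells: []interface{}{"nginx-1", int64(3), map[string]interface{}{"ready": true}},
        }
        copied = *orig.DeepCopy()
        copied.Cells[2].(map[string]interface{})["ready"] = false // orig.Cells is unaffected
        return orig, copied
    }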
+func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition { + if in == nil { + return nil + } + out := new(TableColumnDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableOptions) DeepCopyInto(out *TableOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions. +func (in *TableOptions) DeepCopy() *TableOptions { + if in == nil { + return nil + } + out := new(TableOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TableOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableRow) DeepCopyInto(out *TableRow) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition. +func (in *TableRowCondition) DeepCopy() *TableRowCondition { + if in == nil { + return nil + } + out := new(TableRowCondition) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go similarity index 88% rename from vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.defaults.go rename to vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go index e24e70be3..73e63fc11 100644 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by defaulter-gen. Do not edit it manually! +// Code generated by defaulter-gen. DO NOT EDIT. package v1beta1 diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/OWNERS b/vendor/k8s.io/apimachinery/pkg/conversion/OWNERS deleted file mode 100644 index c25e7faf2..000000000 --- a/vendor/k8s.io/apimachinery/pkg/conversion/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -approvers: -- derekwaynecarr -- lavalamp -- smarterclayton -- wojtek-t -reviewers: -- derekwaynecarr -- lavalamp -- smarterclayton -- wojtek-t diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go b/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go deleted file mode 100644 index c5dec1f31..000000000 --- a/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go +++ /dev/null @@ -1,249 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package conversion - -import ( - "fmt" - "reflect" -) - -// Cloner knows how to copy one type to another. -type Cloner struct { - // Map from the type to a function which can do the deep copy. - deepCopyFuncs map[reflect.Type]reflect.Value - generatedDeepCopyFuncs map[reflect.Type]func(in interface{}, out interface{}, c *Cloner) error -} - -// NewCloner creates a new Cloner object. -func NewCloner() *Cloner { - c := &Cloner{ - deepCopyFuncs: map[reflect.Type]reflect.Value{}, - generatedDeepCopyFuncs: map[reflect.Type]func(in interface{}, out interface{}, c *Cloner) error{}, - } - if err := c.RegisterDeepCopyFunc(byteSliceDeepCopy); err != nil { - // If one of the deep-copy functions is malformed, detect it immediately. - panic(err) - } - return c -} - -// Prevent recursing into every byte... -func byteSliceDeepCopy(in *[]byte, out *[]byte, c *Cloner) error { - if *in != nil { - *out = make([]byte, len(*in)) - copy(*out, *in) - } else { - *out = nil - } - return nil -} - -// Verifies whether a deep-copy function has a correct signature. -func verifyDeepCopyFunctionSignature(ft reflect.Type) error { - if ft.Kind() != reflect.Func { - return fmt.Errorf("expected func, got: %v", ft) - } - if ft.NumIn() != 3 { - return fmt.Errorf("expected three 'in' params, got %v", ft) - } - if ft.NumOut() != 1 { - return fmt.Errorf("expected one 'out' param, got %v", ft) - } - if ft.In(0).Kind() != reflect.Ptr { - return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft) - } - if ft.In(1) != ft.In(0) { - return fmt.Errorf("expected 'in' param 0 the same as param 1, got: %v", ft) - } - var forClonerType Cloner - if expected := reflect.TypeOf(&forClonerType); ft.In(2) != expected { - return fmt.Errorf("expected '%v' arg for 'in' param 2, got: '%v'", expected, ft.In(2)) - } - var forErrorType error - // This convolution is necessary, otherwise TypeOf picks up on the fact - // that forErrorType is nil - errorType := reflect.TypeOf(&forErrorType).Elem() - if ft.Out(0) != errorType { - return fmt.Errorf("expected error return, got: %v", ft) - } - return nil -} - -// RegisterGeneratedDeepCopyFunc registers a copying func with the Cloner. -// deepCopyFunc must take three parameters: a type input, a pointer to a -// type output, and a pointer to Cloner. It should return an error. -// -// Example: -// c.RegisterGeneratedDeepCopyFunc( -// func(in Pod, out *Pod, c *Cloner) error { -// // deep copy logic... -// return nil -// }) -func (c *Cloner) RegisterDeepCopyFunc(deepCopyFunc interface{}) error { - fv := reflect.ValueOf(deepCopyFunc) - ft := fv.Type() - if err := verifyDeepCopyFunctionSignature(ft); err != nil { - return err - } - c.deepCopyFuncs[ft.In(0)] = fv - return nil -} - -// GeneratedDeepCopyFunc bundles an untyped generated deep-copy function of a type -// with a reflection type object used as a key to lookup the deep-copy function. -type GeneratedDeepCopyFunc struct { - Fn func(in interface{}, out interface{}, c *Cloner) error - InType reflect.Type -} - -// Similar to RegisterDeepCopyFunc, but registers deep copy function that were -// automatically generated. 
-func (c *Cloner) RegisterGeneratedDeepCopyFunc(fn GeneratedDeepCopyFunc) error { - c.generatedDeepCopyFuncs[fn.InType] = fn.Fn - return nil -} - -// DeepCopy will perform a deep copy of a given object. -func (c *Cloner) DeepCopy(in interface{}) (interface{}, error) { - // Can be invalid if we run DeepCopy(X) where X is a nil interface type. - // For example, we get an invalid value when someone tries to deep-copy - // a nil labels.Selector. - // This does not occur if X is nil and is a pointer to a concrete type. - if in == nil { - return nil, nil - } - inValue := reflect.ValueOf(in) - outValue, err := c.deepCopy(inValue) - if err != nil { - return nil, err - } - return outValue.Interface(), nil -} - -func (c *Cloner) deepCopy(src reflect.Value) (reflect.Value, error) { - inType := src.Type() - - switch src.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - if src.IsNil() { - return src, nil - } - } - - if fv, ok := c.deepCopyFuncs[inType]; ok { - return c.customDeepCopy(src, fv) - } - if fv, ok := c.generatedDeepCopyFuncs[inType]; ok { - var outValue reflect.Value - outValue = reflect.New(inType.Elem()) - err := fv(src.Interface(), outValue.Interface(), c) - return outValue, err - } - return c.defaultDeepCopy(src) -} - -func (c *Cloner) customDeepCopy(src, fv reflect.Value) (reflect.Value, error) { - outValue := reflect.New(src.Type().Elem()) - args := []reflect.Value{src, outValue, reflect.ValueOf(c)} - result := fv.Call(args)[0].Interface() - // This convolution is necessary because nil interfaces won't convert - // to error. - if result == nil { - return outValue, nil - } - return outValue, result.(error) -} - -func (c *Cloner) defaultDeepCopy(src reflect.Value) (reflect.Value, error) { - switch src.Kind() { - case reflect.Chan, reflect.Func, reflect.UnsafePointer, reflect.Uintptr: - return src, fmt.Errorf("cannot deep copy kind: %s", src.Kind()) - case reflect.Array: - dst := reflect.New(src.Type()) - for i := 0; i < src.Len(); i++ { - copyVal, err := c.deepCopy(src.Index(i)) - if err != nil { - return src, err - } - dst.Elem().Index(i).Set(copyVal) - } - return dst.Elem(), nil - case reflect.Interface: - if src.IsNil() { - return src, nil - } - return c.deepCopy(src.Elem()) - case reflect.Map: - if src.IsNil() { - return src, nil - } - dst := reflect.MakeMap(src.Type()) - for _, k := range src.MapKeys() { - copyVal, err := c.deepCopy(src.MapIndex(k)) - if err != nil { - return src, err - } - dst.SetMapIndex(k, copyVal) - } - return dst, nil - case reflect.Ptr: - if src.IsNil() { - return src, nil - } - dst := reflect.New(src.Type().Elem()) - copyVal, err := c.deepCopy(src.Elem()) - if err != nil { - return src, err - } - dst.Elem().Set(copyVal) - return dst, nil - case reflect.Slice: - if src.IsNil() { - return src, nil - } - dst := reflect.MakeSlice(src.Type(), 0, src.Len()) - for i := 0; i < src.Len(); i++ { - copyVal, err := c.deepCopy(src.Index(i)) - if err != nil { - return src, err - } - dst = reflect.Append(dst, copyVal) - } - return dst, nil - case reflect.Struct: - dst := reflect.New(src.Type()) - for i := 0; i < src.NumField(); i++ { - if !dst.Elem().Field(i).CanSet() { - // Can't set private fields. At this point, the - // best we can do is a shallow copy. For - // example, time.Time is a value type with - // private members that can be shallow copied. 
- return src, nil - } - copyVal, err := c.deepCopy(src.Field(i)) - if err != nil { - return src, err - } - dst.Elem().Field(i).Set(copyVal) - } - return dst.Elem(), nil - - default: - // Value types like numbers, booleans, and strings. - return src, nil - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go index 9ab468ebe..7854c207c 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go @@ -66,12 +66,6 @@ type Converter struct { // source field name and type to look for. structFieldSources map[typeNamePair][]typeNamePair - // Map from a type to a function which applies defaults. - defaultingFuncs map[reflect.Type]reflect.Value - - // Similar to above, but function is stored as interface{}. - defaultingInterfaces map[reflect.Type]interface{} - // Map from an input type to a function which can apply a key name mapping inputFieldMappingFuncs map[reflect.Type]FieldMappingFunc @@ -93,8 +87,6 @@ func NewConverter(nameFn NameFunc) *Converter { conversionFuncs: NewConversionFuncs(), generatedConversionFuncs: NewConversionFuncs(), ignoredConversions: make(map[typePair]struct{}), - defaultingFuncs: make(map[reflect.Type]reflect.Value), - defaultingInterfaces: make(map[reflect.Type]interface{}), nameFunc: nameFn, structFieldDests: make(map[typeNamePair][]typeNamePair), structFieldSources: make(map[typeNamePair][]typeNamePair), @@ -152,10 +144,6 @@ type Scope interface { // on the current stack frame. This makes it safe to call from a conversion func. DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error - // If registered, returns a function applying defaults for objects of a given type. - // Used for automatically generating conversion functions. - DefaultingInterface(inType reflect.Type) (interface{}, bool) - // SrcTags and DestTags contain the struct tags that src and dest had, respectively. // If the enclosing object was not a struct, then these will contain no tags, of course. SrcTag() reflect.StructTag @@ -269,11 +257,6 @@ func (s scopeStack) describe() string { return desc } -func (s *scope) DefaultingInterface(inType reflect.Type) (interface{}, bool) { - value, found := s.converter.defaultingInterfaces[inType] - return value, found -} - // Formats src & dest as indices for printing. func (s *scope) setIndices(src, dest int) { s.srcStack.top().key = fmt.Sprintf("[%v]", src) @@ -430,35 +413,6 @@ func (c *Converter) SetStructFieldCopy(srcFieldType interface{}, srcFieldName st return nil } -// RegisterDefaultingFunc registers a value-defaulting func with the Converter. -// defaultingFunc must take one parameter: a pointer to the input type. -// -// Example: -// c.RegisterDefaultingFunc( -// func(in *v1.Pod) { -// // defaulting logic... 
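The conversion.Cloner removed above deep-copied arbitrary objects through the reflect package at runtime; with this client version, API types instead carry generated DeepCopyInto/DeepCopy methods. A minimal standalone sketch of that generated pattern, using a hypothetical Pod type rather than any real API struct:

package main

import "fmt"

// Pod is a stand-in type used only for this sketch.
type Pod struct {
	Name   string
	Labels map[string]string
}

// DeepCopyInto copies the receiver into out, mirroring what deepcopy-gen emits.
func (in *Pod) DeepCopyInto(out *Pod) {
	*out = *in
	if in.Labels != nil {
		out.Labels = make(map[string]string, len(in.Labels))
		for k, v := range in.Labels {
			out.Labels[k] = v
		}
	}
}

// DeepCopy allocates a new Pod and copies the receiver into it.
func (in *Pod) DeepCopy() *Pod {
	if in == nil {
		return nil
	}
	out := new(Pod)
	in.DeepCopyInto(out)
	return out
}

func main() {
	p := &Pod{Name: "demo", Labels: map[string]string{"app": "web"}}
	q := p.DeepCopy()
	q.Labels["app"] = "db"
	fmt.Println(p.Labels["app"], q.Labels["app"]) // web db
}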
-// }) -func (c *Converter) RegisterDefaultingFunc(defaultingFunc interface{}) error { - fv := reflect.ValueOf(defaultingFunc) - ft := fv.Type() - if ft.Kind() != reflect.Func { - return fmt.Errorf("expected func, got: %v", ft) - } - if ft.NumIn() != 1 { - return fmt.Errorf("expected one 'in' param, got: %v", ft) - } - if ft.NumOut() != 0 { - return fmt.Errorf("expected zero 'out' params, got: %v", ft) - } - if ft.In(0).Kind() != reflect.Ptr { - return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft) - } - inType := ft.In(0).Elem() - c.defaultingFuncs[inType] = fv - c.defaultingInterfaces[inType] = defaultingFunc - return nil -} - // RegisterInputDefaults registers a field name mapping function, used when converting // from maps to structs. Inputs to the conversion methods are checked for this type and a mapping // applied automatically if the input matches in. A set of default flags for the input conversion @@ -596,15 +550,6 @@ func (c *Converter) callCustom(sv, dv, custom reflect.Value, scope *scope) error // one is registered. func (c *Converter) convert(sv, dv reflect.Value, scope *scope) error { dt, st := dv.Type(), sv.Type() - // Apply default values. - if fv, ok := c.defaultingFuncs[st]; ok { - if c.Debug != nil { - c.Debug.Logf("Applying defaults for '%v'", st) - } - args := []reflect.Value{sv.Addr()} - fv.Call(args) - } - pair := typePair{st, dt} // ignore conversions of this type diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go index 30f717b2c..b3804aa42 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -90,7 +90,14 @@ func customMarshalValue(value reflect.Value) (reflect.Value, bool) { marshaler, ok := value.Interface().(Marshaler) if !ok { - return reflect.Value{}, false + if !isPointerKind(value.Kind()) && value.CanAddr() { + marshaler, ok = value.Addr().Interface().(Marshaler) + if !ok { + return reflect.Value{}, false + } + } else { + return reflect.Value{}, false + } } // Don't invoke functions on nil pointers @@ -167,6 +174,9 @@ func convertStruct(result url.Values, st reflect.Type, sv reflect.Value) { kind = ft.Kind() if !field.IsNil() { field = reflect.Indirect(field) + // If the field is non-nil, it should be added to params + // and the omitempty should be overwite to false + omitempty = false } } diff --git a/vendor/k8s.io/apimachinery/pkg/fields/selector.go b/vendor/k8s.io/apimachinery/pkg/fields/selector.go index bb156b4cb..e3e4453b6 100644 --- a/vendor/k8s.io/apimachinery/pkg/fields/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/fields/selector.go @@ -40,6 +40,8 @@ type Selector interface { // Transform returns a new copy of the selector after TransformFunc has been // applied to the entire selector, or an error if fn returns an error. + // If for a given requirement both field and value are transformed to empty + // string, the requirement is skipped. Transform(fn TransformFunc) (Selector, error) // Requirements converts this interface to Requirements to expose @@ -48,6 +50,24 @@ type Selector interface { // String returns a human readable string that represents this selector. String() string + + // Make a deep copy of the selector. 
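The customMarshalValue hunk above now also probes the address of an addressable, non-pointer value, so marshalers declared with pointer receivers are still found. A small standalone sketch of that lookup, with a toy interface and type standing in for the vendored queryparams.Marshaler:

package main

import (
	"fmt"
	"reflect"
)

type stringer interface{ MarshalQueryParameter() string }

type metaDuration struct{ s string }

// Pointer receiver: only *metaDuration implements stringer.
func (d *metaDuration) MarshalQueryParameter() string { return d.s }

func lookupMarshaler(v reflect.Value) (stringer, bool) {
	if m, ok := v.Interface().(stringer); ok {
		return m, true
	}
	// Not a pointer, but addressable: try the pointer, as the patched code does.
	if v.Kind() != reflect.Ptr && v.CanAddr() {
		if m, ok := v.Addr().Interface().(stringer); ok {
			return m, true
		}
	}
	return nil, false
}

func main() {
	holder := struct{ D metaDuration }{metaDuration{"30s"}}
	field := reflect.ValueOf(&holder).Elem().Field(0) // addressable, non-pointer
	if m, ok := lookupMarshaler(field); ok {
		fmt.Println(m.MarshalQueryParameter()) // 30s
	}
}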
+ DeepCopySelector() Selector +} + +type nothingSelector struct{} + +func (n nothingSelector) Matches(_ Fields) bool { return false } +func (n nothingSelector) Empty() bool { return false } +func (n nothingSelector) String() string { return "" } +func (n nothingSelector) Requirements() Requirements { return nil } +func (n nothingSelector) DeepCopySelector() Selector { return n } +func (n nothingSelector) RequiresExactMatch(field string) (value string, found bool) { return "", false } +func (n nothingSelector) Transform(fn TransformFunc) (Selector, error) { return n, nil } + +// Nothing returns a selector that matches no fields +func Nothing() Selector { + return nothingSelector{} } // Everything returns a selector that matches all fields. @@ -79,6 +99,9 @@ func (t *hasTerm) Transform(fn TransformFunc) (Selector, error) { if err != nil { return nil, err } + if len(field) == 0 && len(value) == 0 { + return Everything(), nil + } return &hasTerm{field, value}, nil } @@ -94,6 +117,15 @@ func (t *hasTerm) String() string { return fmt.Sprintf("%v=%v", t.field, EscapeValue(t.value)) } +func (t *hasTerm) DeepCopySelector() Selector { + if t == nil { + return nil + } + out := new(hasTerm) + *out = *t + return out +} + type notHasTerm struct { field, value string } @@ -115,6 +147,9 @@ func (t *notHasTerm) Transform(fn TransformFunc) (Selector, error) { if err != nil { return nil, err } + if len(field) == 0 && len(value) == 0 { + return Everything(), nil + } return ¬HasTerm{field, value}, nil } @@ -130,6 +165,15 @@ func (t *notHasTerm) String() string { return fmt.Sprintf("%v!=%v", t.field, EscapeValue(t.value)) } +func (t *notHasTerm) DeepCopySelector() Selector { + if t == nil { + return nil + } + out := new(notHasTerm) + *out = *t + return out +} + type andTerm []Selector func (t andTerm) Matches(ls Fields) bool { @@ -169,13 +213,15 @@ func (t andTerm) RequiresExactMatch(field string) (string, bool) { } func (t andTerm) Transform(fn TransformFunc) (Selector, error) { - next := make([]Selector, len([]Selector(t))) - for i, s := range []Selector(t) { + next := make([]Selector, 0, len([]Selector(t))) + for _, s := range []Selector(t) { n, err := s.Transform(fn) if err != nil { return nil, err } - next[i] = n + if !n.Empty() { + next = append(next, n) + } } return andTerm(next), nil } @@ -197,6 +243,17 @@ func (t andTerm) String() string { return strings.Join(terms, ",") } +func (t andTerm) DeepCopySelector() Selector { + if t == nil { + return nil + } + out := make([]Selector, len(t)) + for i := range t { + out[i] = t[i].DeepCopySelector() + } + return andTerm(out) +} + // SelectorFromSet returns a Selector which will match exactly the given Set. A // nil Set is considered equivalent to Everything(). func SelectorFromSet(ls Set) Selector { @@ -222,7 +279,7 @@ var valueEscaper = strings.NewReplacer( `=`, `\=`, ) -// Escapes an arbitrary literal string for use as a fieldSelector value +// EscapeValue escapes an arbitrary literal string for use as a fieldSelector value func EscapeValue(s string) string { return valueEscaper.Replace(s) } @@ -245,7 +302,7 @@ func (i UnescapedRune) Error() string { return fmt.Sprintf("invalid field selector: unescaped character in value: %v", i.r) } -// Unescapes a fieldSelector value and returns the original literal value. +// UnescapeValue unescapes a fieldSelector value and returns the original literal value. // May return the original string if it contains no escaped or special characters. 
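The field-selector hunks above add Nothing(), DeepCopySelector, and a Transform that drops any requirement whose field and value both map to the empty string. A standalone sketch of that drop-on-empty behaviour with toy types; the field names used in main are made up for the example:

package main

import (
	"fmt"
	"strings"
)

type transformFunc func(field, value string) (string, string, error)

type term struct{ field, value string }

// transformTerms applies fn and skips terms that map to an empty field and
// value, mirroring the patched andTerm.Transform.
func transformTerms(terms []term, fn transformFunc) ([]term, error) {
	out := make([]term, 0, len(terms))
	for _, t := range terms {
		f, v, err := fn(t.field, t.value)
		if err != nil {
			return nil, err
		}
		if f == "" && v == "" {
			continue // requirement dropped, as in the new code
		}
		out = append(out, term{f, v})
	}
	return out, nil
}

func main() {
	terms := []term{{"metadata.name", "web"}, {"status.legacyField", "x"}}
	kept, _ := transformTerms(terms, func(f, v string) (string, string, error) {
		if strings.HasPrefix(f, "status.legacy") {
			return "", "", nil // signal: drop this requirement
		}
		return f, v, nil
	})
	fmt.Println(kept) // [{metadata.name web}]
}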
func UnescapeValue(s string) (string, error) { // if there's no escaping or special characters, just return to avoid allocation @@ -307,12 +364,12 @@ func ParseSelector(selector string) (Selector, error) { }) } -// Parses the selector and runs them through the given TransformFunc. +// ParseAndTransformSelector parses the selector and runs them through the given TransformFunc. func ParseAndTransformSelector(selector string, fn TransformFunc) (Selector, error) { return parseSelector(selector, fn) } -// Function to transform selectors. +// TransformFunc transforms selectors. type TransformFunc func(field, value string) (newField, newValue string, err error) // splitTerms returns the comma-separated terms contained in the given fieldSelector. @@ -354,7 +411,7 @@ const ( var termOperators = []string{notEqualOperator, doubleEqualOperator, equalOperator} // splitTerm returns the lhs, operator, and rhs parsed from the given term, along with an indicator of whether the parse was successful. -// no escaping of special characters is supported in the lhs value, so the first occurance of a recognized operator is used as the split point. +// no escaping of special characters is supported in the lhs value, so the first occurrence of a recognized operator is used as the split point. // the literal rhs is returned, and the caller is responsible for applying any desired unescaping. func splitTerm(term string) (lhs, op, rhs string, ok bool) { for i := range term { @@ -407,6 +464,12 @@ func OneTermEqualSelector(k, v string) Selector { return &hasTerm{field: k, value: v} } +// OneTermNotEqualSelector returns an object that matches objects where one field/field does not equal one value. +// Cannot return an error. +func OneTermNotEqualSelector(k, v string) Selector { + return ¬HasTerm{field: k, value: v} +} + // AndSelectors creates a selector that is the logical AND of all the given selectors func AndSelectors(selectors ...Selector) Selector { return andTerm(selectors) diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go index 0d0caa77d..32db4d96f 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -66,12 +66,12 @@ func (ls Set) AsSelector() Selector { // assumes that labels are already validated and thus don't // preform any validation. // According to our measurements this is significantly faster -// in codepaths that matter at high sccale. +// in codepaths that matter at high scale. func (ls Set) AsSelectorPreValidated() Selector { return SelectorFromValidatedSet(ls) } -// FormatLables convert label map into plain string +// FormatLabels convert label map into plain string func FormatLabels(labelMap map[string]string) string { l := Set(labelMap).String() if l == "" { diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 9bddc35a6..b301b4284 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -51,6 +51,9 @@ type Selector interface { // If there are querying parameters, it will return converted requirements and selectable=true. // If this selector doesn't want to select anything, it will return selectable=false. Requirements() (requirements Requirements, selectable bool) + + // Make a deep copy of the selector. + DeepCopySelector() Selector } // Everything returns a selector that matches all labels. 
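For context on the FormatLabels helper whose doc comment is corrected above: it renders a label map as a comma-separated k=v string. A rough standalone equivalent, assuming deterministic ordering by key; the empty-map placeholder below is illustrative rather than quoted from the vendored code:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func formatLabels(m map[string]string) string {
	if len(m) == 0 {
		return "<none>" // the vendored helper also special-cases the empty map
	}
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys) // sort for a deterministic string
	parts := make([]string, 0, len(keys))
	for _, k := range keys {
		parts = append(parts, k+"="+m[k])
	}
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(formatLabels(map[string]string{"app": "web", "tier": "frontend"}))
}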
@@ -65,19 +68,36 @@ func (n nothingSelector) Empty() bool { return false } func (n nothingSelector) String() string { return "" } func (n nothingSelector) Add(_ ...Requirement) Selector { return n } func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } +func (n nothingSelector) DeepCopySelector() Selector { return n } // Nothing returns a selector that matches no labels func Nothing() Selector { return nothingSelector{} } +// NewSelector returns a nil selector func NewSelector() Selector { return internalSelector(nil) } type internalSelector []Requirement -// Sort by key to obtain determisitic parser +func (s internalSelector) DeepCopy() internalSelector { + if s == nil { + return nil + } + result := make([]Requirement, len(s)) + for i := range s { + s[i].DeepCopyInto(&result[i]) + } + return result +} + +func (s internalSelector) DeepCopySelector() Selector { + return s.DeepCopy() +} + +// ByKey sorts requirements by key to obtain deterministic parser type ByKey []Requirement func (a ByKey) Len() int { return len(a) } @@ -90,6 +110,7 @@ func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key } // The zero value of Requirement is invalid. // Requirement implements both set based match and exact match // Requirement should be initialized via NewRequirement constructor for creating a valid Requirement. +// +k8s:deepcopy-gen=true type Requirement struct { key string operator selection.Operator @@ -215,12 +236,17 @@ func (r *Requirement) Matches(ls Labels) bool { } } +// Key returns requirement key func (r *Requirement) Key() string { return r.key } + +// Operator returns requirement operator func (r *Requirement) Operator() selection.Operator { return r.operator } + +// Values returns requirement values func (r *Requirement) Values() sets.String { ret := sets.String{} for i := range r.strValues { @@ -229,7 +255,7 @@ func (r *Requirement) Values() sets.String { return ret } -// Return true if the internalSelector doesn't restrict selection space +// Empty returns true if the internalSelector doesn't restrict selection space func (lsel internalSelector) Empty() bool { if lsel == nil { return true @@ -320,23 +346,37 @@ func (lsel internalSelector) String() string { return strings.Join(reqs, ",") } -// constants definition for lexer token +// Token represents constant definition for lexer token type Token int const ( + // ErrorToken represents scan error ErrorToken Token = iota + // EndOfStringToken represents end of string EndOfStringToken + // ClosedParToken represents close parenthesis ClosedParToken + // CommaToken represents the comma CommaToken + // DoesNotExistToken represents logic not DoesNotExistToken + // DoubleEqualsToken represents double equals DoubleEqualsToken + // EqualsToken represents equal EqualsToken + // GreaterThanToken represents greater than GreaterThanToken - IdentifierToken // to represent keys and values + // IdentifierToken represents identifier, e.g. keys and values + IdentifierToken + // InToken represents in InToken + // LessThanToken represents less than LessThanToken + // NotEqualsToken represents not equal NotEqualsToken + // NotInToken represents not in NotInToken + // OpenParToken represents open parenthesis OpenParToken ) @@ -356,7 +396,7 @@ var string2token = map[string]Token{ "(": OpenParToken, } -// The item produced by the lexer. It contains the Token and the literal. +// ScannedItem contains the Token and the literal produced by the lexer. 
type ScannedItem struct { tok Token literal string @@ -401,8 +441,8 @@ func (l *Lexer) unread() { l.pos-- } -// scanIdOrKeyword scans string to recognize literal token (for example 'in') or an identifier. -func (l *Lexer) scanIdOrKeyword() (tok Token, lit string) { +// scanIDOrKeyword scans string to recognize literal token (for example 'in') or an identifier. +func (l *Lexer) scanIDOrKeyword() (tok Token, lit string) { var buffer []byte IdentifierLoop: for { @@ -474,7 +514,7 @@ func (l *Lexer) Lex() (tok Token, lit string) { return l.scanSpecialSymbol() default: l.unread() - return l.scanIdOrKeyword() + return l.scanIDOrKeyword() } } @@ -485,14 +525,16 @@ type Parser struct { position int } -// Parser context represents context during parsing: +// ParserContext represents context during parsing: // some literal for example 'in' and 'notin' can be // recognized as operator for example 'x in (a)' but // it can be recognized as value for example 'value in (in)' type ParserContext int const ( + // KeyAndOperator represents key and operator KeyAndOperator ParserContext = iota + // Values represents values Values ) @@ -508,7 +550,7 @@ func (p *Parser) lookahead(context ParserContext) (Token, string) { return tok, lit } -// consume returns current token and string. Increments the the position +// consume returns current token and string. Increments the position func (p *Parser) consume(context ParserContext) (Token, string) { p.position++ tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal @@ -798,11 +840,12 @@ func SelectorFromSet(ls Set) Selector { } var requirements internalSelector for label, value := range ls { - if r, err := NewRequirement(label, selection.Equals, []string{value}); err != nil { + r, err := NewRequirement(label, selection.Equals, []string{value}) + if err == nil { + requirements = append(requirements, *r) + } else { //TODO: double check errors when input comes from serialization? return internalSelector{} - } else { - requirements = append(requirements, *r) } } // sort to have deterministic string representation diff --git a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go new file mode 100644 index 000000000..4d482947f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go @@ -0,0 +1,42 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package labels + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Requirement) DeepCopyInto(out *Requirement) { + *out = *in + if in.strValues != nil { + in, out := &in.strValues, &out.strValues + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Requirement. 
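The SelectorFromSet rewrite above only changes control flow (a lint-friendly if/else instead of an if-with-initializer), not behaviour: requirements are appended on success and an empty selector is returned on the first invalid entry. A toy sketch of that flow; the validation rule used here is an assumption for the example:

package main

import (
	"fmt"
	"strings"
)

type requirement struct{ key, value string }

func newRequirement(key, value string) (*requirement, error) {
	if strings.ContainsAny(key, " ,") {
		return nil, fmt.Errorf("invalid key %q", key)
	}
	return &requirement{key, value}, nil
}

// selectorFromSet mirrors the restructured control flow above: append on
// success, bail out with an empty selector on the first invalid entry.
func selectorFromSet(ls map[string]string) []requirement {
	var reqs []requirement
	for label, value := range ls {
		r, err := newRequirement(label, value)
		if err == nil {
			reqs = append(reqs, *r)
		} else {
			// mirrors the vendored TODO about serialized input
			return []requirement{}
		}
	}
	return reqs
}

func main() {
	fmt.Println(selectorFromSet(map[string]string{"app": "web"})) // [{app web}]
	fmt.Println(selectorFromSet(map[string]string{"bad key": "x"})) // []
}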
+func (in *Requirement) DeepCopy() *Requirement { + if in == nil { + return nil + } + out := new(Requirement) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/openapi/common.go b/vendor/k8s.io/apimachinery/pkg/openapi/common.go deleted file mode 100644 index 605776ed8..000000000 --- a/vendor/k8s.io/apimachinery/pkg/openapi/common.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package openapi - -import ( - "github.com/emicklei/go-restful" - "github.com/go-openapi/spec" - "strings" -) - -// OpenAPIDefinition describes single type. Normally these definitions are auto-generated using gen-openapi. -type OpenAPIDefinition struct { - Schema spec.Schema - Dependencies []string -} - -type ReferenceCallback func(path string) spec.Ref - -// OpenAPIDefinitions is collection of all definitions. -type GetOpenAPIDefinitions func(ReferenceCallback) map[string]OpenAPIDefinition - -// OpenAPIDefinitionGetter gets openAPI definitions for a given type. If a type implements this interface, -// the definition returned by it will be used, otherwise the auto-generated definitions will be used. See -// GetOpenAPITypeFormat for more information about trade-offs of using this interface or GetOpenAPITypeFormat method when -// possible. -type OpenAPIDefinitionGetter interface { - OpenAPIDefinition() *OpenAPIDefinition -} - -// Config is set of configuration for openAPI spec generation. -type Config struct { - // List of supported protocols such as https, http, etc. - ProtocolList []string - - // Info is general information about the API. - Info *spec.Info - - // DefaultResponse will be used if an operation does not have any responses listed. It - // will show up as ... "responses" : {"default" : $DefaultResponse} in the spec. - DefaultResponse *spec.Response - - // CommonResponses will be added as a response to all operation specs. This is a good place to add common - // responses such as authorization failed. - CommonResponses map[int]spec.Response - - // List of webservice's path prefixes to ignore - IgnorePrefixes []string - - // OpenAPIDefinitions should provide definition for all models used by routes. Failure to provide this map - // or any of the models will result in spec generation failure. - GetDefinitions GetOpenAPIDefinitions - - // GetOperationIDAndTags returns operation id and tags for a restful route. It is an optional function to customize operation IDs. - GetOperationIDAndTags func(servePath string, r *restful.Route) (string, []string, error) - - // GetDefinitionName returns a friendly name for a definition base on the serving path. parameter `name` is the full name of the definition. - // It is an optional function to customize model names. - GetDefinitionName func(servePath string, name string) (string, spec.Extensions) - - // PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving. 
- PostProcessSpec func(*spec.Swagger) (*spec.Swagger, error) - - // SecurityDefinitions is list of all security definitions for OpenAPI service. If this is not nil, the user of config - // is responsible to provide DefaultSecurity and (maybe) add unauthorized response to CommonResponses. - SecurityDefinitions *spec.SecurityDefinitions - - // DefaultSecurity for all operations. This will pass as spec.SwaggerProps.Security to OpenAPI. - // For most cases, this will be list of acceptable definitions in SecurityDefinitions. - DefaultSecurity []map[string][]string -} - -// This function is a reference for converting go (or any custom type) to a simple open API type,format pair. There are -// two ways to customize spec for a type. If you add it here, a type will be converted to a simple type and the type -// comment (the comment that is added before type definition) will be lost. The spec will still have the property -// comment. The second way is to implement OpenAPIDefinitionGetter interface. That function can customize the spec (so -// the spec does not need to be simple type,format) or can even return a simple type,format (e.g. IntOrString). For simple -// type formats, the benefit of adding OpenAPIDefinitionGetter interface is to keep both type and property documentation. -// Example: -// type Sample struct { -// ... -// // port of the server -// port IntOrString -// ... -// } -// // IntOrString documentation... -// type IntOrString { ... } -// -// Adding IntOrString to this function: -// "port" : { -// format: "string", -// type: "int-or-string", -// Description: "port of the server" -// } -// -// Implement OpenAPIDefinitionGetter for IntOrString: -// -// "port" : { -// $Ref: "#/definitions/IntOrString" -// Description: "port of the server" -// } -// ... -// definitions: -// { -// "IntOrString": { -// format: "string", -// type: "int-or-string", -// Description: "IntOrString documentation..." 
// new -// } -// } -// -func GetOpenAPITypeFormat(typeName string) (string, string) { - schemaTypeFormatMap := map[string][]string{ - "uint": {"integer", "int32"}, - "uint8": {"integer", "byte"}, - "uint16": {"integer", "int32"}, - "uint32": {"integer", "int64"}, - "uint64": {"integer", "int64"}, - "int": {"integer", "int32"}, - "int8": {"integer", "byte"}, - "int16": {"integer", "int32"}, - "int32": {"integer", "int32"}, - "int64": {"integer", "int64"}, - "byte": {"integer", "byte"}, - "float64": {"number", "double"}, - "float32": {"number", "float"}, - "bool": {"boolean", ""}, - "time.Time": {"string", "date-time"}, - "string": {"string", ""}, - "integer": {"integer", ""}, - "number": {"number", ""}, - "boolean": {"boolean", ""}, - "[]byte": {"string", "byte"}, // base64 encoded characters - } - mapped, ok := schemaTypeFormatMap[typeName] - if !ok { - return "", "" - } - return mapped[0], mapped[1] -} - -func EscapeJsonPointer(p string) string { - // Escaping reference name using rfc6901 - p = strings.Replace(p, "~", "~0", -1) - p = strings.Replace(p, "/", "~1", -1) - return p -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/OWNERS b/vendor/k8s.io/apimachinery/pkg/runtime/OWNERS deleted file mode 100644 index a49419f70..000000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/OWNERS +++ /dev/null @@ -1,19 +0,0 @@ -approvers: -- caesarxuchao -- deads2k -- lavalamp -- smarterclayton -reviewers: -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- derekwaynecarr -- caesarxuchao -- mikedanese -- nikhiljindal -- gmarek -- krousey -- timothysc -- piosz -- mbohlool diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go index d9748f066..10dc12cca 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -139,6 +139,7 @@ func NewParameterCodec(scheme *Scheme) ParameterCodec { typer: scheme, convertor: scheme, creator: scheme, + defaulter: scheme, } } @@ -147,6 +148,7 @@ type parameterCodec struct { typer ObjectTyper convertor ObjectConvertor creator ObjectCreater + defaulter ObjectDefaulter } var _ ParameterCodec = ¶meterCodec{} @@ -163,9 +165,17 @@ func (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.Gro } for i := range targetGVKs { if targetGVKs[i].GroupVersion() == from { - return c.convertor.Convert(¶meters, into, nil) + if err := c.convertor.Convert(¶meters, into, nil); err != nil { + return err + } + // in the case where we going into the same object we're receiving, default on the outbound object + if c.defaulter != nil { + c.defaulter.Default(into) + } + return nil } } + input, err := c.creator.New(from.WithKind(targetGVKs[0].Kind)) if err != nil { return err @@ -173,6 +183,10 @@ func (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.Gro if err := c.convertor.Convert(¶meters, input, nil); err != nil { return err } + // if we have defaulter, default the input before converting to output + if c.defaulter != nil { + c.defaulter.Default(input) + } return c.convertor.Convert(input, into, nil) } @@ -267,7 +281,7 @@ func (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersi // GroupVersioners implements GroupVersioner and resolves to the first exact match for any kind. type GroupVersioners []GroupVersioner -// KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occured. 
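Among the deleted openapi helpers above is EscapeJsonPointer, which applies RFC 6901 escaping before a definition name is used in a $ref. A standalone sketch reproducing that escaping; the sample input is made up:

package main

import (
	"fmt"
	"strings"
)

func escapeJSONPointer(p string) string {
	p = strings.Replace(p, "~", "~0", -1) // escape "~" first so "~1" stays unambiguous
	p = strings.Replace(p, "/", "~1", -1)
	return p
}

func main() {
	fmt.Println(escapeJSONPointer("io.k8s.api/core~v1")) // io.k8s.api~1core~0v1
}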
+// KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occurred. func (gvs GroupVersioners) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { for _, gv := range gvs { target, ok := gv.KindForGroupVersionKinds(kinds) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go index 1d34ec1a8..510444a4d 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go @@ -28,8 +28,7 @@ import ( // object. (Will modify internalObject.) (Assumes JSON serialization.) // TODO: verify that the correct external version is chosen on encode... func CheckCodec(c Codec, internalType Object, externalTypes ...schema.GroupVersionKind) error { - _, err := Encode(c, internalType) - if err != nil { + if _, err := Encode(c, internalType); err != nil { return fmt.Errorf("Internal type not encodable: %v", err) } for _, et := range externalTypes { @@ -41,9 +40,8 @@ func CheckCodec(c Codec, internalType Object, externalTypes ...schema.GroupVersi if reflect.TypeOf(obj) != reflect.TypeOf(internalType) { return fmt.Errorf("decode of external type %s produced: %#v", et, obj) } - err = DecodeInto(c, exBytes, internalType) - if err != nil { - return fmt.Errorf("external type %s not convertable to internal type: %v", et, err) + if err = DecodeInto(c, exBytes, internalType); err != nil { + return fmt.Errorf("external type %s not convertible to internal type: %v", et, err) } } return nil diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go index 8eedffc9c..afe4fab15 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go @@ -19,6 +19,7 @@ limitations under the License. package runtime import ( + "fmt" "reflect" "strconv" "strings" @@ -26,6 +27,20 @@ import ( "k8s.io/apimachinery/pkg/conversion" ) +// DefaultFieldSelectorConversion auto-accepts metav1 values for name and namespace. +// A cluster scoped resource specifying namespace empty works fine and specifying a particular +// namespace will return no results, as expected. +func DefaultMetaV1FieldSelectorConversion(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + case "metadata.namespace": + return label, value, nil + default: + return "", "", fmt.Errorf("%q is not a known field selector: only %q, %q", label, "metadata.name", "metadata.namespace") + } +} + // JSONKeyMapper uses the struct tags on a conversion to determine the key value for // the other side. Use when mapping from a map[string]* to a struct or vice versa. func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, string) { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go new file mode 100644 index 000000000..291d7a4e8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -0,0 +1,805 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
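The new DefaultMetaV1FieldSelectorConversion above accepts only metadata.name and metadata.namespace and rejects everything else. A standalone copy of that behaviour for illustration, not an import of the vendored package; the selectors used in main are hypothetical:

package main

import "fmt"

func defaultMetaV1FieldSelectorConversion(label, value string) (string, string, error) {
	switch label {
	case "metadata.name", "metadata.namespace":
		return label, value, nil
	default:
		return "", "", fmt.Errorf("%q is not a known field selector: only %q, %q",
			label, "metadata.name", "metadata.namespace")
	}
}

func main() {
	if f, v, err := defaultMetaV1FieldSelectorConversion("metadata.name", "prometheus-0"); err == nil {
		fmt.Println(f, v)
	}
	if _, _, err := defaultMetaV1FieldSelectorConversion("spec.nodeName", "n1"); err != nil {
		fmt.Println("rejected:", err)
	}
}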
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + encodingjson "encoding/json" + "fmt" + "math" + "os" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/util/json" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + + "github.com/golang/glog" +) + +// UnstructuredConverter is an interface for converting between interface{} +// and map[string]interface representation. +type UnstructuredConverter interface { + ToUnstructured(obj interface{}) (map[string]interface{}, error) + FromUnstructured(u map[string]interface{}, obj interface{}) error +} + +type structField struct { + structType reflect.Type + field int +} + +type fieldInfo struct { + name string + nameValue reflect.Value + omitempty bool +} + +type fieldsCacheMap map[structField]*fieldInfo + +type fieldsCache struct { + sync.Mutex + value atomic.Value +} + +func newFieldsCache() *fieldsCache { + cache := &fieldsCache{} + cache.value.Store(make(fieldsCacheMap)) + return cache +} + +var ( + marshalerType = reflect.TypeOf(new(encodingjson.Marshaler)).Elem() + unmarshalerType = reflect.TypeOf(new(encodingjson.Unmarshaler)).Elem() + mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) + stringType = reflect.TypeOf(string("")) + int64Type = reflect.TypeOf(int64(0)) + float64Type = reflect.TypeOf(float64(0)) + boolType = reflect.TypeOf(bool(false)) + fieldCache = newFieldsCache() + + // DefaultUnstructuredConverter performs unstructured to Go typed object conversions. + DefaultUnstructuredConverter = &unstructuredConverter{ + mismatchDetection: parseBool(os.Getenv("KUBE_PATCH_CONVERSION_DETECTOR")), + comparison: conversion.EqualitiesOrDie( + func(a, b time.Time) bool { + return a.UTC() == b.UTC() + }, + ), + } +) + +func parseBool(key string) bool { + if len(key) == 0 { + return false + } + value, err := strconv.ParseBool(key) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't parse '%s' as bool for unstructured mismatch detection", key)) + } + return value +} + +// unstructuredConverter knows how to convert between interface{} and +// Unstructured in both ways. +type unstructuredConverter struct { + // If true, we will be additionally running conversion via json + // to ensure that the result is true. + // This is supposed to be set only in tests. + mismatchDetection bool + // comparison is the default test logic used to compare + comparison conversion.Equalities +} + +// NewTestUnstructuredConverter creates an UnstructuredConverter that accepts JSON typed maps and translates them +// to Go types via reflection. It performs mismatch detection automatically and is intended for use by external +// test tools. Use DefaultUnstructuredConverter if you do not explicitly need mismatch detection. +func NewTestUnstructuredConverter(comparison conversion.Equalities) UnstructuredConverter { + return &unstructuredConverter{ + mismatchDetection: true, + comparison: comparison, + } +} + +// FromUnstructured converts an object from map[string]interface{} representation into a concrete type. 
+// It uses encoding/json/Unmarshaler if object implements it or reflection if not. +func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj interface{}) error { + t := reflect.TypeOf(obj) + value := reflect.ValueOf(obj) + if t.Kind() != reflect.Ptr || value.IsNil() { + return fmt.Errorf("FromUnstructured requires a non-nil pointer to an object, got %v", t) + } + err := fromUnstructured(reflect.ValueOf(u), value.Elem()) + if c.mismatchDetection { + newObj := reflect.New(t.Elem()).Interface() + newErr := fromUnstructuredViaJSON(u, newObj) + if (err != nil) != (newErr != nil) { + glog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) + } + if err == nil && !c.comparison.DeepEqual(obj, newObj) { + glog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) + } + } + return err +} + +func fromUnstructuredViaJSON(u map[string]interface{}, obj interface{}) error { + data, err := json.Marshal(u) + if err != nil { + return err + } + return json.Unmarshal(data, obj) +} + +func fromUnstructured(sv, dv reflect.Value) error { + sv = unwrapInterface(sv) + if !sv.IsValid() { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + st, dt := sv.Type(), dv.Type() + + switch dt.Kind() { + case reflect.Map, reflect.Slice, reflect.Ptr, reflect.Struct, reflect.Interface: + // Those require non-trivial conversion. + default: + // This should handle all simple types. + if st.AssignableTo(dt) { + dv.Set(sv) + return nil + } + // We cannot simply use "ConvertibleTo", as JSON doesn't support conversions + // between those four groups: bools, integers, floats and string. We need to + // do the same. + if st.ConvertibleTo(dt) { + switch st.Kind() { + case reflect.String: + switch dt.Kind() { + case reflect.String: + dv.Set(sv.Convert(dt)) + return nil + } + case reflect.Bool: + switch dt.Kind() { + case reflect.Bool: + dv.Set(sv.Convert(dt)) + return nil + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch dt.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + dv.Set(sv.Convert(dt)) + return nil + } + case reflect.Float32, reflect.Float64: + switch dt.Kind() { + case reflect.Float32, reflect.Float64: + dv.Set(sv.Convert(dt)) + return nil + } + if sv.Float() == math.Trunc(sv.Float()) { + dv.Set(sv.Convert(dt)) + return nil + } + } + return fmt.Errorf("cannot convert %s to %s", st.String(), dt.String()) + } + } + + // Check if the object has a custom JSON marshaller/unmarshaller. 
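When mismatch detection is enabled, the converter above double-checks the reflection path against fromUnstructuredViaJSON, which is just a marshal/unmarshal round trip. A standalone sketch of that JSON fallback with a hypothetical typed struct:

package main

import (
	"encoding/json"
	"fmt"
)

type endpointPort struct {
	Name string `json:"name"`
	Port int32  `json:"port"`
}

// fromUnstructuredViaJSON mirrors the helper above: marshal the untyped map,
// then unmarshal into the typed destination.
func fromUnstructuredViaJSON(u map[string]interface{}, obj interface{}) error {
	data, err := json.Marshal(u)
	if err != nil {
		return err
	}
	return json.Unmarshal(data, obj)
}

func main() {
	u := map[string]interface{}{"name": "metrics", "port": int64(9090)}
	var p endpointPort
	if err := fromUnstructuredViaJSON(u, &p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // {Name:metrics Port:9090}
}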
+ if reflect.PtrTo(dt).Implements(unmarshalerType) { + data, err := json.Marshal(sv.Interface()) + if err != nil { + return fmt.Errorf("error encoding %s to json: %v", st.String(), err) + } + unmarshaler := dv.Addr().Interface().(encodingjson.Unmarshaler) + return unmarshaler.UnmarshalJSON(data) + } + + switch dt.Kind() { + case reflect.Map: + return mapFromUnstructured(sv, dv) + case reflect.Slice: + return sliceFromUnstructured(sv, dv) + case reflect.Ptr: + return pointerFromUnstructured(sv, dv) + case reflect.Struct: + return structFromUnstructured(sv, dv) + case reflect.Interface: + return interfaceFromUnstructured(sv, dv) + default: + return fmt.Errorf("unrecognized type: %v", dt.Kind()) + } +} + +func fieldInfoFromField(structType reflect.Type, field int) *fieldInfo { + fieldCacheMap := fieldCache.value.Load().(fieldsCacheMap) + if info, ok := fieldCacheMap[structField{structType, field}]; ok { + return info + } + + // Cache miss - we need to compute the field name. + info := &fieldInfo{} + typeField := structType.Field(field) + jsonTag := typeField.Tag.Get("json") + if len(jsonTag) == 0 { + // Make the first character lowercase. + if typeField.Name == "" { + info.name = typeField.Name + } else { + info.name = strings.ToLower(typeField.Name[:1]) + typeField.Name[1:] + } + } else { + items := strings.Split(jsonTag, ",") + info.name = items[0] + for i := range items { + if items[i] == "omitempty" { + info.omitempty = true + } + } + } + info.nameValue = reflect.ValueOf(info.name) + + fieldCache.Lock() + defer fieldCache.Unlock() + fieldCacheMap = fieldCache.value.Load().(fieldsCacheMap) + newFieldCacheMap := make(fieldsCacheMap) + for k, v := range fieldCacheMap { + newFieldCacheMap[k] = v + } + newFieldCacheMap[structField{structType, field}] = info + fieldCache.value.Store(newFieldCacheMap) + return info +} + +func unwrapInterface(v reflect.Value) reflect.Value { + for v.Kind() == reflect.Interface { + v = v.Elem() + } + return v +} + +func mapFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if st.Kind() != reflect.Map { + return fmt.Errorf("cannot restore map from %v", st.Kind()) + } + + if !st.Key().AssignableTo(dt.Key()) && !st.Key().ConvertibleTo(dt.Key()) { + return fmt.Errorf("cannot copy map with non-assignable keys: %v %v", st.Key(), dt.Key()) + } + + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.MakeMap(dt)) + for _, key := range sv.MapKeys() { + value := reflect.New(dt.Elem()).Elem() + if val := unwrapInterface(sv.MapIndex(key)); val.IsValid() { + if err := fromUnstructured(val, value); err != nil { + return err + } + } else { + value.Set(reflect.Zero(dt.Elem())) + } + if st.Key().AssignableTo(dt.Key()) { + dv.SetMapIndex(key, value) + } else { + dv.SetMapIndex(key.Convert(dt.Key()), value) + } + } + return nil +} + +func sliceFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if st.Kind() == reflect.String && dt.Elem().Kind() == reflect.Uint8 { + // We store original []byte representation as string. + // This conversion is allowed, but we need to be careful about + // marshaling data appropriately. + if len(sv.Interface().(string)) > 0 { + marshalled, err := json.Marshal(sv.Interface()) + if err != nil { + return fmt.Errorf("error encoding %s to json: %v", st, err) + } + // TODO: Is this Unmarshal needed? 
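fieldInfoFromField above derives the unstructured key for a struct field from its json tag, falling back to the lower-cased Go field name, and records omitempty. A small standalone sketch of that tag handling; the sample struct is made up:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type sample struct {
	APIVersion string `json:"apiVersion,omitempty"`
	Kind       string
}

func fieldNameAndOmitempty(f reflect.StructField) (string, bool) {
	tag := f.Tag.Get("json")
	if tag == "" {
		// No tag: lower-case the first character of the Go field name.
		return strings.ToLower(f.Name[:1]) + f.Name[1:], false
	}
	items := strings.Split(tag, ",")
	omit := false
	for _, it := range items[1:] {
		if it == "omitempty" {
			omit = true
		}
	}
	return items[0], omit
}

func main() {
	t := reflect.TypeOf(sample{})
	for i := 0; i < t.NumField(); i++ {
		name, omit := fieldNameAndOmitempty(t.Field(i))
		fmt.Println(name, omit) // apiVersion true, then kind false
	}
}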
+ var data []byte + err = json.Unmarshal(marshalled, &data) + if err != nil { + return fmt.Errorf("error decoding from json: %v", err) + } + dv.SetBytes(data) + } else { + dv.Set(reflect.Zero(dt)) + } + return nil + } + if st.Kind() != reflect.Slice { + return fmt.Errorf("cannot restore slice from %v", st.Kind()) + } + + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap())) + for i := 0; i < sv.Len(); i++ { + if err := fromUnstructured(sv.Index(i), dv.Index(i)); err != nil { + return err + } + } + return nil +} + +func pointerFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + + if st.Kind() == reflect.Ptr && sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.New(dt.Elem())) + switch st.Kind() { + case reflect.Ptr, reflect.Interface: + return fromUnstructured(sv.Elem(), dv.Elem()) + default: + return fromUnstructured(sv, dv.Elem()) + } +} + +func structFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if st.Kind() != reflect.Map { + return fmt.Errorf("cannot restore struct from: %v", st.Kind()) + } + + for i := 0; i < dt.NumField(); i++ { + fieldInfo := fieldInfoFromField(dt, i) + fv := dv.Field(i) + + if len(fieldInfo.name) == 0 { + // This field is inlined. + if err := fromUnstructured(sv, fv); err != nil { + return err + } + } else { + value := unwrapInterface(sv.MapIndex(fieldInfo.nameValue)) + if value.IsValid() { + if err := fromUnstructured(value, fv); err != nil { + return err + } + } else { + fv.Set(reflect.Zero(fv.Type())) + } + } + } + return nil +} + +func interfaceFromUnstructured(sv, dv reflect.Value) error { + // TODO: Is this conversion safe? + dv.Set(sv) + return nil +} + +// ToUnstructured converts an object into map[string]interface{} representation. +// It uses encoding/json/Marshaler if object implements it or reflection if not. +func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]interface{}, error) { + var u map[string]interface{} + var err error + if unstr, ok := obj.(Unstructured); ok { + u = unstr.UnstructuredContent() + } else { + t := reflect.TypeOf(obj) + value := reflect.ValueOf(obj) + if t.Kind() != reflect.Ptr || value.IsNil() { + return nil, fmt.Errorf("ToUnstructured requires a non-nil pointer to an object, got %v", t) + } + u = map[string]interface{}{} + err = toUnstructured(value.Elem(), reflect.ValueOf(&u).Elem()) + } + if c.mismatchDetection { + newUnstr := map[string]interface{}{} + newErr := toUnstructuredViaJSON(obj, &newUnstr) + if (err != nil) != (newErr != nil) { + glog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) + } + if err == nil && !c.comparison.DeepEqual(u, newUnstr) { + glog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) + } + } + if err != nil { + return nil, err + } + return u, nil +} + +// DeepCopyJSON deep copies the passed value, assuming it is a valid JSON representation i.e. only contains +// types produced by json.Unmarshal() and also int64. +// bool, int64, float64, string, []interface{}, map[string]interface{}, json.Number and nil +func DeepCopyJSON(x map[string]interface{}) map[string]interface{} { + return DeepCopyJSONValue(x).(map[string]interface{}) +} + +// DeepCopyJSONValue deep copies the passed value, assuming it is a valid JSON representation i.e. only contains +// types produced by json.Unmarshal() and also int64. 
+// bool, int64, float64, string, []interface{}, map[string]interface{}, json.Number and nil +func DeepCopyJSONValue(x interface{}) interface{} { + switch x := x.(type) { + case map[string]interface{}: + if x == nil { + // Typed nil - an interface{} that contains a type map[string]interface{} with a value of nil + return x + } + clone := make(map[string]interface{}, len(x)) + for k, v := range x { + clone[k] = DeepCopyJSONValue(v) + } + return clone + case []interface{}: + if x == nil { + // Typed nil - an interface{} that contains a type []interface{} with a value of nil + return x + } + clone := make([]interface{}, len(x)) + for i, v := range x { + clone[i] = DeepCopyJSONValue(v) + } + return clone + case string, int64, bool, float64, nil, encodingjson.Number: + return x + default: + panic(fmt.Errorf("cannot deep copy %T", x)) + } +} + +func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error { + data, err := json.Marshal(obj) + if err != nil { + return err + } + return json.Unmarshal(data, u) +} + +var ( + nullBytes = []byte("null") + trueBytes = []byte("true") + falseBytes = []byte("false") +) + +func getMarshaler(v reflect.Value) (encodingjson.Marshaler, bool) { + // Check value receivers if v is not a pointer and pointer receivers if v is a pointer + if v.Type().Implements(marshalerType) { + return v.Interface().(encodingjson.Marshaler), true + } + // Check pointer receivers if v is not a pointer + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + if v.Type().Implements(marshalerType) { + return v.Interface().(encodingjson.Marshaler), true + } + } + return nil, false +} + +func toUnstructured(sv, dv reflect.Value) error { + // Check if the object has a custom JSON marshaller/unmarshaller. + if marshaler, ok := getMarshaler(sv); ok { + if sv.Kind() == reflect.Ptr && sv.IsNil() { + // We're done - we don't need to store anything. + return nil + } + + data, err := marshaler.MarshalJSON() + if err != nil { + return err + } + switch { + case len(data) == 0: + return fmt.Errorf("error decoding from json: empty value") + + case bytes.Equal(data, nullBytes): + // We're done - we don't need to store anything. 
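DeepCopyJSONValue above clones JSON-shaped data by recursing into maps and slices and passing scalars through. A trimmed standalone sketch of the same recursion, omitting the typed-nil and json.Number cases for brevity:

package main

import "fmt"

func deepCopyJSONValue(x interface{}) interface{} {
	switch x := x.(type) {
	case map[string]interface{}:
		clone := make(map[string]interface{}, len(x))
		for k, v := range x {
			clone[k] = deepCopyJSONValue(v)
		}
		return clone
	case []interface{}:
		clone := make([]interface{}, len(x))
		for i, v := range x {
			clone[i] = deepCopyJSONValue(v)
		}
		return clone
	case string, int64, bool, float64, nil:
		return x
	default:
		panic(fmt.Errorf("cannot deep copy %T", x))
	}
}

func main() {
	in := map[string]interface{}{
		"metadata": map[string]interface{}{"name": "web"},
		"ports":    []interface{}{int64(80), int64(443)},
	}
	out := deepCopyJSONValue(in).(map[string]interface{})
	out["metadata"].(map[string]interface{})["name"] = "changed"
	fmt.Println(in["metadata"], out["metadata"]) // map[name:web] map[name:changed]
}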
+ + case bytes.Equal(data, trueBytes): + dv.Set(reflect.ValueOf(true)) + + case bytes.Equal(data, falseBytes): + dv.Set(reflect.ValueOf(false)) + + case data[0] == '"': + var result string + err := json.Unmarshal(data, &result) + if err != nil { + return fmt.Errorf("error decoding string from json: %v", err) + } + dv.Set(reflect.ValueOf(result)) + + case data[0] == '{': + result := make(map[string]interface{}) + err := json.Unmarshal(data, &result) + if err != nil { + return fmt.Errorf("error decoding object from json: %v", err) + } + dv.Set(reflect.ValueOf(result)) + + case data[0] == '[': + result := make([]interface{}, 0) + err := json.Unmarshal(data, &result) + if err != nil { + return fmt.Errorf("error decoding array from json: %v", err) + } + dv.Set(reflect.ValueOf(result)) + + default: + var ( + resultInt int64 + resultFloat float64 + err error + ) + if err = json.Unmarshal(data, &resultInt); err == nil { + dv.Set(reflect.ValueOf(resultInt)) + } else if err = json.Unmarshal(data, &resultFloat); err == nil { + dv.Set(reflect.ValueOf(resultFloat)) + } else { + return fmt.Errorf("error decoding number from json: %v", err) + } + } + + return nil + } + + st, dt := sv.Type(), dv.Type() + switch st.Kind() { + case reflect.String: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(stringType)) + } + dv.Set(reflect.ValueOf(sv.String())) + return nil + case reflect.Bool: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(boolType)) + } + dv.Set(reflect.ValueOf(sv.Bool())) + return nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(int64Type)) + } + dv.Set(reflect.ValueOf(sv.Int())) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + uVal := sv.Uint() + if uVal > math.MaxInt64 { + return fmt.Errorf("unsigned value %d does not fit into int64 (overflow)", uVal) + } + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(int64Type)) + } + dv.Set(reflect.ValueOf(int64(uVal))) + return nil + case reflect.Float32, reflect.Float64: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(float64Type)) + } + dv.Set(reflect.ValueOf(sv.Float())) + return nil + case reflect.Map: + return mapToUnstructured(sv, dv) + case reflect.Slice: + return sliceToUnstructured(sv, dv) + case reflect.Ptr: + return pointerToUnstructured(sv, dv) + case reflect.Struct: + return structToUnstructured(sv, dv) + case reflect.Interface: + return interfaceToUnstructured(sv, dv) + default: + return fmt.Errorf("unrecognized type: %v", st.Kind()) + } +} + +func mapToUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + if st.Key().Kind() == reflect.String { + switch st.Elem().Kind() { + // TODO It should be possible to reuse the slice for primitive types. + // However, it is panicing in the following form. + // case reflect.String, reflect.Bool, + // reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + // reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // sv.Set(sv) + // return nil + default: + // We need to do a proper conversion. 
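The branch above takes a fast path when a type implements json.Marshaler: the marshaled bytes are inspected and decoded back into a plain JSON value rather than walking the struct's fields. A simplified standalone sketch of that idea, using a hypothetical Quantity-like type and spelling out only the string case:

package main

import (
	"encoding/json"
	"fmt"
)

// quantity is a hypothetical stand-in for a type with a custom MarshalJSON.
type quantity struct{ s string }

func (q quantity) MarshalJSON() ([]byte, error) { return json.Marshal(q.s) }

// toPlainValue parses the marshaler's output back into a plain JSON value,
// branching on the first byte the way the vendored toUnstructured does.
func toPlainValue(m json.Marshaler) (interface{}, error) {
	data, err := m.MarshalJSON()
	if err != nil {
		return nil, err
	}
	switch {
	case len(data) == 0:
		return nil, fmt.Errorf("error decoding from json: empty value")
	case data[0] == '"':
		var s string
		if err := json.Unmarshal(data, &s); err != nil {
			return nil, err
		}
		return s, nil
	default:
		// Objects, arrays, numbers and booleans would get their own cases here.
		var v interface{}
		if err := json.Unmarshal(data, &v); err != nil {
			return nil, err
		}
		return v, nil
	}
}

func main() {
	v, err := toPlainValue(quantity{"500m"})
	fmt.Println(v, err) // 500m <nil>
}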
+ } + } + dv.Set(reflect.MakeMap(mapStringInterfaceType)) + dv = dv.Elem() + dt = dv.Type() + } + if dt.Kind() != reflect.Map { + return fmt.Errorf("cannot convert struct to: %v", dt.Kind()) + } + + if !st.Key().AssignableTo(dt.Key()) && !st.Key().ConvertibleTo(dt.Key()) { + return fmt.Errorf("cannot copy map with non-assignable keys: %v %v", st.Key(), dt.Key()) + } + + for _, key := range sv.MapKeys() { + value := reflect.New(dt.Elem()).Elem() + if err := toUnstructured(sv.MapIndex(key), value); err != nil { + return err + } + if st.Key().AssignableTo(dt.Key()) { + dv.SetMapIndex(key, value) + } else { + dv.SetMapIndex(key.Convert(dt.Key()), value) + } + } + return nil +} + +func sliceToUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + if st.Elem().Kind() == reflect.Uint8 { + dv.Set(reflect.New(stringType)) + data, err := json.Marshal(sv.Bytes()) + if err != nil { + return err + } + var result string + if err = json.Unmarshal(data, &result); err != nil { + return err + } + dv.Set(reflect.ValueOf(result)) + return nil + } + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + switch st.Elem().Kind() { + // TODO It should be possible to reuse the slice for primitive types. + // However, it is panicing in the following form. + // case reflect.String, reflect.Bool, + // reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + // reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // sv.Set(sv) + // return nil + default: + // We need to do a proper conversion. + dv.Set(reflect.MakeSlice(reflect.SliceOf(dt), sv.Len(), sv.Cap())) + dv = dv.Elem() + dt = dv.Type() + } + } + if dt.Kind() != reflect.Slice { + return fmt.Errorf("cannot convert slice to: %v", dt.Kind()) + } + for i := 0; i < sv.Len(); i++ { + if err := toUnstructured(sv.Index(i), dv.Index(i)); err != nil { + return err + } + } + return nil +} + +func pointerToUnstructured(sv, dv reflect.Value) error { + if sv.IsNil() { + // We're done - we don't need to store anything. + return nil + } + return toUnstructured(sv.Elem(), dv) +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Map, reflect.Slice: + // TODO: It seems that 0-len maps are ignored in it. + return v.IsNil() || v.Len() == 0 + case reflect.Ptr, reflect.Interface: + return v.IsNil() + } + return false +} + +func structToUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.MakeMap(mapStringInterfaceType)) + dv = dv.Elem() + dt = dv.Type() + } + if dt.Kind() != reflect.Map { + return fmt.Errorf("cannot convert struct to: %v", dt.Kind()) + } + realMap := dv.Interface().(map[string]interface{}) + + for i := 0; i < st.NumField(); i++ { + fieldInfo := fieldInfoFromField(st, i) + fv := sv.Field(i) + + if fieldInfo.name == "-" { + // This field should be skipped. + continue + } + if fieldInfo.omitempty && isZero(fv) { + // omitempty fields should be ignored. + continue + } + if len(fieldInfo.name) == 0 { + // This field is inlined. 
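isZero above decides whether an omitempty field is skipped when building the unstructured map. The helper reproduced standalone, with a few example values:

package main

import (
	"fmt"
	"reflect"
)

func isZero(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Array, reflect.String:
		return v.Len() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Map, reflect.Slice:
		return v.IsNil() || v.Len() == 0
	case reflect.Ptr, reflect.Interface:
		return v.IsNil()
	}
	return false
}

func main() {
	fmt.Println(isZero(reflect.ValueOf("")))                  // true
	fmt.Println(isZero(reflect.ValueOf([]string{})))          // true
	fmt.Println(isZero(reflect.ValueOf(map[string]int(nil)))) // true
	fmt.Println(isZero(reflect.ValueOf(42)))                  // false
}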
+ if err := toUnstructured(fv, dv); err != nil { + return err + } + continue + } + switch fv.Type().Kind() { + case reflect.String: + realMap[fieldInfo.name] = fv.String() + case reflect.Bool: + realMap[fieldInfo.name] = fv.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + realMap[fieldInfo.name] = fv.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + realMap[fieldInfo.name] = fv.Uint() + case reflect.Float32, reflect.Float64: + realMap[fieldInfo.name] = fv.Float() + default: + subv := reflect.New(dt.Elem()).Elem() + if err := toUnstructured(fv, subv); err != nil { + return err + } + dv.SetMapIndex(fieldInfo.nameValue, subv) + } + } + return nil +} + +func interfaceToUnstructured(sv, dv reflect.Value) error { + if !sv.IsValid() || sv.IsNil() { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + return toUnstructured(sv.Elem(), dv) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go index e8825a787..2cdac9e14 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go @@ -30,6 +30,12 @@ type encodable struct { } func (e encodable) GetObjectKind() schema.ObjectKind { return e.obj.GetObjectKind() } +func (e encodable) DeepCopyObject() Object { + var out encodable = e + out.obj = e.obj.DeepCopyObject() + copy(out.versions, e.versions) + return out +} // NewEncodable creates an object that will be encoded with the provided codec on demand. // Provided as a convenience for test cases dealing with internal objects. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go index c9a0e1696..778796602 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/error.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -24,16 +24,35 @@ import ( ) type notRegisteredErr struct { - gvk schema.GroupVersionKind - t reflect.Type + gvk schema.GroupVersionKind + target GroupVersioner + t reflect.Type } -// NewNotRegisteredErr is exposed for testing. -func NewNotRegisteredErr(gvk schema.GroupVersionKind, t reflect.Type) error { - return ¬RegisteredErr{gvk: gvk, t: t} +func NewNotRegisteredErrForKind(gvk schema.GroupVersionKind) error { + return ¬RegisteredErr{gvk: gvk} +} + +func NewNotRegisteredErrForType(t reflect.Type) error { + return ¬RegisteredErr{t: t} +} + +func NewNotRegisteredErrForTarget(t reflect.Type, target GroupVersioner) error { + return ¬RegisteredErr{t: t, target: target} +} + +func NewNotRegisteredGVKErrForTarget(gvk schema.GroupVersionKind, target GroupVersioner) error { + return ¬RegisteredErr{gvk: gvk, target: target} } func (k *notRegisteredErr) Error() string { + if k.t != nil && k.target != nil { + return fmt.Sprintf("%v is not suitable for converting to %q", k.t, k.target) + } + nullGVK := schema.GroupVersionKind{} + if k.gvk != nullGVK && k.target != nil { + return fmt.Sprintf("%q is not suitable for converting to %q", k.gvk.GroupVersion(), k.target) + } if k.t != nil { return fmt.Sprintf("no kind is registered for the type %v", k.t) } @@ -83,8 +102,6 @@ type missingVersionErr struct { data string } -// IsMissingVersion returns true if the error indicates that the provided object -// is missing a 'Version' field. 
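The runtime error helpers touched above follow a sentinel-error pattern: an unexported error type, exported constructors, and a predicate that type-asserts on it. A standalone sketch of that pattern modeled on NewMissingVersionErr/IsMissingVersion, with lower-cased names so the sketch stays self-contained:

package main

import "fmt"

type missingVersionErr struct{ data string }

func newMissingVersionErr(data string) error { return &missingVersionErr{data} }

func (k *missingVersionErr) Error() string {
	return fmt.Sprintf("Object 'apiVersion' is missing in '%s'", k.data)
}

// isMissingVersion reports whether err was produced by newMissingVersionErr.
func isMissingVersion(err error) bool {
	if err == nil {
		return false
	}
	_, ok := err.(*missingVersionErr)
	return ok
}

func main() {
	err := newMissingVersionErr(`{"kind":"Pod"}`)
	fmt.Println(isMissingVersion(err), err)
}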
func NewMissingVersionErr(data string) error { return &missingVersionErr{data} } @@ -93,6 +110,8 @@ func (k *missingVersionErr) Error() string { return fmt.Sprintf("Object 'apiVersion' is missing in '%s'", k.data) } +// IsMissingVersion returns true if the error indicates that the provided object +// is missing a 'Version' field. func IsMissingVersion(err error) bool { if err == nil { return false diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go index 4d23ee9ee..737e2e9ff 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go @@ -17,6 +17,7 @@ limitations under the License. package runtime import ( + "bytes" "encoding/json" "errors" ) @@ -25,7 +26,9 @@ func (re *RawExtension) UnmarshalJSON(in []byte) error { if re == nil { return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer") } - re.Raw = append(re.Raw[0:0], in...) + if !bytes.Equal(in, []byte("null")) { + re.Raw = append(re.Raw[0:0], in...) + } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index 9947bd8e1..9bcbd7226 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -47,7 +47,9 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
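Illustrative sketch (not part of the vendored patch): the extension.go hunk above makes RawExtension.UnmarshalJSON ignore a JSON null instead of storing the literal bytes "null" in Raw. The observable difference, using only the standard library and the patched type:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	var re runtime.RawExtension
	// encoding/json calls UnmarshalJSON even for a JSON null.
	if err := json.Unmarshal([]byte(`null`), &re); err != nil {
		panic(err)
	}
	fmt.Println(re.Raw == nil) // true with the patched code; previously Raw held []byte("null")
}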
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *RawExtension) Reset() { *m = RawExtension{} } func (*RawExtension) ProtoMessage() {} @@ -66,121 +68,121 @@ func init() { proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta") proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown") } -func (m *RawExtension) Marshal() (data []byte, err error) { +func (m *RawExtension) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *RawExtension) MarshalTo(data []byte) (int, error) { +func (m *RawExtension) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Raw != nil { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Raw))) - i += copy(data[i:], m.Raw) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i += copy(dAtA[i:], m.Raw) } return i, nil } -func (m *TypeMeta) Marshal() (data []byte, err error) { +func (m *TypeMeta) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *TypeMeta) MarshalTo(data []byte) (int, error) { +func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - data[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) return i, nil } -func (m *Unknown) Marshal() (data []byte, err error) { +func (m *Unknown) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Unknown) MarshalTo(data []byte) (int, error) { +func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size())) - n1, err := m.TypeMeta.MarshalTo(data[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.TypeMeta.Size())) + n1, err := m.TypeMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 if m.Raw != nil { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Raw))) - i += copy(data[i:], m.Raw) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i += copy(dAtA[i:], m.Raw) } - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) - i += copy(data[i:], m.ContentEncoding) - data[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) + i += copy(dAtA[i:], m.ContentEncoding) + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) - i += copy(data[i:], m.ContentType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) + i += copy(dAtA[i:], m.ContentType) return 
i, nil } -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintGenerated(data []byte, offset int, v uint64) int { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } func (m *RawExtension) Size() (n int) { @@ -274,8 +276,8 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *RawExtension) Unmarshal(data []byte) error { - l := len(data) +func (m *RawExtension) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -287,7 +289,7 @@ func (m *RawExtension) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -315,7 +317,7 @@ func (m *RawExtension) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -329,14 +331,14 @@ func (m *RawExtension) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Raw = append(m.Raw[:0], data[iNdEx:postIndex]...) + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) 
if m.Raw == nil { m.Raw = []byte{} } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -355,8 +357,8 @@ func (m *RawExtension) Unmarshal(data []byte) error { } return nil } -func (m *TypeMeta) Unmarshal(data []byte) error { - l := len(data) +func (m *TypeMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -368,7 +370,7 @@ func (m *TypeMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -396,7 +398,7 @@ func (m *TypeMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -411,7 +413,7 @@ func (m *TypeMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(data[iNdEx:postIndex]) + m.APIVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -425,7 +427,7 @@ func (m *TypeMeta) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -440,11 +442,11 @@ func (m *TypeMeta) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(data[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -463,8 +465,8 @@ func (m *TypeMeta) Unmarshal(data []byte) error { } return nil } -func (m *Unknown) Unmarshal(data []byte) error { - l := len(data) +func (m *Unknown) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -476,7 +478,7 @@ func (m *Unknown) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -504,7 +506,7 @@ func (m *Unknown) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -518,7 +520,7 @@ func (m *Unknown) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.TypeMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.TypeMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -534,7 +536,7 @@ func (m *Unknown) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -548,7 +550,7 @@ func (m *Unknown) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Raw = append(m.Raw[:0], data[iNdEx:postIndex]...) + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) 
if m.Raw == nil { m.Raw = []byte{} } @@ -565,7 +567,7 @@ func (m *Unknown) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -580,7 +582,7 @@ func (m *Unknown) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ContentEncoding = string(data[iNdEx:postIndex]) + m.ContentEncoding = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -594,7 +596,7 @@ func (m *Unknown) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -609,11 +611,11 @@ func (m *Unknown) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ContentType = string(data[iNdEx:postIndex]) + m.ContentType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -632,8 +634,8 @@ func (m *Unknown) Unmarshal(data []byte) error { } return nil } -func skipGenerated(data []byte) (n int, err error) { - l := len(data) +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -644,7 +646,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -662,7 +664,7 @@ func skipGenerated(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -679,7 +681,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -702,7 +704,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -713,7 +715,7 @@ func skipGenerated(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipGenerated(data[start:]) + next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } @@ -737,31 +739,35 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -var fileDescriptorGenerated = []byte{ - // 391 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x90, 0x4f, 0x8b, 0xd3, 0x40, - 0x18, 0xc6, 0x93, 0x6d, 0xa1, 0xeb, 0xb4, 0xb0, 0x32, 0x1e, 0x8c, 0x7b, 0x98, 0x2c, 0x3d, 0xd9, - 0x83, 0x33, 0xb0, 0x22, 0x78, 0xdd, 0x94, 0x82, 0x22, 0x82, 0x0c, 0xfe, 0x01, 0x4f, 0x4e, 0x93, - 0x31, 0x1d, 0x62, 0xdf, 0x09, 0x93, 0x89, 0xb1, 0x37, 0x3f, 0x82, 0x1f, 0xab, 0xc7, 0x1e, 0x3d, - 0x15, 0x1b, 0x3f, 0x84, 0x57, 0xe9, 0x74, 0x5a, 0x6b, 0x45, 0xf6, 0x96, 0x79, 0x9f, 0xe7, 0xf7, - 0xbc, 0xcf, 0x1b, 0xf4, 0xac, 0x78, 0x5a, 0x51, 0xa5, 0x59, 0x51, 0x4f, 0xa5, 0x01, 0x69, 0x65, - 0xc5, 0x3e, 0x4b, 0xc8, 0xb4, 0x61, 0x5e, 0x10, 0xa5, 0x9a, 0x8b, 0x74, 0xa6, 0x40, 0x9a, 0x05, - 0x2b, 0x8b, 0x9c, 0x99, 0x1a, 0xac, 0x9a, 0x4b, 0x96, 0x4b, 0x90, 0x46, 0x58, 0x99, 0xd1, 0xd2, - 0x68, 0xab, 0x71, 0xbc, 0x03, 0xe8, 0x31, 0x40, 0xcb, 0x22, 0xa7, 0x1e, 0xb8, 0x7c, 0x94, 0x2b, - 0x3b, 0xab, 0xa7, 0x34, 0xd5, 0x73, 0x96, 0xeb, 
0x5c, 0x33, 0xc7, 0x4d, 0xeb, 0x8f, 0xee, 0xe5, - 0x1e, 0xee, 0x6b, 0x97, 0x77, 0xf9, 0xf8, 0x7f, 0x05, 0x6a, 0xab, 0x3e, 0x31, 0x05, 0xb6, 0xb2, - 0xe6, 0xb4, 0xc4, 0x70, 0x84, 0x06, 0x5c, 0x34, 0x93, 0x2f, 0x56, 0x42, 0xa5, 0x34, 0xe0, 0x07, - 0xa8, 0x63, 0x44, 0x13, 0x85, 0x57, 0xe1, 0xc3, 0x41, 0xd2, 0x6b, 0xd7, 0x71, 0x87, 0x8b, 0x86, - 0x6f, 0x67, 0xc3, 0x0f, 0xe8, 0xfc, 0xf5, 0xa2, 0x94, 0x2f, 0xa5, 0x15, 0xf8, 0x1a, 0x21, 0x51, - 0xaa, 0xb7, 0xd2, 0x6c, 0x21, 0xe7, 0xbe, 0x93, 0xe0, 0xe5, 0x3a, 0x0e, 0xda, 0x75, 0x8c, 0x6e, - 0x5e, 0x3d, 0xf7, 0x0a, 0x3f, 0x72, 0xe1, 0x2b, 0xd4, 0x2d, 0x14, 0x64, 0xd1, 0x99, 0x73, 0x0f, - 0xbc, 0xbb, 0xfb, 0x42, 0x41, 0xc6, 0x9d, 0x32, 0xfc, 0x15, 0xa2, 0xde, 0x1b, 0x28, 0x40, 0x37, - 0x80, 0xdf, 0xa1, 0x73, 0xeb, 0xb7, 0xb9, 0xfc, 0xfe, 0xf5, 0x88, 0xde, 0xf2, 0xc3, 0xe8, 0xbe, - 0x5e, 0x72, 0xd7, 0x87, 0x1f, 0x0a, 0xf3, 0x43, 0xd8, 0xfe, 0xc2, 0xb3, 0x7f, 0x2f, 0xc4, 0x37, - 0xe8, 0x22, 0xd5, 0x60, 0x25, 0xd8, 0x09, 0xa4, 0x3a, 0x53, 0x90, 0x47, 0x1d, 0x57, 0xf6, 0xbe, - 0xcf, 0xbb, 0x18, 0xff, 0x2d, 0xf3, 0x53, 0x3f, 0x7e, 0x82, 0xfa, 0x7e, 0xb4, 0x5d, 0x1d, 0x75, - 0x1d, 0x7e, 0xcf, 0xe3, 0xfd, 0xf1, 0x1f, 0x89, 0x1f, 0xfb, 0x92, 0xd1, 0x72, 0x43, 0x82, 0xd5, - 0x86, 0x04, 0xdf, 0x37, 0x24, 0xf8, 0xda, 0x92, 0x70, 0xd9, 0x92, 0x70, 0xd5, 0x92, 0xf0, 0x47, - 0x4b, 0xc2, 0x6f, 0x3f, 0x49, 0xf0, 0xbe, 0xe7, 0x8f, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x5d, - 0x24, 0xc6, 0x1a, 0x81, 0x02, 0x00, 0x00, +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 395 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0x4f, 0x6f, 0xd3, 0x30, + 0x18, 0xc6, 0xe3, 0xb5, 0x52, 0x87, 0x5b, 0x69, 0xc8, 0x1c, 0x08, 0x3b, 0x38, 0x53, 0x4f, 0xec, + 0x30, 0x5b, 0x1a, 0x42, 0xe2, 0xba, 0x4c, 0x93, 0x40, 0x08, 0x09, 0x59, 0xfc, 0x91, 0x38, 0xe1, + 0x26, 0x26, 0xb3, 0x42, 0x5f, 0x47, 0x8e, 0x43, 0xd8, 0x8d, 0x8f, 0xc0, 0xc7, 0xea, 0x71, 0xc7, + 0x9e, 0x2a, 0x1a, 0x3e, 0x04, 0x57, 0x54, 0xd7, 0x2d, 0xa5, 0x08, 0xed, 0x16, 0xbf, 0xcf, 0xf3, + 0x7b, 0xde, 0xe7, 0x0d, 0x7e, 0x5e, 0x3e, 0xab, 0x99, 0x36, 0xbc, 0x6c, 0x26, 0xca, 0x82, 0x72, + 0xaa, 0xe6, 0x5f, 0x14, 0xe4, 0xc6, 0xf2, 0x20, 0xc8, 0x4a, 0x4f, 0x65, 0x76, 0xad, 0x41, 0xd9, + 0x1b, 0x5e, 0x95, 0x05, 0xb7, 0x0d, 0x38, 0x3d, 0x55, 0xbc, 0x50, 0xa0, 0xac, 0x74, 0x2a, 0x67, + 0x95, 0x35, 0xce, 0x90, 0x64, 0x0d, 0xb0, 0x5d, 0x80, 0x55, 0x65, 0xc1, 0x02, 0x70, 0x7c, 0x56, + 0x68, 0x77, 0xdd, 0x4c, 0x58, 0x66, 0xa6, 0xbc, 0x30, 0x85, 0xe1, 0x9e, 0x9b, 0x34, 0x9f, 0xfc, + 0xcb, 0x3f, 0xfc, 0xd7, 0x3a, 0xef, 0xf8, 0xc9, 0xff, 0x0a, 0x34, 0x4e, 0x7f, 0xe6, 0x1a, 0x5c, + 0xed, 0xec, 0x7e, 0x89, 0xf1, 0x29, 0x1e, 0x09, 0xd9, 0x5e, 0x7d, 0x75, 0x0a, 0x6a, 0x6d, 0x80, + 0x3c, 0xc2, 0x3d, 0x2b, 0xdb, 0x18, 0x9d, 0xa0, 0xc7, 0xa3, 0x74, 0xd0, 0x2d, 0x92, 0x9e, 0x90, + 0xad, 0x58, 0xcd, 0xc6, 0x1f, 0xf1, 0xe1, 0x9b, 0x9b, 0x4a, 0xbd, 0x52, 0x4e, 0x92, 0x73, 0x8c, + 0x65, 0xa5, 0xdf, 0x29, 0xbb, 0x82, 0xbc, 0xfb, 0x5e, 0x4a, 0x66, 0x8b, 0x24, 0xea, 0x16, 0x09, + 0xbe, 0x78, 0xfd, 0x22, 0x28, 0x62, 0xc7, 0x45, 0x4e, 0x70, 0xbf, 0xd4, 0x90, 0xc7, 0x07, 0xde, + 0x3d, 0x0a, 0xee, 0xfe, 0x4b, 0x0d, 0xb9, 0xf0, 0xca, 0xf8, 0x17, 0xc2, 0x83, 0xb7, 0x50, 0x82, + 0x69, 0x81, 0xbc, 0xc7, 0x87, 0x2e, 0x6c, 0xf3, 0xf9, 0xc3, 0xf3, 0x53, 0x76, 0xc7, 0x0f, 0x63, + 0x9b, 0x7a, 0xe9, 0xfd, 0x10, 0xbe, 0x2d, 0x2c, 0xb6, 0x61, 0x9b, 0x0b, 0x0f, 0xfe, 0xbd, 0x90, 
+ 0x5c, 0xe0, 0xa3, 0xcc, 0x80, 0x53, 0xe0, 0xae, 0x20, 0x33, 0xb9, 0x86, 0x22, 0xee, 0xf9, 0xb2, + 0x0f, 0x43, 0xde, 0xd1, 0xe5, 0xdf, 0xb2, 0xd8, 0xf7, 0x93, 0xa7, 0x78, 0x18, 0x46, 0xab, 0xd5, + 0x71, 0xdf, 0xe3, 0x0f, 0x02, 0x3e, 0xbc, 0xfc, 0x23, 0x89, 0x5d, 0x5f, 0x7a, 0x36, 0x5b, 0xd2, + 0xe8, 0x76, 0x49, 0xa3, 0xf9, 0x92, 0x46, 0xdf, 0x3a, 0x8a, 0x66, 0x1d, 0x45, 0xb7, 0x1d, 0x45, + 0xf3, 0x8e, 0xa2, 0x1f, 0x1d, 0x45, 0xdf, 0x7f, 0xd2, 0xe8, 0xc3, 0x20, 0x1c, 0xfa, 0x3b, 0x00, + 0x00, 0xff, 0xff, 0x3f, 0x1e, 0x24, 0x09, 0x85, 0x02, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto index 57fc84078..2ff383915 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -89,7 +89,7 @@ message RawExtension { // TypeMeta is provided here for convenience. You may use it directly from this package or define // your own with the same fields. // -// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen=false // +protobuf=true // +k8s:openapi-gen=true message TypeMeta { @@ -107,6 +107,7 @@ message TypeMeta { // metadata and field mutatation. // // +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +protobuf=true // +k8s:openapi-gen=true message Unknown { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index fcb18ba11..ba48e6146 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -174,13 +174,16 @@ type ObjectVersioner interface { // ObjectConvertor converts an object to a different version. type ObjectConvertor interface { - // Convert attempts to convert one object into another, or returns an error. This method does - // not guarantee the in object is not mutated. The context argument will be passed to - // all nested conversions. + // Convert attempts to convert one object into another, or returns an error. This + // method does not mutate the in object, but the in and out object might share data structures, + // i.e. the out object cannot be mutated without mutating the in object as well. + // The context argument will be passed to all nested conversions. Convert(in, out, context interface{}) error // ConvertToVersion takes the provided object and converts it the provided version. This - // method does not guarantee that the in object is not mutated. This method is similar to - // Convert() but handles specific details of choosing the correct output version. + // method does not mutate the in object, but the in and out object might share data structures, + // i.e. the out object cannot be mutated without mutating the in object as well. + // This method is similar to Convert() but handles specific details of choosing the correct + // output version. ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) ConvertFieldLabel(version, kind, label, value string) (string, string, error) } @@ -203,13 +206,6 @@ type ObjectCreater interface { New(kind schema.GroupVersionKind) (out Object, err error) } -// ObjectCopier duplicates an object. 
-type ObjectCopier interface { - // Copy returns an exact copy of the provided Object, or an error if the - // copy could not be completed. - Copy(Object) (Object, error) -} - // ResourceVersioner provides methods for setting and retrieving // the resource version from an API object. type ResourceVersioner interface { @@ -234,18 +230,23 @@ type SelfLinker interface { // to return a no-op ObjectKindAccessor in cases where it is not expected to be serialized. type Object interface { GetObjectKind() schema.ObjectKind + DeepCopyObject() Object } // Unstructured objects store values as map[string]interface{}, with only values that can be serialized // to JSON allowed. type Unstructured interface { - // IsUnstructuredObject is a marker interface to allow objects that can be serialized but not introspected - // to bypass conversion. - IsUnstructuredObject() + Object + // UnstructuredContent returns a non-nil map with this object's contents. Values may be + // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to + // and from JSON. SetUnstructuredContent should be used to mutate the contents. + UnstructuredContent() map[string]interface{} + // SetUnstructuredContent updates the object content to match the provided map. + SetUnstructuredContent(map[string]interface{}) // IsList returns true if this type is a list or matches the list convention - has an array called "items". IsList() bool - // UnstructuredContent returns a non-nil, mutable map of the contents of this object. Values may be - // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to - // and from JSON. - UnstructuredContent() map[string]interface{} + // EachListItem should pass a single item out of the list as an Object to the provided function. Any + // error should terminate the iteration. If IsList() returns false, this method should return an error + // instead of calling the provided function. + EachListItem(func(Object) error) error } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/register.go b/vendor/k8s.io/apimachinery/pkg/runtime/register.go index 2ec6db820..eeb380c3d 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/register.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/register.go @@ -28,7 +28,7 @@ func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } -func (obj *Unknown) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } // GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind // interface if no objects are provided, or the ObjectKind interface of the object in the diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index dfe4c5f53..46c853661 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -39,21 +39,27 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. 
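Illustrative sketch (not part of the vendored patch): interfaces.go above adds DeepCopyObject to runtime.Object and removes ObjectCopier, so every API type must now carry its own deep-copy method (normally emitted by deepcopy-gen). Simple is a hypothetical hand-written example of the new contract:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

type Simple struct {
	runtime.TypeMeta `json:",inline"`
	Value            string `json:"value,omitempty"`
}

func (s *Simple) GetObjectKind() schema.ObjectKind { return &s.TypeMeta }

// DeepCopyObject would normally be generated; a value copy suffices here
// because Simple has no reference-typed fields.
func (s *Simple) DeepCopyObject() runtime.Object {
	out := *s
	return &out
}

var _ runtime.Object = &Simple{}

func main() {
	var obj runtime.Object = &Simple{Value: "x"}
	dup := obj.DeepCopyObject()
	fmt.Println(dup.(*Simple).Value) // x
}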
-const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptorGenerated) +} var fileDescriptorGenerated = []byte{ - // 199 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0xce, 0x2f, 0x4e, 0x05, 0x31, - 0x10, 0xc7, 0xf1, 0xd6, 0x20, 0x90, 0xc8, 0x27, 0x46, 0x12, 0x0c, 0x1d, 0x81, 0x41, 0x73, 0x01, - 0x3c, 0xae, 0xbb, 0x6f, 0xe8, 0x36, 0xa5, 0x7f, 0xd2, 0x4e, 0x49, 0x70, 0x1c, 0x81, 0x63, 0xad, - 0x5c, 0x89, 0x64, 0xcb, 0x45, 0x48, 0xda, 0x15, 0x84, 0x04, 0xd7, 0x5f, 0x9a, 0xcf, 0xe4, 0x7b, - 0xf9, 0xe8, 0xee, 0x8b, 0xb2, 0x11, 0x5d, 0x9d, 0x28, 0x07, 0x62, 0x2a, 0xf8, 0x4a, 0xe1, 0x1c, - 0x33, 0x1e, 0x1f, 0x3a, 0x59, 0xaf, 0xe7, 0xc5, 0x06, 0xca, 0x6f, 0x98, 0x9c, 0xc1, 0x5c, 0x03, - 0x5b, 0x4f, 0x58, 0xe6, 0x85, 0xbc, 0x46, 0x43, 0x81, 0xb2, 0x66, 0x3a, 0xab, 0x94, 0x23, 0xc7, - 0xab, 0xeb, 0xe1, 0xd4, 0x6f, 0xa7, 0x92, 0x33, 0xea, 0x70, 0x6a, 0xb8, 0xd3, 0xad, 0xb1, 0xbc, - 0xd4, 0x49, 0xcd, 0xd1, 0xa3, 0x89, 0x26, 0x62, 0xe7, 0x53, 0x7d, 0xee, 0xab, 0x8f, 0xfe, 0x1a, - 0x67, 0x4f, 0x77, 0xff, 0xe5, 0x54, 0xb6, 0x2f, 0x68, 0x03, 0x17, 0xce, 0x7f, 0x5b, 0x1e, 0x6e, - 0xd6, 0x1d, 0xc4, 0xb6, 0x83, 0xf8, 0xdc, 0x41, 0xbc, 0x37, 0x90, 0x6b, 0x03, 0xb9, 0x35, 0x90, - 0x5f, 0x0d, 0xe4, 0xc7, 0x37, 0x88, 0xa7, 0x8b, 0x51, 0xf3, 0x13, 0x00, 0x00, 0xff, 0xff, 0xd9, - 0x82, 0x09, 0xbe, 0x07, 0x01, 0x00, 0x00, + // 202 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0xce, 0xaf, 0x4e, 0x04, 0x31, + 0x10, 0xc7, 0xf1, 0xd6, 0x20, 0x90, 0xc8, 0x13, 0x23, 0x51, 0xd0, 0x11, 0x18, 0x34, 0x2f, 0x80, + 0xc7, 0x75, 0xf7, 0x86, 0x6e, 0x53, 0xfa, 0x27, 0xed, 0x94, 0x04, 0xc7, 0x23, 0xf0, 0x58, 0x27, + 0x4f, 0xae, 0x64, 0xcb, 0x8b, 0x90, 0xb4, 0x2b, 0x08, 0xc9, 0xb9, 0xfe, 0xd2, 0x7c, 0x26, 0xdf, + 0xeb, 0x67, 0xf7, 0x58, 0x94, 0x8d, 0xe8, 0xea, 0x44, 0x39, 0x10, 0x53, 0xc1, 0x77, 0x0a, 0xc7, + 0x98, 0x71, 0xff, 0xd0, 0xc9, 0x7a, 0x3d, 0x2f, 0x36, 0x50, 0xfe, 0xc0, 0xe4, 0x0c, 0xe6, 0x1a, + 0xd8, 0x7a, 0xc2, 0x32, 0x2f, 0xe4, 0x35, 0x1a, 0x0a, 0x94, 0x35, 0xd3, 0x51, 0xa5, 0x1c, 0x39, + 0xde, 0xdc, 0x0e, 0xa7, 0xfe, 0x3a, 0x95, 0x9c, 0x51, 0xbb, 0x53, 0xc3, 0x1d, 0xee, 0x8d, 0xe5, + 0xa5, 0x4e, 0x6a, 0x8e, 0x1e, 0x4d, 0x34, 0x11, 0x3b, 0x9f, 0xea, 0x6b, 0x5f, 0x7d, 0xf4, 0xd7, + 0x38, 0x7b, 0x78, 0xb8, 0x94, 0x53, 0xd9, 0xbe, 0xa1, 0x0d, 0x5c, 0x38, 0xff, 0x6f, 0x79, 0xba, + 0x3b, 0x6d, 0x20, 0xce, 0x1b, 0x88, 0x75, 0x03, 0xf1, 0xd9, 0x40, 0x9e, 0x1a, 0xc8, 0x73, 0x03, + 0xb9, 0x36, 0x90, 0xdf, 0x0d, 0xe4, 0xd7, 0x0f, 0x88, 0x97, 0xab, 0x51, 0xf4, 0x1b, 0x00, 0x00, + 0xff, 0xff, 0xfd, 0x59, 0x57, 0x93, 0x0b, 0x01, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto index ebc1a263d..8655f4818 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index 1a9bba106..da642fa73 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -36,6 +36,21 @@ func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) { return gvr, ParseGroupResource(arg) } +// ParseKindArg takes the common style of string which may be either `Kind.group.com` or `Kind.version.group.com` +// and parses it out into both possibilities. This code takes no responsibility for knowing which representation was intended +// but with a knowledge of all GroupKinds, calling code can take a very good guess. If there are only two segments, then +// `*GroupVersionResource` is nil. +// `Kind.group.com` -> `group=com, version=group, kind=Kind` and `group=group.com, kind=Kind` +func ParseKindArg(arg string) (*GroupVersionKind, GroupKind) { + var gvk *GroupVersionKind + if strings.Count(arg, ".") >= 2 { + s := strings.SplitN(arg, ".", 3) + gvk = &GroupVersionKind{Group: s[2], Version: s[1], Kind: s[0]} + } + + return gvk, ParseGroupKind(arg) +} + // GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying // concepts during lookup stages without having partially valid types type GroupResource struct { @@ -58,6 +73,15 @@ func (gr *GroupResource) String() string { return gr.Resource + "." + gr.Group } +func ParseGroupKind(gk string) GroupKind { + i := strings.Index(gk, ".") + if i == -1 { + return GroupKind{Kind: gk} + } + + return GroupKind{Group: gk[i+1:], Kind: gk[:i]} +} + // ParseGroupResource turns "resource.group" string into a GroupResource struct. Empty strings are allowed // for each field. func ParseGroupResource(gr string) GroupResource { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index fbec6ad9b..59163d777 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -21,8 +21,11 @@ import ( "net/url" "reflect" + "strings" + "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" ) // Scheme defines methods for serializing and deserializing API objects, a type @@ -69,9 +72,12 @@ type Scheme struct { // default coverting behavior. converter *conversion.Converter - // cloner stores all registered copy functions. It also has default - // deep copy behavior. - cloner *conversion.Cloner + // versionPriority is a map of groups to ordered lists of versions for those groups indicating the + // default priorities of these versions as registered in the scheme + versionPriority map[string][]string + + // observedVersions keeps track of the order we've seen versions during type registration + observedVersions []schema.GroupVersion } // Function to convert a field selector to internal representation. @@ -80,13 +86,13 @@ type FieldLabelConversionFunc func(label, value string) (internalLabel, internal // NewScheme creates a new Scheme. This scheme is pluggable by default. 
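Illustrative sketch (not part of the vendored patch): ParseKindArg above returns both plausible readings of an ambiguous argument, and ParseGroupKind splits on the first dot only. The input strings below are hypothetical:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	gvk, gk := schema.ParseKindArg("Ingress.v1beta1.extensions")
	fmt.Println(gvk.Group, gvk.Version, gvk.Kind) // extensions v1beta1 Ingress
	fmt.Println(gk.Group, gk.Kind)                // v1beta1.extensions Ingress

	// With only two segments the GroupVersionKind reading is not possible.
	gvk2, gk2 := schema.ParseKindArg("Ingress.extensions")
	fmt.Println(gvk2 == nil)         // true
	fmt.Println(gk2.Group, gk2.Kind) // extensions Ingress
}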
func NewScheme() *Scheme { s := &Scheme{ - gvkToType: map[schema.GroupVersionKind]reflect.Type{}, - typeToGVK: map[reflect.Type][]schema.GroupVersionKind{}, - unversionedTypes: map[reflect.Type]schema.GroupVersionKind{}, - unversionedKinds: map[string]reflect.Type{}, - cloner: conversion.NewCloner(), + gvkToType: map[schema.GroupVersionKind]reflect.Type{}, + typeToGVK: map[reflect.Type][]schema.GroupVersionKind{}, + unversionedTypes: map[reflect.Type]schema.GroupVersionKind{}, + unversionedKinds: map[string]reflect.Type{}, fieldLabelConversionFuncs: map[string]map[string]FieldLabelConversionFunc{}, defaulterFuncs: map[reflect.Type]func(interface{}){}, + versionPriority: map[string][]string{}, } s.converter = conversion.NewConverter(s.nameFunc) @@ -116,7 +122,7 @@ func (s *Scheme) nameFunc(t reflect.Type) string { for _, gvk := range gvks { internalGV := gvk.GroupVersion() - internalGV.Version = "__internal" // this is hacky and maybe should be passed in + internalGV.Version = APIVersionInternal // this is hacky and maybe should be passed in internalGVK := internalGV.WithKind(gvk.Kind) if internalType, exists := s.gvkToType[internalGVK]; exists { @@ -146,13 +152,14 @@ func (s *Scheme) Converter() *conversion.Converter { // TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into // every version with particular schemas. Resolve this method at that point. func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) { + s.addObservedVersion(version) s.AddKnownTypes(version, types...) for _, obj := range types { t := reflect.TypeOf(obj).Elem() gvk := version.WithKind(t.Name()) s.unversionedTypes[t] = gvk - if _, ok := s.unversionedKinds[gvk.Kind]; ok { - panic(fmt.Sprintf("%v has already been registered as unversioned kind %q - kind name must be unique", reflect.TypeOf(t), gvk.Kind)) + if old, ok := s.unversionedKinds[gvk.Kind]; ok && t != old { + panic(fmt.Sprintf("%v.%v has already been registered as unversioned kind %q - kind name must be unique", old.PkgPath(), old.Name(), gvk)) } s.unversionedKinds[gvk.Kind] = t } @@ -163,6 +170,7 @@ func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Objec // the struct becomes the "kind" field when encoding. Version may not be empty - use the // APIVersionInternal constant if you have a type that does not have a formal version. func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) { + s.addObservedVersion(gv) for _, obj := range types { t := reflect.TypeOf(obj) if t.Kind() != reflect.Ptr { @@ -178,6 +186,7 @@ func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) { // your structs. Version may not be empty - use the APIVersionInternal constant if you have a // type that does not have a formal version. func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) { + s.addObservedVersion(gvk.GroupVersion()) t := reflect.TypeOf(obj) if len(gvk.Version) == 0 { panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t)) @@ -222,19 +231,22 @@ func (s *Scheme) AllKnownTypes() map[schema.GroupVersionKind]reflect.Type { return s.gvkToType } -// ObjectKind returns the group,version,kind of the go object and true if this object -// is considered unversioned, or an error if it's not a pointer or is unregistered. 
-func (s *Scheme) ObjectKind(obj Object) (schema.GroupVersionKind, bool, error) { - gvks, unversionedType, err := s.ObjectKinds(obj) - if err != nil { - return schema.GroupVersionKind{}, false, err - } - return gvks[0], unversionedType, nil -} - // ObjectKinds returns all possible group,version,kind of the go object, true if the // object is considered unversioned, or an error if it's not a pointer or is unregistered. func (s *Scheme) ObjectKinds(obj Object) ([]schema.GroupVersionKind, bool, error) { + // Unstructured objects are always considered to have their declared GVK + if _, ok := obj.(Unstructured); ok { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, false, NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return nil, false, NewMissingVersionErr("unstructured object has no version") + } + return []schema.GroupVersionKind{gvk}, false, nil + } + v, err := conversion.EnforcePtr(obj) if err != nil { return nil, false, err @@ -243,7 +255,7 @@ func (s *Scheme) ObjectKinds(obj Object) ([]schema.GroupVersionKind, bool, error gvks, ok := s.typeToGVK[t] if !ok { - return nil, false, NewNotRegisteredErr(schema.GroupVersionKind{}, t) + return nil, false, NewNotRegisteredErrForType(t) } _, unversionedType := s.unversionedTypes[t] @@ -281,7 +293,7 @@ func (s *Scheme) New(kind schema.GroupVersionKind) (Object, error) { if t, exists := s.unversionedKinds[kind.Kind]; exists { return reflect.New(t).Interface().(Object), nil } - return nil, NewNotRegisteredErr(kind, nil) + return nil, NewNotRegisteredErrForKind(kind) } // AddGenericConversionFunc adds a function that accepts the ConversionFunc call pattern @@ -343,7 +355,7 @@ func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error { return nil } -// Similar to AddConversionFuncs, but registers conversion functions that were +// AddGeneratedConversionFuncs registers conversion functions that were // automatically generated. func (s *Scheme) AddGeneratedConversionFuncs(conversionFuncs ...interface{}) error { for _, f := range conversionFuncs { @@ -354,29 +366,6 @@ func (s *Scheme) AddGeneratedConversionFuncs(conversionFuncs ...interface{}) err return nil } -// AddDeepCopyFuncs adds a function to the list of deep-copy functions. -// For the expected format of deep-copy function, see the comment for -// Copier.RegisterDeepCopyFunction. -func (s *Scheme) AddDeepCopyFuncs(deepCopyFuncs ...interface{}) error { - for _, f := range deepCopyFuncs { - if err := s.cloner.RegisterDeepCopyFunc(f); err != nil { - return err - } - } - return nil -} - -// Similar to AddDeepCopyFuncs, but registers deep-copy functions that were -// automatically generated. -func (s *Scheme) AddGeneratedDeepCopyFuncs(deepCopyFuncs ...conversion.GeneratedDeepCopyFunc) error { - for _, fn := range deepCopyFuncs { - if err := s.cloner.RegisterGeneratedDeepCopyFunc(fn); err != nil { - return err - } - } - return nil -} - // AddFieldLabelConversionFunc adds a conversion function to convert field selectors // of the given kind from the given version to internal version representation. 
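Illustrative sketch (not part of the vendored patch): ObjectKinds above now resolves Unstructured objects from their declared apiVersion/kind instead of the type registry, so no AddKnownTypes call is needed for them. The sketch assumes the unstructured package from the same apimachinery tree:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	s := runtime.NewScheme()

	u := &unstructured.Unstructured{}
	u.SetAPIVersion("apps/v1")
	u.SetKind("Deployment")

	// Nothing was registered for apps/v1; the declared GVK is trusted as-is.
	gvks, unversioned, err := s.ObjectKinds(u)
	fmt.Println(gvks, unversioned, err) // e.g. [apps/v1, Kind=Deployment] false <nil>

	// If kind or apiVersion were empty, a missing-kind or missing-version
	// error would be returned instead.
}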
func (s *Scheme) AddFieldLabelConversionFunc(version, kind string, conversionFunc FieldLabelConversionFunc) error { @@ -404,29 +393,6 @@ func (s *Scheme) RegisterInputDefaults(in interface{}, fn conversion.FieldMappin return s.converter.RegisterInputDefaults(in, fn, defaultFlags) } -// AddDefaultingFuncs adds functions to the list of default-value functions. -// Each of the given functions is responsible for applying default values -// when converting an instance of a versioned API object into an internal -// API object. These functions do not need to handle sub-objects. We deduce -// how to call these functions from the types of their two parameters. -// -// s.AddDefaultingFuncs( -// func(obj *v1.Pod) { -// if obj.OptionalField == "" { -// obj.OptionalField = "DefaultValue" -// } -// }, -// ) -func (s *Scheme) AddDefaultingFuncs(defaultingFuncs ...interface{}) error { - for _, f := range defaultingFuncs { - err := s.converter.RegisterDefaultingFunc(f) - if err != nil { - return err - } - } - return nil -} - // AddTypeDefaultingFuncs registers a function that is passed a pointer to an // object and can default fields on the object. These functions will be invoked // when Default() is called. The function will never be called unless the @@ -443,28 +409,73 @@ func (s *Scheme) Default(src Object) { } } -// Copy does a deep copy of an API object. -func (s *Scheme) Copy(src Object) (Object, error) { - dst, err := s.DeepCopy(src) - if err != nil { - return nil, err - } - return dst.(Object), nil -} - -// Performs a deep copy of the given object. -func (s *Scheme) DeepCopy(src interface{}) (interface{}, error) { - return s.cloner.DeepCopy(src) -} - // Convert will attempt to convert in into out. Both must be pointers. For easy // testing of conversion functions. Returns an error if the conversion isn't // possible. You can call this with types that haven't been registered (for example, // a to test conversion of types that are nested within registered types). The -// context interface is passed to the convertor. -// TODO: identify whether context should be hidden, or behind a formal context/scope -// interface +// context interface is passed to the convertor. Convert also supports Unstructured +// types and will convert them intelligently. func (s *Scheme) Convert(in, out interface{}, context interface{}) error { + unstructuredIn, okIn := in.(Unstructured) + unstructuredOut, okOut := out.(Unstructured) + switch { + case okIn && okOut: + // converting unstructured input to an unstructured output is a straight copy - unstructured + // is a "smart holder" and the contents are passed by reference between the two objects + unstructuredOut.SetUnstructuredContent(unstructuredIn.UnstructuredContent()) + return nil + + case okOut: + // if the output is an unstructured object, use the standard Go type to unstructured + // conversion. The object must not be internal. + obj, ok := in.(Object) + if !ok { + return fmt.Errorf("unable to convert object type %T to Unstructured, must be a runtime.Object", in) + } + gvks, unversioned, err := s.ObjectKinds(obj) + if err != nil { + return err + } + gvk := gvks[0] + + // if no conversion is necessary, convert immediately + if unversioned || gvk.Version != APIVersionInternal { + content, err := DefaultUnstructuredConverter.ToUnstructured(in) + if err != nil { + return err + } + unstructuredOut.SetUnstructuredContent(content) + unstructuredOut.GetObjectKind().SetGroupVersionKind(gvk) + return nil + } + + // attempt to convert the object to an external version first. 
+ target, ok := context.(GroupVersioner) + if !ok { + return fmt.Errorf("unable to convert the internal object type %T to Unstructured without providing a preferred version to convert to", in) + } + // Convert is implicitly unsafe, so we don't need to perform a safe conversion + versioned, err := s.UnsafeConvertToVersion(obj, target) + if err != nil { + return err + } + content, err := DefaultUnstructuredConverter.ToUnstructured(versioned) + if err != nil { + return err + } + unstructuredOut.SetUnstructuredContent(content) + return nil + + case okIn: + // converting an unstructured object to any type is modeled by first converting + // the input to a versioned type, then running standard conversions + typed, err := s.unstructuredToTyped(unstructuredIn) + if err != nil { + return err + } + in = typed + } + flags, meta := s.generateConvertMeta(in) meta.Context = context if flags == 0 { @@ -473,15 +484,15 @@ func (s *Scheme) Convert(in, out interface{}, context interface{}) error { return s.converter.Convert(in, out, flags, meta) } -// Converts the given field label and value for an kind field selector from -// versioned representation to an unversioned one. +// ConvertFieldLabel alters the given field label and value for an kind field selector from +// versioned representation to an unversioned one or returns an error. func (s *Scheme) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { if s.fieldLabelConversionFuncs[version] == nil { - return "", "", fmt.Errorf("No field label conversion function found for version: %s", version) + return DefaultMetaV1FieldSelectorConversion(label, value) } conversionFunc, ok := s.fieldLabelConversionFuncs[version][kind] if !ok { - return "", "", fmt.Errorf("No field label conversion function found for version %s and kind %s", version, kind) + return DefaultMetaV1FieldSelectorConversion(label, value) } return conversionFunc(label, value) } @@ -504,18 +515,33 @@ func (s *Scheme) UnsafeConvertToVersion(in Object, target GroupVersioner) (Objec // convertToVersion handles conversion with an optional copy. func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (Object, error) { - // determine the incoming kinds with as few allocations as possible. - t := reflect.TypeOf(in) - if t.Kind() != reflect.Ptr { - return nil, fmt.Errorf("only pointer types may be converted: %v", t) - } - t = t.Elem() - if t.Kind() != reflect.Struct { - return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t) + var t reflect.Type + + if u, ok := in.(Unstructured); ok { + typed, err := s.unstructuredToTyped(u) + if err != nil { + return nil, err + } + + in = typed + // unstructuredToTyped returns an Object, which must be a pointer to a struct. + t = reflect.TypeOf(in).Elem() + + } else { + // determine the incoming kinds with as few allocations as possible. 
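Illustrative sketch (not part of the vendored patch): Scheme.Convert above now special-cases Unstructured on either side; when both input and output are unstructured, the content map is handed over directly, with no registration or conversion functions involved:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	s := runtime.NewScheme()

	in := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"data":       map[string]interface{}{"k": "v"},
	}}
	out := &unstructured.Unstructured{}

	// okIn && okOut branch: the contents are passed by reference to the output.
	if err := s.Convert(in, out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.GetKind()) // ConfigMap
}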
+ t = reflect.TypeOf(in) + if t.Kind() != reflect.Ptr { + return nil, fmt.Errorf("only pointer types may be converted: %v", t) + } + t = t.Elem() + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t) + } } + kinds, ok := s.typeToGVK[t] if !ok || len(kinds) == 0 { - return nil, NewNotRegisteredErr(schema.GroupVersionKind{}, t) + return nil, NewNotRegisteredErrForType(t) } gvk, ok := target.KindForGroupVersionKinds(kinds) @@ -524,28 +550,26 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( // TODO: when we move to server API versions, we should completely remove the unversioned concept if unversionedKind, ok := s.unversionedTypes[t]; ok { if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { - return copyAndSetTargetKind(copy, s, in, gvk) + return copyAndSetTargetKind(copy, in, gvk) } - return copyAndSetTargetKind(copy, s, in, unversionedKind) + return copyAndSetTargetKind(copy, in, unversionedKind) } - - // TODO: should this be a typed error? - return nil, fmt.Errorf("%v is not suitable for converting to %q", t, target) + return nil, NewNotRegisteredErrForTarget(t, target) } // target wants to use the existing type, set kind and return (no conversion necessary) for _, kind := range kinds { if gvk == kind { - return copyAndSetTargetKind(copy, s, in, gvk) + return copyAndSetTargetKind(copy, in, gvk) } } // type is unversioned, no conversion necessary if unversionedKind, ok := s.unversionedTypes[t]; ok { if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { - return copyAndSetTargetKind(copy, s, in, gvk) + return copyAndSetTargetKind(copy, in, gvk) } - return copyAndSetTargetKind(copy, s, in, unversionedKind) + return copyAndSetTargetKind(copy, in, unversionedKind) } out, err := s.New(gvk) @@ -554,11 +578,7 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( } if copy { - copied, err := s.Copy(in) - if err != nil { - return nil, err - } - in = copied + in = in.DeepCopyObject() } flags, meta := s.generateConvertMeta(in) @@ -571,19 +591,34 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( return out, nil } +// unstructuredToTyped attempts to transform an unstructured object to a typed +// object if possible. It will return an error if conversion is not possible, or the versioned +// Go form of the object. Note that this conversion will lose fields. +func (s *Scheme) unstructuredToTyped(in Unstructured) (Object, error) { + // the type must be something we recognize + gvks, _, err := s.ObjectKinds(in) + if err != nil { + return nil, err + } + typed, err := s.New(gvks[0]) + if err != nil { + return nil, err + } + if err := DefaultUnstructuredConverter.FromUnstructured(in.UnstructuredContent(), typed); err != nil { + return nil, fmt.Errorf("unable to convert unstructured object to %v: %v", gvks[0], err) + } + return typed, nil +} + // generateConvertMeta constructs the meta value we pass to Convert. func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) { return s.converter.DefaultMeta(reflect.TypeOf(in)) } // copyAndSetTargetKind performs a conditional copy before returning the object, or an error if copy was not successful. 
-func copyAndSetTargetKind(copy bool, copier ObjectCopier, obj Object, kind schema.GroupVersionKind) (Object, error) { +func copyAndSetTargetKind(copy bool, obj Object, kind schema.GroupVersionKind) (Object, error) { if copy { - copied, err := copier.Copy(obj) - if err != nil { - return nil, err - } - obj = copied + obj = obj.DeepCopyObject() } setTargetKind(obj, kind) return obj, nil @@ -599,3 +634,133 @@ func setTargetKind(obj Object, kind schema.GroupVersionKind) { } obj.GetObjectKind().SetGroupVersionKind(kind) } + +// SetVersionPriority allows specifying a precise order of priority. All specified versions must be in the same group, +// and the specified order overwrites any previously specified order for this group +func (s *Scheme) SetVersionPriority(versions ...schema.GroupVersion) error { + groups := sets.String{} + order := []string{} + for _, version := range versions { + if len(version.Version) == 0 || version.Version == APIVersionInternal { + return fmt.Errorf("internal versions cannot be prioritized: %v", version) + } + + groups.Insert(version.Group) + order = append(order, version.Version) + } + if len(groups) != 1 { + return fmt.Errorf("must register versions for exactly one group: %v", strings.Join(groups.List(), ", ")) + } + + s.versionPriority[groups.List()[0]] = order + return nil +} + +// PrioritizedVersionsForGroup returns versions for a single group in priority order +func (s *Scheme) PrioritizedVersionsForGroup(group string) []schema.GroupVersion { + ret := []schema.GroupVersion{} + for _, version := range s.versionPriority[group] { + ret = append(ret, schema.GroupVersion{Group: group, Version: version}) + } + for _, observedVersion := range s.observedVersions { + if observedVersion.Group != group { + continue + } + found := false + for _, existing := range ret { + if existing == observedVersion { + found = true + break + } + } + if !found { + ret = append(ret, observedVersion) + } + } + + return ret +} + +// PrioritizedVersionsAllGroups returns all known versions in their priority order. Groups are random, but +// versions for a single group are prioritized +func (s *Scheme) PrioritizedVersionsAllGroups() []schema.GroupVersion { + ret := []schema.GroupVersion{} + for group, versions := range s.versionPriority { + for _, version := range versions { + ret = append(ret, schema.GroupVersion{Group: group, Version: version}) + } + } + for _, observedVersion := range s.observedVersions { + found := false + for _, existing := range ret { + if existing == observedVersion { + found = true + break + } + } + if !found { + ret = append(ret, observedVersion) + } + } + return ret +} + +// PreferredVersionAllGroups returns the most preferred version for every group. +// group ordering is random. 
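Illustrative sketch (not part of the vendored patch): the new version-priority bookkeeping above keeps two sources of ordering, an explicit per-group priority set via SetVersionPriority and the versions observed while types were registered. The example.com group and its versions are hypothetical:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	s := runtime.NewScheme()

	v1 := schema.GroupVersion{Group: "example.com", Version: "v1"}
	v1beta1 := schema.GroupVersion{Group: "example.com", Version: "v1beta1"}

	// Normally AddKnownTypes would run first and record the observed versions;
	// SetVersionPriority then pins the preferred order within the group.
	if err := s.SetVersionPriority(v1, v1beta1); err != nil {
		panic(err)
	}

	fmt.Println(s.PrioritizedVersionsForGroup("example.com")) // [example.com/v1 example.com/v1beta1]
	// IsGroupRegistered reflects observed registrations, not priorities alone.
	fmt.Println(s.IsGroupRegistered("example.com")) // false: no types were registered here
}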
+func (s *Scheme) PreferredVersionAllGroups() []schema.GroupVersion { + ret := []schema.GroupVersion{} + for group, versions := range s.versionPriority { + for _, version := range versions { + ret = append(ret, schema.GroupVersion{Group: group, Version: version}) + break + } + } + for _, observedVersion := range s.observedVersions { + found := false + for _, existing := range ret { + if existing.Group == observedVersion.Group { + found = true + break + } + } + if !found { + ret = append(ret, observedVersion) + } + } + + return ret +} + +// IsGroupRegistered returns true if types for the group have been registered with the scheme +func (s *Scheme) IsGroupRegistered(group string) bool { + for _, observedVersion := range s.observedVersions { + if observedVersion.Group == group { + return true + } + } + return false +} + +// IsVersionRegistered returns true if types for the version have been registered with the scheme +func (s *Scheme) IsVersionRegistered(version schema.GroupVersion) bool { + for _, observedVersion := range s.observedVersions { + if observedVersion == version { + return true + } + } + + return false +} + +func (s *Scheme) addObservedVersion(version schema.GroupVersion) { + if len(version.Version) == 0 || version.Version == APIVersionInternal { + return + } + for _, observedVersion := range s.observedVersions { + if observedVersion == version { + return + } + } + + s.observedVersions = append(s.observedVersions, version) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 28bb91b53..068d3f708 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -19,9 +19,11 @@ package json import ( "encoding/json" "io" + "strconv" + "unsafe" "github.com/ghodss/yaml" - "github.com/ugorji/go/codec" + jsoniter "github.com/json-iterator/go" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -66,11 +68,68 @@ type Serializer struct { var _ runtime.Serializer = &Serializer{} var _ recognizer.RecognizingDecoder = &Serializer{} +func init() { + // Force jsoniter to decode number to interface{} via ints, if possible. + decodeNumberAsInt64IfPossible := func(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + switch iter.WhatIsNext() { + case jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + i64, err := strconv.ParseInt(string(number), 10, 64) + if err == nil { + *(*interface{})(ptr) = i64 + return + } + f64, err := strconv.ParseFloat(string(number), 64) + if err == nil { + *(*interface{})(ptr) = f64 + return + } + // Not much we can do here. + default: + *(*interface{})(ptr) = iter.Read() + } + } + jsoniter.RegisterTypeDecoderFunc("interface {}", decodeNumberAsInt64IfPossible) +} + +// CaseSensitiveJsonIterator returns a jsoniterator API that's configured to be +// case-sensitive when unmarshalling, and otherwise compatible with +// the encoding/json standard library. 
+func CaseSensitiveJsonIterator() jsoniter.API { + return jsoniter.Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, + CaseSensitive: true, + }.Froze() +} + +var caseSensitiveJsonIterator = CaseSensitiveJsonIterator() + +// gvkWithDefaults returns group kind and version defaulting from provided default +func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { + if len(actual.Kind) == 0 { + actual.Kind = defaultGVK.Kind + } + if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = defaultGVK.Group + actual.Version = defaultGVK.Version + } + if len(actual.Version) == 0 && actual.Group == defaultGVK.Group { + actual.Version = defaultGVK.Version + } + return actual +} + // Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then -// load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, the raw data will be -// extracted and no decoding will be performed. If into is not registered with the typer, then the object will be straight decoded using -// normal JSON/YAML unmarshalling. If into is provided and the original data is not fully qualified with kind/version/group, the type of -// the into will be used to alter the returned gvk. On success or most errors, the method will return the calculated schema kind. +// load that data into an object matching the desired schema kind or the provided into. +// If into is *runtime.Unknown, the raw data will be extracted and no decoding will be performed. +// If into is not registered with the typer, then the object will be straight decoded using normal JSON/YAML unmarshalling. +// If into is provided and the original data is not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. +// If into is nil or data's gvk different from into's gvk, it will generate a new Object with ObjectCreater.New(gvk) +// On success or most errors, the method will return the calculated schema kind. 
+// The gvk calculate priority will be originalData > default gvk > into func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { if versioned, ok := into.(*runtime.VersionedObjects); ok { into = versioned.Last() @@ -97,17 +156,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i } if gvk != nil { - // apply kind and version defaulting from provided default - if len(actual.Kind) == 0 { - actual.Kind = gvk.Kind - } - if len(actual.Version) == 0 && len(actual.Group) == 0 { - actual.Group = gvk.Group - actual.Version = gvk.Version - } - if len(actual.Version) == 0 && actual.Group == gvk.Group { - actual.Version = gvk.Version - } + *actual = gvkWithDefaults(*actual, *gvk) } if unk, ok := into.(*runtime.Unknown); ok && unk != nil { @@ -118,27 +167,18 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i } if into != nil { + _, isUnstructured := into.(runtime.Unstructured) types, _, err := s.typer.ObjectKinds(into) switch { - case runtime.IsNotRegisteredError(err): - if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(into); err != nil { + case runtime.IsNotRegisteredError(err), isUnstructured: + if err := caseSensitiveJsonIterator.Unmarshal(data, into); err != nil { return nil, actual, err } return into, actual, nil case err != nil: return nil, actual, err default: - typed := types[0] - if len(actual.Kind) == 0 { - actual.Kind = typed.Kind - } - if len(actual.Version) == 0 && len(actual.Group) == 0 { - actual.Group = typed.Group - actual.Version = typed.Version - } - if len(actual.Version) == 0 && actual.Group == typed.Group { - actual.Version = typed.Version - } + *actual = gvkWithDefaults(*actual, types[0]) } } @@ -155,7 +195,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i return nil, actual, err } - if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(obj); err != nil { + if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil { return nil, actual, err } return obj, actual, nil @@ -164,7 +204,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i // Encode serializes the provided object to the given writer. func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { if s.yaml { - json, err := json.Marshal(obj) + json, err := caseSensitiveJsonIterator.Marshal(obj) if err != nil { return err } @@ -177,7 +217,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { } if s.pretty { - data, err := json.MarshalIndent(obj, "", " ") + data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", " ") if err != nil { return err } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index 829d4fa89..7716cc421 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -19,9 +19,9 @@ package versioning import ( "io" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) // NewCodecForScheme is a convenience method for callers that are using a scheme. 
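Illustrative sketch (not part of the vendored patch): the json.go hunks above swap ugorji/codec for a jsoniter configuration that, unlike encoding/json, refuses case-insensitive field matching. The payload type and the "KIND" key are hypothetical:

package main

import (
	"encoding/json"
	"fmt"

	k8sjson "k8s.io/apimachinery/pkg/runtime/serializer/json"
)

type payload struct {
	Kind string `json:"kind"`
}

func main() {
	data := []byte(`{"KIND":"Pod"}`) // deliberately wrong case

	var a payload
	_ = json.Unmarshal(data, &a) // stdlib falls back to case-insensitive matching
	fmt.Println(a.Kind)          // Pod

	var b payload
	it := k8sjson.CaseSensitiveJsonIterator()
	_ = it.Unmarshal(data, &b) // case-sensitive: "KIND" does not match "kind"
	fmt.Println(b.Kind == "")  // true
}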
@@ -33,7 +33,7 @@ func NewCodecForScheme( encodeVersion runtime.GroupVersioner, decodeVersion runtime.GroupVersioner, ) runtime.Codec { - return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, nil, encodeVersion, decodeVersion) + return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, nil, encodeVersion, decodeVersion) } // NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme. @@ -45,7 +45,7 @@ func NewDefaultingCodecForScheme( encodeVersion runtime.GroupVersioner, decodeVersion runtime.GroupVersioner, ) runtime.Codec { - return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, scheme, encodeVersion, decodeVersion) + return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, encodeVersion, decodeVersion) } // NewCodec takes objects in their internal versions and converts them to external versions before @@ -56,7 +56,6 @@ func NewCodec( decoder runtime.Decoder, convertor runtime.ObjectConvertor, creater runtime.ObjectCreater, - copier runtime.ObjectCopier, typer runtime.ObjectTyper, defaulter runtime.ObjectDefaulter, encodeVersion runtime.GroupVersioner, @@ -67,7 +66,6 @@ func NewCodec( decoder: decoder, convertor: convertor, creater: creater, - copier: copier, typer: typer, defaulter: defaulter, @@ -82,7 +80,6 @@ type codec struct { decoder runtime.Decoder convertor runtime.ObjectConvertor creater runtime.ObjectCreater - copier runtime.ObjectCopier typer runtime.ObjectTyper defaulter runtime.ObjectDefaulter @@ -123,12 +120,7 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru if c.defaulter != nil { // create a copy to ensure defaulting is not applied to the original versioned objects if isVersioned { - copied, err := c.copier.Copy(obj) - if err != nil { - utilruntime.HandleError(err) - copied = obj - } - versioned.Objects = []runtime.Object{copied} + versioned.Objects = []runtime.Object{obj.DeepCopyObject()} } c.defaulter.Default(obj) } else { @@ -151,12 +143,7 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru // Convert if needed. if isVersioned { // create a copy, because ConvertToVersion does not guarantee non-mutation of objects - copied, err := c.copier.Copy(obj) - if err != nil { - utilruntime.HandleError(err) - copied = obj - } - versioned.Objects = []runtime.Object{copied} + versioned.Objects = []runtime.Object{obj.DeepCopyObject()} } // perform defaulting if requested @@ -180,9 +167,27 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru // Encode ensures the provided object is output in the appropriate group and version, invoking // conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is. func (c *codec) Encode(obj runtime.Object, w io.Writer) error { - switch obj.(type) { - case *runtime.Unknown, runtime.Unstructured: + switch obj := obj.(type) { + case *runtime.Unknown: return c.encoder.Encode(obj, w) + case runtime.Unstructured: + // An unstructured list can contain objects of multiple group version kinds. don't short-circuit just + // because the top-level type matches our desired destination type. actually send the object to the converter + // to give it a chance to convert the list items if needed. 
+ if _, ok := obj.(*unstructured.UnstructuredList); !ok { + // avoid conversion roundtrip if GVK is the right one already or is empty (yes, this is a hack, but the old behaviour we rely on in kubectl) + objGVK := obj.GetObjectKind().GroupVersionKind() + if len(objGVK.Version) == 0 { + return c.encoder.Encode(obj, w) + } + targetGVK, ok := c.encodeVersion.KindForGroupVersionKinds([]schema.GroupVersionKind{objGVK}) + if !ok { + return runtime.NewNotRegisteredGVKErrForTarget(objGVK, c.encodeVersion) + } + if targetGVK == objGVK { + return c.encoder.Encode(obj, w) + } + } } gvks, isUnversioned, err := c.typer.ObjectKinds(obj) @@ -213,7 +218,7 @@ func (c *codec) Encode(obj runtime.Object, w io.Writer) error { } if e, ok := out.(runtime.NestedObjectEncoder); ok { - if err := e.EncodeNestedObjects(DirectEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + if err := e.EncodeNestedObjects(DirectEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { return err } } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go index 29722d52e..5bc642bc8 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go @@ -71,7 +71,7 @@ func fmtRawDoc(rawDoc string) string { delPrevChar() buffer.WriteString("\n\n") case strings.HasPrefix(leading, "TODO"): // Ignore one line TODOs - case strings.HasPrefix(leading, "+"): // Ignore instructions to go2idl + case strings.HasPrefix(leading, "+"): // Ignore instructions to the generators default: if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") { delPrevChar() diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index f972c5e69..e4515d8ed 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -30,7 +30,7 @@ package runtime // TypeMeta is provided here for convenience. You may use it directly from this package or define // your own with the same fields. // -// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen=false // +protobuf=true // +k8s:openapi-gen=true type TypeMeta struct { @@ -106,6 +106,7 @@ type RawExtension struct { // metadata and field mutatation. // // +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +protobuf=true // +k8s:openapi-gen=true type Unknown struct { @@ -124,6 +125,9 @@ type Unknown struct { // VersionedObjects is used by Decoders to give callers a way to access all versions // of an object during the decoding process. +// +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true type VersionedObjects struct { // Objects is the set of objects retrieved during decoding, in order of conversion. // The 0 index is the object as serialized on the wire. If conversion has occurred, diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go index b75b3ebb2..167de6104 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,65 +16,97 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by deepcopy-gen. Do not edit it manually! +// Code generated by deepcopy-gen. DO NOT EDIT. package runtime -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" -) - -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: DeepCopy_runtime_RawExtension, InType: reflect.TypeOf(&RawExtension{})}, - {Fn: DeepCopy_runtime_TypeMeta, InType: reflect.TypeOf(&TypeMeta{})}, - {Fn: DeepCopy_runtime_Unknown, InType: reflect.TypeOf(&Unknown{})}, +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RawExtension) DeepCopyInto(out *RawExtension) { + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) } + if in.Object == nil { + out.Object = nil + } else { + out.Object = in.Object.DeepCopyObject() + } + return } -func DeepCopy_runtime_RawExtension(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RawExtension) - out := out.(*RawExtension) - *out = *in - if in.Raw != nil { - in, out := &in.Raw, &out.Raw - *out = make([]byte, len(*in)) - copy(*out, *in) - } - // in.Object is kind 'Interface' - if in.Object != nil { - if newVal, err := c.DeepCopy(&in.Object); err != nil { - return err +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RawExtension. +func (in *RawExtension) DeepCopy() *RawExtension { + if in == nil { + return nil + } + out := new(RawExtension) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Unknown) DeepCopyInto(out *Unknown) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Unknown. +func (in *Unknown) DeepCopy() *Unknown { + if in == nil { + return nil + } + out := new(Unknown) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object. +func (in *Unknown) DeepCopyObject() Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersionedObjects) DeepCopyInto(out *VersionedObjects) { + *out = *in + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]Object, len(*in)) + for i := range *in { + if (*in)[i] == nil { + (*out)[i] = nil } else { - out.Object = *newVal.(*Object) + (*out)[i] = (*in)[i].DeepCopyObject() } } - return nil } + return } -func DeepCopy_runtime_TypeMeta(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TypeMeta) - out := out.(*TypeMeta) - *out = *in +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionedObjects. 
+func (in *VersionedObjects) DeepCopy() *VersionedObjects { + if in == nil { return nil } + out := new(VersionedObjects) + in.DeepCopyInto(out) + return out } -func DeepCopy_runtime_Unknown(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Unknown) - out := out.(*Unknown) - *out = *in - if in.Raw != nil { - in, out := &in.Raw, &out.Raw - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return nil +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object. +func (in *VersionedObjects) DeepCopyObject() Object { + if c := in.DeepCopy(); c != nil { + return c } + return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go index 1e2130da0..88f0de36d 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go +++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go @@ -18,7 +18,6 @@ package types import ( "fmt" - "strings" ) // NamespacedName comprises a resource name, with a mandatory namespace, @@ -42,19 +41,3 @@ const ( func (n NamespacedName) String() string { return fmt.Sprintf("%s%c%s", n.Namespace, Separator, n.Name) } - -// NewNamespacedNameFromString parses the provided string and returns a NamespacedName. -// The expected format is as per String() above. -// If the input string is invalid, the returned NamespacedName has all empty string field values. -// This allows a single-value return from this function, while still allowing error checks in the caller. -// Note that an input string which does not include exactly one Separator is not a valid input (as it could never -// have neem returned by String() ) -func NewNamespacedNameFromString(s string) NamespacedName { - nn := NamespacedName{} - result := strings.Split(s, string(Separator)) - if len(result) == 2 { - nn.Namespace = result[0] - nn.Name = result[1] - } - return nn -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go new file mode 100644 index 000000000..9a09fe54d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go @@ -0,0 +1,83 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "sync" +) + +const ( + shardsCount int = 32 +) + +type Cache []*cacheShard + +func NewCache(maxSize int) Cache { + if maxSize < shardsCount { + maxSize = shardsCount + } + cache := make(Cache, shardsCount) + for i := 0; i < shardsCount; i++ { + cache[i] = &cacheShard{ + items: make(map[uint64]interface{}), + maxSize: maxSize / shardsCount, + } + } + return cache +} + +func (c Cache) getShard(index uint64) *cacheShard { + return c[index%uint64(shardsCount)] +} + +// Returns true if object already existed, false otherwise. 
+func (c *Cache) Add(index uint64, obj interface{}) bool { + return c.getShard(index).add(index, obj) +} + +func (c *Cache) Get(index uint64) (obj interface{}, found bool) { + return c.getShard(index).get(index) +} + +type cacheShard struct { + items map[uint64]interface{} + sync.RWMutex + maxSize int +} + +// Returns true if object already existed, false otherwise. +func (s *cacheShard) add(index uint64, obj interface{}) bool { + s.Lock() + defer s.Unlock() + _, isOverwrite := s.items[index] + if !isOverwrite && len(s.items) >= s.maxSize { + var randomKey uint64 + for randomKey = range s.items { + break + } + delete(s.items, randomKey) + } + s.items[index] = obj + return isOverwrite +} + +func (s *cacheShard) get(index uint64) (obj interface{}, found bool) { + s.RLock() + defer s.RUnlock() + obj, found = s.items[index] + return +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go new file mode 100644 index 000000000..f6b307aa6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go @@ -0,0 +1,102 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "sync" + "time" + + "github.com/hashicorp/golang-lru" +) + +// Clock defines an interface for obtaining the current time +type Clock interface { + Now() time.Time +} + +// realClock implements the Clock interface by calling time.Now() +type realClock struct{} + +func (realClock) Now() time.Time { return time.Now() } + +// LRUExpireCache is a cache that ensures the mostly recently accessed keys are returned with +// a ttl beyond which keys are forcibly expired. +type LRUExpireCache struct { + // clock is used to obtain the current time + clock Clock + + cache *lru.Cache + lock sync.Mutex +} + +// NewLRUExpireCache creates an expiring cache with the given size +func NewLRUExpireCache(maxSize int) *LRUExpireCache { + return NewLRUExpireCacheWithClock(maxSize, realClock{}) +} + +// NewLRUExpireCacheWithClock creates an expiring cache with the given size, using the specified clock to obtain the current time. +func NewLRUExpireCacheWithClock(maxSize int, clock Clock) *LRUExpireCache { + cache, err := lru.New(maxSize) + if err != nil { + // if called with an invalid size + panic(err) + } + return &LRUExpireCache{clock: clock, cache: cache} +} + +type cacheEntry struct { + value interface{} + expireTime time.Time +} + +// Add adds the value to the cache at key with the specified maximum duration. +func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Duration) { + c.lock.Lock() + defer c.lock.Unlock() + c.cache.Add(key, &cacheEntry{value, c.clock.Now().Add(ttl)}) +} + +// Get returns the value at the specified key from the cache if it exists and is not +// expired, or returns false. 
+func (c *LRUExpireCache) Get(key interface{}) (interface{}, bool) { + c.lock.Lock() + defer c.lock.Unlock() + e, ok := c.cache.Get(key) + if !ok { + return nil, false + } + if c.clock.Now().After(e.(*cacheEntry).expireTime) { + c.cache.Remove(key) + return nil, false + } + return e.(*cacheEntry).value, true +} + +// Remove removes the specified key from the cache if it exists +func (c *LRUExpireCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + c.cache.Remove(key) +} + +// Keys returns all the keys in the cache, even if they are expired. Subsequent calls to +// get may return not found. It returns all keys from oldest to newest. +func (c *LRUExpireCache) Keys() []interface{} { + c.lock.Lock() + defer c.lock.Unlock() + return c.cache.Keys() +} diff --git a/vendor/k8s.io/client-go/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go similarity index 90% rename from vendor/k8s.io/client-go/util/clock/clock.go rename to vendor/k8s.io/apimachinery/pkg/util/clock/clock.go index c303a212a..9567f9006 100644 --- a/vendor/k8s.io/client-go/util/clock/clock.go +++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go @@ -26,18 +26,12 @@ import ( type Clock interface { Now() time.Time Since(time.Time) time.Duration - After(d time.Duration) <-chan time.Time - NewTimer(d time.Duration) Timer - Sleep(d time.Duration) - Tick(d time.Duration) <-chan time.Time + After(time.Duration) <-chan time.Time + NewTimer(time.Duration) Timer + Sleep(time.Duration) + NewTicker(time.Duration) Ticker } -var ( - _ = Clock(RealClock{}) - _ = Clock(&FakeClock{}) - _ = Clock(&IntervalClock{}) -) - // RealClock really calls time.Now() type RealClock struct{} @@ -62,8 +56,10 @@ func (RealClock) NewTimer(d time.Duration) Timer { } } -func (RealClock) Tick(d time.Duration) <-chan time.Time { - return time.Tick(d) +func (RealClock) NewTicker(d time.Duration) Ticker { + return &realTicker{ + ticker: time.NewTicker(d), + } } func (RealClock) Sleep(d time.Duration) { @@ -137,7 +133,7 @@ func (f *FakeClock) NewTimer(d time.Duration) Timer { return timer } -func (f *FakeClock) Tick(d time.Duration) <-chan time.Time { +func (f *FakeClock) NewTicker(d time.Duration) Ticker { f.lock.Lock() defer f.lock.Unlock() tickTime := f.time.Add(d) @@ -149,7 +145,9 @@ func (f *FakeClock) Tick(d time.Duration) <-chan time.Time { destChan: ch, }) - return ch + return &fakeTicker{ + c: ch, + } } // Move clock by Duration, notify anyone that's called After, Tick, or NewTimer @@ -242,8 +240,8 @@ func (*IntervalClock) NewTimer(d time.Duration) Timer { // Unimplemented, will panic. // TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) Tick(d time.Duration) <-chan time.Time { - panic("IntervalClock doesn't implement Tick") +func (*IntervalClock) NewTicker(d time.Duration) Ticker { + panic("IntervalClock doesn't implement NewTicker") } func (*IntervalClock) Sleep(d time.Duration) { @@ -258,11 +256,6 @@ type Timer interface { Reset(d time.Duration) bool } -var ( - _ = Timer(&realTimer{}) - _ = Timer(&fakeTimer{}) -) - // realTimer is backed by an actual time.Timer. 
type realTimer struct { timer *time.Timer @@ -325,3 +318,31 @@ func (f *fakeTimer) Reset(d time.Duration) bool { return active } + +type Ticker interface { + C() <-chan time.Time + Stop() +} + +type realTicker struct { + ticker *time.Ticker +} + +func (t *realTicker) C() <-chan time.Time { + return t.ticker.C +} + +func (t *realTicker) Stop() { + t.ticker.Stop() +} + +type fakeTicker struct { + c <-chan time.Time +} + +func (t *fakeTicker) C() <-chan time.Time { + return t.c +} + +func (t *fakeTicker) Stop() { +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go index 0f730875e..bce95baf1 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -89,20 +89,52 @@ func ObjectReflectDiff(a, b interface{}) string { } out := []string{""} for _, d := range diffs { + elidedA, elidedB := limit(d.a, d.b, 80) out = append(out, fmt.Sprintf("%s:", d.path), - limit(fmt.Sprintf(" a: %#v", d.a), 80), - limit(fmt.Sprintf(" b: %#v", d.b), 80), + fmt.Sprintf(" a: %s", elidedA), + fmt.Sprintf(" b: %s", elidedB), ) } return strings.Join(out, "\n") } -func limit(s string, max int) string { - if len(s) > max { - return s[:max] +// limit: +// 1. stringifies aObj and bObj +// 2. elides identical prefixes if either is too long +// 3. elides remaining content from the end if either is too long +func limit(aObj, bObj interface{}, max int) (string, string) { + elidedPrefix := "" + elidedASuffix := "" + elidedBSuffix := "" + a, b := fmt.Sprintf("%#v", aObj), fmt.Sprintf("%#v", bObj) + for { + switch { + case len(a) > max && len(a) > 4 && len(b) > 4 && a[:4] == b[:4]: + // a is too long, b has data, and the first several characters are the same + elidedPrefix = "..." + a = a[2:] + b = b[2:] + + case len(b) > max && len(b) > 4 && len(a) > 4 && a[:4] == b[:4]: + // b is too long, a has data, and the first several characters are the same + elidedPrefix = "..." + a = a[2:] + b = b[2:] + + case len(a) > max: + a = a[:max] + elidedASuffix = "..." + + case len(b) > max: + b = b[:max] + elidedBSuffix = "..." + + default: + // both are short enough + return elidedPrefix + a + elidedASuffix, elidedPrefix + b + elidedBSuffix + } } - return s } func public(s string) bool { @@ -142,10 +174,6 @@ func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff { } if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 { changes = append(changes, sub...) - } else { - if !reflect.DeepEqual(a.Field(i).Interface(), b.Field(i).Interface()) { - changes = append(changes, diff{path: path, a: a.Field(i).Interface(), b: b.Field(i).Interface()}) - } } } return changes @@ -178,21 +206,18 @@ func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff { } return nil } + var diffs []diff for i := 0; i < l; i++ { if !reflect.DeepEqual(a.Index(i), b.Index(i)) { - return objectReflectDiff(path.Index(i), a.Index(i), b.Index(i)) + diffs = append(diffs, objectReflectDiff(path.Index(i), a.Index(i), b.Index(i))...) 
} } - var diffs []diff for i := l; i < lA; i++ { diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil}) } for i := l; i < lB; i++ { diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)}) } - if len(diffs) == 0 { - diffs = append(diffs, diff{path: path, a: a, b: b}) - } return diffs case reflect.Map: if reflect.DeepEqual(a.Interface(), b.Interface()) { diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go index de62fe399..88e937679 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -21,6 +21,9 @@ import ( "fmt" ) +// MessageCountMap contains occurrence for each error message. +type MessageCountMap map[string]int + // Aggregate represents an object that contains multiple errors, but does not // necessarily have singular semantic meaning. type Aggregate interface { @@ -147,6 +150,22 @@ func Flatten(agg Aggregate) Aggregate { return NewAggregate(result) } +// CreateAggregateFromMessageCountMap converts MessageCountMap Aggregate +func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate { + if m == nil { + return nil + } + result := make([]error, 0, len(m)) + for errStr, count := range m { + var countStr string + if count > 1 { + countStr = fmt.Sprintf(" (repeated %v times)", count) + } + result = append(result, fmt.Errorf("%v%v", errStr, countStr)) + } + return NewAggregate(result) +} + // Reduce will return err or, if err is an Aggregate and only has one item, // the first item in the aggregate. func Reduce(err error) error { diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index 7b9c554e0..5c2ac4f23 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -42,7 +42,9 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *IntOrString) Reset() { *m = IntOrString{} } func (*IntOrString) ProtoMessage() {} @@ -51,59 +53,59 @@ func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerate func init() { proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString") } -func (m *IntOrString) Marshal() (data []byte, err error) { +func (m *IntOrString) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *IntOrString) MarshalTo(data []byte) (int, error) { +func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintGenerated(data, i, uint64(m.Type)) - data[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 i++ - i = encodeVarintGenerated(data, i, uint64(m.IntVal)) - data[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(len(m.StrVal))) - i += copy(data[i:], m.StrVal) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) + i += copy(dAtA[i:], m.StrVal) return i, nil } -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintGenerated(data []byte, offset int, v uint64) int { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } func (m *IntOrString) Size() (n int) { @@ -129,8 +131,8 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *IntOrString) Unmarshal(data []byte) error { - l := len(data) +func (m *IntOrString) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -142,7 +144,7 @@ func (m *IntOrString) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -170,7 +172,7 @@ func (m *IntOrString) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := 
data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.Type |= (Type(b) & 0x7F) << shift if b < 0x80 { @@ -189,7 +191,7 @@ func (m *IntOrString) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ m.IntVal |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -208,7 +210,7 @@ func (m *IntOrString) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -223,11 +225,11 @@ func (m *IntOrString) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.StrVal = string(data[iNdEx:postIndex]) + m.StrVal = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) + skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } @@ -246,8 +248,8 @@ func (m *IntOrString) Unmarshal(data []byte) error { } return nil } -func skipGenerated(data []byte) (n int, err error) { - l := len(data) +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -258,7 +260,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -276,7 +278,7 @@ func skipGenerated(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -293,7 +295,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -316,7 +318,7 @@ func skipGenerated(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -327,7 +329,7 @@ func skipGenerated(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipGenerated(data[start:]) + next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } @@ -351,24 +353,29 @@ var ( ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) -var fileDescriptorGenerated = []byte{ - // 288 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0xf4, 0x30, - 0x1c, 0xc6, 0x93, 0xf7, 0xee, 0x3d, 0xb4, 0x82, 0x43, 0x71, 0x38, 0x1c, 0xd2, 0xa2, 0x20, 0x5d, - 0x4c, 0x56, 0x71, 0xec, 0x76, 0x20, 0x08, 0x3d, 0x71, 0x70, 0x6b, 0xef, 0x62, 0x2e, 0xf4, 0x2e, - 0x09, 0xe9, 0xbf, 0x42, 0xb7, 0xfb, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x3a, 0xde, 0xe8, 0x20, 0x87, - 0xad, 0xdf, 0xc2, 0x49, 0x9a, 0x16, 0x74, 0x4a, 0x9e, 0xe7, 0xf9, 0xfd, 0x02, 0xf1, 0x6e, 0xf2, - 0xab, 0x82, 0x4a, 0xcd, 0xf2, 0x32, 0xe3, 0x56, 0x71, 0xe0, 0x05, 0x7b, 0xe2, 0x6a, 0xa9, 0x2d, - 0x1b, 0x86, 0xd4, 0xc8, 0x4d, 0xba, 0x58, 0x49, 0xc5, 0x6d, 0xc5, 0x4c, 0x2e, 0x58, 0x09, 0x72, - 0xcd, 0xa4, 0x82, 0x02, 0x2c, 0x13, 0x5c, 0x71, 0x9b, 0x02, 0x5f, 0x52, 0x63, 0x35, 0x68, 0xff, - 0xbc, 0x97, 0xe8, 0x5f, 0x89, 0x9a, 0x5c, 0xd0, 0x4e, 0xa2, 0xbd, 0x74, 0x7a, 0x29, 0x24, 0xac, - 0xca, 0x8c, 0x2e, 0xf4, 0x86, 0x09, 0x2d, 0x34, 0x73, 0x6e, 0x56, 0x3e, 0xba, 0xe4, 0x82, 0xbb, - 0xf5, 0x6f, 0x9e, 0xbd, 0x60, 0xef, 0x68, 0xa6, 0xe0, 0xd6, 0xce, 0xc1, 0x4a, 0x25, 0xfc, 0xc8, - 0x1b, 0x43, 0x65, 0xf8, 0x14, 0x87, 0x38, 0x1a, 0xc5, 0x27, 0xf5, 
0x3e, 0x40, 0xed, 0x3e, 0x18, - 0xdf, 0x55, 0x86, 0x7f, 0x0f, 0x67, 0xe2, 0x08, 0xff, 0xc2, 0x9b, 0x48, 0x05, 0xf7, 0xe9, 0x7a, - 0xfa, 0x2f, 0xc4, 0xd1, 0xff, 0xf8, 0x78, 0x60, 0x27, 0x33, 0xd7, 0x26, 0xc3, 0xda, 0x71, 0x05, - 0xd8, 0x8e, 0x1b, 0x85, 0x38, 0x3a, 0xfc, 0xe5, 0xe6, 0xae, 0x4d, 0x86, 0xf5, 0xfa, 0xe0, 0xf5, - 0x2d, 0x40, 0xdb, 0x8f, 0x10, 0xc5, 0x51, 0xdd, 0x10, 0xb4, 0x6b, 0x08, 0x7a, 0x6f, 0x08, 0xda, - 0xb6, 0x04, 0xd7, 0x2d, 0xc1, 0xbb, 0x96, 0xe0, 0xcf, 0x96, 0xe0, 0xe7, 0x2f, 0x82, 0x1e, 0x26, - 0xfd, 0x67, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x55, 0xdf, 0x2a, 0x60, 0x01, 0x00, 0x00, +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, + 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, + 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, + 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, + 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, + 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, + 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, + 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, + 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, + 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, + 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, + 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, + 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, + 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, + 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, + 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, + 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, + 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, + 0x64, 0x01, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto index cccaf6f68..1c3ec732e 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
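The regenerated marshalling code above is largely a mechanical rename of data to dAtA, but it keeps the hand-rolled encodeVarintGenerated helper, which emits ordinary protobuf varints: seven payload bits per byte, with the high bit set on every byte except the last. A minimal sketch of that encoding; encodeVarint here is an illustrative local copy, and the comparison with encoding/binary only shows that the two agree:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeVarint mirrors the generated encodeVarintGenerated helper: emit the
// low seven bits of v per byte, set the continuation bit on all but the
// final byte, and return the next free offset.
func encodeVarint(dst []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	n := encodeVarint(buf, 0, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02

	std := make([]byte, binary.MaxVarintLen64)
	m := binary.PutUvarint(std, 300)
	fmt.Printf("% x\n", std[:m]) // ac 02, the standard library agrees
}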
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 02586b348..231498ca0 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -24,9 +24,6 @@ import ( "strconv" "strings" - "k8s.io/apimachinery/pkg/openapi" - - "github.com/go-openapi/spec" "github.com/golang/glog" "github.com/google/gofuzz" ) @@ -120,16 +117,15 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { } } -func (_ IntOrString) OpenAPIDefinition() openapi.OpenAPIDefinition { - return openapi.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "int-or-string", - }, - }, - } -} +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ IntOrString) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } func (intstr *IntOrString) Fuzz(c fuzz.Continue) { if intstr == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go index e8054a12e..10c8cb837 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -50,6 +50,18 @@ func Unmarshal(data []byte, v interface{}) error { // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 return convertMapNumbers(*v) + case *[]interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertSliceNumbers(*v) + default: return json.Unmarshal(data, v) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS new file mode 100644 index 000000000..8e8d9fce8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS @@ -0,0 +1,5 @@ +approvers: +- pwittrock +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go new file mode 100644 index 000000000..16501d5af --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go @@ -0,0 +1,102 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mergepatch + +import ( + "errors" + "fmt" + "reflect" +) + +var ( + ErrBadJSONDoc = errors.New("invalid JSON document") + ErrNoListOfLists = errors.New("lists of lists are not supported") + ErrBadPatchFormatForPrimitiveList = errors.New("invalid patch format of primitive list") + ErrBadPatchFormatForRetainKeys = errors.New("invalid patch format of retainKeys") + ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list") + ErrPatchContentNotMatchRetainKeys = errors.New("patch content doesn't match retainKeys list") + ErrUnsupportedStrategicMergePatchFormat = errors.New("strategic merge patch format is not supported") +) + +func ErrNoMergeKey(m map[string]interface{}, k string) error { + return fmt.Errorf("map: %v does not contain declared merge key: %s", m, k) +} + +func ErrBadArgType(expected, actual interface{}) error { + return fmt.Errorf("expected a %s, but received a %s", + reflect.TypeOf(expected), + reflect.TypeOf(actual)) +} + +func ErrBadArgKind(expected, actual interface{}) error { + var expectedKindString, actualKindString string + if expected == nil { + expectedKindString = "nil" + } else { + expectedKindString = reflect.TypeOf(expected).Kind().String() + } + if actual == nil { + actualKindString = "nil" + } else { + actualKindString = reflect.TypeOf(actual).Kind().String() + } + return fmt.Errorf("expected a %s, but received a %s", expectedKindString, actualKindString) +} + +func ErrBadPatchType(t interface{}, m map[string]interface{}) error { + return fmt.Errorf("unknown patch type: %s in map: %v", t, m) +} + +// IsPreconditionFailed returns true if the provided error indicates +// a precondition failed. +func IsPreconditionFailed(err error) bool { + _, ok := err.(ErrPreconditionFailed) + return ok +} + +type ErrPreconditionFailed struct { + message string +} + +func NewErrPreconditionFailed(target map[string]interface{}) ErrPreconditionFailed { + s := fmt.Sprintf("precondition failed for: %v", target) + return ErrPreconditionFailed{s} +} + +func (err ErrPreconditionFailed) Error() string { + return err.message +} + +type ErrConflict struct { + message string +} + +func NewErrConflict(patch, current string) ErrConflict { + s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current) + return ErrConflict{s} +} + +func (err ErrConflict) Error() string { + return err.message +} + +// IsConflict returns true if the provided error indicates +// a conflict between the patch and the current configuration. +func IsConflict(err error) bool { + _, ok := err.(ErrConflict) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go new file mode 100644 index 000000000..9261290a7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -0,0 +1,133 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mergepatch + +import ( + "fmt" + "reflect" + + "github.com/davecgh/go-spew/spew" + "github.com/ghodss/yaml" +) + +// PreconditionFunc asserts that an incompatible change is not present within a patch. +type PreconditionFunc func(interface{}) bool + +// RequireKeyUnchanged returns a precondition function that fails if the provided key +// is present in the patch (indicating that its value has changed). +func RequireKeyUnchanged(key string) PreconditionFunc { + return func(patch interface{}) bool { + patchMap, ok := patch.(map[string]interface{}) + if !ok { + return true + } + + // The presence of key means that its value has been changed, so the test fails. + _, ok = patchMap[key] + return !ok + } +} + +// RequireMetadataKeyUnchanged creates a precondition function that fails +// if the metadata.key is present in the patch (indicating its value +// has changed). +func RequireMetadataKeyUnchanged(key string) PreconditionFunc { + return func(patch interface{}) bool { + patchMap, ok := patch.(map[string]interface{}) + if !ok { + return true + } + patchMap1, ok := patchMap["metadata"] + if !ok { + return true + } + patchMap2, ok := patchMap1.(map[string]interface{}) + if !ok { + return true + } + _, ok = patchMap2[key] + return !ok + } +} + +func ToYAMLOrError(v interface{}) string { + y, err := toYAML(v) + if err != nil { + return err.Error() + } + + return y +} + +func toYAML(v interface{}) (string, error) { + y, err := yaml.Marshal(v) + if err != nil { + return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) + } + + return string(y), nil +} + +// HasConflicts returns true if the left and right JSON interface objects overlap with +// different values in any key. All keys are required to be strings. Since patches of the +// same Type have congruent keys, this is valid for multiple patch types. This method +// supports JSON merge patch semantics. +// +// NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. +// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). +func HasConflicts(left, right interface{}) (bool, error) { + switch typedLeft := left.(type) { + case map[string]interface{}: + switch typedRight := right.(type) { + case map[string]interface{}: + for key, leftValue := range typedLeft { + rightValue, ok := typedRight[key] + if !ok { + continue + } + if conflict, err := HasConflicts(leftValue, rightValue); err != nil || conflict { + return conflict, err + } + } + + return false, nil + default: + return true, nil + } + case []interface{}: + switch typedRight := right.(type) { + case []interface{}: + if len(typedLeft) != len(typedRight) { + return true, nil + } + + for i := range typedLeft { + if conflict, err := HasConflicts(typedLeft[i], typedRight[i]); err != nil || conflict { + return conflict, err + } + } + + return false, nil + default: + return true, nil + } + case string, float64, bool, int, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index c32082e93..7ea2df226 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -17,6 +17,9 @@ limitations under the License. 
package net import ( + "bufio" + "bytes" + "context" "crypto/tls" "fmt" "io" @@ -24,6 +27,7 @@ import ( "net/http" "net/url" "os" + "path" "strconv" "strings" @@ -31,6 +35,26 @@ import ( "golang.org/x/net/http2" ) +// JoinPreservingTrailingSlash does a path.Join of the specified elements, +// preserving any trailing slash on the last non-empty segment +func JoinPreservingTrailingSlash(elem ...string) string { + // do the basic path join + result := path.Join(elem...) + + // find the last non-empty segment + for i := len(elem) - 1; i >= 0; i-- { + if len(elem[i]) > 0 { + // if the last segment ended in a slash, ensure our result does as well + if strings.HasSuffix(elem[i], "/") && !strings.HasSuffix(result, "/") { + result += "/" + } + break + } + } + + return result +} + // IsProbableEOF returns true if the given error resembles a connection termination // scenario that would justify assuming that the watch is empty. // These errors are what the Go http stack returns back to us which are general @@ -38,6 +62,9 @@ import ( // differentiate probable errors in connection behavior between normal "this is // disconnected" should use the method. func IsProbableEOF(err error) bool { + if err == nil { + return false + } if uerr, ok := err.(*url.Error); ok { err = uerr.Err } @@ -64,8 +91,8 @@ func SetOldTransportDefaults(t *http.Transport) *http.Transport { // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) } - if t.Dial == nil { - t.Dial = defaultTransport.Dial + if t.DialContext == nil { + t.DialContext = defaultTransport.DialContext } if t.TLSHandshakeTimeout == 0 { t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout @@ -93,48 +120,20 @@ type RoundTripperWrapper interface { WrappedRoundTripper() http.RoundTripper } -type DialFunc func(net, addr string) (net.Conn, error) +type DialFunc func(ctx context.Context, net, addr string) (net.Conn, error) -func Dialer(transport http.RoundTripper) (DialFunc, error) { +func DialerFor(transport http.RoundTripper) (DialFunc, error) { if transport == nil { return nil, nil } switch transport := transport.(type) { case *http.Transport: - return transport.Dial, nil + return transport.DialContext, nil case RoundTripperWrapper: - return Dialer(transport.WrappedRoundTripper()) + return DialerFor(transport.WrappedRoundTripper()) default: - return nil, fmt.Errorf("unknown transport type: %v", transport) - } -} - -// CloneTLSConfig returns a tls.Config with all exported fields except SessionTicketsDisabled and SessionTicketKey copied. -// This makes it safe to call CloneTLSConfig on a config in active use by a server. 
-// TODO: replace with tls.Config#Clone when we move to go1.8 -func CloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, + return nil, fmt.Errorf("unknown transport type: %T", transport) } } @@ -155,7 +154,7 @@ func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) { case RoundTripperWrapper: return TLSClientConfig(transport.WrappedRoundTripper()) default: - return nil, fmt.Errorf("unknown transport type: %v", transport) + return nil, fmt.Errorf("unknown transport type: %T", transport) } } @@ -176,13 +175,13 @@ func GetHTTPClient(req *http.Request) string { return "unknown" } -// Extracts and returns the clients IP from the given request. -// Looks at X-Forwarded-For header, X-Real-Ip header and request.RemoteAddr in that order. -// Returns nil if none of them are set or is set to an invalid value. -func GetClientIP(req *http.Request) net.IP { +// SourceIPs splits the comma separated X-Forwarded-For header or returns the X-Real-Ip header or req.RemoteAddr, +// in that order, ignoring invalid IPs. It returns nil if all of these are empty or invalid. +func SourceIPs(req *http.Request) []net.IP { hdr := req.Header // First check the X-Forwarded-For header for requests via proxy. hdrForwardedFor := hdr.Get("X-Forwarded-For") + forwardedForIPs := []net.IP{} if hdrForwardedFor != "" { // X-Forwarded-For can be a csv of IPs in case of multiple proxies. // Use the first valid one. @@ -190,17 +189,20 @@ func GetClientIP(req *http.Request) net.IP { for _, part := range parts { ip := net.ParseIP(strings.TrimSpace(part)) if ip != nil { - return ip + forwardedForIPs = append(forwardedForIPs, ip) } } } + if len(forwardedForIPs) > 0 { + return forwardedForIPs + } // Try the X-Real-Ip header. hdrRealIp := hdr.Get("X-Real-Ip") if hdrRealIp != "" { ip := net.ParseIP(hdrRealIp) if ip != nil { - return ip + return []net.IP{ip} } } @@ -208,11 +210,43 @@ func GetClientIP(req *http.Request) net.IP { // Remote Address in Go's HTTP server is in the form host:port so we need to split that first. host, _, err := net.SplitHostPort(req.RemoteAddr) if err == nil { - return net.ParseIP(host) + if remoteIP := net.ParseIP(host); remoteIP != nil { + return []net.IP{remoteIP} + } } // Fallback if Remote Address was just IP. - return net.ParseIP(req.RemoteAddr) + if remoteIP := net.ParseIP(req.RemoteAddr); remoteIP != nil { + return []net.IP{remoteIP} + } + + return nil +} + +// Extracts and returns the clients IP from the given request. +// Looks at X-Forwarded-For header, X-Real-Ip header and request.RemoteAddr in that order. +// Returns nil if none of them are set or is set to an invalid value. +func GetClientIP(req *http.Request) net.IP { + ips := SourceIPs(req) + if len(ips) == 0 { + return nil + } + return ips[0] +} + +// Prepares the X-Forwarded-For header for another forwarding hop by appending the previous sender's +// IP address to the X-Forwarded-For chain. 
+func AppendForwardedForHeader(req *http.Request) { + // Copied from net/http/httputil/reverseproxy.go: + if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { + // If we aren't the first proxy retain prior + // X-Forwarded-For information as a comma+space + // separated list and fold multiple headers into one. + if prior, ok := req.Header["X-Forwarded-For"]; ok { + clientIP = strings.Join(prior, ", ") + ", " + clientIP + } + req.Header.Set("X-Forwarded-For", clientIP) + } } var defaultProxyFuncPointer = fmt.Sprintf("%p", http.ProxyFromEnvironment) @@ -226,8 +260,11 @@ func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool { // NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if // no matching CIDRs are found func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { - // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it + // we wrap the default method, so we only need to perform our check if the NO_PROXY (or no_proxy) envvar has a CIDR in it noProxyEnv := os.Getenv("NO_PROXY") + if noProxyEnv == "" { + noProxyEnv = os.Getenv("no_proxy") + } noProxyRules := strings.Split(noProxyEnv, ",") cidrs := []*net.IPNet{} @@ -243,17 +280,7 @@ func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error } return func(req *http.Request) (*url.URL, error) { - host := req.URL.Host - // for some urls, the Host is already the host, not the host:port - if net.ParseIP(host) == nil { - var err error - host, _, err = net.SplitHostPort(req.URL.Host) - if err != nil { - return delegate(req) - } - } - - ip := net.ParseIP(host) + ip := net.ParseIP(req.URL.Hostname()) if ip == nil { return delegate(req) } @@ -267,3 +294,133 @@ func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error return delegate(req) } } + +// DialerFunc implements Dialer for the provided function. +type DialerFunc func(req *http.Request) (net.Conn, error) + +func (fn DialerFunc) Dial(req *http.Request) (net.Conn, error) { + return fn(req) +} + +// Dialer dials a host and writes a request to it. +type Dialer interface { + // Dial connects to the host specified by req's URL, writes the request to the connection, and + // returns the opened net.Conn. + Dial(req *http.Request) (net.Conn, error) +} + +// ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to +// originalLocation). It returns the opened net.Conn and the raw response bytes. 
+func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer) (net.Conn, []byte, error) { + const ( + maxRedirects = 10 + maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers + ) + + var ( + location = originalLocation + method = originalMethod + intermediateConn net.Conn + rawResponse = bytes.NewBuffer(make([]byte, 0, 256)) + body = originalBody + ) + + defer func() { + if intermediateConn != nil { + intermediateConn.Close() + } + }() + +redirectLoop: + for redirects := 0; ; redirects++ { + if redirects > maxRedirects { + return nil, nil, fmt.Errorf("too many redirects (%d)", redirects) + } + + req, err := http.NewRequest(method, location.String(), body) + if err != nil { + return nil, nil, err + } + + req.Header = header + + intermediateConn, err = dialer.Dial(req) + if err != nil { + return nil, nil, err + } + + // Peek at the backend response. + rawResponse.Reset() + respReader := bufio.NewReader(io.TeeReader( + io.LimitReader(intermediateConn, maxResponseSize), // Don't read more than maxResponseSize bytes. + rawResponse)) // Save the raw response. + resp, err := http.ReadResponse(respReader, nil) + if err != nil { + // Unable to read the backend response; let the client handle it. + glog.Warningf("Error reading backend response: %v", err) + break redirectLoop + } + + switch resp.StatusCode { + case http.StatusFound: + // Redirect, continue. + default: + // Don't redirect. + break redirectLoop + } + + // Redirected requests switch to "GET" according to the HTTP spec: + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3 + method = "GET" + // don't send a body when following redirects + body = nil + + resp.Body.Close() // not used + + // Reset the connection. + intermediateConn.Close() + intermediateConn = nil + + // Prepare to follow the redirect. + redirectStr := resp.Header.Get("Location") + if redirectStr == "" { + return nil, nil, fmt.Errorf("%d response missing Location header", resp.StatusCode) + } + // We have to parse relative to the current location, NOT originalLocation. For example, + // if we request http://foo.com/a and get back "http://bar.com/b", the result should be + // http://bar.com/b. If we then make that request and get back a redirect to "/c", the result + // should be http://bar.com/c, not http://foo.com/c. + location, err = location.Parse(redirectStr) + if err != nil { + return nil, nil, fmt.Errorf("malformed Location header: %v", err) + } + } + + connToReturn := intermediateConn + intermediateConn = nil // Don't close the connection when we return it. + return connToReturn, rawResponse.Bytes(), nil +} + +// CloneRequest creates a shallow copy of the request along with a deep copy of the Headers. +func CloneRequest(req *http.Request) *http.Request { + r := new(http.Request) + + // shallow clone + *r = *req + + // deep copy headers + r.Header = CloneHeader(req.Header) + + return r +} + +// CloneHeader creates a deep copy of an http.Header. 
+func CloneHeader(in http.Header) http.Header { + out := make(http.Header, len(in)) + for key, values := range in { + newValues := make([]string, len(values)) + copy(newValues, values) + out[key] = newValues + } + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go index a1e53d2e4..42816bd70 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -29,18 +29,47 @@ import ( "github.com/golang/glog" ) +type AddressFamily uint + +const ( + familyIPv4 AddressFamily = 4 + familyIPv6 AddressFamily = 6 +) + +const ( + ipv4RouteFile = "/proc/net/route" + ipv6RouteFile = "/proc/net/ipv6_route" +) + type Route struct { Interface string Destination net.IP Gateway net.IP - // TODO: add more fields here if needed + Family AddressFamily } -func getRoutes(input io.Reader) ([]Route, error) { - routes := []Route{} - if input == nil { - return nil, fmt.Errorf("input is nil") +type RouteFile struct { + name string + parse func(input io.Reader) ([]Route, error) +} + +var ( + v4File = RouteFile{name: ipv4RouteFile, parse: getIPv4DefaultRoutes} + v6File = RouteFile{name: ipv6RouteFile, parse: getIPv6DefaultRoutes} +) + +func (rf RouteFile) extract() ([]Route, error) { + file, err := os.Open(rf.name) + if err != nil { + return nil, err } + defer file.Close() + return rf.parse(file) +} + +// getIPv4DefaultRoutes obtains the IPv4 routes, and filters out non-default routes. +func getIPv4DefaultRoutes(input io.Reader) ([]Route, error) { + routes := []Route{} scanner := bufio.NewReader(input) for { line, err := scanner.ReadString('\n') @@ -52,24 +81,71 @@ func getRoutes(input io.Reader) ([]Route, error) { continue } fields := strings.Fields(line) - routes = append(routes, Route{}) - route := &routes[len(routes)-1] - route.Interface = fields[0] - ip, err := parseIP(fields[1]) + // Interested in fields: + // 0 - interface name + // 1 - destination address + // 2 - gateway + dest, err := parseIP(fields[1], familyIPv4) if err != nil { return nil, err } - route.Destination = ip - ip, err = parseIP(fields[2]) + gw, err := parseIP(fields[2], familyIPv4) if err != nil { return nil, err } - route.Gateway = ip + if !dest.Equal(net.IPv4zero) { + continue + } + routes = append(routes, Route{ + Interface: fields[0], + Destination: dest, + Gateway: gw, + Family: familyIPv4, + }) } return routes, nil } -func parseIP(str string) (net.IP, error) { +func getIPv6DefaultRoutes(input io.Reader) ([]Route, error) { + routes := []Route{} + scanner := bufio.NewReader(input) + for { + line, err := scanner.ReadString('\n') + if err == io.EOF { + break + } + fields := strings.Fields(line) + // Interested in fields: + // 0 - destination address + // 4 - gateway + // 9 - interface name + dest, err := parseIP(fields[0], familyIPv6) + if err != nil { + return nil, err + } + gw, err := parseIP(fields[4], familyIPv6) + if err != nil { + return nil, err + } + if !dest.Equal(net.IPv6zero) { + continue + } + if gw.Equal(net.IPv6zero) { + continue // loopback + } + routes = append(routes, Route{ + Interface: fields[9], + Destination: dest, + Gateway: gw, + Family: familyIPv6, + }) + } + return routes, nil +} + +// parseIP takes the hex IP address string from route file and converts it +// to a net.IP address. For IPv4, the value must be converted to big endian. 
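// Worked example (illustrative values): the /proc/net/route destination "0101A8C0"
// hex-decodes to 01 01 a8 c0, and the big-endian flip yields c0 a8 01 01, i.e.
// 192.168.1.1; "00000000" yields 0.0.0.0, the IPv4 default-route destination.
//
//	ip, err := parseIP("0101A8C0", familyIPv4) // ip.String() == "192.168.1.1" when err is nil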
+func parseIP(str string, family AddressFamily) (net.IP, error) { if str == "" { return nil, fmt.Errorf("input is nil") } @@ -77,11 +153,16 @@ func parseIP(str string) (net.IP, error) { if err != nil { return nil, err } - //TODO add ipv6 support - if len(bytes) != net.IPv4len { - return nil, fmt.Errorf("only IPv4 is supported") + if family == familyIPv4 { + if len(bytes) != net.IPv4len { + return nil, fmt.Errorf("invalid IPv4 address in route") + } + return net.IP([]byte{bytes[3], bytes[2], bytes[1], bytes[0]}), nil + } + // Must be IPv6 + if len(bytes) != net.IPv6len { + return nil, fmt.Errorf("invalid IPv6 address in route") } - bytes[0], bytes[1], bytes[2], bytes[3] = bytes[3], bytes[2], bytes[1], bytes[0] return net.IP(bytes), nil } @@ -96,10 +177,13 @@ func isInterfaceUp(intf *net.Interface) bool { return false } -//getFinalIP method receives all the IP addrs of a Interface -//and returns a nil if the address is Loopback, Ipv6, link-local or nil. -//It returns a valid IPv4 if an Ipv4 address is found in the array. -func getFinalIP(addrs []net.Addr) (net.IP, error) { +func isLoopbackOrPointToPoint(intf *net.Interface) bool { + return intf.Flags&(net.FlagLoopback|net.FlagPointToPoint) != 0 +} + +// getMatchingGlobalIP returns the first valid global unicast address of the given +// 'family' from the list of 'addrs'. +func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) { if len(addrs) > 0 { for i := range addrs { glog.V(4).Infof("Checking addr %s.", addrs[i].String()) @@ -107,17 +191,15 @@ func getFinalIP(addrs []net.Addr) (net.IP, error) { if err != nil { return nil, err } - //Only IPv4 - //TODO : add IPv6 support - if ip.To4() != nil { - if !ip.IsLoopback() && !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() { + if memberOf(ip, family) { + if ip.IsGlobalUnicast() { glog.V(4).Infof("IP found %v", ip) return ip, nil } else { - glog.V(4).Infof("Loopback/link-local found %v", ip) + glog.V(4).Infof("Non-global unicast address found %v", ip) } } else { - glog.V(4).Infof("%v is not a valid IPv4 address", ip) + glog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) } } @@ -125,7 +207,9 @@ func getFinalIP(addrs []net.Addr) (net.IP, error) { return nil, nil } -func getIPFromInterface(intfName string, nw networkInterfacer) (net.IP, error) { +// getIPFromInterface gets the IPs on an interface and returns a global unicast address, if any. The +// interface must be up, the IP must in the family requested, and the IP must be a global unicast address. +func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInterfacer) (net.IP, error) { intf, err := nw.InterfaceByName(intfName) if err != nil { return nil, err @@ -136,131 +220,161 @@ func getIPFromInterface(intfName string, nw networkInterfacer) (net.IP, error) { return nil, err } glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) - finalIP, err := getFinalIP(addrs) + matchingIP, err := getMatchingGlobalIP(addrs, forFamily) if err != nil { return nil, err } - if finalIP != nil { - glog.V(4).Infof("valid IPv4 address for interface %q found as %v.", intfName, finalIP) - return finalIP, nil + if matchingIP != nil { + glog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) + return matchingIP, nil } } - return nil, nil } -func flagsSet(flags net.Flags, test net.Flags) bool { - return flags&test != 0 +// memberOF tells if the IP is of the desired family. Used for checking interface addresses. 
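// For example:
//
//	memberOf(net.ParseIP("10.1.2.3"), familyIPv4)    // true
//	memberOf(net.ParseIP("2001:db8::1"), familyIPv4) // false (it is an IPv6 address)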
+func memberOf(ip net.IP, family AddressFamily) bool { + if ip.To4() != nil { + return family == familyIPv4 + } else { + return family == familyIPv6 + } } -func flagsClear(flags net.Flags, test net.Flags) bool { - return flags&test == 0 -} - -func chooseHostInterfaceNativeGo() (net.IP, error) { - intfs, err := net.Interfaces() +// chooseIPFromHostInterfaces looks at all system interfaces, trying to find one that is up that +// has a global unicast address (non-loopback, non-link local, non-point2point), and returns the IP. +// Searches for IPv4 addresses, and then IPv6 addresses. +func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { + intfs, err := nw.Interfaces() if err != nil { return nil, err } - i := 0 - var ip net.IP - for i = range intfs { - if flagsSet(intfs[i].Flags, net.FlagUp) && flagsClear(intfs[i].Flags, net.FlagLoopback|net.FlagPointToPoint) { - addrs, err := intfs[i].Addrs() + if len(intfs) == 0 { + return nil, fmt.Errorf("no interfaces found on host.") + } + for _, family := range []AddressFamily{familyIPv4, familyIPv6} { + glog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) + for _, intf := range intfs { + if !isInterfaceUp(&intf) { + glog.V(4).Infof("Skipping: down interface %q", intf.Name) + continue + } + if isLoopbackOrPointToPoint(&intf) { + glog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) + continue + } + addrs, err := nw.Addrs(&intf) if err != nil { return nil, err } - if len(addrs) > 0 { - for _, addr := range addrs { - if addrIP, _, err := net.ParseCIDR(addr.String()); err == nil { - if addrIP.To4() != nil { - ip = addrIP.To4() - if !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() { - break - } - } - } + if len(addrs) == 0 { + glog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) + continue + } + for _, addr := range addrs { + ip, _, err := net.ParseCIDR(addr.String()) + if err != nil { + return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err) } - if ip != nil { - // This interface should suffice. - break + if !memberOf(ip, family) { + glog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) + continue } + // TODO: Decide if should open up to allow IPv6 LLAs in future. + if !ip.IsGlobalUnicast() { + glog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) + continue + } + glog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) + return ip, nil } } } - if ip == nil { - return nil, fmt.Errorf("no acceptable interface from host") - } - glog.V(4).Infof("Choosing interface %s (IP %v) as default", intfs[i].Name, ip) - return ip, nil + return nil, fmt.Errorf("no acceptable interface with global unicast address found on host") } -//ChooseHostInterface is a method used fetch an IP for a daemon. -//It uses data from /proc/net/route file. -//For a node with no internet connection ,it returns error -//For a multi n/w interface node it returns the IP of the interface with gateway on it. +// ChooseHostInterface is a method used fetch an IP for a daemon. +// If there is no routing info file, it will choose a global IP from the system +// interfaces. Otherwise, it will use IPv4 and IPv6 route information to return the +// IP of the interface with a gateway on it (with priority given to IPv4). For a node +// with no internet connection, it returns error. 
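// A typical call site (sketch; the daemon wiring and log message are hypothetical):
//
//	hostIP, err := ChooseHostInterface()
//	if err != nil {
//		return fmt.Errorf("unable to determine a host IP to advertise: %v", err)
//	}
//	glog.Infof("advertising address %s", hostIP)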
func ChooseHostInterface() (net.IP, error) { - inFile, err := os.Open("/proc/net/route") + var nw networkInterfacer = networkInterface{} + if _, err := os.Stat(ipv4RouteFile); os.IsNotExist(err) { + return chooseIPFromHostInterfaces(nw) + } + routes, err := getAllDefaultRoutes() if err != nil { - if os.IsNotExist(err) { - return chooseHostInterfaceNativeGo() - } return nil, err } - defer inFile.Close() - var nw networkInterfacer = networkInterface{} - return chooseHostInterfaceFromRoute(inFile, nw) + return chooseHostInterfaceFromRoute(routes, nw) } +// networkInterfacer defines an interface for several net library functions. Production +// code will forward to net library functions, and unit tests will override the methods +// for testing purposes. type networkInterfacer interface { InterfaceByName(intfName string) (*net.Interface, error) Addrs(intf *net.Interface) ([]net.Addr, error) + Interfaces() ([]net.Interface, error) } +// networkInterface implements the networkInterfacer interface for production code, just +// wrapping the underlying net library function calls. type networkInterface struct{} func (_ networkInterface) InterfaceByName(intfName string) (*net.Interface, error) { - intf, err := net.InterfaceByName(intfName) - if err != nil { - return nil, err - } - return intf, nil + return net.InterfaceByName(intfName) } func (_ networkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { - addrs, err := intf.Addrs() - if err != nil { - return nil, err - } - return addrs, nil + return intf.Addrs() } -func chooseHostInterfaceFromRoute(inFile io.Reader, nw networkInterfacer) (net.IP, error) { - routes, err := getRoutes(inFile) +func (_ networkInterface) Interfaces() ([]net.Interface, error) { + return net.Interfaces() +} + +// getAllDefaultRoutes obtains IPv4 and IPv6 default routes on the node. If unable +// to read the IPv4 routing info file, we return an error. If unable to read the IPv6 +// routing info file (which is optional), we'll just use the IPv4 route information. +// Using all the routing info, if no default routes are found, an error is returned. +func getAllDefaultRoutes() ([]Route, error) { + routes, err := v4File.extract() if err != nil { return nil, err } - zero := net.IP{0, 0, 0, 0} - var finalIP net.IP - for i := range routes { - //find interface with gateway - if routes[i].Destination.Equal(zero) { - glog.V(4).Infof("Default route transits interface %q", routes[i].Interface) - finalIP, err := getIPFromInterface(routes[i].Interface, nw) + v6Routes, _ := v6File.extract() + routes = append(routes, v6Routes...) + if len(routes) == 0 { + return nil, fmt.Errorf("No default routes.") + } + return routes, nil +} + +// chooseHostInterfaceFromRoute cycles through each default route provided, looking for a +// global IP address from the interface for the route. Will first look all each IPv4 route for +// an IPv4 IP, and then will look at each IPv6 route for an IPv6 IP. 
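// In unit tests this can be driven with a fake networkInterfacer; a minimal sketch
// (the fake type and its canned data are hypothetical, not part of this package):
//
//	type fakeNetwork struct {
//		intfs map[string]net.Interface
//		addrs map[string][]net.Addr
//	}
//
//	func (f fakeNetwork) InterfaceByName(name string) (*net.Interface, error) {
//		intf := f.intfs[name]
//		return &intf, nil
//	}
//	func (f fakeNetwork) Addrs(intf *net.Interface) ([]net.Addr, error) { return f.addrs[intf.Name], nil }
//	func (f fakeNetwork) Interfaces() ([]net.Interface, error)          { return nil, nil }
//
//	ip, err := chooseHostInterfaceFromRoute(routes, fakeNetwork{ /* canned routes/addrs */ })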
+func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) { + for _, family := range []AddressFamily{familyIPv4, familyIPv6} { + glog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) + for _, route := range routes { + if route.Family != family { + continue + } + glog.V(4).Infof("Default route transits interface %q", route.Interface) + finalIP, err := getIPFromInterface(route.Interface, family, nw) if err != nil { return nil, err } if finalIP != nil { - glog.V(4).Infof("Choosing IP %v ", finalIP) + glog.V(4).Infof("Found active IP %v ", finalIP) return finalIP, nil } } } - glog.V(4).Infof("No valid IP found") - if finalIP == nil { - return nil, fmt.Errorf("Unable to select an IP.") - } - return nil, nil + glog.V(4).Infof("No active IP found by looking at default routes") + return nil, fmt.Errorf("unable to select an IP from default routes.") } // If bind-address is usable, return it directly diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go index 6a50e6186..7b6eca893 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go @@ -43,14 +43,19 @@ func (pr PortRange) String() string { return fmt.Sprintf("%d-%d", pr.Base, pr.Base+pr.Size-1) } -// Set parses a string of the form "min-max", inclusive at both ends, and +// Set parses a string of the form "value", "min-max", or "min+offset", inclusive at both ends, and // sets the PortRange from it. This is part of the flag.Value and pflag.Value // interfaces. func (pr *PortRange) Set(value string) error { - value = strings.TrimSpace(value) + const ( + SinglePortNotation = 1 << iota + HyphenNotation + PlusNotation + ) - // TODO: Accept "80" syntax - // TODO: Accept "80+8" syntax + value = strings.TrimSpace(value) + hyphenIndex := strings.Index(value, "-") + plusIndex := strings.Index(value, "+") if value == "" { pr.Base = 0 @@ -58,20 +63,51 @@ func (pr *PortRange) Set(value string) error { return nil } - hyphenIndex := strings.Index(value, "-") - if hyphenIndex == -1 { - return fmt.Errorf("expected hyphen in port range") + var err error + var low, high int + var notation int + + if plusIndex == -1 && hyphenIndex == -1 { + notation |= SinglePortNotation + } + if hyphenIndex != -1 { + notation |= HyphenNotation + } + if plusIndex != -1 { + notation |= PlusNotation } - var err error - var low int - var high int - low, err = strconv.Atoi(value[:hyphenIndex]) - if err == nil { + switch notation { + case SinglePortNotation: + var port int + port, err = strconv.Atoi(value) + if err != nil { + return err + } + low = port + high = port + case HyphenNotation: + low, err = strconv.Atoi(value[:hyphenIndex]) + if err != nil { + return err + } high, err = strconv.Atoi(value[hyphenIndex+1:]) - } - if err != nil { - return fmt.Errorf("unable to parse port range: %s: %v", value, err) + if err != nil { + return err + } + case PlusNotation: + var offset int + low, err = strconv.Atoi(value[:plusIndex]) + if err != nil { + return err + } + offset, err = strconv.Atoi(value[plusIndex+1:]) + if err != nil { + return err + } + high = low + offset + default: + return fmt.Errorf("unable to parse port range: %s", value) } if low > 65535 || high > 65535 { diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go index 461144f0b..8344d10c8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/util.go +++ 
b/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -18,6 +18,8 @@ package net import ( "net" + "net/url" + "os" "reflect" "syscall" ) @@ -38,8 +40,16 @@ func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool { // Returns if the given err is "connection reset by peer" error. func IsConnectionReset(err error) bool { - opErr, ok := err.(*net.OpError) - if ok && opErr.Err.Error() == syscall.ECONNRESET.Error() { + if urlErr, ok := err.(*url.Error); ok { + err = urlErr.Err + } + if opErr, ok := err.(*net.OpError); ok { + err = opErr.Err + } + if osErr, ok := err.(*os.SyscallError); ok { + err = osErr.Err + } + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ECONNRESET { return true } return false diff --git a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go index db109c2cd..9421edae8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go +++ b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go @@ -70,16 +70,51 @@ func Perm(n int) []int { return rng.rand.Perm(n) } -// We omit vowels from the set of available characters to reduce the chances -// of "bad words" being formed. -var alphanums = []rune("bcdfghjklmnpqrstvwxz0123456789") +const ( + // We omit vowels from the set of available characters to reduce the chances + // of "bad words" being formed. + alphanums = "bcdfghjklmnpqrstvwxz2456789" + // No. of bits required to index into alphanums string. + alphanumsIdxBits = 5 + // Mask used to extract last alphanumsIdxBits of an int. + alphanumsIdxMask = 1<>= alphanumsIdxBits + remaining-- } return string(b) } + +// SafeEncodeString encodes s using the same characters as rand.String. This reduces the chances of bad words and +// ensures that strings generated from hash functions appear consistent throughout the API. +func SafeEncodeString(s string) string { + r := make([]byte, len(s)) + for i, b := range []rune(s) { + r[i] = alphanums[(int(b) % len(alphanums))] + } + return string(r) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 748174e19..da32fe12f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -43,7 +43,7 @@ var PanicHandlers = []func(interface{}){logPanic} // TODO: remove this function. We are switching to a world where it's safe for // apiserver to panic, since it will be restarted by kubelet. At the beginning // of the Kubernetes project, nothing was going to restart apiserver and so -// catching panics was important. But it's actually much simpler for montoring +// catching panics was important. But it's actually much simpler for monitoring // software if we just exit when an unexpected panic happens. func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { @@ -129,6 +129,7 @@ func (r *rudimentaryErrorBackoff) OnError(error) { defer r.lastErrorTimeLock.Unlock() d := time.Since(r.lastErrorTime) if d < r.minPeriod { + // If the time moves backwards for any reason, do nothing time.Sleep(r.minPeriod - d) } r.lastErrorTime = time.Now() @@ -159,3 +160,10 @@ func RecoverFromPanic(err *error) { callers) } } + +// Must panics on non-nil errors. Useful to handling programmer level errors. 
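// A common call-site pattern (sketch; the scheme-registration call and the
// utilruntime import alias are assumptions for illustration):
//
//	import utilruntime "k8s.io/apimachinery/pkg/util/runtime"
//
//	func init() {
//		// Registration can only fail due to a programming error, so panic early.
//		utilruntime.Must(AddToScheme(scheme))
//	}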
+func Must(err error) { + if err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go index a460e4b1f..766f4501e 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by set-gen. Do not edit it manually! +// Code generated by set-gen. DO NOT EDIT. package sets @@ -26,7 +26,7 @@ import ( // sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption. type Byte map[byte]Empty -// New creates a Byte from a list of values. +// NewByte creates a Byte from a list of values. func NewByte(items ...byte) Byte { ss := Byte{} ss.Insert(items...) diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go index 28a6a7d5c..b152a0bf0 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by set-gen. Do not edit it manually! +// Code generated by set-gen. DO NOT EDIT. // Package sets has auto-generated set types. package sets diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go index cd22b953a..e11e622c5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by set-gen. Do not edit it manually! +// Code generated by set-gen. DO NOT EDIT. package sets diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go index 0614e9fb0..a0a513cd9 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by set-gen. Do not edit it manually! +// Code generated by set-gen. DO NOT EDIT. package sets @@ -26,7 +26,7 @@ import ( // sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption. type Int map[int]Empty -// New creates a Int from a list of values. 
+// NewInt creates a Int from a list of values. func NewInt(items ...int) Int { ss := Int{} ss.Insert(items...) diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go index 82e1ba782..9ca9af0c5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by set-gen. Do not edit it manually! +// Code generated by set-gen. DO NOT EDIT. package sets @@ -26,7 +26,7 @@ import ( // sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption. type Int64 map[int64]Empty -// New creates a Int64 from a list of values. +// NewInt64 creates a Int64 from a list of values. func NewInt64(items ...int64) Int64 { ss := Int64{} ss.Insert(items...) diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go index baef7a6a2..ba00ad7df 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by set-gen. Do not edit it manually! +// Code generated by set-gen. DO NOT EDIT. package sets @@ -26,7 +26,7 @@ import ( // sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. type String map[string]Empty -// New creates a String from a list of values. +// NewString creates a String from a list of values. func NewString(items ...string) String { ss := String{} ss.Insert(items...) diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS new file mode 100644 index 000000000..8e8d9fce8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -0,0 +1,5 @@ +approvers: +- pwittrock +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go new file mode 100644 index 000000000..ab66d0452 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package strategicpatch + +import ( + "fmt" +) + +type LookupPatchMetaError struct { + Path string + Err error +} + +func (e LookupPatchMetaError) Error() string { + return fmt.Sprintf("LookupPatchMetaError(%s): %v", e.Path, e.Err) +} + +type FieldNotFoundError struct { + Path string + Field string +} + +func (e FieldNotFoundError) Error() string { + return fmt.Sprintf("unable to find api field %q in %s", e.Field, e.Path) +} + +type InvalidTypeError struct { + Path string + Expected string + Actual string +} + +func (e InvalidTypeError) Error() string { + return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go new file mode 100644 index 000000000..c31de15e7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -0,0 +1,194 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "errors" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/util/mergepatch" + forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +type PatchMeta struct { + patchStrategies []string + patchMergeKey string +} + +func (pm PatchMeta) GetPatchStrategies() []string { + if pm.patchStrategies == nil { + return []string{} + } + return pm.patchStrategies +} + +func (pm PatchMeta) SetPatchStrategies(ps []string) { + pm.patchStrategies = ps +} + +func (pm PatchMeta) GetPatchMergeKey() string { + return pm.patchMergeKey +} + +func (pm PatchMeta) SetPatchMergeKey(pmk string) { + pm.patchMergeKey = pmk +} + +type LookupPatchMeta interface { + // LookupPatchMetadataForStruct gets subschema and the patch metadata (e.g. patch strategy and merge key) for map. + LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) + // LookupPatchMetadataForSlice get subschema and the patch metadata for slice. 
+ LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) + // Get the type name of the field + Name() string +} + +type PatchMetaFromStruct struct { + T reflect.Type +} + +func NewPatchMetaFromStruct(dataStruct interface{}) (PatchMetaFromStruct, error) { + t, err := getTagStructType(dataStruct) + return PatchMetaFromStruct{T: t}, err +} + +var _ LookupPatchMeta = PatchMetaFromStruct{} + +func (s PatchMetaFromStruct) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadataForStruct(s.T, key) + if err != nil { + return nil, PatchMeta{}, err + } + + return PatchMetaFromStruct{T: fieldType}, + PatchMeta{ + patchStrategies: fieldPatchStrategies, + patchMergeKey: fieldPatchMergeKey, + }, nil +} + +func (s PatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + subschema, patchMeta, err := s.LookupPatchMetadataForStruct(key) + if err != nil { + return nil, PatchMeta{}, err + } + elemPatchMetaFromStruct := subschema.(PatchMetaFromStruct) + t := elemPatchMetaFromStruct.T + + var elemType reflect.Type + switch t.Kind() { + // If t is an array or a slice, get the element type. + // If element is still an array or a slice, return an error. + // Otherwise, return element type. + case reflect.Array, reflect.Slice: + elemType = t.Elem() + if elemType.Kind() == reflect.Array || elemType.Kind() == reflect.Slice { + return nil, PatchMeta{}, errors.New("unexpected slice of slice") + } + // If t is an pointer, get the underlying element. + // If the underlying element is neither an array nor a slice, the pointer is pointing to a slice, + // e.g. https://github.com/kubernetes/kubernetes/blob/bc22e206c79282487ea0bf5696d5ccec7e839a76/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go#L2782-L2822 + // If the underlying element is either an array or a slice, return its element type. 
+ case reflect.Ptr: + t = t.Elem() + if t.Kind() == reflect.Array || t.Kind() == reflect.Slice { + t = t.Elem() + } + elemType = t + default: + return nil, PatchMeta{}, fmt.Errorf("expected slice or array type, but got: %s", s.T.Kind().String()) + } + + return PatchMetaFromStruct{T: elemType}, patchMeta, nil +} + +func (s PatchMetaFromStruct) Name() string { + return s.T.Kind().String() +} + +func getTagStructType(dataStruct interface{}) (reflect.Type, error) { + if dataStruct == nil { + return nil, mergepatch.ErrBadArgKind(struct{}{}, nil) + } + + t := reflect.TypeOf(dataStruct) + // Get the underlying type for pointers + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct) + } + + return t, nil +} + +func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { + t, err := getTagStructType(dataStruct) + if err != nil { + panic(err) + } + return t +} + +type PatchMetaFromOpenAPI struct { + Schema openapi.Schema +} + +func NewPatchMetaFromOpenAPI(s openapi.Schema) PatchMetaFromOpenAPI { + return PatchMetaFromOpenAPI{Schema: s} +} + +var _ LookupPatchMeta = PatchMetaFromOpenAPI{} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + kindItem := NewKindItem(key, s.Schema.GetPath()) + s.Schema.Accept(kindItem) + + err := kindItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: kindItem.subschema}, + kindItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + sliceItem := NewSliceItem(key, s.Schema.GetPath()) + s.Schema.Accept(sliceItem) + + err := sliceItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: sliceItem.subschema}, + sliceItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) Name() string { + schema := s.Schema + return schema.GetName() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go new file mode 100644 index 000000000..6be328f74 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -0,0 +1,2174 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/mergepatch" +) + +// An alternate implementation of JSON Merge Patch +// (https://tools.ietf.org/html/rfc7386) which supports the ability to annotate +// certain fields with metadata that indicates whether the elements of JSON +// lists should be merged or replaced. 
+// +// For more information, see the PATCH section of docs/devel/api-conventions.md. +// +// Some of the content of this package was borrowed with minor adaptations from +// evanphx/json-patch and openshift/origin. + +const ( + directiveMarker = "$patch" + deleteDirective = "delete" + replaceDirective = "replace" + mergeDirective = "merge" + + retainKeysStrategy = "retainKeys" + + deleteFromPrimitiveListDirectivePrefix = "$deleteFromPrimitiveList" + retainKeysDirective = "$" + retainKeysStrategy + setElementOrderDirectivePrefix = "$setElementOrder" +) + +// JSONMap is a representations of JSON object encoded as map[string]interface{} +// where the children can be either map[string]interface{}, []interface{} or +// primitive type). +// Operating on JSONMap representation is much faster as it doesn't require any +// json marshaling and/or unmarshaling operations. +type JSONMap map[string]interface{} + +type DiffOptions struct { + // SetElementOrder determines whether we generate the $setElementOrder parallel list. + SetElementOrder bool + // IgnoreChangesAndAdditions indicates if we keep the changes and additions in the patch. + IgnoreChangesAndAdditions bool + // IgnoreDeletions indicates if we keep the deletions in the patch. + IgnoreDeletions bool + // We introduce a new value retainKeys for patchStrategy. + // It indicates that all fields needing to be preserved must be + // present in the `retainKeys` list. + // And the fields that are present will be merged with live object. + // All the missing fields will be cleared when patching. + BuildRetainKeysDirective bool +} + +type MergeOptions struct { + // MergeParallelList indicates if we are merging the parallel list. + // We don't merge parallel list when calling mergeMap() in CreateThreeWayMergePatch() + // which is called client-side. + // We merge parallel list iff when calling mergeMap() in StrategicMergeMapPatch() + // which is called server-side + MergeParallelList bool + // IgnoreUnmatchedNulls indicates if we should process the unmatched nulls. + IgnoreUnmatchedNulls bool +} + +// The following code is adapted from github.com/openshift/origin/pkg/util/jsonmerge. +// Instead of defining a Delta that holds an original, a patch and a set of preconditions, +// the reconcile method accepts a set of preconditions as an argument. + +// CreateTwoWayMergePatch creates a patch that can be passed to StrategicMergePatch from an original +// document and a modified document, which are passed to the method as json encoded content. It will +// return a patch that yields the modified document when applied to the original document, or an error +// if either of the two documents is invalid. +func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return CreateTwoWayMergePatchUsingLookupPatchMeta(original, modified, schema, fns...) 
+} + +func CreateTwoWayMergePatchUsingLookupPatchMeta( + original, modified []byte, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + originalMap := map[string]interface{}{} + if len(original) > 0 { + if err := json.Unmarshal(original, &originalMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + modifiedMap := map[string]interface{}{} + if len(modified) > 0 { + if err := json.Unmarshal(modified, &modifiedMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + patchMap, err := CreateTwoWayMergeMapPatchUsingLookupPatchMeta(originalMap, modifiedMap, schema, fns...) + if err != nil { + return nil, err + } + + return json.Marshal(patchMap) +} + +// CreateTwoWayMergeMapPatch creates a patch from an original and modified JSON objects, +// encoded JSONMap. +// The serialized version of the map can then be passed to StrategicMergeMapPatch. +func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified, schema, fns...) +} + +func CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified JSONMap, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { + diffOptions := DiffOptions{ + SetElementOrder: true, + } + patchMap, err := diffMaps(original, modified, schema, diffOptions) + if err != nil { + return nil, err + } + + // Apply the preconditions to the patch, and return an error if any of them fail. + for _, fn := range fns { + if !fn(patchMap) { + return nil, mergepatch.NewErrPreconditionFailed(patchMap) + } + } + + return patchMap, nil +} + +// Returns a (recursive) strategic merge patch that yields modified when applied to original. +// Including: +// - Adding fields to the patch present in modified, missing from original +// - Setting fields to the patch present in modified and original with different values +// - Delete fields present in original, missing from modified through +// - IFF map field - set to nil in patch +// - IFF list of maps && merge strategy - use deleteDirective for the elements +// - IFF list of primitives && merge strategy - use parallel deletion list +// - IFF list of maps or primitives with replace strategy (default) - set patch value to the value in modified +// - Build $retainKeys directive for fields with retainKeys patch strategy +func diffMaps(original, modified map[string]interface{}, schema LookupPatchMeta, diffOptions DiffOptions) (map[string]interface{}, error) { + patch := map[string]interface{}{} + + // This will be used to build the $retainKeys directive sent in the patch + retainKeysList := make([]interface{}, 0, len(modified)) + + // Compare each value in the modified map against the value in the original map + for key, modifiedValue := range modified { + // Get the underlying type for pointers + if diffOptions.BuildRetainKeysDirective && modifiedValue != nil { + retainKeysList = append(retainKeysList, key) + } + + originalValue, ok := original[key] + if !ok { + // Key was added, so add to patch + if !diffOptions.IgnoreChangesAndAdditions { + patch[key] = modifiedValue + } + continue + } + + // The patch may have a patch directive + // TODO: figure out if we need this. This shouldn't be needed by apply. When would the original map have patch directives in it? 
+ foundDirectiveMarker, err := handleDirectiveMarker(key, originalValue, modifiedValue, patch) + if err != nil { + return nil, err + } + if foundDirectiveMarker { + continue + } + + if reflect.TypeOf(originalValue) != reflect.TypeOf(modifiedValue) { + // Types have changed, so add to patch + if !diffOptions.IgnoreChangesAndAdditions { + patch[key] = modifiedValue + } + continue + } + + // Types are the same, so compare values + switch originalValueTyped := originalValue.(type) { + case map[string]interface{}: + modifiedValueTyped := modifiedValue.(map[string]interface{}) + err = handleMapDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) + case []interface{}: + modifiedValueTyped := modifiedValue.([]interface{}) + err = handleSliceDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) + default: + replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions) + } + if err != nil { + return nil, err + } + } + + updatePatchIfMissing(original, modified, patch, diffOptions) + // Insert the retainKeysList iff there are values present in the retainKeysList and + // either of the following is true: + // - the patch is not empty + // - there are additional field in original that need to be cleared + if len(retainKeysList) > 0 && + (len(patch) > 0 || hasAdditionalNewField(original, modified)) { + patch[retainKeysDirective] = sortScalars(retainKeysList) + } + return patch, nil +} + +// handleDirectiveMarker handles how to diff directive marker between 2 objects +func handleDirectiveMarker(key string, originalValue, modifiedValue interface{}, patch map[string]interface{}) (bool, error) { + if key == directiveMarker { + originalString, ok := originalValue.(string) + if !ok { + return false, fmt.Errorf("invalid value for special key: %s", directiveMarker) + } + modifiedString, ok := modifiedValue.(string) + if !ok { + return false, fmt.Errorf("invalid value for special key: %s", directiveMarker) + } + if modifiedString != originalString { + patch[directiveMarker] = modifiedValue + } + return true, nil + } + return false, nil +} + +// handleMapDiff diff between 2 maps `originalValueTyped` and `modifiedValue`, +// puts the diff in the `patch` associated with `key` +// key is the key associated with originalValue and modifiedValue. +// originalValue, modifiedValue are the old and new value respectively.They are both maps +// patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue +// diffOptions contains multiple options to control how we do the diff. 
+func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]interface{}, + schema LookupPatchMeta, diffOptions DiffOptions) error { + subschema, patchMeta, err := schema.LookupPatchMetadataForStruct(key) + + if err != nil { + // We couldn't look up metadata for the field + // If the values are identical, this doesn't matter, no patch is needed + if reflect.DeepEqual(originalValue, modifiedValue) { + return nil + } + // Otherwise, return the error + return err + } + retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return err + } + diffOptions.BuildRetainKeysDirective = retainKeys + switch patchStrategy { + // The patch strategic from metadata tells us to replace the entire object instead of diffing it + case replaceDirective: + if !diffOptions.IgnoreChangesAndAdditions { + patch[key] = modifiedValue + } + default: + patchValue, err := diffMaps(originalValue, modifiedValue, subschema, diffOptions) + if err != nil { + return err + } + // Maps were not identical, use provided patch value + if len(patchValue) > 0 { + patch[key] = patchValue + } + } + return nil +} + +// handleSliceDiff diff between 2 slices `originalValueTyped` and `modifiedValue`, +// puts the diff in the `patch` associated with `key` +// key is the key associated with originalValue and modifiedValue. +// originalValue, modifiedValue are the old and new value respectively.They are both slices +// patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue +// diffOptions contains multiple options to control how we do the diff. +func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, patch map[string]interface{}, + schema LookupPatchMeta, diffOptions DiffOptions) error { + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(key) + if err != nil { + // We couldn't look up metadata for the field + // If the values are identical, this doesn't matter, no patch is needed + if reflect.DeepEqual(originalValue, modifiedValue) { + return nil + } + // Otherwise, return the error + return err + } + retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return err + } + switch patchStrategy { + // Merge the 2 slices using mergePatchKey + case mergeDirective: + diffOptions.BuildRetainKeysDirective = retainKeys + addList, deletionList, setOrderList, err := diffLists(originalValue, modifiedValue, subschema, patchMeta.GetPatchMergeKey(), diffOptions) + if err != nil { + return err + } + if len(addList) > 0 { + patch[key] = addList + } + // generate a parallel list for deletion + if len(deletionList) > 0 { + parallelDeletionListKey := fmt.Sprintf("%s/%s", deleteFromPrimitiveListDirectivePrefix, key) + patch[parallelDeletionListKey] = deletionList + } + if len(setOrderList) > 0 { + parallelSetOrderListKey := fmt.Sprintf("%s/%s", setElementOrderDirectivePrefix, key) + patch[parallelSetOrderListKey] = setOrderList + } + default: + replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions) + } + return nil +} + +// replacePatchFieldIfNotEqual updates the patch if original and modified are not deep equal +// if diffOptions.IgnoreChangesAndAdditions is false. 
+// original is the old value, maybe either the live cluster object or the last applied configuration +// modified is the new value, is always the users new config +func replacePatchFieldIfNotEqual(key string, original, modified interface{}, + patch map[string]interface{}, diffOptions DiffOptions) { + if diffOptions.IgnoreChangesAndAdditions { + // Ignoring changes - do nothing + return + } + if reflect.DeepEqual(original, modified) { + // Contents are identical - do nothing + return + } + // Create a patch to replace the old value with the new one + patch[key] = modified +} + +// updatePatchIfMissing iterates over `original` when ignoreDeletions is false. +// Clear the field whose key is not present in `modified`. +// original is the old value, maybe either the live cluster object or the last applied configuration +// modified is the new value, is always the users new config +func updatePatchIfMissing(original, modified, patch map[string]interface{}, diffOptions DiffOptions) { + if diffOptions.IgnoreDeletions { + // Ignoring deletion - do nothing + return + } + // Add nils for deleted values + for key := range original { + if _, found := modified[key]; !found { + patch[key] = nil + } + } +} + +// validateMergeKeyInLists checks if each map in the list has the mentryerge key. +func validateMergeKeyInLists(mergeKey string, lists ...[]interface{}) error { + for _, list := range lists { + for _, item := range list { + m, ok := item.(map[string]interface{}) + if !ok { + return mergepatch.ErrBadArgType(m, item) + } + if _, ok = m[mergeKey]; !ok { + return mergepatch.ErrNoMergeKey(m, mergeKey) + } + } + } + return nil +} + +// normalizeElementOrder sort `patch` list by `patchOrder` and sort `serverOnly` list by `serverOrder`. +// Then it merges the 2 sorted lists. +// It guarantee the relative order in the patch list and in the serverOnly list is kept. +// `patch` is a list of items in the patch, and `serverOnly` is a list of items in the live object. +// `patchOrder` is the order we want `patch` list to have and +// `serverOrder` is the order we want `serverOnly` list to have. +// kind is the kind of each item in the lists `patch` and `serverOnly`. +func normalizeElementOrder(patch, serverOnly, patchOrder, serverOrder []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) { + patch, err := normalizeSliceOrder(patch, patchOrder, mergeKey, kind) + if err != nil { + return nil, err + } + serverOnly, err = normalizeSliceOrder(serverOnly, serverOrder, mergeKey, kind) + if err != nil { + return nil, err + } + all := mergeSortedSlice(serverOnly, patch, serverOrder, mergeKey, kind) + + return all, nil +} + +// mergeSortedSlice merges the 2 sorted lists by serverOrder with best effort. +// It will insert each item in `left` list to `right` list. In most cases, the 2 lists will be interleaved. +// The relative order of left and right are guaranteed to be kept. +// They have higher precedence than the order in the live list. +// The place for a item in `left` is found by: +// scan from the place of last insertion in `right` to the end of `right`, +// the place is before the first item that is greater than the item we want to insert. +// example usage: using server-only items as left and patch items as right. We insert server-only items +// to patch list. We use the order of live object as record for comparison. +func mergeSortedSlice(left, right, serverOrder []interface{}, mergeKey string, kind reflect.Kind) []interface{} { + // Returns if l is less than r, and if both have been found. 
+ // If l and r both present and l is in front of r, l is less than r. + less := func(l, r interface{}) (bool, bool) { + li := index(serverOrder, l, mergeKey, kind) + ri := index(serverOrder, r, mergeKey, kind) + if li >= 0 && ri >= 0 { + return li < ri, true + } else { + return false, false + } + } + + // left and right should be non-overlapping. + size := len(left) + len(right) + i, j := 0, 0 + s := make([]interface{}, size, size) + + for k := 0; k < size; k++ { + if i >= len(left) && j < len(right) { + // have items left in `right` list + s[k] = right[j] + j++ + } else if j >= len(right) && i < len(left) { + // have items left in `left` list + s[k] = left[i] + i++ + } else { + // compare them if i and j are both in bound + less, foundBoth := less(left[i], right[j]) + if foundBoth && less { + s[k] = left[i] + i++ + } else { + s[k] = right[j] + j++ + } + } + } + return s +} + +// index returns the index of the item in the given items, or -1 if it doesn't exist +// l must NOT be a slice of slices, this should be checked before calling. +func index(l []interface{}, valToLookUp interface{}, mergeKey string, kind reflect.Kind) int { + var getValFn func(interface{}) interface{} + // Get the correct `getValFn` based on item `kind`. + // It should return the value of merge key for maps and + // return the item for other kinds. + switch kind { + case reflect.Map: + getValFn = func(item interface{}) interface{} { + typedItem, ok := item.(map[string]interface{}) + if !ok { + return nil + } + val := typedItem[mergeKey] + return val + } + default: + getValFn = func(item interface{}) interface{} { + return item + } + } + + for i, v := range l { + if getValFn(valToLookUp) == getValFn(v) { + return i + } + } + return -1 +} + +// extractToDeleteItems takes a list and +// returns 2 lists: one contains items that should be kept and the other contains items to be deleted. +func extractToDeleteItems(l []interface{}) ([]interface{}, []interface{}, error) { + var nonDelete, toDelete []interface{} + for _, v := range l { + m, ok := v.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(m, v) + } + + directive, foundDirective := m[directiveMarker] + if foundDirective && directive == deleteDirective { + toDelete = append(toDelete, v) + } else { + nonDelete = append(nonDelete, v) + } + } + return nonDelete, toDelete, nil +} + +// normalizeSliceOrder sort `toSort` list by `order` +func normalizeSliceOrder(toSort, order []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) { + var toDelete []interface{} + if kind == reflect.Map { + // make sure each item in toSort, order has merge key + err := validateMergeKeyInLists(mergeKey, toSort, order) + if err != nil { + return nil, err + } + toSort, toDelete, err = extractToDeleteItems(toSort) + if err != nil { + return nil, err + } + } + + sort.SliceStable(toSort, func(i, j int) bool { + if ii := index(order, toSort[i], mergeKey, kind); ii >= 0 { + if ij := index(order, toSort[j], mergeKey, kind); ij >= 0 { + return ii < ij + } + } + return true + }) + toSort = append(toSort, toDelete...) + return toSort, nil +} + +// Returns a (recursive) strategic merge patch, a parallel deletion list if necessary and +// another list to set the order of the list +// Only list of primitives with merge strategy will generate a parallel deletion list. +// These two lists should yield modified when applied to original, for lists with merge semantics. 
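// Worked example (illustrative, merge strategy with merge key "name"): diffing
// original [{"name":"a"}, {"name":"b"}] against modified [{"name":"b","image":"new"}]
// yields a patch list of [{"name":"b","image":"new"}, {"name":"a","$patch":"delete"}],
// no separate deletion list (that is only used for lists of primitives), and a
// $setElementOrder list of [{"name":"b"}].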
+func diffLists(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, []interface{}, error) { + if len(original) == 0 { + // Both slices are empty - do nothing + if len(modified) == 0 || diffOptions.IgnoreChangesAndAdditions { + return nil, nil, nil, nil + } + + // Old slice was empty - add all elements from the new slice + return modified, nil, nil, nil + } + + elementType, err := sliceElementType(original, modified) + if err != nil { + return nil, nil, nil, err + } + + var patchList, deleteList, setOrderList []interface{} + kind := elementType.Kind() + switch kind { + case reflect.Map: + patchList, deleteList, err = diffListsOfMaps(original, modified, schema, mergeKey, diffOptions) + if err != nil { + return nil, nil, nil, err + } + patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind) + if err != nil { + return nil, nil, nil, err + } + orderSame, err := isOrderSame(original, modified, mergeKey) + if err != nil { + return nil, nil, nil, err + } + // append the deletions to the end of the patch list. + patchList = append(patchList, deleteList...) + deleteList = nil + // generate the setElementOrder list when there are content changes or order changes + if diffOptions.SetElementOrder && + ((!diffOptions.IgnoreChangesAndAdditions && (len(patchList) > 0 || !orderSame)) || + (!diffOptions.IgnoreDeletions && len(patchList) > 0)) { + // Generate a list of maps that each item contains only the merge key. + setOrderList = make([]interface{}, len(modified)) + for i, v := range modified { + typedV := v.(map[string]interface{}) + setOrderList[i] = map[string]interface{}{ + mergeKey: typedV[mergeKey], + } + } + } + case reflect.Slice: + // Lists of Lists are not permitted by the api + return nil, nil, nil, mergepatch.ErrNoListOfLists + default: + patchList, deleteList, err = diffListsOfScalars(original, modified, diffOptions) + if err != nil { + return nil, nil, nil, err + } + patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind) + // generate the setElementOrder list when there are content changes or order changes + if diffOptions.SetElementOrder && ((!diffOptions.IgnoreDeletions && len(deleteList) > 0) || + (!diffOptions.IgnoreChangesAndAdditions && !reflect.DeepEqual(original, modified))) { + setOrderList = modified + } + } + return patchList, deleteList, setOrderList, err +} + +// isOrderSame checks if the order in a list has changed +func isOrderSame(original, modified []interface{}, mergeKey string) (bool, error) { + if len(original) != len(modified) { + return false, nil + } + for i, modifiedItem := range modified { + equal, err := mergeKeyValueEqual(original[i], modifiedItem, mergeKey) + if err != nil || !equal { + return equal, err + } + } + return true, nil +} + +// diffListsOfScalars returns 2 lists, the first one is addList and the second one is deletionList. +// Argument diffOptions.IgnoreChangesAndAdditions controls if calculate addList. true means not calculate. +// Argument diffOptions.IgnoreDeletions controls if calculate deletionList. true means not calculate. 
+// original may be changed, but modified is guaranteed to not be changed +func diffListsOfScalars(original, modified []interface{}, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { + modifiedCopy := make([]interface{}, len(modified)) + copy(modifiedCopy, modified) + // Sort the scalars for easier calculating the diff + originalScalars := sortScalars(original) + modifiedScalars := sortScalars(modifiedCopy) + + originalIndex, modifiedIndex := 0, 0 + addList := []interface{}{} + deletionList := []interface{}{} + + for { + originalInBounds := originalIndex < len(originalScalars) + modifiedInBounds := modifiedIndex < len(modifiedScalars) + if !originalInBounds && !modifiedInBounds { + break + } + // we need to compare the string representation of the scalar, + // because the scalar is an interface which doesn't support either < or > + // And that's how func sortScalars compare scalars. + var originalString, modifiedString string + var originalValue, modifiedValue interface{} + if originalInBounds { + originalValue = originalScalars[originalIndex] + originalString = fmt.Sprintf("%v", originalValue) + } + if modifiedInBounds { + modifiedValue = modifiedScalars[modifiedIndex] + modifiedString = fmt.Sprintf("%v", modifiedValue) + } + + originalV, modifiedV := compareListValuesAtIndex(originalInBounds, modifiedInBounds, originalString, modifiedString) + switch { + case originalV == nil && modifiedV == nil: + originalIndex++ + modifiedIndex++ + case originalV != nil && modifiedV == nil: + if !diffOptions.IgnoreDeletions { + deletionList = append(deletionList, originalValue) + } + originalIndex++ + case originalV == nil && modifiedV != nil: + if !diffOptions.IgnoreChangesAndAdditions { + addList = append(addList, modifiedValue) + } + modifiedIndex++ + default: + return nil, nil, fmt.Errorf("Unexpected returned value from compareListValuesAtIndex: %v and %v", originalV, modifiedV) + } + } + + return addList, deduplicateScalars(deletionList), nil +} + +// If first return value is non-nil, list1 contains an element not present in list2 +// If second return value is non-nil, list2 contains an element not present in list1 +func compareListValuesAtIndex(list1Inbounds, list2Inbounds bool, list1Value, list2Value string) (interface{}, interface{}) { + bothInBounds := list1Inbounds && list2Inbounds + switch { + // scalars are identical + case bothInBounds && list1Value == list2Value: + return nil, nil + // only list2 is in bound + case !list1Inbounds: + fallthrough + // list2 has additional scalar + case bothInBounds && list1Value > list2Value: + return nil, list2Value + // only original is in bound + case !list2Inbounds: + fallthrough + // original has additional scalar + case bothInBounds && list1Value < list2Value: + return list1Value, nil + default: + return nil, nil + } +} + +// diffListsOfMaps takes a pair of lists and +// returns a (recursive) strategic merge patch list contains additions and changes and +// a deletion list contains deletions +func diffListsOfMaps(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { + patch := make([]interface{}, 0, len(modified)) + deletionList := make([]interface{}, 0, len(original)) + + originalSorted, err := sortMergeListsByNameArray(original, schema, mergeKey, false) + if err != nil { + return nil, nil, err + } + modifiedSorted, err := sortMergeListsByNameArray(modified, schema, mergeKey, false) + if err != nil { + return nil, nil, err + } + + originalIndex, 
modifiedIndex := 0, 0 + for { + originalInBounds := originalIndex < len(originalSorted) + modifiedInBounds := modifiedIndex < len(modifiedSorted) + bothInBounds := originalInBounds && modifiedInBounds + if !originalInBounds && !modifiedInBounds { + break + } + + var originalElementMergeKeyValueString, modifiedElementMergeKeyValueString string + var originalElementMergeKeyValue, modifiedElementMergeKeyValue interface{} + var originalElement, modifiedElement map[string]interface{} + if originalInBounds { + originalElement, originalElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(originalIndex, mergeKey, originalSorted) + if err != nil { + return nil, nil, err + } + originalElementMergeKeyValueString = fmt.Sprintf("%v", originalElementMergeKeyValue) + } + if modifiedInBounds { + modifiedElement, modifiedElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(modifiedIndex, mergeKey, modifiedSorted) + if err != nil { + return nil, nil, err + } + modifiedElementMergeKeyValueString = fmt.Sprintf("%v", modifiedElementMergeKeyValue) + } + + switch { + case bothInBounds && ItemMatchesOriginalAndModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): + // Merge key values are equal, so recurse + patchValue, err := diffMaps(originalElement, modifiedElement, schema, diffOptions) + if err != nil { + return nil, nil, err + } + if len(patchValue) > 0 { + patchValue[mergeKey] = modifiedElementMergeKeyValue + patch = append(patch, patchValue) + } + originalIndex++ + modifiedIndex++ + // only modified is in bound + case !originalInBounds: + fallthrough + // modified has additional map + case bothInBounds && ItemAddedToModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): + if !diffOptions.IgnoreChangesAndAdditions { + patch = append(patch, modifiedElement) + } + modifiedIndex++ + // only original is in bound + case !modifiedInBounds: + fallthrough + // original has additional map + case bothInBounds && ItemRemovedFromModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): + if !diffOptions.IgnoreDeletions { + // Item was deleted, so add delete directive + deletionList = append(deletionList, CreateDeleteDirective(mergeKey, originalElementMergeKeyValue)) + } + originalIndex++ + } + } + + return patch, deletionList, nil +} + +// getMapAndMergeKeyValueByIndex return a map in the list and its merge key value given the index of the map. +func getMapAndMergeKeyValueByIndex(index int, mergeKey string, listOfMaps []interface{}) (map[string]interface{}, interface{}, error) { + m, ok := listOfMaps[index].(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(m, listOfMaps[index]) + } + + val, ok := m[mergeKey] + if !ok { + return nil, nil, mergepatch.ErrNoMergeKey(m, mergeKey) + } + return m, val, nil +} + +// StrategicMergePatch applies a strategic merge patch. The patch and the original document +// must be json encoded content. A patch can be created from an original and a modified document +// by calling CreateStrategicMergePatch. 
+func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return StrategicMergePatchUsingLookupPatchMeta(original, patch, schema) +} + +func StrategicMergePatchUsingLookupPatchMeta(original, patch []byte, schema LookupPatchMeta) ([]byte, error) { + originalMap, err := handleUnmarshal(original) + if err != nil { + return nil, err + } + patchMap, err := handleUnmarshal(patch) + if err != nil { + return nil, err + } + + result, err := StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, schema) + if err != nil { + return nil, err + } + + return json.Marshal(result) +} + +func handleUnmarshal(j []byte) (map[string]interface{}, error) { + if j == nil { + j = []byte("{}") + } + + m := map[string]interface{}{} + err := json.Unmarshal(j, &m) + if err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + return m, nil +} + +// StrategicMergeMapPatch applies a strategic merge patch. The original and patch documents +// must be JSONMap. A patch can be created from an original and modified document by +// calling CreateTwoWayMergeMapPatch. +// Warning: the original and patch JSONMap objects are mutated by this function and should not be reused. +func StrategicMergeMapPatch(original, patch JSONMap, dataStruct interface{}) (JSONMap, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + // We need the go struct tags `patchMergeKey` and `patchStrategy` for fields that support a strategic merge patch. + // For native resources, we can easily figure out these tags since we know the fields. + + // Because custom resources are decoded as Unstructured and because we're missing the metadata about how to handle + // each field in a strategic merge patch, we can't find the go struct tags. Hence, we can't easily do a strategic merge + // for custom resources. So we should fail fast and return an error. + if _, ok := dataStruct.(*unstructured.Unstructured); ok { + return nil, mergepatch.ErrUnsupportedStrategicMergePatchFormat + } + + return StrategicMergeMapPatchUsingLookupPatchMeta(original, patch, schema) +} + +func StrategicMergeMapPatchUsingLookupPatchMeta(original, patch JSONMap, schema LookupPatchMeta) (JSONMap, error) { + mergeOptions := MergeOptions{ + MergeParallelList: true, + IgnoreUnmatchedNulls: true, + } + return mergeMap(original, patch, schema, mergeOptions) +} + +// MergeStrategicMergeMapPatchUsingLookupPatchMeta merges strategic merge +// patches retaining `null` fields and parallel lists. If 2 patches change the +// same fields and the latter one will override the former one. If you don't +// want that happen, you need to run func MergingMapsHaveConflicts before +// merging these patches. Applying the resulting merged merge patch to a JSONMap +// yields the same as merging each strategic merge patch to the JSONMap in +// succession. +func MergeStrategicMergeMapPatchUsingLookupPatchMeta(schema LookupPatchMeta, patches ...JSONMap) (JSONMap, error) { + mergeOptions := MergeOptions{ + MergeParallelList: false, + IgnoreUnmatchedNulls: false, + } + merged := JSONMap{} + var err error + for _, patch := range patches { + merged, err = mergeMap(merged, patch, schema, mergeOptions) + if err != nil { + return nil, err + } + } + return merged, nil +} + +// handleDirectiveInMergeMap handles the patch directive when merging 2 maps. 
+func handleDirectiveInMergeMap(directive interface{}, patch map[string]interface{}) (map[string]interface{}, error) { + if directive == replaceDirective { + // If the patch contains "$patch: replace", don't merge it, just use the + // patch directly. Later on, we can add a single level replace that only + // affects the map that the $patch is in. + delete(patch, directiveMarker) + return patch, nil + } + + if directive == deleteDirective { + // If the patch contains "$patch: delete", don't merge it, just return + // an empty map. + return map[string]interface{}{}, nil + } + + return nil, mergepatch.ErrBadPatchType(directive, patch) +} + +func containsDirectiveMarker(item interface{}) bool { + m, ok := item.(map[string]interface{}) + if ok { + if _, foundDirectiveMarker := m[directiveMarker]; foundDirectiveMarker { + return true + } + } + return false +} + +func mergeKeyValueEqual(left, right interface{}, mergeKey string) (bool, error) { + if len(mergeKey) == 0 { + return left == right, nil + } + typedLeft, ok := left.(map[string]interface{}) + if !ok { + return false, mergepatch.ErrBadArgType(typedLeft, left) + } + typedRight, ok := right.(map[string]interface{}) + if !ok { + return false, mergepatch.ErrBadArgType(typedRight, right) + } + mergeKeyLeft, ok := typedLeft[mergeKey] + if !ok { + return false, mergepatch.ErrNoMergeKey(typedLeft, mergeKey) + } + mergeKeyRight, ok := typedRight[mergeKey] + if !ok { + return false, mergepatch.ErrNoMergeKey(typedRight, mergeKey) + } + return mergeKeyLeft == mergeKeyRight, nil +} + +// extractKey trims the prefix and returns the original key +func extractKey(s, prefix string) (string, error) { + substrings := strings.SplitN(s, "/", 2) + if len(substrings) <= 1 || substrings[0] != prefix { + switch prefix { + case deleteFromPrimitiveListDirectivePrefix: + return "", mergepatch.ErrBadPatchFormatForPrimitiveList + case setElementOrderDirectivePrefix: + return "", mergepatch.ErrBadPatchFormatForSetElementOrderList + default: + return "", fmt.Errorf("fail to find unknown prefix %q in %s\n", prefix, s) + } + } + return substrings[1], nil +} + +// validatePatchWithSetOrderList verifies: +// the relative order of any two items in the setOrderList list matches that in the patch list. +// the items in the patch list must be a subset or the same as the $setElementOrder list (deletions are ignored). 
+func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey string) error { + typedSetOrderList, ok := setOrderList.([]interface{}) + if !ok { + return mergepatch.ErrBadPatchFormatForSetElementOrderList + } + typedPatchList, ok := patchList.([]interface{}) + if !ok { + return mergepatch.ErrBadPatchFormatForSetElementOrderList + } + if len(typedSetOrderList) == 0 || len(typedPatchList) == 0 { + return nil + } + + var nonDeleteList, toDeleteList []interface{} + var err error + if len(mergeKey) > 0 { + nonDeleteList, toDeleteList, err = extractToDeleteItems(typedPatchList) + if err != nil { + return err + } + } else { + nonDeleteList = typedPatchList + } + + patchIndex, setOrderIndex := 0, 0 + for patchIndex < len(nonDeleteList) && setOrderIndex < len(typedSetOrderList) { + if containsDirectiveMarker(nonDeleteList[patchIndex]) { + patchIndex++ + continue + } + mergeKeyEqual, err := mergeKeyValueEqual(nonDeleteList[patchIndex], typedSetOrderList[setOrderIndex], mergeKey) + if err != nil { + return err + } + if mergeKeyEqual { + patchIndex++ + } + setOrderIndex++ + } + // If patchIndex is still in bounds but setOrderIndex is out of bounds, there are items mismatching between the patch list and the setElementOrder list. + // The second check is a sanity check, and should always be true if the first is true. + if patchIndex < len(nonDeleteList) && setOrderIndex >= len(typedSetOrderList) { + return fmt.Errorf("The order in patch list:\n%v\n doesn't match %s list:\n%v\n", typedPatchList, setElementOrderDirectivePrefix, setOrderList) + } + typedPatchList = append(nonDeleteList, toDeleteList...) + return nil +} + +// preprocessDeletionListForMerging preprocesses the deletion list. +// it returns shouldContinue, isDeletionList, noPrefixKey +func preprocessDeletionListForMerging(key string, original map[string]interface{}, + patchVal interface{}, mergeDeletionList bool) (bool, bool, string, error) { + // If we find a parallel list for deletion and we are going to merge the list, + // overwrite the key with the original key and set the isDeleteList flag + foundParallelListPrefix := strings.HasPrefix(key, deleteFromPrimitiveListDirectivePrefix) + if foundParallelListPrefix { + if !mergeDeletionList { + original[key] = patchVal + return true, false, "", nil + } + originalKey, err := extractKey(key, deleteFromPrimitiveListDirectivePrefix) + return false, true, originalKey, err + } + return false, false, "", nil +} + +// applyRetainKeysDirective looks for a retainKeys directive and applies it to original +// - if no directive exists do nothing +// - if directive is found, clear keys in original missing from the directive list +// - validate that all keys present in the patch are present in the retainKeys directive +// note: original may be another patch request, e.g. applying the add+modified patch to the deletions patch. In this case it may have directives +func applyRetainKeysDirective(original, patch map[string]interface{}, options MergeOptions) error { + retainKeysInPatch, foundInPatch := patch[retainKeysDirective] + if !foundInPatch { + return nil + } + // cleanup the directive + delete(patch, retainKeysDirective) + + if !options.MergeParallelList { + // If original is actually a patch, make sure the retainKeys directives are the same in both patches if present in both. + // If not present in the original patch, copy from the modified patch. 
+ retainKeysInOriginal, foundInOriginal := original[retainKeysDirective] + if foundInOriginal { + if !reflect.DeepEqual(retainKeysInOriginal, retainKeysInPatch) { + // This error actually should never happen. + return fmt.Errorf("%v and %v are not deep equal: this may happen when calculating the 3-way diff patch", retainKeysInOriginal, retainKeysInPatch) + } + } else { + original[retainKeysDirective] = retainKeysInPatch + } + return nil + } + + retainKeysList, ok := retainKeysInPatch.([]interface{}) + if !ok { + return mergepatch.ErrBadPatchFormatForRetainKeys + } + + // validate patch to make sure all fields in the patch are present in the retainKeysList. + // The map is used only as a set, the value is never referenced + m := map[interface{}]struct{}{} + for _, v := range retainKeysList { + m[v] = struct{}{} + } + for k, v := range patch { + if v == nil || strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) || + strings.HasPrefix(k, setElementOrderDirectivePrefix) { + continue + } + // If there is an item present in the patch but not in the retainKeys list, + // the patch is invalid. + if _, found := m[k]; !found { + return mergepatch.ErrBadPatchFormatForRetainKeys + } + } + + // clear not present fields + for k := range original { + if _, found := m[k]; !found { + delete(original, k) + } + } + return nil +} + +// mergePatchIntoOriginal processes $setElementOrder list. +// When not merging the directive, it will make sure $setElementOrder list exist only in original. +// When merging the directive, it will try to find the $setElementOrder list and +// its corresponding patch list, validate it and merge it. +// Then, sort them by the relative order in setElementOrder, patch list and live list. +// The precedence is $setElementOrder > order in patch list > order in live list. +// This function will delete the item after merging it to prevent process it again in the future. +// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md +func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error { + for key, patchV := range patch { + // Do nothing if there is no ordering directive + if !strings.HasPrefix(key, setElementOrderDirectivePrefix) { + continue + } + + setElementOrderInPatch := patchV + // Copies directive from the second patch (`patch`) to the first patch (`original`) + // and checks they are equal and delete the directive in the second patch + if !mergeOptions.MergeParallelList { + setElementOrderListInOriginal, ok := original[key] + if ok { + // check if the setElementOrder list in original and the one in patch matches + if !reflect.DeepEqual(setElementOrderListInOriginal, setElementOrderInPatch) { + return mergepatch.ErrBadPatchFormatForSetElementOrderList + } + } else { + // move the setElementOrder list from patch to original + original[key] = setElementOrderInPatch + } + } + delete(patch, key) + + var ( + ok bool + originalFieldValue, patchFieldValue, merged []interface{} + patchStrategy string + patchMeta PatchMeta + subschema LookupPatchMeta + ) + typedSetElementOrderList, ok := setElementOrderInPatch.([]interface{}) + if !ok { + return mergepatch.ErrBadArgType(typedSetElementOrderList, setElementOrderInPatch) + } + // Trim the setElementOrderDirectivePrefix to get the key of the list field in original. 
+ originalKey, err := extractKey(key, setElementOrderDirectivePrefix) + if err != nil { + return err + } + // try to find the list with `originalKey` in `original` and `modified` and merge them. + originalList, foundOriginal := original[originalKey] + patchList, foundPatch := patch[originalKey] + if foundOriginal { + originalFieldValue, ok = originalList.([]interface{}) + if !ok { + return mergepatch.ErrBadArgType(originalFieldValue, originalList) + } + } + if foundPatch { + patchFieldValue, ok = patchList.([]interface{}) + if !ok { + return mergepatch.ErrBadArgType(patchFieldValue, patchList) + } + } + subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(originalKey) + if err != nil { + return err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return err + } + // Check for consistency between the element order list and the field it applies to + err = validatePatchWithSetOrderList(patchFieldValue, typedSetElementOrderList, patchMeta.GetPatchMergeKey()) + if err != nil { + return err + } + + switch { + case foundOriginal && !foundPatch: + // no change to list contents + merged = originalFieldValue + case !foundOriginal && foundPatch: + // list was added + merged = patchFieldValue + case foundOriginal && foundPatch: + merged, err = mergeSliceHandler(originalList, patchList, subschema, + patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions) + if err != nil { + return err + } + case !foundOriginal && !foundPatch: + continue + } + + // Split all items into patch items and server-only items and then enforce the order. + var patchItems, serverOnlyItems []interface{} + if len(patchMeta.GetPatchMergeKey()) == 0 { + // Primitives doesn't need merge key to do partitioning. + patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, typedSetElementOrderList) + + } else { + // Maps need merge key to do partitioning. + patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, typedSetElementOrderList, patchMeta.GetPatchMergeKey()) + if err != nil { + return err + } + } + + elementType, err := sliceElementType(originalFieldValue, patchFieldValue) + if err != nil { + return err + } + kind := elementType.Kind() + // normalize merged list + // typedSetElementOrderList contains all the relative order in typedPatchList, + // so don't need to use typedPatchList + both, err := normalizeElementOrder(patchItems, serverOnlyItems, typedSetElementOrderList, originalFieldValue, patchMeta.GetPatchMergeKey(), kind) + if err != nil { + return err + } + original[originalKey] = both + // delete patch list from patch to prevent process again in the future + delete(patch, originalKey) + } + return nil +} + +// partitionPrimitivesByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not. +func partitionPrimitivesByPresentInList(original, partitionBy []interface{}) ([]interface{}, []interface{}) { + patch := make([]interface{}, 0, len(original)) + serverOnly := make([]interface{}, 0, len(original)) + inPatch := map[interface{}]bool{} + for _, v := range partitionBy { + inPatch[v] = true + } + for _, v := range original { + if !inPatch[v] { + serverOnly = append(serverOnly, v) + } else { + patch = append(patch, v) + } + } + return patch, serverOnly +} + +// partitionMapsByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not. 
+func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey string) ([]interface{}, []interface{}, error) { + patch := make([]interface{}, 0, len(original)) + serverOnly := make([]interface{}, 0, len(original)) + for _, v := range original { + typedV, ok := v.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedV, v) + } + mergeKeyValue, foundMergeKey := typedV[mergeKey] + if !foundMergeKey { + return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) + } + _, _, found, err := findMapInSliceBasedOnKeyValue(partitionBy, mergeKey, mergeKeyValue) + if err != nil { + return nil, nil, err + } + if !found { + serverOnly = append(serverOnly, v) + } else { + patch = append(patch, v) + } + } + return patch, serverOnly, nil +} + +// Merge fields from a patch map into the original map. Note: This may modify +// both the original map and the patch because getting a deep copy of a map in +// golang is highly non-trivial. +// flag mergeOptions.MergeParallelList controls if using the parallel list to delete or keeping the list. +// If patch contains any null field (e.g. field_1: null) that is not +// present in original, then to propagate it to the end result use +// mergeOptions.IgnoreUnmatchedNulls == false. +func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) (map[string]interface{}, error) { + if v, ok := patch[directiveMarker]; ok { + return handleDirectiveInMergeMap(v, patch) + } + + // nil is an accepted value for original to simplify logic in other places. + // If original is nil, replace it with an empty map and then apply the patch. + if original == nil { + original = map[string]interface{}{} + } + + err := applyRetainKeysDirective(original, patch, mergeOptions) + if err != nil { + return nil, err + } + + // Process $setElementOrder list and other lists sharing the same key. + // When not merging the directive, it will make sure $setElementOrder list exist only in original. + // When merging the directive, it will process $setElementOrder and its patch list together. + // This function will delete the merged elements from patch so they will not be reprocessed + err = mergePatchIntoOriginal(original, patch, schema, mergeOptions) + if err != nil { + return nil, err + } + + // Start merging the patch into the original. + for k, patchV := range patch { + skipProcessing, isDeleteList, noPrefixKey, err := preprocessDeletionListForMerging(k, original, patchV, mergeOptions.MergeParallelList) + if err != nil { + return nil, err + } + if skipProcessing { + continue + } + if len(noPrefixKey) > 0 { + k = noPrefixKey + } + + // If the value of this key is null, delete the key if it exists in the + // original. Otherwise, check if we want to preserve it or skip it. + // Preserving the null value is useful when we want to send an explicit + // delete to the API server. + if patchV == nil { + if _, ok := original[k]; ok { + delete(original, k) + } + if mergeOptions.IgnoreUnmatchedNulls { + continue + } + } + + _, ok := original[k] + if !ok { + // If it's not in the original document, just take the patch value. + original[k] = patchV + continue + } + + originalType := reflect.TypeOf(original[k]) + patchType := reflect.TypeOf(patchV) + if originalType != patchType { + original[k] = patchV + continue + } + // If they're both maps or lists, recurse into the value. 
+ switch originalType.Kind() { + case reflect.Map: + subschema, patchMeta, err2 := schema.LookupPatchMetadataForStruct(k) + if err2 != nil { + return nil, err2 + } + _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err2 != nil { + return nil, err2 + } + original[k], err = mergeMapHandler(original[k], patchV, subschema, patchStrategy, mergeOptions) + case reflect.Slice: + subschema, patchMeta, err2 := schema.LookupPatchMetadataForSlice(k) + if err2 != nil { + return nil, err2 + } + _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err2 != nil { + return nil, err2 + } + original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions) + default: + original[k] = patchV + } + if err != nil { + return nil, err + } + } + return original, nil +} + +// mergeMapHandler handles how to merge `patchV` whose key is `key` with `original` respecting +// fieldPatchStrategy and mergeOptions. +func mergeMapHandler(original, patch interface{}, schema LookupPatchMeta, + fieldPatchStrategy string, mergeOptions MergeOptions) (map[string]interface{}, error) { + typedOriginal, typedPatch, err := mapTypeAssertion(original, patch) + if err != nil { + return nil, err + } + + if fieldPatchStrategy != replaceDirective { + return mergeMap(typedOriginal, typedPatch, schema, mergeOptions) + } else { + return typedPatch, nil + } +} + +// mergeSliceHandler handles how to merge `patchV` whose key is `key` with `original` respecting +// fieldPatchStrategy, fieldPatchMergeKey, isDeleteList and mergeOptions. +func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, isDeleteList bool, mergeOptions MergeOptions) ([]interface{}, error) { + typedOriginal, typedPatch, err := sliceTypeAssertion(original, patch) + if err != nil { + return nil, err + } + + if fieldPatchStrategy == mergeDirective { + return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList) + } else { + return typedPatch, nil + } +} + +// Merge two slices together. Note: This may modify both the original slice and +// the patch because getting a deep copy of a slice in golang is highly +// non-trivial. +func mergeSlice(original, patch []interface{}, schema LookupPatchMeta, mergeKey string, mergeOptions MergeOptions, isDeleteList bool) ([]interface{}, error) { + if len(original) == 0 && len(patch) == 0 { + return original, nil + } + + // All the values must be of the same type, but not a list. + t, err := sliceElementType(original, patch) + if err != nil { + return nil, err + } + + var merged []interface{} + kind := t.Kind() + // If the elements are not maps, merge the slices of scalars. + if kind != reflect.Map { + if mergeOptions.MergeParallelList && isDeleteList { + return deleteFromSlice(original, patch), nil + } + // Maybe in the future add a "concat" mode that doesn't + // deduplicate. + both := append(original, patch...) 
+ merged = deduplicateScalars(both) + + } else { + if mergeKey == "" { + return nil, fmt.Errorf("cannot merge lists without merge key for %s", schema.Name()) + } + + original, patch, err = mergeSliceWithSpecialElements(original, patch, mergeKey) + if err != nil { + return nil, err + } + + merged, err = mergeSliceWithoutSpecialElements(original, patch, mergeKey, schema, mergeOptions) + if err != nil { + return nil, err + } + } + + // enforce the order + var patchItems, serverOnlyItems []interface{} + if len(mergeKey) == 0 { + patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, patch) + } else { + patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, patch, mergeKey) + if err != nil { + return nil, err + } + } + return normalizeElementOrder(patchItems, serverOnlyItems, patch, original, mergeKey, kind) +} + +// mergeSliceWithSpecialElements handles special elements with directiveMarker +// before merging the slices. It returns a updated `original` and a patch without special elements. +// original and patch must be slices of maps, they should be checked before calling this function. +func mergeSliceWithSpecialElements(original, patch []interface{}, mergeKey string) ([]interface{}, []interface{}, error) { + patchWithoutSpecialElements := []interface{}{} + replace := false + for _, v := range patch { + typedV := v.(map[string]interface{}) + patchType, ok := typedV[directiveMarker] + if !ok { + patchWithoutSpecialElements = append(patchWithoutSpecialElements, v) + } else { + switch patchType { + case deleteDirective: + mergeValue, ok := typedV[mergeKey] + if ok { + var err error + original, err = deleteMatchingEntries(original, mergeKey, mergeValue) + if err != nil { + return nil, nil, err + } + } else { + return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) + } + case replaceDirective: + replace = true + // Continue iterating through the array to prune any other $patch elements. + case mergeDirective: + return nil, nil, fmt.Errorf("merging lists cannot yet be specified in the patch") + default: + return nil, nil, mergepatch.ErrBadPatchType(patchType, typedV) + } + } + } + if replace { + return patchWithoutSpecialElements, nil, nil + } + return original, patchWithoutSpecialElements, nil +} + +// delete all matching entries (based on merge key) from a merging list +func deleteMatchingEntries(original []interface{}, mergeKey string, mergeValue interface{}) ([]interface{}, error) { + for { + _, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) + if err != nil { + return nil, err + } + + if !found { + break + } + // Delete the element at originalKey. + original = append(original[:originalKey], original[originalKey+1:]...) + } + return original, nil +} + +// mergeSliceWithoutSpecialElements merges slices with non-special elements. +// original and patch must be slices of maps, they should be checked before calling this function. +func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey string, schema LookupPatchMeta, mergeOptions MergeOptions) ([]interface{}, error) { + for _, v := range patch { + typedV := v.(map[string]interface{}) + mergeValue, ok := typedV[mergeKey] + if !ok { + return nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) + } + + // If we find a value with this merge key value in original, merge the + // maps. Otherwise append onto original. 
+ originalMap, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) + if err != nil { + return nil, err + } + + if found { + var mergedMaps interface{} + var err error + // Merge into original. + mergedMaps, err = mergeMap(originalMap, typedV, schema, mergeOptions) + if err != nil { + return nil, err + } + + original[originalKey] = mergedMaps + } else { + original = append(original, v) + } + } + return original, nil +} + +// deleteFromSlice uses the parallel list to delete the items in a list of scalars +func deleteFromSlice(current, toDelete []interface{}) []interface{} { + toDeleteMap := map[interface{}]interface{}{} + processed := make([]interface{}, 0, len(current)) + for _, v := range toDelete { + toDeleteMap[v] = true + } + for _, v := range current { + if _, found := toDeleteMap[v]; !found { + processed = append(processed, v) + } + } + return processed +} + +// This method no longer panics if any element of the slice is not a map. +func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool, error) { + for k, v := range m { + typedV, ok := v.(map[string]interface{}) + if !ok { + return nil, 0, false, fmt.Errorf("value for key %v is not a map", k) + } + + valueToMatch, ok := typedV[key] + if ok && valueToMatch == value { + return typedV, k, true, nil + } + } + + return nil, 0, false, nil +} + +// This function takes a JSON map and sorts all the lists that should be merged +// by key. This is needed by tests because in JSON, list order is significant, +// but in Strategic Merge Patch, merge lists do not have significant order. +// Sorting the lists allows for order-insensitive comparison of patched maps. +func sortMergeListsByName(mapJSON []byte, schema LookupPatchMeta) ([]byte, error) { + var m map[string]interface{} + err := json.Unmarshal(mapJSON, &m) + if err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + + newM, err := sortMergeListsByNameMap(m, schema) + if err != nil { + return nil, err + } + + return json.Marshal(newM) +} + +// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in a map. +func sortMergeListsByNameMap(s map[string]interface{}, schema LookupPatchMeta) (map[string]interface{}, error) { + newS := map[string]interface{}{} + for k, v := range s { + if k == retainKeysDirective { + typedV, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForRetainKeys + } + v = sortScalars(typedV) + } else if strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) { + typedV, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForPrimitiveList + } + v = sortScalars(typedV) + } else if strings.HasPrefix(k, setElementOrderDirectivePrefix) { + _, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForSetElementOrderList + } + } else if k != directiveMarker { + // recurse for map and slice. 
+ switch typedV := v.(type) { + case map[string]interface{}: + subschema, _, err := schema.LookupPatchMetadataForStruct(k) + if err != nil { + return nil, err + } + v, err = sortMergeListsByNameMap(typedV, subschema) + if err != nil { + return nil, err + } + case []interface{}: + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(k) + if err != nil { + return nil, err + } + _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return nil, err + } + if patchStrategy == mergeDirective { + var err error + v, err = sortMergeListsByNameArray(typedV, subschema, patchMeta.GetPatchMergeKey(), true) + if err != nil { + return nil, err + } + } + } + } + + newS[k] = v + } + + return newS, nil +} + +// Function sortMergeListsByNameArray recursively sorts the merge lists by their mergeKey in an array. +func sortMergeListsByNameArray(s []interface{}, schema LookupPatchMeta, mergeKey string, recurse bool) ([]interface{}, error) { + if len(s) == 0 { + return s, nil + } + + // We don't support lists of lists yet. + t, err := sliceElementType(s) + if err != nil { + return nil, err + } + + // If the elements are not maps... + if t.Kind() != reflect.Map { + // Sort the elements, because they may have been merged out of order. + return deduplicateAndSortScalars(s), nil + } + + // Elements are maps - if one of the keys of the map is a map or a + // list, we may need to recurse into it. + newS := []interface{}{} + for _, elem := range s { + if recurse { + typedElem := elem.(map[string]interface{}) + newElem, err := sortMergeListsByNameMap(typedElem, schema) + if err != nil { + return nil, err + } + + newS = append(newS, newElem) + } else { + newS = append(newS, elem) + } + } + + // Sort the maps. + newS = sortMapsBasedOnField(newS, mergeKey) + return newS, nil +} + +func sortMapsBasedOnField(m []interface{}, fieldName string) []interface{} { + mapM := mapSliceFromSlice(m) + ss := SortableSliceOfMaps{mapM, fieldName} + sort.Sort(ss) + newS := sliceFromMapSlice(ss.s) + return newS +} + +func mapSliceFromSlice(m []interface{}) []map[string]interface{} { + newM := []map[string]interface{}{} + for _, v := range m { + vt := v.(map[string]interface{}) + newM = append(newM, vt) + } + + return newM +} + +func sliceFromMapSlice(s []map[string]interface{}) []interface{} { + newS := []interface{}{} + for _, v := range s { + newS = append(newS, v) + } + + return newS +} + +type SortableSliceOfMaps struct { + s []map[string]interface{} + k string // key to sort on +} + +func (ss SortableSliceOfMaps) Len() int { + return len(ss.s) +} + +func (ss SortableSliceOfMaps) Less(i, j int) bool { + iStr := fmt.Sprintf("%v", ss.s[i][ss.k]) + jStr := fmt.Sprintf("%v", ss.s[j][ss.k]) + return sort.StringsAreSorted([]string{iStr, jStr}) +} + +func (ss SortableSliceOfMaps) Swap(i, j int) { + tmp := ss.s[i] + ss.s[i] = ss.s[j] + ss.s[j] = tmp +} + +func deduplicateAndSortScalars(s []interface{}) []interface{} { + s = deduplicateScalars(s) + return sortScalars(s) +} + +func sortScalars(s []interface{}) []interface{} { + ss := SortableSliceOfScalars{s} + sort.Sort(ss) + return ss.s +} + +func deduplicateScalars(s []interface{}) []interface{} { + // Clever algorithm to deduplicate. 
+ length := len(s) - 1 + for i := 0; i < length; i++ { + for j := i + 1; j <= length; j++ { + if s[i] == s[j] { + s[j] = s[length] + s = s[0:length] + length-- + j-- + } + } + } + + return s +} + +type SortableSliceOfScalars struct { + s []interface{} +} + +func (ss SortableSliceOfScalars) Len() int { + return len(ss.s) +} + +func (ss SortableSliceOfScalars) Less(i, j int) bool { + iStr := fmt.Sprintf("%v", ss.s[i]) + jStr := fmt.Sprintf("%v", ss.s[j]) + return sort.StringsAreSorted([]string{iStr, jStr}) +} + +func (ss SortableSliceOfScalars) Swap(i, j int) { + tmp := ss.s[i] + ss.s[i] = ss.s[j] + ss.s[j] = tmp +} + +// Returns the type of the elements of N slice(s). If the type is different, +// another slice or undefined, returns an error. +func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { + var prevType reflect.Type + for _, s := range slices { + // Go through elements of all given slices and make sure they are all the same type. + for _, v := range s { + currentType := reflect.TypeOf(v) + if prevType == nil { + prevType = currentType + // We don't support lists of lists yet. + if prevType.Kind() == reflect.Slice { + return nil, mergepatch.ErrNoListOfLists + } + } else { + if prevType != currentType { + return nil, fmt.Errorf("list element types are not identical: %v", fmt.Sprint(slices)) + } + prevType = currentType + } + } + } + + if prevType == nil { + return nil, fmt.Errorf("no elements in any of the given slices") + } + + return prevType, nil +} + +// MergingMapsHaveConflicts returns true if the left and right JSON interface +// objects overlap with different values in any key. All keys are required to be +// strings. Since patches of the same Type have congruent keys, this is valid +// for multiple patch types. This method supports strategic merge patch semantics. +func MergingMapsHaveConflicts(left, right map[string]interface{}, schema LookupPatchMeta) (bool, error) { + return mergingMapFieldsHaveConflicts(left, right, schema, "", "") +} + +func mergingMapFieldsHaveConflicts( + left, right interface{}, + schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + switch leftType := left.(type) { + case map[string]interface{}: + rightType, ok := right.(map[string]interface{}) + if !ok { + return true, nil + } + leftMarker, okLeft := leftType[directiveMarker] + rightMarker, okRight := rightType[directiveMarker] + // if one or the other has a directive marker, + // then we need to consider that before looking at the individual keys, + // since a directive operates on the whole map. + if okLeft || okRight { + // if one has a directive marker and the other doesn't, + // then we have a conflict, since one is deleting or replacing the whole map, + // and the other is doing things to individual keys. + if okLeft != okRight { + return true, nil + } + // if they both have markers, but they are not the same directive, + // then we have a conflict because they're doing different things to the map. + if leftMarker != rightMarker { + return true, nil + } + } + if fieldPatchStrategy == replaceDirective { + return false, nil + } + // Check the individual keys. 
+ return mapsHaveConflicts(leftType, rightType, schema) + + case []interface{}: + rightType, ok := right.([]interface{}) + if !ok { + return true, nil + } + return slicesHaveConflicts(leftType, rightType, schema, fieldPatchStrategy, fieldPatchMergeKey) + case string, float64, bool, int, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} + +func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { + for key, leftValue := range typedLeft { + if key != directiveMarker && key != retainKeysDirective { + if rightValue, ok := typedRight[key]; ok { + var subschema LookupPatchMeta + var patchMeta PatchMeta + var patchStrategy string + var err error + switch leftValue.(type) { + case []interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } + case map[string]interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForStruct(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } + } + + if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, + subschema, patchStrategy, patchMeta.GetPatchMergeKey()); hasConflicts { + return true, err + } + } + } + } + + return false, nil +} + +func slicesHaveConflicts( + typedLeft, typedRight []interface{}, + schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + elementType, err := sliceElementType(typedLeft, typedRight) + if err != nil { + return true, err + } + + if fieldPatchStrategy == mergeDirective { + // Merging lists of scalars have no conflicts by definition + // So we only need to check further if the elements are maps + if elementType.Kind() != reflect.Map { + return false, nil + } + + // Build a map for each slice and then compare the two maps + leftMap, err := sliceOfMapsToMapOfMaps(typedLeft, fieldPatchMergeKey) + if err != nil { + return true, err + } + + rightMap, err := sliceOfMapsToMapOfMaps(typedRight, fieldPatchMergeKey) + if err != nil { + return true, err + } + + return mapsOfMapsHaveConflicts(leftMap, rightMap, schema) + } + + // Either we don't have type information, or these are non-merging lists + if len(typedLeft) != len(typedRight) { + return true, nil + } + + // Sort scalar slices to prevent ordering issues + // We have no way to sort non-merging lists of maps + if elementType.Kind() != reflect.Map { + typedLeft = deduplicateAndSortScalars(typedLeft) + typedRight = deduplicateAndSortScalars(typedRight) + } + + // Compare the slices element by element in order + // This test will fail if the slices are not sorted + for i := range typedLeft { + if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], schema, "", ""); hasConflicts { + return true, err + } + } + + return false, nil +} + +func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]interface{}, error) { + result := make(map[string]interface{}, len(slice)) + for _, value := range slice { + typedValue, ok := value.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("invalid element type in merging list:%v", slice) + } + + mergeValue, ok := typedValue[mergeKey] + if !ok { + return nil, fmt.Errorf("cannot find 
merge key `%s` in merging list element:%v", mergeKey, typedValue) + } + + result[fmt.Sprintf("%s", mergeValue)] = typedValue + } + + return result, nil +} + +func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { + for key, leftValue := range typedLeft { + if rightValue, ok := typedRight[key]; ok { + if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, schema, "", ""); hasConflicts { + return true, err + } + } + } + + return false, nil +} + +// CreateThreeWayMergePatch reconciles a modified configuration with an original configuration, +// while preserving any changes or deletions made to the original configuration in the interim, +// and not overridden by the current configuration. All three documents must be passed to the +// method as json encoded content. It will return a strategic merge patch, or an error if any +// of the documents is invalid, or if there are any preconditions that fail against the modified +// configuration, or, if overwrite is false and there are conflicts between the modified and current +// configurations. Conflicts are defined as keys changed differently from original to modified +// than from original to current. In other words, a conflict occurs if modified changes any key +// in a way that is different from how it is changed in current (e.g., deleting it, changing its +// value). We also propagate values fields that do not exist in original but are explicitly +// defined in modified. +func CreateThreeWayMergePatch(original, modified, current []byte, schema LookupPatchMeta, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + originalMap := map[string]interface{}{} + if len(original) > 0 { + if err := json.Unmarshal(original, &originalMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + modifiedMap := map[string]interface{}{} + if len(modified) > 0 { + if err := json.Unmarshal(modified, &modifiedMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + currentMap := map[string]interface{}{} + if len(current) > 0 { + if err := json.Unmarshal(current, &currentMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + // The patch is the difference from current to modified without deletions, plus deletions + // from original to modified. To find it, we compute deletions, which are the deletions from + // original to modified, and delta, which is the difference from current to modified without + // deletions, and then apply delta to deletions as a patch, which should be strictly additive. + deltaMapDiffOptions := DiffOptions{ + IgnoreDeletions: true, + SetElementOrder: true, + } + deltaMap, err := diffMaps(currentMap, modifiedMap, schema, deltaMapDiffOptions) + if err != nil { + return nil, err + } + deletionsMapDiffOptions := DiffOptions{ + SetElementOrder: true, + IgnoreChangesAndAdditions: true, + } + deletionsMap, err := diffMaps(originalMap, modifiedMap, schema, deletionsMapDiffOptions) + if err != nil { + return nil, err + } + + mergeOptions := MergeOptions{} + patchMap, err := mergeMap(deletionsMap, deltaMap, schema, mergeOptions) + if err != nil { + return nil, err + } + + // Apply the preconditions to the patch, and return an error if any of them fail. + for _, fn := range fns { + if !fn(patchMap) { + return nil, mergepatch.NewErrPreconditionFailed(patchMap) + } + } + + // If overwrite is false, and the patch contains any keys that were changed differently, + // then return a conflict error. 
+ if !overwrite { + changeMapDiffOptions := DiffOptions{} + changedMap, err := diffMaps(originalMap, currentMap, schema, changeMapDiffOptions) + if err != nil { + return nil, err + } + + hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, schema) + if err != nil { + return nil, err + } + + if hasConflicts { + return nil, mergepatch.NewErrConflict(mergepatch.ToYAMLOrError(patchMap), mergepatch.ToYAMLOrError(changedMap)) + } + } + + return json.Marshal(patchMap) +} + +func ItemAddedToModifiedSlice(original, modified string) bool { return original > modified } + +func ItemRemovedFromModifiedSlice(original, modified string) bool { return original < modified } + +func ItemMatchesOriginalAndModifiedSlice(original, modified string) bool { return original == modified } + +func CreateDeleteDirective(mergeKey string, mergeKeyValue interface{}) map[string]interface{} { + return map[string]interface{}{mergeKey: mergeKeyValue, directiveMarker: deleteDirective} +} + +func mapTypeAssertion(original, patch interface{}) (map[string]interface{}, map[string]interface{}, error) { + typedOriginal, ok := original.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original) + } + typedPatch, ok := patch.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch) + } + return typedOriginal, typedPatch, nil +} + +func sliceTypeAssertion(original, patch interface{}) ([]interface{}, []interface{}, error) { + typedOriginal, ok := original.([]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original) + } + typedPatch, ok := patch.([]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch) + } + return typedOriginal, typedPatch, nil +} + +// extractRetainKeysPatchStrategy processes the patch strategy, which is a string that may contain multiple +// patch strategies separated by ",". It returns a boolean indicating whether the +// retainKeys strategy is present and a string for the other strategy. +func extractRetainKeysPatchStrategy(strategies []string) (bool, string, error) { + switch len(strategies) { + case 0: + return false, "", nil + case 1: + singleStrategy := strategies[0] + switch singleStrategy { + case retainKeysStrategy: + return true, "", nil + default: + return false, singleStrategy, nil + } + case 2: + switch { + case strategies[0] == retainKeysStrategy: + return true, strategies[1], nil + case strategies[1] == retainKeysStrategy: + return true, strategies[0], nil + default: + return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies) + } + default: + return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies) + } +} + +// hasAdditionalNewField returns whether the original map has an additional key with a non-nil value that is not present in modified. +func hasAdditionalNewField(original, modified map[string]interface{}) bool { + for k, v := range original { + if v == nil { + continue + } + if _, found := modified[k]; !found { + return true + } + } + return false +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go new file mode 100644 index 000000000..f84d65aac --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go @@ -0,0 +1,193 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "errors" + "strings" + + "k8s.io/apimachinery/pkg/util/mergepatch" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +const ( + patchStrategyOpenapiextensionKey = "x-kubernetes-patch-strategy" + patchMergeKeyOpenapiextensionKey = "x-kubernetes-patch-merge-key" +) + +type LookupPatchItem interface { + openapi.SchemaVisitor + + Error() error + Path() *openapi.Path +} + +type kindItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewKindItem(key string, path *openapi.Path) *kindItem { + return &kindItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &kindItem{} + +func (item *kindItem) Error() error { + return item.err +} + +func (item *kindItem) Path() *openapi.Path { + return item.path +} + +func (item *kindItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected kind, but got primitive") +} + +func (item *kindItem) VisitArray(schema *openapi.Array) { + item.err = errors.New("expected kind, but got slice") +} + +func (item *kindItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected kind, but got map") +} + +func (item *kindItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } +} + +func (item *kindItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.subschema = subschema +} + +type sliceItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewSliceItem(key string, path *openapi.Path) *sliceItem { + return &sliceItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &sliceItem{} + +func (item *sliceItem) Error() error { + return item.err +} + +func (item *sliceItem) Path() *openapi.Path { + return item.path +} + +func (item *sliceItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected slice, but got primitive") +} + +func (item *sliceItem) VisitArray(schema *openapi.Array) { + if !item.hasVisitKind { + item.err = errors.New("expected visit kind first, then visit array") + } + subschema := schema.SubType + item.subschema = subschema +} + +func (item *sliceItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected slice, but got map") +} + +func (item *sliceItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } else { + item.subschema = schema.SubSchema() + } +} + +func (item *sliceItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, 
patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.hasVisitKind = true + subschema.Accept(item) +} + +func parsePatchMetadata(extensions map[string]interface{}) (string, []string, error) { + ps, foundPS := extensions[patchStrategyOpenapiextensionKey] + var patchStrategies []string + var mergeKey, patchStrategy string + var ok bool + if foundPS { + patchStrategy, ok = ps.(string) + if ok { + patchStrategies = strings.Split(patchStrategy, ",") + } else { + return "", nil, mergepatch.ErrBadArgType(patchStrategy, ps) + } + } + mk, foundMK := extensions[patchMergeKeyOpenapiextensionKey] + if foundMK { + mergeKey, ok = mk.(string) + if !ok { + return "", nil, mergepatch.ErrBadArgType(mergeKey, mk) + } + } + return mergeKey, patchStrategies, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index 43c779a11..31705dee3 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -19,6 +19,7 @@ package field import ( "fmt" "reflect" + "strconv" "strings" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -175,7 +176,11 @@ func Invalid(field *Path, value interface{}, detail string) *Error { func NotSupported(field *Path, value interface{}, validValues []string) *Error { detail := "" if validValues != nil && len(validValues) > 0 { - detail = "supported values: " + strings.Join(validValues, ", ") + quotedValues := make([]string, len(validValues)) + for i, v := range validValues { + quotedValues[i] = strconv.Quote(v) + } + detail = "supported values: " + strings.Join(quotedValues, ", ") } return &Error{ErrorTypeNotSupported, field.String(), value, detail} } diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index a0afc26e7..7da6a17d9 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -22,6 +22,8 @@ import ( "net" "regexp" "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" ) const qnameCharFmt string = "[A-Za-z0-9]" @@ -67,6 +69,21 @@ func IsQualifiedName(value string) []string { return errs } +// IsFullyQualifiedName checks if the name is fully qualified. +func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { + var allErrors field.ErrorList + if len(name) == 0 { + return append(allErrors, field.Required(fldPath, "")) + } + if errs := IsDNS1123Subdomain(name); len(errs) > 0 { + return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) + } + if len(strings.Split(name, ".")) < 3 { + return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least three segments separated by dots")) + } + return allErrors +} + const labelValueFmt string = "(" + qualifiedNameFmt + ")?" const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" const LabelValueMaxLength int = 63 @@ -126,7 +143,7 @@ func IsDNS1123Subdomain(value string) []string { } const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" 
-const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" +const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" const DNS1035LabelMaxLength int = 63 var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") @@ -188,6 +205,14 @@ func IsValidPortNum(port int) []string { return []string{InclusiveRangeError(1, 65535)} } +// IsInRange tests that the argument is in an inclusive range. +func IsInRange(value int, min int, max int) []string { + if value >= min && value <= max { + return nil + } + return []string{InclusiveRangeError(min, max)} +} + // Now in libcontainer UID/GID limits is 0 ~ 1<<31 - 1 // TODO: once we have a type for UID/GID we should make these that type. const ( @@ -197,16 +222,16 @@ const ( maxGroupID = math.MaxInt32 ) -// IsValidGroupId tests that the argument is a valid Unix GID. -func IsValidGroupId(gid int64) []string { +// IsValidGroupID tests that the argument is a valid Unix GID. +func IsValidGroupID(gid int64) []string { if minGroupID <= gid && gid <= maxGroupID { return nil } return []string{InclusiveRangeError(minGroupID, maxGroupID)} } -// IsValidUserId tests that the argument is a valid Unix UID. -func IsValidUserId(uid int64) []string { +// IsValidUserID tests that the argument is a valid Unix UID. +func IsValidUserID(uid int64) []string { if minUserID <= uid && uid <= maxUserID { return nil } @@ -277,6 +302,22 @@ func IsHTTPHeaderName(value string) []string { return nil } +const envVarNameFmt = "[-._a-zA-Z][-._a-zA-Z0-9]*" +const envVarNameFmtErrMsg string = "a valid environment variable name must consist of alphabetic characters, digits, '_', '-', or '.', and must not start with a digit" + +var envVarNameRegexp = regexp.MustCompile("^" + envVarNameFmt + "$") + +// IsEnvVarName tests if a string is a valid environment variable name. +func IsEnvVarName(value string) []string { + var errs []string + if !envVarNameRegexp.MatchString(value) { + errs = append(errs, RegexError(envVarNameFmtErrMsg, envVarNameFmt, "my.env-name", "MY_ENV.NAME", "MyEnvName1")) + } + + errs = append(errs, hasChDirPrefix(value)...) + return errs +} + const configMapKeyFmt = `[-._a-zA-Z0-9]+` const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'" @@ -291,13 +332,7 @@ func IsConfigMapKey(value string) []string { if !configMapKeyRegexp.MatchString(value) { errs = append(errs, RegexError(configMapKeyErrMsg, configMapKeyFmt, "key.name", "KEY_NAME", "key-name")) } - if value == "." { - errs = append(errs, `must not be '.'`) - } else if value == ".." { - errs = append(errs, `must not be '..'`) - } else if strings.HasPrefix(value, "..") { - errs = append(errs, `must not start with '..'`) - } + errs = append(errs, hasChDirPrefix(value)...) 
return errs } @@ -341,3 +376,16 @@ func prefixEach(msgs []string, prefix string) []string { func InclusiveRangeError(lo, hi int) string { return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) } + +func hasChDirPrefix(value string) []string { + var errs []string + switch { + case value == ".": + errs = append(errs, `must not be '.'`) + case value == "..": + errs = append(errs, `must not be '..'`) + case strings.HasPrefix(value, ".."): + errs = append(errs, `must not start with '..'`) + } + return errs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 4704afd91..a25e92465 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -17,8 +17,10 @@ limitations under the License. package wait import ( + "context" "errors" "math/rand" + "sync" "time" "k8s.io/apimachinery/pkg/util/runtime" @@ -36,6 +38,40 @@ var ForeverTestTimeout = time.Second * 30 // NeverStop may be passed to Until to make it never stop. var NeverStop <-chan struct{} = make(chan struct{}) +// Group allows to start a group of goroutines and wait for their completion. +type Group struct { + wg sync.WaitGroup +} + +func (g *Group) Wait() { + g.wg.Wait() +} + +// StartWithChannel starts f in a new goroutine in the group. +// stopCh is passed to f as an argument. f should stop when stopCh is available. +func (g *Group) StartWithChannel(stopCh <-chan struct{}, f func(stopCh <-chan struct{})) { + g.Start(func() { + f(stopCh) + }) +} + +// StartWithContext starts f in a new goroutine in the group. +// ctx is passed to f as an argument. f should stop when ctx.Done() is available. +func (g *Group) StartWithContext(ctx context.Context, f func(context.Context)) { + g.Start(func() { + f(ctx) + }) +} + +// Start starts f in a new goroutine in the group. +func (g *Group) Start(f func()) { + g.wg.Add(1) + go func() { + defer g.wg.Done() + f() + }() +} + // Forever calls f every period for ever. // // Forever is syntactic sugar on top of Until. @@ -65,16 +101,18 @@ func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { // JitterUntil loops until stop channel is closed, running f every period. // // If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jitterd. +// If jitterFactor is not positive, the period is unchanged and not jittered. // -// If slidingis true, the period is computed after f runs. If it is false then +// If sliding is true, the period is computed after f runs. If it is false then // period includes the runtime for f. // // Close stopCh to stop. f may not be invoked if stop channel is already // closed. Pass NeverStop to if you don't want it stop. 
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { - for { + var t *time.Timer + var sawTimeout bool + for { select { case <-stopCh: return @@ -86,9 +124,8 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b jitteredPeriod = Jitter(period, jitterFactor) } - var t *time.Timer if !sliding { - t = time.NewTimer(jitteredPeriod) + t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout) } func() { @@ -97,7 +134,7 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b }() if sliding { - t = time.NewTimer(jitteredPeriod) + t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout) } // NOTE: b/c there is no priority selection in golang @@ -109,6 +146,7 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b case <-stopCh: return case <-t.C: + sawTimeout = true } } } @@ -136,14 +174,14 @@ type ConditionFunc func() (done bool, err error) // Backoff holds parameters applied to a Backoff function. type Backoff struct { Duration time.Duration // the base duration - Factor float64 // Duration is multipled by factor each iteration + Factor float64 // Duration is multiplied by factor each iteration Jitter float64 // The amount of jitter applied each iteration Steps int // Exit with error after this many steps } // ExponentialBackoff repeats a condition check with exponential backoff. // -// It checks the condition up to Steps times, increasing the wait by multipling +// It checks the condition up to Steps times, increasing the wait by multiplying // the previous duration by Factor. // // If Jitter is greater than zero, a random amount of each duration is added @@ -184,7 +222,9 @@ func Poll(interval, timeout time.Duration, condition ConditionFunc) error { } func pollInternal(wait WaitFunc, condition ConditionFunc) error { - return WaitFor(wait, condition, NeverStop) + done := make(chan struct{}) + defer close(done) + return WaitFor(wait, condition, done) } // PollImmediate tries a condition func until it returns true, an error, or the timeout @@ -244,12 +284,32 @@ func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) erro // PollUntil tries a condition func until it returns true, an error or stopCh is // closed. // -// PolUntil always waits interval before the first run of 'condition'. +// PollUntil always waits interval before the first run of 'condition'. // 'condition' will always be invoked at least once. func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { return WaitFor(poller(interval, 0), condition, stopCh) } +// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. +// +// PollImmediateUntil runs the 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + done, err := condition() + if err != nil { + return err + } + if done { + return nil + } + select { + case <-stopCh: + return ErrWaitTimeout + default: + return PollUntil(interval, condition, stopCh) + } +} + // WaitFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. 
type WaitFunc func(done <-chan struct{}) <-chan struct{} @@ -330,3 +390,16 @@ func poller(interval, timeout time.Duration) WaitFunc { return ch }) } + +// resetOrReuseTimer avoids allocating a new timer if one is already in use. +// Not safe for multiple threads. +func resetOrReuseTimer(t *time.Timer, d time.Duration, sawTimeout bool) *time.Timer { + if t == nil { + return time.NewTimer(d) + } + if !t.Stop() && !sawTimeout { + <-t.C + } + t.Reset(d) + return t +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go index 6ebfaea70..3cd85515d 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -122,12 +122,12 @@ func (d *YAMLDecoder) Read(data []byte) (n int, err error) { if left <= len(data) { copy(data, d.remaining) d.remaining = nil - return len(d.remaining), nil + return left, nil } // caller will need to reread - copy(data, d.remaining[:left]) - d.remaining = d.remaining[left:] + copy(data, d.remaining[:len(data)]) + d.remaining = d.remaining[len(data):] return len(data), io.ErrShortBuffer } diff --git a/vendor/k8s.io/apimachinery/pkg/version/helpers.go b/vendor/k8s.io/apimachinery/pkg/version/helpers.go new file mode 100644 index 000000000..5e041d6f3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/version/helpers.go @@ -0,0 +1,88 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "regexp" + "strconv" + "strings" +) + +type versionType int + +const ( + // Bigger the version type number, higher priority it is + versionTypeAlpha versionType = iota + versionTypeBeta + versionTypeGA +) + +var kubeVersionRegex = regexp.MustCompile("^v([\\d]+)(?:(alpha|beta)([\\d]+))?$") + +func parseKubeVersion(v string) (majorVersion int, vType versionType, minorVersion int, ok bool) { + var err error + submatches := kubeVersionRegex.FindStringSubmatch(v) + if len(submatches) != 4 { + return 0, 0, 0, false + } + switch submatches[2] { + case "alpha": + vType = versionTypeAlpha + case "beta": + vType = versionTypeBeta + case "": + vType = versionTypeGA + default: + return 0, 0, 0, false + } + if majorVersion, err = strconv.Atoi(submatches[1]); err != nil { + return 0, 0, 0, false + } + if vType != versionTypeGA { + if minorVersion, err = strconv.Atoi(submatches[3]); err != nil { + return 0, 0, 0, false + } + } + return majorVersion, vType, minorVersion, true +} + +// CompareKubeAwareVersionStrings compares two kube-like version strings. +// Kube-like version strings are starting with a v, followed by a major version, optional "alpha" or "beta" strings +// followed by a minor version (e.g. v1, v2beta1). Versions will be sorted based on GA/alpha/beta first and then major +// and minor versions. e.g. v2, v1, v1beta2, v1beta1, v1alpha1. 
+func CompareKubeAwareVersionStrings(v1, v2 string) int { + if v1 == v2 { + return 0 + } + v1major, v1type, v1minor, ok1 := parseKubeVersion(v1) + v2major, v2type, v2minor, ok2 := parseKubeVersion(v2) + switch { + case !ok1 && !ok2: + return strings.Compare(v2, v1) + case !ok1 && ok2: + return -1 + case ok1 && !ok2: + return 1 + } + if v1type != v2type { + return int(v1type) - int(v2type) + } + if v1major != v2major { + return v1major - v2major + } + return v1minor - v2minor +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/filter.go b/vendor/k8s.io/apimachinery/pkg/watch/filter.go index 3ca27f22c..22c9449f5 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/filter.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/filter.go @@ -62,11 +62,7 @@ func (fw *filteredWatch) Stop() { // loop waits for new values, filters them, and resends them. func (fw *filteredWatch) loop() { defer close(fw.result) - for { - event, ok := <-fw.incoming.ResultChan() - if !ok { - break - } + for event := range fw.incoming.ResultChan() { filtered, keep := fw.f(event) if keep { fw.result <- filtered diff --git a/vendor/k8s.io/apimachinery/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go index fafccd78e..0ac8dc4ef 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/mux.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go @@ -84,6 +84,13 @@ type functionFakeRuntimeObject func() func (obj functionFakeRuntimeObject) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } +func (obj functionFakeRuntimeObject) DeepCopyObject() runtime.Object { + if obj == nil { + return nil + } + // funcs are immutable. Hence, just return the original func. + return obj +} // Execute f, blocking the incoming queue (and waiting for it to drain first). // The purpose of this terrible hack is so that watchers added after an event @@ -197,11 +204,7 @@ func (m *Broadcaster) Shutdown() { func (m *Broadcaster) loop() { // Deliberately not catching crashes here. Yes, bring down the process if there's a // bug in watch.Broadcaster. - for { - event, ok := <-m.incoming - if !ok { - break - } + for event := range m.incoming { if event.Type == internalRunFunctionMarker { event.Object.(functionFakeRuntimeObject)() continue diff --git a/vendor/k8s.io/apimachinery/pkg/watch/until.go b/vendor/k8s.io/apimachinery/pkg/watch/until.go index 6e139de59..c2772ddb5 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/until.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/until.go @@ -29,8 +29,8 @@ import ( // from false to true). type ConditionFunc func(event Event) (bool, error) -// errWatchClosed is returned when the watch channel is closed before timeout in Until. -var errWatchClosed = errors.New("watch closed before Until timeout") +// ErrWatchClosed is returned when the watch channel is closed before timeout in Until. +var ErrWatchClosed = errors.New("watch closed before Until timeout") // Until reads items from the watch until each provided condition succeeds, and then returns the last watch // encountered. The first condition that returns an error terminates the watch (and the event is also returned). 
@@ -65,7 +65,7 @@ func Until(timeout time.Duration, watcher Interface, conditions ...ConditionFunc select { case event, ok := <-ch: if !ok { - return lastEvent, errWatchClosed + return lastEvent, ErrWatchClosed } lastEvent = &event diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index dd49c41f9..5c1380b23 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -20,9 +20,9 @@ import ( "fmt" "sync" - "k8s.io/apimachinery/pkg/runtime" - "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/runtime" ) // Interface can be implemented by anything that knows how to watch and report changes. @@ -50,6 +50,7 @@ const ( ) // Event represents a single event to a watched resource. +// +k8s:deepcopy-gen=true type Event struct { Type EventType diff --git a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go new file mode 100644 index 000000000..0d266ffb6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go @@ -0,0 +1,42 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package watch + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + if in.Object == nil { + out.Object = nil + } else { + out.Object = in.Object.DeepCopyObject() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS new file mode 100644 index 000000000..8e8d9fce8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS @@ -0,0 +1,5 @@ +approvers: +- pwittrock +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go new file mode 100644 index 000000000..8205a4dd1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go @@ -0,0 +1,513 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json is forked from the Go standard library to enable us to find the +// field of a struct that a given JSON key maps to. 
+package json + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +const ( + patchStrategyTagKey = "patchStrategy" + patchMergeKeyTagKey = "patchMergeKey" +) + +// Finds the patchStrategy and patchMergeKey struct tag fields on a given +// struct field given the struct type and the JSON name of the field. +// It returns field type, a slice of patch strategies, merge key and error. +// TODO: fix the returned errors to be introspectable. +func LookupPatchMetadataForStruct(t reflect.Type, jsonField string) ( + elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + e = fmt.Errorf("merging an object in json but data type is not struct, instead is: %s", + t.Kind().String()) + return + } + jf := []byte(jsonField) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, jf) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, jf) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential struct field. + tjf := t.Field(f.index[0]) + // we must navigate down all the anonymously included structs in the chain + for i := 1; i < len(f.index); i++ { + tjf = tjf.Type.Field(f.index[i]) + } + patchStrategy := tjf.Tag.Get(patchStrategyTagKey) + patchMergeKey = tjf.Tag.Get(patchMergeKeyTagKey) + patchStrategies = strings.Split(patchStrategy, ",") + elemType = tjf.Type + return + } + e = fmt.Errorf("unable to find api field in struct %s for the json field %q", t.Name(), jsonField) + return +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + // index is the sequence of indexes from the containing type fields to this field. + // it is a slice because anonymous structs will need multiple navigation steps to correctly + // resolve the proper fields + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func (f field) String() string { + return fmt.Sprintf("{name: %s, type: %v, tag: %v, index: %v, omitEmpty: %v, quoted: %v}", f.name, f.typ, f.tag, f.index, f.omitEmpty, f.quoted) +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. 
+type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. 
+ fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. 
It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. 
+func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go index 9e45dbe1d..7ed1d1cff 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go @@ -44,7 +44,7 @@ func (e Equalities) AddFunc(eqFunc interface{}) error { return fmt.Errorf("expected func, got: %v", ft) } if ft.NumIn() != 2 { - return fmt.Errorf("expected three 'in' params, got: %v", ft) + return fmt.Errorf("expected two 'in' params, got: %v", ft) } if ft.NumOut() != 1 { return fmt.Errorf("expected one 'out' param, got: %v", ft) diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go deleted file mode 100644 index 67957ee33..000000000 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go +++ /dev/null @@ -1,91 +0,0 @@ -//This package is copied from Go library reflect/type.go. -//The struct tag library provides no way to extract the list of struct tags, only -//a specific tag -package reflect - -import ( - "fmt" - - "strconv" - "strings" -) - -type StructTag struct { - Name string - Value string -} - -func (t StructTag) String() string { - return fmt.Sprintf("%s:%q", t.Name, t.Value) -} - -type StructTags []StructTag - -func (tags StructTags) String() string { - s := make([]string, 0, len(tags)) - for _, tag := range tags { - s = append(s, tag.String()) - } - return "`" + strings.Join(s, " ") + "`" -} - -func (tags StructTags) Has(name string) bool { - for i := range tags { - if tags[i].Name == name { - return true - } - } - return false -} - -// ParseStructTags returns the full set of fields in a struct tag in the order they appear in -// the struct tag. -func ParseStructTags(tag string) (StructTags, error) { - tags := StructTags{} - for tag != "" { - // Skip leading space. - i := 0 - for i < len(tag) && tag[i] == ' ' { - i++ - } - tag = tag[i:] - if tag == "" { - break - } - - // Scan to colon. A space, a quote or a control character is a syntax error. - // Strictly speaking, control chars include the range [0x7f, 0x9f], not just - // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters - // as it is simpler to inspect the tag's bytes than the tag's runes. - i = 0 - for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { - i++ - } - if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { - break - } - name := string(tag[:i]) - tag = tag[i+1:] - - // Scan quoted string to find value. 
- i = 1 - for i < len(tag) && tag[i] != '"' { - if tag[i] == '\\' { - i++ - } - i++ - } - if i >= len(tag) { - break - } - qvalue := string(tag[:i+1]) - tag = tag[i+1:] - - value, err := strconv.Unquote(qvalue) - if err != nil { - return nil, err - } - tags = append(tags, StructTag{Name: name, Value: value}) - } - return tags, nil -} diff --git a/vendor/k8s.io/client-go/CHANGELOG.md b/vendor/k8s.io/client-go/CHANGELOG.md new file mode 100644 index 000000000..030611620 --- /dev/null +++ b/vendor/k8s.io/client-go/CHANGELOG.md @@ -0,0 +1,420 @@ +TODO: This document was manually maintained so might be incomplete. The +automation effort is tracked in +https://github.com/kubernetes/client-go/issues/234. + +Changes in `k8s.io/api` and `k8s.io/apimachinery` are mentioned here +because `k8s.io/client-go` depends on them. + +# v7.0.0 + +**Breaking Changes:** + +* Google Cloud Service Account email addresses can now be used in RBAC Role bindings since the default scopes now include the `userinfo.email` scope. This is a breaking change if the numeric uniqueIDs of the Google service accounts were being used in RBAC role bindings. The behavior can be overridden by explicitly specifying the scope values as comma-separated string in the `users[*].config.scopes` field in the `KUBECONFIG` file. + + * [https://github.com/kubernetes/kubernetes/pull/58141](https://github.com/kubernetes/kubernetes/pull/58141) + +* [k8s.io/api] The `ConfigOK` node condition has been renamed to `KubeletConfigOk`. + + * [https://github.com/kubernetes/kubernetes/pull/59905](https://github.com/kubernetes/kubernetes/pull/59905) + +**New Features:** + +* Subresource support is added to the dynamic client. + + * [https://github.com/kubernetes/kubernetes/pull/56717](https://github.com/kubernetes/kubernetes/pull/56717) + +* A watch method is added to the Fake Client. + + * [https://github.com/kubernetes/kubernetes/pull/57504](https://github.com/kubernetes/kubernetes/pull/57504) + +* `ListOptions` can be modified when creating a `ListWatch`. + + * [https://github.com/kubernetes/kubernetes/pull/57508](https://github.com/kubernetes/kubernetes/pull/57508) + +* A `/token` subresource for ServiceAccount is added. + + * [https://github.com/kubernetes/kubernetes/pull/58111](https://github.com/kubernetes/kubernetes/pull/58111) + +* If an informer delivery fails, the particular notification is skipped and continued the next time. + + * [https://github.com/kubernetes/kubernetes/pull/58394](https://github.com/kubernetes/kubernetes/pull/58394) + +* Certificate manager will no longer wait until the initial rotation succeeds or fails before returning from `Start()`. + + * [https://github.com/kubernetes/kubernetes/pull/58930](https://github.com/kubernetes/kubernetes/pull/58930) + +* [k8s.io/api] `VolumeScheduling` and `LocalPersistentVolume` features are beta and enabled by default. The PersistentVolume NodeAffinity alpha annotation is deprecated and will be removed in a future release. + + * [https://github.com/kubernetes/kubernetes/pull/59391](https://github.com/kubernetes/kubernetes/pull/59391) + +* [k8s.io/api] The `PodSecurityPolicy` API has been moved to the `policy/v1beta1` API group. The `PodSecurityPolicy` API in the `extensions/v1beta1` API group is deprecated and will be removed in a future release. + + * [https://github.com/kubernetes/kubernetes/pull/54933](https://github.com/kubernetes/kubernetes/pull/54933) + +* [k8s.io/api] ConfigMap objects now support binary data via a new `binaryData` field. 
+ + * [https://github.com/kubernetes/kubernetes/pull/57938](https://github.com/kubernetes/kubernetes/pull/57938) + +* [k8s.io/api] Service account TokenRequest API is added. + + * [https://github.com/kubernetes/kubernetes/pull/58027](https://github.com/kubernetes/kubernetes/pull/58027) + +* [k8s.io/api] FSType is added in CSI volume source to specify filesystems. + + * [https://github.com/kubernetes/kubernetes/pull/58209](https://github.com/kubernetes/kubernetes/pull/58209) + +* [k8s.io/api] v1beta1 VolumeAttachment API is added. + + * [https://github.com/kubernetes/kubernetes/pull/58462](https://github.com/kubernetes/kubernetes/pull/58462) + +* [k8s.io/api] `v1.Pod` now has a field `ShareProcessNamespace` to configure whether a single process namespace should be shared between all containers in a pod. This feature is in alpha preview. + + * [https://github.com/kubernetes/kubernetes/pull/58716](https://github.com/kubernetes/kubernetes/pull/58716) + +* [k8s.io/api] Add `NominatedNodeName` field to `PodStatus`. This field is set when a pod preempts other pods on the node. + + * [https://github.com/kubernetes/kubernetes/pull/58990](https://github.com/kubernetes/kubernetes/pull/58990) + +* [k8s.io/api] Promote `CSIPersistentVolumeSourc`e to beta. + + * [https://github.com/kubernetes/kubernetes/pull/59157](https://github.com/kubernetes/kubernetes/pull/59157) + +* [k8s.io/api] Promote `DNSPolicy` and `DNSConfig` in `PodSpec` to beta. + + * [https://github.com/kubernetes/kubernetes/pull/59771](https://github.com/kubernetes/kubernetes/pull/59771) + +* [k8s.io/api] External metric types are added to the HPA API. + + * [https://github.com/kubernetes/kubernetes/pull/60096](https://github.com/kubernetes/kubernetes/pull/60096) + +* [k8s.io/apimachinery] The `meta.k8s.io/v1alpha1` objects for retrieving tabular responses from the server (`Table`) or fetching just the `ObjectMeta` for an object (as `PartialObjectMetadata`) are now beta as part of `meta.k8s.io/v1beta1`. Clients may request alternate representations of normal Kubernetes objects by passing an `Accept` header like `application/json;as=Table;g=meta.k8s.io;v=v1beta1` or `application/json;as=PartialObjectMetadata;g=meta.k8s.io;v1=v1beta1`. Older servers will ignore this representation or return an error if it is not available. Clients may request fallback to the normal object by adding a non-qualified mime-type to their `Accept` header like `application/json` - the server will then respond with either the alternate representation if it is supported or the fallback mime-type which is the normal object response. + + * [https://github.com/kubernetes/kubernetes/pull/59059](https://github.com/kubernetes/kubernetes/pull/59059) + + +**Bug fixes and Improvements:** + +* Port-forwarding of TCP6 ports is fixed. + + * [https://github.com/kubernetes/kubernetes/pull/57457](https://github.com/kubernetes/kubernetes/pull/57457) + +* A race condition in SharedInformer that could violate the sequential delivery guarantee and cause panics on shutdown is fixed. + + * [https://github.com/kubernetes/kubernetes/pull/59828](https://github.com/kubernetes/kubernetes/pull/59828) + +* [k8s.io/api] PersistentVolume flexVolume sources can now reference secrets in a namespace other than the PersistentVolumeClaim's namespace. + + * [https://github.com/kubernetes/kubernetes/pull/56460](https://github.com/kubernetes/kubernetes/pull/56460) + +* [k8s.io/apimachinery] YAMLDecoder Read can now return the number of bytes read. 
+ + * [https://github.com/kubernetes/kubernetes/pull/57000](https://github.com/kubernetes/kubernetes/pull/57000) + +* [k8s.io/apimachinery] YAMLDecoder Read now tracks rest of buffer on `io.ErrShortBuffer`. + + * [https://github.com/kubernetes/kubernetes/pull/58817](https://github.com/kubernetes/kubernetes/pull/58817) + +* [k8s.io/apimachinery] Prompt required merge key in the error message while applying a strategic merge patch. + + * [https://github.com/kubernetes/kubernetes/pull/57854](https://github.com/kubernetes/kubernetes/pull/57854) + +# v6.0.0 + +**Breaking Changes:** + +* If you upgrade your client-go libs and use the `AppsV1() or Apps()` interface, please note that the default garbage collection behavior is changed. + + * [https://github.com/kubernetes/kubernetes/pull/55148](https://github.com/kubernetes/kubernetes/pull/55148) + +* Swagger 1.2 retriever `DiscoveryClient.SwaggerSchema` was removed from the discovery client + + * [https://github.com/kubernetes/kubernetes/pull/53441](https://github.com/kubernetes/kubernetes/pull/53441) + +* Informers got a NewFilteredSharedInformerFactory to e.g. filter by namespace + + * [https://github.com/kubernetes/kubernetes/pull/54660](https://github.com/kubernetes/kubernetes/pull/54660) + +* [k8s.io/api] The dynamic admission webhook is split into two kinds, mutating and validating. +The kinds have changed completely and old code must be ported to `admissionregistration.k8s.io/v1beta1` - +`MutatingWebhookConfiguration` and `ValidatingWebhookConfiguration` + + * [https://github.com/kubernetes/kubernetes/pull/55282](https://github.com/kubernetes/kubernetes/pull/55282) + +* [k8s.io/api] Renamed `core/v1.ScaleIOVolumeSource` to `ScaleIOPersistentVolumeSource` + + * [https://github.com/kubernetes/kubernetes/pull/54013](https://github.com/kubernetes/kubernetes/pull/54013) + +* [k8s.io/api] Renamed `core/v1.RBDVolumeSource` to `RBDPersistentVolumeSource` + + * [https://github.com/kubernetes/kubernetes/pull/54302](https://github.com/kubernetes/kubernetes/pull/54302) + +* [k8s.io/api] Removed `core/v1.CreatedByAnnotation` + + * [https://github.com/kubernetes/kubernetes/pull/54445](https://github.com/kubernetes/kubernetes/pull/54445) + +* [k8s.io/api] Renamed `core/v1.StorageMediumHugepages` to `StorageMediumHugePages` + + * [https://github.com/kubernetes/kubernetes/pull/54748](https://github.com/kubernetes/kubernetes/pull/54748) + +* [k8s.io/api] `core/v1.Taint.TimeAdded` became a pointer + + * [https://github.com/kubernetes/kubernetes/pull/43016](https://github.com/kubernetes/kubernetes/pull/43016) + +* [k8s.io/api] `core/v1.DefaultHardPodAffinitySymmetricWeight` type changed from int to int32 + + * [https://github.com/kubernetes/kubernetes/pull/53850](https://github.com/kubernetes/kubernetes/pull/53850) + +* [k8s.io/apimachinery] `ObjectCopier` interface was removed (requires switch to new generators with DeepCopy methods) + + * [https://github.com/kubernetes/kubernetes/pull/53525](https://github.com/kubernetes/kubernetes/pull/53525) + +**New Features:** + +* Certificate manager was moved from kubelet to `k8s.io/client-go/util/certificates` + + * [https://github.com/kubernetes/kubernetes/pull/49654](https://github.com/kubernetes/kubernetes/pull/49654) + +* [k8s.io/api] Workloads api types are promoted to `apps/v1` version + + * [https://github.com/kubernetes/kubernetes/pull/53679](https://github.com/kubernetes/kubernetes/pull/53679) + +* [k8s.io/api] Added `storage.k8s.io/v1alpha1` API group + + * 
[https://github.com/kubernetes/kubernetes/pull/54463](https://github.com/kubernetes/kubernetes/pull/54463) + +* [k8s.io/api] Added support for conditions in StatefulSet status + + * [https://github.com/kubernetes/kubernetes/pull/55268](https://github.com/kubernetes/kubernetes/pull/55268) + +* [k8s.io/api] Added support for conditions in DaemonSet status + + * [https://github.com/kubernetes/kubernetes/pull/55272](https://github.com/kubernetes/kubernetes/pull/55272) + +* [k8s.io/apimachinery] Added polymorphic scale client in `k8s.io/client-go/scale`, which supports scaling of resources in arbitrary API groups + + * [https://github.com/kubernetes/kubernetes/pull/53743](https://github.com/kubernetes/kubernetes/pull/53743) + +* [k8s.io/apimachinery] `meta.MetadataAccessor` got API chunking support + + * [https://github.com/kubernetes/kubernetes/pull/53768](https://github.com/kubernetes/kubernetes/pull/53768) + +* [k8s.io/apimachinery] `unstructured.Unstructured` got getters and setters + + * [https://github.com/kubernetes/kubernetes/pull/51940](https://github.com/kubernetes/kubernetes/pull/51940) + +**Bug fixes and Improvements:** + +* The body in glog output is not truncated with log level 10 + + * [https://github.com/kubernetes/kubernetes/pull/54801](https://github.com/kubernetes/kubernetes/pull/54801) + +* [k8s.io/api] Unset `creationTimestamp` field is output as null if encoded from an unstructured object + + * [https://github.com/kubernetes/kubernetes/pull/53464](https://github.com/kubernetes/kubernetes/pull/53464) + +* [k8s.io/apimachinery] Redirect behavior is restored for proxy subresources + + * [https://github.com/kubernetes/kubernetes/pull/52933](https://github.com/kubernetes/kubernetes/pull/52933) + +* [k8s.io/apimachinery] Random string generation functions are optimized + + * [https://github.com/kubernetes/kubernetes/pull/53720](https://github.com/kubernetes/kubernetes/pull/53720) + +# v5.0.1 + +Bug fix: picked up a security fix [kubernetes/kubernetes#53443](https://github.com/kubernetes/kubernetes/pull/53443) for `PodSecurityPolicy`. 
+ +# v5.0.0 + +**New features:** + +* Added paging support + + * [https://github.com/kubernetes/kubernetes/pull/51876](https://github.com/kubernetes/kubernetes/pull/51876) + +* Added support for client-side spam filtering of events + + * [https://github.com/kubernetes/kubernetes/pull/47367](https://github.com/kubernetes/kubernetes/pull/47367) + +* Added support for http etag and caching + + * [https://github.com/kubernetes/kubernetes/pull/50404](https://github.com/kubernetes/kubernetes/pull/50404) + +* Added priority queue support to informer cache + + * [https://github.com/kubernetes/kubernetes/pull/49752](https://github.com/kubernetes/kubernetes/pull/49752) + +* Added openstack auth provider + + * [https://github.com/kubernetes/kubernetes/pull/39587](https://github.com/kubernetes/kubernetes/pull/39587) + +* Added metrics for checking reflector health + + * [https://github.com/kubernetes/kubernetes/pull/48224](https://github.com/kubernetes/kubernetes/pull/48224) + +* Client-go now includes the leaderelection package + + * [https://github.com/kubernetes/kubernetes/pull/39173](https://github.com/kubernetes/kubernetes/pull/39173) + +**API changes:** + +* Promoted Autoscaling v2alpha1 to v2beta1 + + * [https://github.com/kubernetes/kubernetes/pull/50708](https://github.com/kubernetes/kubernetes/pull/50708) + +* Promoted CronJobs to batch/v1beta1 + + * [https://github.com/kubernetes/kubernetes/pull/41901](https://github.com/kubernetes/kubernetes/pull/41901) + +* Promoted rbac.authorization.k8s.io/v1beta1 to rbac.authorization.k8s.io/v1 + + * [https://github.com/kubernetes/kubernetes/pull/49642](https://github.com/kubernetes/kubernetes/pull/49642) + +* Added a new API version apps/v1beta2 + + * [https://github.com/kubernetes/kubernetes/pull/48746](https://github.com/kubernetes/kubernetes/pull/48746) + +* Added a new API version scheduling/v1alpha1 + + * [https://github.com/kubernetes/kubernetes/pull/48377](https://github.com/kubernetes/kubernetes/pull/48377) + +**Breaking changes:** + +* Moved pkg/api and pkg/apis to [k8s.io/api](https://github.com/kubernetes/api). Other kubernetes repositories also import types from there, so they are composable with client-go. + +* Removed helper functions in pkg/api and pkg/apis. They are planned to be exported in other repos. The issue is tracked [here](https://github.com/kubernetes/kubernetes/issues/48209#issuecomment-314537745). During the transition, you'll have to copy the helper functions to your projects. + +* The discovery client now fetches the protobuf encoded OpenAPI schema and returns `openapi_v2.Document` + + * [https://github.com/kubernetes/kubernetes/pull/46803](https://github.com/kubernetes/kubernetes/pull/46803) + +* Enforced explicit references to API group client interfaces in clientsets to avoid ambiguity. + + * [https://github.com/kubernetes/kubernetes/pull/49370](https://github.com/kubernetes/kubernetes/pull/49370) + +* The generic RESTClient type (`k8s.io/client-go/rest`) no longer exposes `LabelSelectorParam` or `FieldSelectorParam` methods - use `VersionedParams` with `metav1.ListOptions` instead. The `UintParam` method has been removed. The `timeout` parameter will no longer cause an error when using `Param()`. + + * [https://github.com/kubernetes/kubernetes/pull/48991](https://github.com/kubernetes/kubernetes/pull/48991) + +# v4.0.0 + +No significant changes since v4.0.0-beta.0. 
+ +# v4.0.0-beta.0 + +**New features:** + +* Added OpenAPISchema support in the discovery client + + * [https://github.com/kubernetes/kubernetes/pull/44531](https://github.com/kubernetes/kubernetes/pull/44531) + +* Added mutation cache filter: MutationCache is able to take the result of update operations and stores them in an LRU that can be used to provide a more current view of a requested object. + + * [https://github.com/kubernetes/kubernetes/pull/45838](https://github.com/kubernetes/kubernetes/pull/45838/commits/f88c7725b4f9446c652d160bdcfab7c6201bddea) + +* Moved the remotecommand package (used by `kubectl exec/attach`) to client-go + + * [https://github.com/kubernetes/kubernetes/pull/41331](https://github.com/kubernetes/kubernetes/pull/41331) + +* Added support for following redirects to the SpdyRoundTripper + + * [https://github.com/kubernetes/kubernetes/pull/44451](https://github.com/kubernetes/kubernetes/pull/44451) + +* Added Azure Active Directory plugin + + * [https://github.com/kubernetes/kubernetes/pull/43987](https://github.com/kubernetes/kubernetes/pull/43987) + +**Usability improvements:** + +* Added several new examples and reorganized client-go/examples + + * [Related PRs](https://github.com/kubernetes/kubernetes/commits/release-1.7/staging/src/k8s.io/client-go/examples) + +**API changes:** + +* Added networking.k8s.io/v1 API + + * [https://github.com/kubernetes/kubernetes/pull/39164](https://github.com/kubernetes/kubernetes/pull/39164) + +* ControllerRevision type added for StatefulSet and DaemonSet history. + + * [https://github.com/kubernetes/kubernetes/pull/45867](https://github.com/kubernetes/kubernetes/pull/45867) + +* Added support for initializers + + * [https://github.com/kubernetes/kubernetes/pull/38058](https://github.com/kubernetes/kubernetes/pull/38058) + +* Added admissionregistration.k8s.io/v1alpha1 API + + * [https://github.com/kubernetes/kubernetes/pull/46294](https://github.com/kubernetes/kubernetes/pull/46294) + +**Breaking changes:** + +* Moved client-go/util/clock to apimachinery/pkg/util/clock + + * [https://github.com/kubernetes/kubernetes/pull/45933](https://github.com/kubernetes/kubernetes/pull/45933/commits/8013212db54e95050c622675c6706cce5de42b45) + +* Some [API helpers](https://github.com/kubernetes/client-go/blob/release-3.0/pkg/api/helpers.go) were removed. + +* Dynamic client takes GetOptions as an input parameter + + * [https://github.com/kubernetes/kubernetes/pull/47251](https://github.com/kubernetes/kubernetes/pull/47251) + +**Bug fixes:** + +* PortForwarder: don't log an error if net.Listen fails. [https://github.com/kubernetes/kubernetes/pull/44636](https://github.com/kubernetes/kubernetes/pull/44636) + +* oidc auth plugin not to override the Auth header if it's already exits. [https://github.com/kubernetes/kubernetes/pull/45529](https://github.com/kubernetes/kubernetes/pull/45529) + +* The --namespace flag is now honored for in-cluster clients that have an empty configuration. [https://github.com/kubernetes/kubernetes/pull/46299](https://github.com/kubernetes/kubernetes/pull/46299) + +* GCP auth plugin no longer overwrites existing Authorization headers. [https://github.com/kubernetes/kubernetes/pull/45575](https://github.com/kubernetes/kubernetes/pull/45575) + +# v3.0.0 + +Bug fixes: +* Use OS-specific libs when computing client User-Agent in kubectl, etc. 
(https://github.com/kubernetes/kubernetes/pull/44423) +* kubectl commands run inside a pod using a kubeconfig file now use the namespace specified in the kubeconfig file, instead of using the pod namespace. If no kubeconfig file is used, or the kubeconfig does not specify a namespace, the pod namespace is still used as a fallback. (https://github.com/kubernetes/kubernetes/pull/44570) +* Restored the ability of kubectl running inside a pod to consume resource files specifying a different namespace than the one the pod is running in. (https://github.com/kubernetes/kubernetes/pull/44862) + +# v3.0.0-beta.0 + +* Added dependency on k8s.io/apimachinery. The impacts include changing import path of API objects like `ListOptions` from `k8s.io/client-go/pkg/api/v1` to `k8s.io/apimachinery/pkg/apis/meta/v1`. +* Added generated listers (listers/) and informers (informers/) +* Kubernetes API changes: + * Added client support for: + * authentication/v1 + * authorization/v1 + * autoscaling/v2alpha1 + * rbac/v1beta1 + * settings/v1alpha1 + * storage/v1 + * Changed client support for: + * certificates from v1alpha1 to v1beta1 + * policy from v1alpha1 to v1beta1 + * Deleted client support for: + * extensions/v1beta1#Job +* CHANGED: pass typed options to dynamic client (https://github.com/kubernetes/kubernetes/pull/41887) + +# v2.0.0 + +* Included bug fixes in k8s.io/kuberentes release-1.5 branch, up to commit + bde8578d9675129b7a2aa08f1b825ec6cc0f3420 + +# v2.0.0-alpha.1 + +* Removed top-level version folder (e.g., 1.4 and 1.5), switching to maintaining separate versions + in separate branches. +* Clientset supported multiple versions per API group +* Added ThirdPartyResources example +* Kubernetes API changes + * Apps API group graduated to v1beta1 + * Policy API group graduated to v1beta1 + * Added support for batch/v2alpha1/cronjob + * Renamed PetSet to StatefulSet + + +# v1.5.0 + +* Included the auth plugin (https://github.com/kubernetes/kubernetes/pull/33334) +* Added timeout field to RESTClient config (https://github.com/kubernetes/kubernetes/pull/33958) diff --git a/vendor/k8s.io/client-go/CONTRIBUTING.md b/vendor/k8s.io/client-go/CONTRIBUTING.md new file mode 100644 index 000000000..b01f8abb0 --- /dev/null +++ b/vendor/k8s.io/client-go/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/client-go](https://git.k8s.io/kubernetes/staging/src/k8s.io/client-go) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/vendor/k8s.io/client-go/INSTALL.md b/vendor/k8s.io/client-go/INSTALL.md new file mode 100644 index 000000000..42656284f --- /dev/null +++ b/vendor/k8s.io/client-go/INSTALL.md @@ -0,0 +1,162 @@ +# Installing client-go + +## For the casual user + +If you want to write a simple script, don't care about a reproducible client +library install, don't mind getting head (which may be less stable than a +particular release), then simply: + +```sh +$ go get k8s.io/client-go/... 
+```
+
+This will install `k8s.io/client-go` in your `$GOPATH`. `k8s.io/client-go`
+includes most of its own dependencies in its `k8s.io/client-go/vendor` path,
+except for `k8s.io/apimachinery` and `glog`. `go get` will recursively download
+these excluded repos to your `$GOPATH`, if they don't already exist. If
+`k8s.io/apimachinery` preexisted in `$GOPATH`, you also need to:
+
+```sh
+$ go get -u k8s.io/apimachinery/...
+```
+
+because the head of client-go is only guaranteed to work with the head of
+apimachinery.
+
+We excluded `k8s.io/apimachinery` and `glog` from `k8s.io/client-go/vendor` to
+prevent `go get` users from hitting issues like
+[#19](https://github.com/kubernetes/client-go/issues/19) and
+[#83](https://github.com/kubernetes/client-go/issues/83). If your project shares
+other dependencies with client-go, and you hit issues similar to #19 or #83,
+then you'll need to use the dependency management approach described in the next section.
+
+Note: the official Go policy is that libraries should not vendor their
+dependencies. This is unworkable for us, since our dependencies change and HEAD
+on every dependency has not necessarily been tested with client-go. In fact,
+HEAD from all dependencies may not even compile with client-go!
+
+## Dependency management for the serious (or reluctant) user
+
+Reasons why you might need to use a dependency management system:
+* You use a dependency that client-go also uses, and don't want two copies of
+  the dependency compiled into your application. For some dependencies with
+  singletons or global inits (e.g. `glog`) this wouldn't even compile...
+* You want to lock in a particular version (so you don't have to change your
+  code every time we change a public interface).
+* You want your install to be reproducible. For example, for your CI system or
+  for new team members.
+
+There are three tools you could in theory use for this. Instructions
+for each follow.
+
+### Godep
+
+[godep](https://github.com/tools/godep) is an older dependency management tool, which is
+used by the main Kubernetes repo and `client-go` to manage dependencies.
+
+Before proceeding with the below instructions, you should ensure that your
+$GOPATH is empty except for containing your own package and its dependencies,
+and that you have a copy of godep somewhere in your $PATH.
+
+To install `client-go` and place its dependencies in your `$GOPATH`:
+
+```sh
+go get k8s.io/client-go/...
+cd $GOPATH/src/k8s.io/client-go
+git checkout v6.0.0
+# cd 1.5 # only necessary with 1.5 and 1.4 clients.
+godep restore ./...
+```
+
+At this point, `client-go`'s dependencies have been placed in your $GOPATH, but
+if you were to build, `client-go` would still see its own copy of its
+dependencies in its `vendor` directory. You have two options at this point.
+
+If you would like to keep dependencies in your own project's vendor directory,
+then you can continue like this:
+
+```sh
+cd $GOPATH/src/
+godep save ./...
+```
+
+Alternatively, if you want to build using the dependencies in your `$GOPATH`,
+then `rm -rf vendor/` to remove `client-go`'s copy of its dependencies.
+
+### Glide
+
+[Glide](https://github.com/Masterminds/glide) is another popular dependency
+management tool for Go. Glide will manage your /vendor directory, but unlike
+godep, will not use or modify your $GOPATH (there's no equivalent of
+`godep restore` or `godep save`).
+
+Generally, it's best to avoid Glide's many subcommands, favoring modifying
+Glide's manifest file (`glide.yaml`) directly, then running
+`glide update --strip-vendor`. First create a `glide.yaml` file at the root of
+your project:
+
+```yaml
+package: ( your project's import path ) # e.g. github.com/foo/bar
+import:
+- package: k8s.io/client-go
+  version: v6.0.0
+```
+
+Second, add a Go file that imports `client-go` somewhere in your project,
+otherwise `client-go`'s dependencies will not be added to your project's
+vendor/. Then run the following command in the same directory as `glide.yaml`:
+
+```sh
+glide update --strip-vendor
+```
+
+This can also be abbreviated as:
+
+```sh
+glide up -v
+```
+
+At this point, `k8s.io/client-go` should be added to your project's vendor/.
+`client-go`'s dependencies should be flattened and be added to your project's
+vendor/ as well.
+
+Glide will detect the versions of the dependencies that `client-go` specifies in
+its Godeps.json file, and automatically set the versions of these
+imports in your /vendor directory. It will also record the detected version of
+all dependencies in the `glide.lock` file.
+
+Projects that require a different version of a dependency than `client-go`
+requests can override the version manually in `glide.yaml`. For example:
+
+```yaml
+package: ( your project's import path ) # e.g. github.com/foo/bar
+import:
+- package: k8s.io/client-go
+  version: v6.0.0
+# Use a newer version of go-spew even though client-go wants an old one.
+- package: github.com/davecgh/go-spew
+  version: v1.1.0
+```
+
+After modifying `glide.yaml`, run `glide up -v` again to re-populate your /vendor directory.
+
+Optionally, Glide users can also use [`glide-vc`](https://github.com/sgotti/glide-vc)
+after running `glide up -v` to remove unused files from /vendor.
+
+### Dep (Not supported yet!)
+
+[dep](https://github.com/golang/dep) is an up-and-coming dependency management
+tool, which has the goal of being accepted as part of the standard Go toolchain.
+However, client-go does **NOT** work well with `dep` yet. To support `dep`, we
+need to fix at least two issues:
+1. publish native `Gopkg.toml` in client-go and other k8s.io repos, like `k8s.io/apimachinery`;
+2. find a way to express transitive constraints (see https://github.com/golang/dep/issues/1124).
+
+As a workaround, which may or may not be worthwhile, you can specify all
+client-go dependencies manually as
+[override](https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md#override)
+in Gopkg.toml with the versions listed in [Godeps.json](./Godeps/Godeps.json),
+and manually update them when you upgrade the client-go version.
+
+We are actively working on the two issues blocking the use of `dep`. In the
+meantime, we recommend using `glide` or `godep`.
diff --git a/vendor/k8s.io/client-go/OWNERS b/vendor/k8s.io/client-go/OWNERS
new file mode 100644
index 000000000..b39916829
--- /dev/null
+++ b/vendor/k8s.io/client-go/OWNERS
@@ -0,0 +1,15 @@
+approvers:
+- caesarxuchao
+- deads2k
+- lavalamp
+- liggitt
+- smarterclayton
+- sttts
+reviewers:
+- caesarxuchao
+- deads2k
+- lavalamp
+- liggitt
+- soltysh
+- sttts
+- yliaog
diff --git a/vendor/k8s.io/client-go/README.md b/vendor/k8s.io/client-go/README.md
new file mode 100644
index 000000000..4e5e42207
--- /dev/null
+++ b/vendor/k8s.io/client-go/README.md
@@ -0,0 +1,192 @@
+# client-go
+
+Go clients for talking to a [kubernetes](http://kubernetes.io/) cluster.
+
+We currently recommend using the v7.0.0 tag. See [INSTALL.md](/INSTALL.md) for
+detailed installation instructions. `go get k8s.io/client-go/...` works, but
+will give you head and doesn't handle the dependencies well.
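+
+As a quick orientation, the sketch below shows roughly what a minimal consumer
+of this library looks like. It is illustrative only: it assumes the program
+runs inside a cluster with a mounted service account (see the in-cluster
+example linked under "How to use it" below for the maintained version), and the
+package name, error handling, and output are ours, not part of client-go.
+
+```go
+// Illustrative sketch, not part of client-go: list pods from inside a cluster.
+package main
+
+import (
+	"fmt"
+	"log"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+)
+
+func main() {
+	// Build a rest.Config from the service account token mounted into the pod.
+	config, err := rest.InClusterConfig()
+	if err != nil {
+		log.Fatalf("building in-cluster config: %v", err)
+	}
+
+	// Create the typed clientset used to talk to the Kubernetes API.
+	clientset, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		log.Fatalf("creating clientset: %v", err)
+	}
+
+	// List pods in all namespaces as a simple smoke test of the connection.
+	pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
+	if err != nil {
+		log.Fatalf("listing pods: %v", err)
+	}
+	fmt.Printf("there are %d pods in the cluster\n", len(pods.Items))
+}
+```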
+ +[![BuildStatus Widget]][BuildStatus Result] +[![GoReport Widget]][GoReport Status] +[![GoDocWidget]][GoDocReference] + +[BuildStatus Result]: https://travis-ci.org/kubernetes/client-go +[BuildStatus Widget]: https://travis-ci.org/kubernetes/client-go.svg?branch=master + +[GoReport Status]: https://goreportcard.com/report/github.com/kubernetes/client-go +[GoReport Widget]: https://goreportcard.com/badge/github.com/kubernetes/client-go + +[GoDocWidget]: https://godoc.org/k8s.io/client-go?status.svg +[GoDocReference]:https://godoc.org/k8s.io/client-go + +## Table of Contents + +- [What's included](#whats-included) +- [Versioning](#versioning) + - [Compatibility: your code <-> client-go](#compatibility-your-code---client-go) + - [Compatibility: client-go <-> Kubernetes clusters](#compatibility-client-go---kubernetes-clusters) + - [Compatibility matrix](#compatibility-matrix) + - [Why do the 1.4 and 1.5 branch contain top-level folder named after the version?](#why-do-the-14-and-15-branch-contain-top-level-folder-named-after-the-version) +- [Kubernetes tags](#kubernetes-tags) +- [How to get it](#how-to-get-it) +- [How to use it](#how-to-use-it) +- [Dependency management](#dependency-management) +- [Contributing code](#contributing-code) + +### What's included + +* The `kubernetes` package contains the clientset to access Kubernetes API. +* The `discovery` package is used to discover APIs supported by a Kubernetes API server. +* The `dynamic` package contains a dynamic client that can perform generic operations on arbitrary Kubernetes API objects. +* The `transport` package is used to set up auth and start a connection. +* The `tools/cache` package is useful for writing controllers. + +### Versioning + +`client-go` follows [semver](http://semver.org/). We will not make +backwards-incompatible changes without incrementing the major version number. A +change is backwards-incompatible either if it *i)* changes the public interfaces +of `client-go`, or *ii)* makes `client-go` incompatible with otherwise supported +versions of Kubernetes clusters. + +Changes that add features in a backwards-compatible way will result in bumping +the minor version (second digit) number. + +Bugfixes will result in the patch version (third digit) changing. PRs that are +cherry-picked into an older Kubernetes release branch will result in an update +to the corresponding branch in `client-go`, with a corresponding new tag +changing the patch version. + +A consequence of this is that `client-go` version numbers will be unrelated to +Kubernetes version numbers. + +#### Branches and tags. + +We will create a new branch and tag for each increment in the major version number or +minor version number. We will create only a new tag for each increment in the patch +version number. See [semver](http://semver.org/) for definitions of major, +minor, and patch. + +The master branch will track HEAD in the main Kubernetes repo and +accumulate changes. Consider HEAD to have the version `x.(y+1).0-alpha` or +`(x+1).0.0-alpha` (depending on whether it has accumulated a breaking change or +not), where `x` and `y` are the current major and minor versions. + +#### Compatibility: your code <-> client-go + +`client-go` follows [semver](http://semver.org/), so until the major version of +client-go gets increased, your code will compile and will continue to work with +explicitly supported versions of Kubernetes clusters. 
You must use a dependency +management system and pin a specific major version of `client-go` to get this +benefit, as HEAD follows the upstream Kubernetes repo. + +#### Compatibility: client-go <-> Kubernetes clusters + +Since Kubernetes is backwards compatible with clients, older `client-go` +versions will work with many different Kubernetes cluster versions. + +We will backport bugfixes--but not new features--into older versions of +`client-go`. + + +#### Compatibility matrix + +| | Kubernetes 1.4 | Kubernetes 1.5 | Kubernetes 1.6 | Kubernetes 1.7 | Kubernetes 1.8 | Kubernetes 1.9 | Kubernetes 1.10 | +|---------------------|----------------|----------------|----------------|----------------|----------------|----------------|-----------------| +| client-go 1.4 | ✓ | - | - | - | - | - | - | +| client-go 1.5 | + | - | - | - | - | - | - | +| client-go 2.0 | +- | ✓ | +- | +- | +- | +- | +- | +| client-go 3.0 | +- | +- | ✓ | - | +- | +- | +- | +| client-go 4.0 | +- | +- | +- | ✓ | +- | +- | +- | +| client-go 5.0 | +- | +- | +- | +- | ✓ | +- | +- | +| client-go 6.0 | +- | +- | +- | +- | +- | ✓ | +- | +| client-go 7.0 | +- | +- | +- | +- | +- | +- | ✓ | +| client-go HEAD | +- | +- | +- | +- | +- | + | + | + +Key: + +* `✓` Exactly the same features / API objects in both client-go and the Kubernetes + version. +* `+` client-go has features or API objects that may not be present in the + Kubernetes cluster, either due to that client-go has additional new API, or + that the server has removed old API. However, everything they have in + common (i.e., most APIs) will work. Please note that alpha APIs may vanish or + change significantly in a single release. +* `-` The Kubernetes cluster has features the client-go library can't use, + either due to the server has additional new API, or that client-go has + removed old API. However, everything they share in common (i.e., most APIs) + will work. + +See the [CHANGELOG](./CHANGELOG.md) for a detailed description of changes +between client-go versions. + +| Branch | Canonical source code location | Maintenance status | +|----------------|--------------------------------------|-------------------------------| +| client-go 1.4 | Kubernetes main repo, 1.4 branch | = - | +| client-go 1.5 | Kubernetes main repo, 1.5 branch | = - | +| client-go 2.0 | Kubernetes main repo, 1.5 branch | = - | +| client-go 3.0 | Kubernetes main repo, 1.6 branch | = - | +| client-go 4.0 | Kubernetes main repo, 1.7 branch | = - | +| client-go 5.0 | Kubernetes main repo, 1.8 branch | ✓ | +| client-go 6.0 | Kubernetes main repo, 1.9 branch | ✓ | +| client-go 7.0 | Kubernetes main repo, 1.10 branch | ✓ | +| client-go HEAD | Kubernetes main repo, master branch | ✓ | + +Key: + +* `✓` Changes in main Kubernetes repo are actively published to client-go by a bot +* `=` Maintenance is manual, only severe security bugs will be patched. +* `-` Deprecated; please upgrade. + +#### Deprecation policy + +We will maintain branches for at least six months after their first stable tag +is cut. (E.g., the clock for the release-2.0 branch started ticking when we +tagged v2.0.0, not when we made the first alpha.) This policy applies to +every version greater than or equal to 2.0. + +#### Why do the 1.4 and 1.5 branch contain top-level folder named after the version? + +For the initial release of client-go, we thought it would be easiest to keep +separate directories for each minor version. That soon proved to be a mistake. 
+We are keeping the top-level folders in the 1.4 and 1.5 branches so that
+existing users won't be broken.
+
+### Kubernetes tags
+
+As of April 2018, client-go is still a mirror of
+[k8s.io/kubernetes/staging/src/client-go](https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/client-go);
+code development is still done in the staging area. Since the Kubernetes 1.8
+release, when syncing the code from the staging area, we also sync the Kubernetes
+version tags to client-go, prefixed with "kubernetes-". For example, if you check
+out the `kubernetes-v1.8.0` tag in client-go, the code you get is exactly the
+same as if you check out the `v1.8.0` tag in kubernetes, and change directory to
+`staging/src/k8s.io/client-go`. The purpose is to let users quickly find matching
+commits among published repos, like
+[sample-apiserver](https://github.com/kubernetes/sample-apiserver),
+[apiextension-apiserver](https://github.com/kubernetes/apiextensions-apiserver),
+etc. The Kubernetes version tag does NOT claim any backwards compatibility
+guarantees for client-go. Please check the [semantic versions](#versioning) if
+you care about backwards compatibility.
+
+### How to get it
+
+You can use `go get k8s.io/client-go/...` to get client-go, but **you will get
+the unstable master branch** and `client-go`'s vendored dependencies will not be
+added to your `$GOPATH`. So we think most users will want to use a dependency
+management system. See [INSTALL.md](/INSTALL.md) for detailed instructions.
+
+### How to use it
+
+If your application runs in a Pod in the cluster, please refer to the
+in-cluster [example](examples/in-cluster-client-configuration), otherwise please
+refer to the out-of-cluster [example](examples/out-of-cluster-client-configuration).
+
+### Dependency management
+
+If your application depends on a package that client-go depends on, and you let the Go compiler find the dependency in `GOPATH`, you will end up with duplicated dependencies: one copy from the `GOPATH`, and one from the vendor folder of client-go. This will cause unexpected runtime errors such as flag redefinition, since the Go compiler ends up importing both packages separately, even if they are exactly the same thing. If this happens, you can either
+* run `godep restore` ([godep](https://github.com/tools/godep)) in the client-go/ folder, then remove the vendor folder of client-go. Then the packages in your GOPATH will be the only copy.
+* or run `godep save` in your application folder to flatten all dependencies.
+
+### Contributing code
+Please send pull requests against the client packages in the Kubernetes main [repository](https://github.com/kubernetes/kubernetes). Changes in the staging area will be published to this repository every day.
diff --git a/vendor/k8s.io/client-go/SECURITY_CONTACTS b/vendor/k8s.io/client-go/SECURITY_CONTACTS
new file mode 100644
index 000000000..0648a8ebf
--- /dev/null
+++ b/vendor/k8s.io/client-go/SECURITY_CONTACTS
@@ -0,0 +1,17 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Team to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +cjcullen +jessfraz +liggitt +philips +tallclair diff --git a/vendor/k8s.io/client-go/code-of-conduct.md b/vendor/k8s.io/client-go/code-of-conduct.md new file mode 100644 index 000000000..0d15c00cf --- /dev/null +++ b/vendor/k8s.io/client-go/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/client-go/discovery/cached_discovery.go b/vendor/k8s.io/client-go/discovery/cached_discovery.go new file mode 100644 index 000000000..aca46546e --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/cached_discovery.go @@ -0,0 +1,282 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "errors" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "sync" + "time" + + "github.com/golang/glog" + "github.com/googleapis/gnostic/OpenAPIv2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/kubernetes/scheme" + restclient "k8s.io/client-go/rest" +) + +// CachedDiscoveryClient implements the functions that discovery server-supported API groups, +// versions and resources. +type CachedDiscoveryClient struct { + delegate DiscoveryInterface + + // cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. + cacheDirectory string + + // ttl is how long the cache should be considered valid + ttl time.Duration + + // mutex protects the variables below + mutex sync.Mutex + + // ourFiles are all filenames of cache files created by this process + ourFiles map[string]struct{} + // invalidated is true if all cache files should be ignored that are not ours (e.g. after Invalidate() was called) + invalidated bool + // fresh is true if all used cache files were ours + fresh bool +} + +var _ CachedDiscoveryInterface = &CachedDiscoveryClient{} + +// ServerResourcesForGroupVersion returns the supported resources for a group and version. +func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + filename := filepath.Join(d.cacheDirectory, groupVersion, "serverresources.json") + cachedBytes, err := d.getCachedFile(filename) + // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. 
+ if err == nil { + cachedResources := &metav1.APIResourceList{} + if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil { + glog.V(10).Infof("returning cached discovery info from %v", filename) + return cachedResources, nil + } + } + + liveResources, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) + if err != nil { + glog.V(3).Infof("skipped caching discovery info due to %v", err) + return liveResources, err + } + if liveResources == nil || len(liveResources.APIResources) == 0 { + glog.V(3).Infof("skipped caching discovery info, no resources found") + return liveResources, err + } + + if err := d.writeCachedFile(filename, liveResources); err != nil { + glog.V(3).Infof("failed to write cache to %v due to %v", filename, err) + } + + return liveResources, nil +} + +// ServerResources returns the supported resources for all groups and versions. +func (d *CachedDiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { + return ServerResources(d) +} + +func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { + filename := filepath.Join(d.cacheDirectory, "servergroups.json") + cachedBytes, err := d.getCachedFile(filename) + // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. + if err == nil { + cachedGroups := &metav1.APIGroupList{} + if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil { + glog.V(10).Infof("returning cached discovery info from %v", filename) + return cachedGroups, nil + } + } + + liveGroups, err := d.delegate.ServerGroups() + if err != nil { + glog.V(3).Infof("skipped caching discovery info due to %v", err) + return liveGroups, err + } + if liveGroups == nil || len(liveGroups.Groups) == 0 { + glog.V(3).Infof("skipped caching discovery info, no groups found") + return liveGroups, err + } + + if err := d.writeCachedFile(filename, liveGroups); err != nil { + glog.V(3).Infof("failed to write cache to %v due to %v", filename, err) + } + + return liveGroups, nil +} + +func (d *CachedDiscoveryClient) getCachedFile(filename string) ([]byte, error) { + // after invalidation ignore cache files not created by this process + d.mutex.Lock() + _, ourFile := d.ourFiles[filename] + if d.invalidated && !ourFile { + d.mutex.Unlock() + return nil, errors.New("cache invalidated") + } + d.mutex.Unlock() + + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + fileInfo, err := file.Stat() + if err != nil { + return nil, err + } + + if time.Now().After(fileInfo.ModTime().Add(d.ttl)) { + return nil, errors.New("cache expired") + } + + // the cache is present and its valid. Try to read and use it. 
+ cachedBytes, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + + d.mutex.Lock() + defer d.mutex.Unlock() + d.fresh = d.fresh && ourFile + + return cachedBytes, nil +} + +func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Object) error { + if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil { + return err + } + + bytes, err := runtime.Encode(scheme.Codecs.LegacyCodec(), obj) + if err != nil { + return err + } + + f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+".") + if err != nil { + return err + } + defer os.Remove(f.Name()) + _, err = f.Write(bytes) + if err != nil { + return err + } + + err = os.Chmod(f.Name(), 0755) + if err != nil { + return err + } + + name := f.Name() + err = f.Close() + if err != nil { + return err + } + + // atomic rename + d.mutex.Lock() + defer d.mutex.Unlock() + err = os.Rename(name, filename) + if err == nil { + d.ourFiles[filename] = struct{}{} + } + return err +} + +func (d *CachedDiscoveryClient) RESTClient() restclient.Interface { + return d.delegate.RESTClient() +} + +func (d *CachedDiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return ServerPreferredResources(d) +} + +func (d *CachedDiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return ServerPreferredNamespacedResources(d) +} + +func (d *CachedDiscoveryClient) ServerVersion() (*version.Info, error) { + return d.delegate.ServerVersion() +} + +func (d *CachedDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { + return d.delegate.OpenAPISchema() +} + +func (d *CachedDiscoveryClient) Fresh() bool { + d.mutex.Lock() + defer d.mutex.Unlock() + + return d.fresh +} + +func (d *CachedDiscoveryClient) Invalidate() { + d.mutex.Lock() + defer d.mutex.Unlock() + + d.ourFiles = map[string]struct{}{} + d.fresh = true + d.invalidated = true +} + +// NewCachedDiscoveryClientForConfig creates a new DiscoveryClient for the given config, and wraps +// the created client in a CachedDiscoveryClient. The provided configuration is updated with a +// custom transport that understands cache responses. +// We receive two distinct cache directories for now, in order to preserve old behavior +// which makes use of the --cache-dir flag value for storing cache data from the CacheRoundTripper, +// and makes use of the hardcoded destination (~/.kube/cache/discovery/...) for storing +// CachedDiscoveryClient cache data. If httpCacheDir is empty, the restconfig's transport will not +// be updated with a roundtripper that understands cache responses. +// If discoveryCacheDir is empty, cached server resource data will be looked up in the current directory. +// TODO(juanvallejo): the value of "--cache-dir" should be honored. Consolidate discoveryCacheDir with httpCacheDir +// so that server resources and http-cache data are stored in the same location, provided via config flags. +func NewCachedDiscoveryClientForConfig(config *restclient.Config, discoveryCacheDir, httpCacheDir string, ttl time.Duration) (*CachedDiscoveryClient, error) { + if len(httpCacheDir) > 0 { + // update the given restconfig with a custom roundtripper that + // understands how to handle cache responses. 
+ wt := config.WrapTransport + config.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + if wt != nil { + rt = wt(rt) + } + return newCacheRoundTripper(httpCacheDir, rt) + } + } + + discoveryClient, err := NewDiscoveryClientForConfig(config) + if err != nil { + return nil, err + } + + return newCachedDiscoveryClient(discoveryClient, discoveryCacheDir, ttl), nil +} + +// NewCachedDiscoveryClient creates a new DiscoveryClient. cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. +func newCachedDiscoveryClient(delegate DiscoveryInterface, cacheDirectory string, ttl time.Duration) *CachedDiscoveryClient { + return &CachedDiscoveryClient{ + delegate: delegate, + cacheDirectory: cacheDirectory, + ttl: ttl, + ourFiles: map[string]struct{}{}, + fresh: true, + } +} diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index ff4c57a4f..a96602974 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -22,22 +22,32 @@ import ( "net/url" "sort" "strings" + "sync" + "time" - "github.com/emicklei/go-restful/swagger" + "github.com/golang/protobuf/proto" + "github.com/googleapis/gnostic/OpenAPIv2" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/version" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/kubernetes/scheme" restclient "k8s.io/client-go/rest" ) -// defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. ThirdPartyResources). -const defaultRetries = 2 +const ( + // defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. ThirdPartyResources). + defaultRetries = 2 + // protobuf mime type + mimePb = "application/com.github.proto-openapi.spec.v2@v1.0+protobuf" + // defaultTimeout is the maximum amount of time per request when no timeout has been set on a RESTClient. + // Defaults to 32s in order to have a distinguishable length of time, relative to other timeouts that exist. + defaultTimeout = 32 * time.Second +) // DiscoveryInterface holds the methods that discover server-supported API groups, // versions and resources. @@ -46,13 +56,17 @@ type DiscoveryInterface interface { ServerGroupsInterface ServerResourcesInterface ServerVersionInterface - SwaggerSchemaInterface + OpenAPISchemaInterface } // CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness. type CachedDiscoveryInterface interface { DiscoveryInterface - // Fresh returns true if no cached data was used that had been retrieved before the instantiation. + // Fresh is supposed to tell the caller whether or not to retry if the cache + // fails to find something (false = retry, true = no need to retry). + // + // TODO: this needs to be revisited, this interface can't be locked properly + // and doesn't make a lot of sense. Fresh() bool // Invalidate enforces that no cached data is used in the future that is older than the current time. 
Invalidate() @@ -85,10 +99,10 @@ type ServerVersionInterface interface { ServerVersion() (*version.Info, error) } -// SwaggerSchemaInterface has a method to retrieve the swagger schema. -type SwaggerSchemaInterface interface { - // SwaggerSchema retrieves and parses the swagger API schema the server supports. - SwaggerSchema(version schema.GroupVersion) (*swagger.ApiDeclaration, error) +// OpenAPISchemaInterface has a method to retrieve the open API schema. +type OpenAPISchemaInterface interface { + // OpenAPISchema retrieves and parses the swagger API schema the server supports. + OpenAPISchema() (*openapi_v2.Document, error) } // DiscoveryClient implements the functions that discover server-supported API groups, @@ -141,9 +155,9 @@ func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err apiGroupList = &metav1.APIGroupList{} } - // append the group retrieved from /api to the list if not empty + // prepend the group retrieved from /api to the list if not empty if len(v.Versions) != 0 { - apiGroupList.Groups = append(apiGroupList.Groups, apiGroup) + apiGroupList.Groups = append([]metav1.APIGroup{apiGroup}, apiGroupList.Groups...) } return apiGroupList, nil } @@ -174,37 +188,8 @@ func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (r } // serverResources returns the supported resources for all groups and versions. -func (d *DiscoveryClient) serverResources(failEarly bool) ([]*metav1.APIResourceList, error) { - apiGroups, err := d.ServerGroups() - if err != nil { - return nil, err - } - - result := []*metav1.APIResourceList{} - failedGroups := make(map[schema.GroupVersion]error) - - for _, apiGroup := range apiGroups.Groups { - for _, version := range apiGroup.Versions { - gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} - resources, err := d.ServerResourcesForGroupVersion(version.GroupVersion) - if err != nil { - // TODO: maybe restrict this to NotFound errors - failedGroups[gv] = err - if failEarly { - return nil, &ErrGroupDiscoveryFailed{Groups: failedGroups} - } - continue - } - - result = append(result, resources) - } - } - - if len(failedGroups) == 0 { - return result, nil - } - - return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} +func (d *DiscoveryClient) serverResources() ([]*metav1.APIResourceList, error) { + return ServerResources(d) } // ServerResources returns the supported resources for all groups and versions. @@ -236,15 +221,47 @@ func IsGroupDiscoveryFailedError(err error) bool { } // serverPreferredResources returns the supported resources with the version preferred by the server. -func (d *DiscoveryClient) serverPreferredResources(failEarly bool) ([]*metav1.APIResourceList, error) { +func (d *DiscoveryClient) serverPreferredResources() ([]*metav1.APIResourceList, error) { + return ServerPreferredResources(d) +} + +// ServerResources uses the provided discovery interface to look up supported resources for all groups and versions. 
+func ServerResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { + apiGroups, err := d.ServerGroups() + if err != nil { + return nil, err + } + + groupVersionResources, failedGroups := fetchGroupVersionResources(d, apiGroups) + + // order results by group/version discovery order + result := []*metav1.APIResourceList{} + for _, apiGroup := range apiGroups.Groups { + for _, version := range apiGroup.Versions { + gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} + if resources, ok := groupVersionResources[gv]; ok { + result = append(result, resources) + } + } + } + + if len(failedGroups) == 0 { + return result, nil + } + + return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} +} + +// ServerPreferredResources uses the provided discovery interface to look up preferred resources +func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { serverGroupList, err := d.ServerGroups() if err != nil { return nil, err } - result := []*metav1.APIResourceList{} - failedGroups := make(map[schema.GroupVersion]error) + groupVersionResources, failedGroups := fetchGroupVersionResources(d, serverGroupList) + result := []*metav1.APIResourceList{} grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource grApiResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource gvApiResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping @@ -252,13 +269,9 @@ func (d *DiscoveryClient) serverPreferredResources(failEarly bool) ([]*metav1.AP for _, apiGroup := range serverGroupList.Groups { for _, version := range apiGroup.Versions { groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} - apiResourceList, err := d.ServerResourcesForGroupVersion(version.GroupVersion) - if err != nil { - // TODO: maybe restrict this to NotFound errors - failedGroups[groupVersion] = err - if failEarly { - return nil, &ErrGroupDiscoveryFailed{Groups: failedGroups} - } + + apiResourceList, ok := groupVersionResources[groupVersion] + if !ok { continue } @@ -300,18 +313,56 @@ func (d *DiscoveryClient) serverPreferredResources(failEarly bool) ([]*metav1.AP return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} } +// fetchServerResourcesForGroupVersions uses the discovery client to fetch the resources for the specified groups in parallel +func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroupList) (map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error) { + groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) + failedGroups := make(map[schema.GroupVersion]error) + + wg := &sync.WaitGroup{} + resultLock := &sync.Mutex{} + for _, apiGroup := range apiGroups.Groups { + for _, version := range apiGroup.Versions { + groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} + wg.Add(1) + go func() { + defer wg.Done() + defer utilruntime.HandleCrash() + + apiResourceList, err := d.ServerResourcesForGroupVersion(groupVersion.String()) + + // lock to record results + resultLock.Lock() + defer resultLock.Unlock() + + if err != nil { + // TODO: maybe restrict this to NotFound errors + failedGroups[groupVersion] = err + } else { + groupVersionResources[groupVersion] = apiResourceList + } + }() + } + } + wg.Wait() + + return groupVersionResources, failedGroups +} + // ServerPreferredResources returns the supported 
resources with the version preferred by the // server. func (d *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { - return withRetries(defaultRetries, func(retryEarly bool) ([]*metav1.APIResourceList, error) { - return d.serverPreferredResources(retryEarly) - }) + return withRetries(defaultRetries, d.serverPreferredResources) } // ServerPreferredNamespacedResources returns the supported namespaced resources with the // version preferred by the server. func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { - all, err := d.ServerPreferredResources() + return ServerPreferredNamespacedResources(d) +} + +// ServerPreferredNamespacedResources uses the provided discovery interface to look up preferred namespaced resources +func ServerPreferredNamespacedResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { + all, err := ServerPreferredResources(d) return FilteredBy(ResourcePredicateFunc(func(groupVersion string, r *metav1.APIResource) bool { return r.Namespaced }), all), err @@ -331,47 +382,35 @@ func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { return &info, nil } -// SwaggerSchema retrieves and parses the swagger API schema the server supports. -func (d *DiscoveryClient) SwaggerSchema(version schema.GroupVersion) (*swagger.ApiDeclaration, error) { - if version.Empty() { - return nil, fmt.Errorf("groupVersion cannot be empty") +// OpenAPISchema fetches the open api schema using a rest client and parses the proto. +func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { + data, err := d.restClient.Get().AbsPath("/openapi/v2").SetHeader("Accept", mimePb).Do().Raw() + if err != nil { + if errors.IsForbidden(err) || errors.IsNotFound(err) || errors.IsNotAcceptable(err) { + // single endpoint not found/registered in old server, try to fetch old endpoint + // TODO(roycaihw): remove this in 1.11 + data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do().Raw() + if err != nil { + return nil, err + } + } else { + return nil, err + } } - - groupList, err := d.ServerGroups() + document := &openapi_v2.Document{} + err = proto.Unmarshal(data, document) if err != nil { return nil, err } - groupVersions := metav1.ExtractGroupVersions(groupList) - // This check also takes care the case that kubectl is newer than the running endpoint - if stringDoesntExistIn(version.String(), groupVersions) { - return nil, fmt.Errorf("API version: %v is not supported by the server. Use one of: %v", version, groupVersions) - } - var path string - if len(d.LegacyPrefix) > 0 && version == v1.SchemeGroupVersion { - path = "/swaggerapi" + d.LegacyPrefix + "/" + version.Version - } else { - path = "/swaggerapi/apis/" + version.Group + "/" + version.Version - } - - body, err := d.restClient.Get().AbsPath(path).Do().Raw() - if err != nil { - return nil, err - } - var schema swagger.ApiDeclaration - err = json.Unmarshal(body, &schema) - if err != nil { - return nil, fmt.Errorf("got '%s': %v", string(body), err) - } - return &schema, nil + return document, nil } // withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns. 
-func withRetries(maxRetries int, f func(failEarly bool) ([]*metav1.APIResourceList, error)) ([]*metav1.APIResourceList, error) { +func withRetries(maxRetries int, f func() ([]*metav1.APIResourceList, error)) ([]*metav1.APIResourceList, error) { var result []*metav1.APIResourceList var err error for i := 0; i < maxRetries; i++ { - failEarly := i < maxRetries-1 - result, err = f(failEarly) + result, err = f() if err == nil { return result, nil } @@ -385,7 +424,10 @@ func withRetries(maxRetries int, f func(failEarly bool) ([]*metav1.APIResourceLi func setDiscoveryDefaults(config *restclient.Config) error { config.APIPath = "" config.GroupVersion = nil - codec := runtime.NoopEncoder{Decoder: api.Codecs.UniversalDecoder()} + if config.Timeout == 0 { + config.Timeout = defaultTimeout + } + codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()} config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec}) if len(config.UserAgent) == 0 { config.UserAgent = restclient.DefaultKubernetesUserAgent() @@ -404,7 +446,7 @@ func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) return &DiscoveryClient{restClient: client, LegacyPrefix: "/api"}, err } -// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. If +// NewDiscoveryClientForConfigOrDie creates a new DiscoveryClient for the given config. If // there is an error, it panics. func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient { client, err := NewDiscoveryClientForConfig(c) @@ -415,20 +457,11 @@ func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient { } -// New creates a new DiscoveryClient for the given RESTClient. +// NewDiscoveryClient returns a new DiscoveryClient for the given RESTClient. func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"} } -func stringDoesntExistIn(str string, slice []string) bool { - for _, s := range slice { - if s == str { - return false - } - } - return true -} - // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *DiscoveryClient) RESTClient() restclient.Interface { diff --git a/vendor/k8s.io/client-go/discovery/fake/discovery.go b/vendor/k8s.io/client-go/discovery/fake/discovery.go index 8fbed28d9..984a0ba1e 100644 --- a/vendor/k8s.io/client-go/discovery/fake/discovery.go +++ b/vendor/k8s.io/client-go/discovery/fake/discovery.go @@ -19,19 +19,21 @@ package fake import ( "fmt" - "github.com/emicklei/go-restful/swagger" + "github.com/googleapis/gnostic/OpenAPIv2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/version" - "k8s.io/client-go/pkg/api/v1" kubeversion "k8s.io/client-go/pkg/version" restclient "k8s.io/client-go/rest" "k8s.io/client-go/testing" ) +// FakeDiscovery implements discovery.DiscoveryInterface and sometimes calls testing.Fake.Invoke with an action, +// but doesn't respect the return value if any. There is a way to fake static values like ServerVersion by using the Faked... fields on the struct. 
type FakeDiscovery struct { *testing.Fake + FakedServerVersion *version.Info } func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { @@ -66,30 +68,62 @@ func (c *FakeDiscovery) ServerPreferredNamespacedResources() ([]*metav1.APIResou } func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { - return nil, nil + action := testing.ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "group"}, + } + c.Invokes(action, nil) + + groups := map[string]*metav1.APIGroup{} + + for _, res := range c.Resources { + gv, err := schema.ParseGroupVersion(res.GroupVersion) + if err != nil { + return nil, err + } + group := groups[gv.Group] + if group == nil { + group = &metav1.APIGroup{ + Name: gv.Group, + PreferredVersion: metav1.GroupVersionForDiscovery{ + GroupVersion: res.GroupVersion, + Version: gv.Version, + }, + } + groups[gv.Group] = group + } + + group.Versions = append(group.Versions, metav1.GroupVersionForDiscovery{ + GroupVersion: res.GroupVersion, + Version: gv.Version, + }) + } + + list := &metav1.APIGroupList{} + for _, apiGroup := range groups { + list.Groups = append(list.Groups, *apiGroup) + } + + return list, nil + } func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { action := testing.ActionImpl{} action.Verb = "get" action.Resource = schema.GroupVersionResource{Resource: "version"} - c.Invokes(action, nil) + + if c.FakedServerVersion != nil { + return c.FakedServerVersion, nil + } + versionInfo := kubeversion.Get() return &versionInfo, nil } -func (c *FakeDiscovery) SwaggerSchema(version schema.GroupVersion) (*swagger.ApiDeclaration, error) { - action := testing.ActionImpl{} - action.Verb = "get" - if version == v1.SchemeGroupVersion { - action.Resource = schema.GroupVersionResource{Resource: "/swaggerapi/api/" + version.Version} - } else { - action.Resource = schema.GroupVersionResource{Resource: "/swaggerapi/apis/" + version.Group + "/" + version.Version} - } - - c.Invokes(action, nil) - return &swagger.ApiDeclaration{}, nil +func (c *FakeDiscovery) OpenAPISchema() (*openapi_v2.Document, error) { + return &openapi_v2.Document{}, nil } func (c *FakeDiscovery) RESTClient() restclient.Interface { diff --git a/vendor/k8s.io/client-go/discovery/helper.go b/vendor/k8s.io/client-go/discovery/helper.go index f184bc929..353d34b3c 100644 --- a/vendor/k8s.io/client-go/discovery/helper.go +++ b/vendor/k8s.io/client-go/discovery/helper.go @@ -41,25 +41,13 @@ func MatchesServerVersion(clientVersion apimachineryversion.Info, client Discove return nil } -// NegotiateVersion queries the server's supported api versions to find -// a version that both client and server support. -// - If no version is provided, try registered client versions in order of -// preference. -// - If version is provided and the server does not support it, -// return an error. -// TODO negotiation should be reserved for cases where we need a version for a given group. In those cases, it should return an ordered list of -// server preferences. From that list, a separate function can match from an ordered list of client versions. -// This is not what the function has ever done before, but it makes more logical sense. 
-func NegotiateVersion(client DiscoveryInterface, requiredGV *schema.GroupVersion, clientRegisteredGVs []schema.GroupVersion) (*schema.GroupVersion, error) { - clientVersions := sets.String{} - for _, gv := range clientRegisteredGVs { - clientVersions.Insert(gv.String()) - } +// ServerSupportsVersion returns an error if the server doesn't have the required version +func ServerSupportsVersion(client DiscoveryInterface, requiredGV schema.GroupVersion) error { groups, err := client.ServerGroups() if err != nil { // This is almost always a connection error, and higher level code should treat this as a generic error, // not a negotiation specific error. - return nil, err + return err } versions := metav1.ExtractGroupVersions(groups) serverVersions := sets.String{} @@ -67,46 +55,17 @@ func NegotiateVersion(client DiscoveryInterface, requiredGV *schema.GroupVersion serverVersions.Insert(v) } - // If version explicitly requested verify that both client and server support it. - // If server does not support warn, but try to negotiate a lower version. - if requiredGV != nil { - if !clientVersions.Has(requiredGV.String()) { - return nil, fmt.Errorf("client does not support API version %q; client supported API versions: %v", requiredGV, clientVersions) - - } - // If the server supports no versions, then we should just use the preferredGV - // This can happen because discovery fails due to 403 Forbidden errors - if len(serverVersions) == 0 { - return requiredGV, nil - } - if serverVersions.Has(requiredGV.String()) { - return requiredGV, nil - } - // If we are using an explicit config version the server does not support, fail. - return nil, fmt.Errorf("server does not support API version %q", requiredGV) + if serverVersions.Has(requiredGV.String()) { + return nil } - for _, clientGV := range clientRegisteredGVs { - if serverVersions.Has(clientGV.String()) { - // Version was not explicitly requested in command config (--api-version). - // Ok to fall back to a supported version with a warning. - // TODO: caesarxuchao: enable the warning message when we have - // proper fix. Please refer to issue #14895. - // if len(version) != 0 { - // glog.Warningf("Server does not support API version '%s'. Falling back to '%s'.", version, clientVersion) - // } - t := clientGV - return &t, nil - } + // If the server supports no versions, then we should pretend it has the version because of old servers. + // This can happen because discovery fails due to 403 Forbidden errors + if len(serverVersions) == 0 { + return nil } - // if we have no server versions and we have no required version, choose the first clientRegisteredVersion - if len(serverVersions) == 0 && len(clientRegisteredGVs) > 0 { - return &clientRegisteredGVs[0], nil - } - - // fall back to an empty GroupVersion. Most client commands no longer respect a GroupVersion anyway - return &schema.GroupVersion{}, nil + return fmt.Errorf("server does not support API version %q", requiredGV) } // GroupVersionResources converts APIResourceLists to the GroupVersionResources. diff --git a/vendor/k8s.io/client-go/discovery/restmapper.go b/vendor/k8s.io/client-go/discovery/restmapper.go deleted file mode 100644 index 9b0769a18..000000000 --- a/vendor/k8s.io/client-go/discovery/restmapper.go +++ /dev/null @@ -1,320 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "fmt" - "sync" - - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/golang/glog" -) - -// APIGroupResources is an API group with a mapping of versions to -// resources. -type APIGroupResources struct { - Group metav1.APIGroup - // A mapping of version string to a slice of APIResources for - // that version. - VersionedResources map[string][]metav1.APIResource -} - -// NewRESTMapper returns a PriorityRESTMapper based on the discovered -// groups and resources passed in. -func NewRESTMapper(groupResources []*APIGroupResources, versionInterfaces meta.VersionInterfacesFunc) meta.RESTMapper { - unionMapper := meta.MultiRESTMapper{} - - var groupPriority []string - // /v1 is special. It should always come first - resourcePriority := []schema.GroupVersionResource{{Group: "", Version: "v1", Resource: meta.AnyResource}} - kindPriority := []schema.GroupVersionKind{{Group: "", Version: "v1", Kind: meta.AnyKind}} - - for _, group := range groupResources { - groupPriority = append(groupPriority, group.Group.Name) - - if len(group.Group.PreferredVersion.Version) != 0 { - preferred := group.Group.PreferredVersion.Version - if _, ok := group.VersionedResources[preferred]; ok { - resourcePriority = append(resourcePriority, schema.GroupVersionResource{ - Group: group.Group.Name, - Version: group.Group.PreferredVersion.Version, - Resource: meta.AnyResource, - }) - - kindPriority = append(kindPriority, schema.GroupVersionKind{ - Group: group.Group.Name, - Version: group.Group.PreferredVersion.Version, - Kind: meta.AnyKind, - }) - } - } - - for _, discoveryVersion := range group.Group.Versions { - resources, ok := group.VersionedResources[discoveryVersion.Version] - if !ok { - continue - } - - gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} - versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}, versionInterfaces) - - for _, resource := range resources { - scope := meta.RESTScopeNamespace - if !resource.Namespaced { - scope = meta.RESTScopeRoot - } - versionMapper.Add(gv.WithKind(resource.Kind), scope) - // TODO only do this if it supports listing - versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope) - } - // TODO why is this type not in discovery (at least for "v1") - versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot) - unionMapper = append(unionMapper, versionMapper) - } - } - - for _, group := range groupPriority { - resourcePriority = append(resourcePriority, schema.GroupVersionResource{ - Group: group, - Version: meta.AnyVersion, - Resource: meta.AnyResource, - }) - kindPriority = append(kindPriority, schema.GroupVersionKind{ - Group: group, - Version: meta.AnyVersion, - Kind: meta.AnyKind, - }) - } - - return meta.PriorityRESTMapper{ - Delegate: unionMapper, - ResourcePriority: resourcePriority, - KindPriority: kindPriority, - } -} - -// GetAPIGroupResources uses the provided discovery client to gather -// discovery information and populate a slice of APIGroupResources. 
-func GetAPIGroupResources(cl DiscoveryInterface) ([]*APIGroupResources, error) { - apiGroups, err := cl.ServerGroups() - if err != nil { - return nil, err - } - var result []*APIGroupResources - for _, group := range apiGroups.Groups { - groupResources := &APIGroupResources{ - Group: group, - VersionedResources: make(map[string][]metav1.APIResource), - } - for _, version := range group.Versions { - resources, err := cl.ServerResourcesForGroupVersion(version.GroupVersion) - if err != nil { - if errors.IsNotFound(err) { - continue // ignore as this can race with deletion of 3rd party APIs - } - return nil, err - } - groupResources.VersionedResources[version.Version] = resources.APIResources - } - result = append(result, groupResources) - } - return result, nil -} - -// DeferredDiscoveryRESTMapper is a RESTMapper that will defer -// initialization of the RESTMapper until the first mapping is -// requested. -type DeferredDiscoveryRESTMapper struct { - initMu sync.Mutex - delegate meta.RESTMapper - cl CachedDiscoveryInterface - versionInterface meta.VersionInterfacesFunc -} - -// NewDeferredDiscoveryRESTMapper returns a -// DeferredDiscoveryRESTMapper that will lazily query the provided -// client for discovery information to do REST mappings. -func NewDeferredDiscoveryRESTMapper(cl CachedDiscoveryInterface, versionInterface meta.VersionInterfacesFunc) *DeferredDiscoveryRESTMapper { - return &DeferredDiscoveryRESTMapper{ - cl: cl, - versionInterface: versionInterface, - } -} - -func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { - d.initMu.Lock() - defer d.initMu.Unlock() - - if d.delegate != nil { - return d.delegate, nil - } - - groupResources, err := GetAPIGroupResources(d.cl) - if err != nil { - return nil, err - } - - d.delegate = NewRESTMapper(groupResources, d.versionInterface) - return d.delegate, err -} - -// Reset resets the internally cached Discovery information and will -// cause the next mapping request to re-discover. -func (d *DeferredDiscoveryRESTMapper) Reset() { - glog.V(5).Info("Invalidating discovery information") - - d.initMu.Lock() - defer d.initMu.Unlock() - - d.cl.Invalidate() - d.delegate = nil -} - -// KindFor takes a partial resource and returns back the single match. -// It returns an error if there are multiple matches. -func (d *DeferredDiscoveryRESTMapper) KindFor(resource schema.GroupVersionResource) (gvk schema.GroupVersionKind, err error) { - del, err := d.getDelegate() - if err != nil { - return schema.GroupVersionKind{}, err - } - gvk, err = del.KindFor(resource) - if err != nil && !d.cl.Fresh() { - d.Reset() - gvk, err = d.KindFor(resource) - } - return -} - -// KindsFor takes a partial resource and returns back the list of -// potential kinds in priority order. -func (d *DeferredDiscoveryRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvks []schema.GroupVersionKind, err error) { - del, err := d.getDelegate() - if err != nil { - return nil, err - } - gvks, err = del.KindsFor(resource) - if len(gvks) == 0 && !d.cl.Fresh() { - d.Reset() - gvks, err = d.KindsFor(resource) - } - return -} - -// ResourceFor takes a partial resource and returns back the single -// match. It returns an error if there are multiple matches. 
-func (d *DeferredDiscoveryRESTMapper) ResourceFor(input schema.GroupVersionResource) (gvr schema.GroupVersionResource, err error) { - del, err := d.getDelegate() - if err != nil { - return schema.GroupVersionResource{}, err - } - gvr, err = del.ResourceFor(input) - if err != nil && !d.cl.Fresh() { - d.Reset() - gvr, err = d.ResourceFor(input) - } - return -} - -// ResourcesFor takes a partial resource and returns back the list of -// potential resource in priority order. -func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input schema.GroupVersionResource) (gvrs []schema.GroupVersionResource, err error) { - del, err := d.getDelegate() - if err != nil { - return nil, err - } - gvrs, err = del.ResourcesFor(input) - if len(gvrs) == 0 && !d.cl.Fresh() { - d.Reset() - gvrs, err = d.ResourcesFor(input) - } - return -} - -// RESTMapping identifies a preferred resource mapping for the -// provided group kind. -func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (m *meta.RESTMapping, err error) { - del, err := d.getDelegate() - if err != nil { - return nil, err - } - m, err = del.RESTMapping(gk, versions...) - if err != nil && !d.cl.Fresh() { - d.Reset() - m, err = d.RESTMapping(gk, versions...) - } - return -} - -// RESTMappings returns the RESTMappings for the provided group kind -// in a rough internal preferred order. If no kind is found, it will -// return a NoResourceMatchError. -func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) (ms []*meta.RESTMapping, err error) { - del, err := d.getDelegate() - if err != nil { - return nil, err - } - ms, err = del.RESTMappings(gk, versions...) - if len(ms) == 0 && !d.cl.Fresh() { - d.Reset() - ms, err = d.RESTMappings(gk, versions...) - } - return -} - -// AliasesForResource returns whether a resource has an alias or not. -func (d *DeferredDiscoveryRESTMapper) AliasesForResource(resource string) (as []string, ok bool) { - del, err := d.getDelegate() - if err != nil { - return nil, false - } - as, ok = del.AliasesForResource(resource) - if len(as) == 0 && !d.cl.Fresh() { - d.Reset() - as, ok = d.AliasesForResource(resource) - } - return -} - -// ResourceSingularizer converts a resource name from plural to -// singular (e.g., from pods to pod). -func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { - del, err := d.getDelegate() - if err != nil { - return resource, err - } - singular, err = del.ResourceSingularizer(resource) - if err != nil && !d.cl.Fresh() { - d.Reset() - singular, err = d.ResourceSingularizer(resource) - } - return -} - -func (d *DeferredDiscoveryRESTMapper) String() string { - del, err := d.getDelegate() - if err != nil { - return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err) - } - return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del) -} - -// Make sure it satisfies the interface -var _ meta.RESTMapper = &DeferredDiscoveryRESTMapper{} diff --git a/vendor/k8s.io/client-go/discovery/round_tripper.go b/vendor/k8s.io/client-go/discovery/round_tripper.go new file mode 100644 index 000000000..2e352b888 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/round_tripper.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package transport provides a round tripper capable of caching HTTP responses. +package discovery + +import ( + "net/http" + "path/filepath" + + "github.com/gregjones/httpcache" + "github.com/gregjones/httpcache/diskcache" + "github.com/peterbourgon/diskv" +) + +type cacheRoundTripper struct { + rt *httpcache.Transport +} + +// newCacheRoundTripper creates a roundtripper that reads the ETag on +// response headers and send the If-None-Match header on subsequent +// corresponding requests. +func newCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper { + d := diskv.New(diskv.Options{ + BasePath: cacheDir, + TempDir: filepath.Join(cacheDir, ".diskv-temp"), + }) + t := httpcache.NewTransport(diskcache.NewWithDiskv(d)) + t.Transport = rt + + return &cacheRoundTripper{rt: t} +} + +func (rt *cacheRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return rt.rt.RoundTrip(req) +} + +func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport } diff --git a/vendor/k8s.io/client-go/discovery/unstructured.go b/vendor/k8s.io/client-go/discovery/unstructured.go index ee72d668b..81913a414 100644 --- a/vendor/k8s.io/client-go/discovery/unstructured.go +++ b/vendor/k8s.io/client-go/discovery/unstructured.go @@ -17,79 +17,65 @@ limitations under the License. package discovery import ( - "fmt" + "reflect" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -// UnstructuredObjectTyper provides a runtime.ObjectTyper implmentation for +// UnstructuredObjectTyper provides a runtime.ObjectTyper implementation for // runtime.Unstructured object based on discovery information. type UnstructuredObjectTyper struct { - registered map[schema.GroupVersionKind]bool + typers []runtime.ObjectTyper } // NewUnstructuredObjectTyper returns a runtime.ObjectTyper for -// unstructred objects based on discovery information. -func NewUnstructuredObjectTyper(groupResources []*APIGroupResources) *UnstructuredObjectTyper { - dot := &UnstructuredObjectTyper{registered: make(map[schema.GroupVersionKind]bool)} - for _, group := range groupResources { - for _, discoveryVersion := range group.Group.Versions { - resources, ok := group.VersionedResources[discoveryVersion.Version] - if !ok { - continue - } - - gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} - for _, resource := range resources { - dot.registered[gv.WithKind(resource.Kind)] = true - } - } +// unstructured objects based on discovery information. It accepts a list of fallback typers +// for handling objects that are not runtime.Unstructured. It does not delegate the Recognizes +// check, only ObjectKinds. +// TODO this only works for the apiextensions server and doesn't recognize any types. Move to point of use. +func NewUnstructuredObjectTyper(typers ...runtime.ObjectTyper) *UnstructuredObjectTyper { + dot := &UnstructuredObjectTyper{ + typers: typers, } return dot } -// ObjectKind returns the group,version,kind of the provided object, or an error -// if the object in not runtime.Unstructured or has no group,version,kind -// information. 
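For context on the round_tripper.go file added above: it routes discovery requests through an httpcache.Transport backed by a diskv store, so responses that carry an ETag are revalidated with If-None-Match instead of being refetched. A minimal sketch of the same wiring outside client-go follows; the helper name newCachingClient and the cache directory are illustrative, not part of the vendored code.

package example

import (
	"net/http"
	"path/filepath"

	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
	"github.com/peterbourgon/diskv"
)

// newCachingClient mirrors the wiring used by newCacheRoundTripper above:
// a diskv-backed httpcache.Transport wraps the base RoundTripper and handles
// ETag / If-None-Match revalidation transparently on cache hits.
func newCachingClient(cacheDir string, base http.RoundTripper) *http.Client {
	d := diskv.New(diskv.Options{
		BasePath: cacheDir,
		TempDir:  filepath.Join(cacheDir, ".diskv-temp"),
	})
	t := httpcache.NewTransport(diskcache.NewWithDiskv(d))
	t.Transport = base // fall through to the real transport on cache misses
	return &http.Client{Transport: t}
}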
-func (d *UnstructuredObjectTyper) ObjectKind(obj runtime.Object) (schema.GroupVersionKind, error) { - if _, ok := obj.(runtime.Unstructured); !ok { - return schema.GroupVersionKind{}, fmt.Errorf("type %T is invalid for dynamic object typer", obj) - } - - return obj.GetObjectKind().GroupVersionKind(), nil -} - // ObjectKinds returns a slice of one element with the group,version,kind of the // provided object, or an error if the object is not runtime.Unstructured or // has no group,version,kind information. unversionedType will always be false // because runtime.Unstructured object should always have group,version,kind // information set. func (d *UnstructuredObjectTyper) ObjectKinds(obj runtime.Object) (gvks []schema.GroupVersionKind, unversionedType bool, err error) { - gvk, err := d.ObjectKind(obj) - if err != nil { - return nil, false, err + if _, ok := obj.(runtime.Unstructured); ok { + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, false, runtime.NewMissingKindErr("object has no kind field ") + } + if len(gvk.Version) == 0 { + return nil, false, runtime.NewMissingVersionErr("object has no apiVersion field") + } + return []schema.GroupVersionKind{gvk}, false, nil } - - return []schema.GroupVersionKind{gvk}, false, nil + var lastErr error + for _, typer := range d.typers { + gvks, unversioned, err := typer.ObjectKinds(obj) + if err != nil { + lastErr = err + continue + } + return gvks, unversioned, nil + } + if lastErr == nil { + lastErr = runtime.NewNotRegisteredErrForType(reflect.TypeOf(obj)) + } + return nil, false, lastErr } // Recognizes returns true if the provided group,version,kind was in the // discovery information. func (d *UnstructuredObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { - return d.registered[gvk] -} - -// IsUnversioned returns false always because runtime.Unstructured objects -// should always have group,version,kind information set. ok will be true if the -// object's group,version,kind is api.Registry. -func (d *UnstructuredObjectTyper) IsUnversioned(obj runtime.Object) (unversioned bool, ok bool) { - gvk, err := d.ObjectKind(obj) - if err != nil { - return false, false - } - - return false, d.registered[gvk] + return false } var _ runtime.ObjectTyper = &UnstructuredObjectTyper{} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 5fd0c54e8..9d0eace46 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,28 +14,40 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
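With the rewrite above, UnstructuredObjectTyper no longer builds a registry from discovery data: ObjectKinds reads the group/version/kind straight off runtime.Unstructured objects and only falls back to the supplied typers for typed objects, while Recognizes now always returns false. A rough usage sketch (not from the vendored code; the variables are illustrative):

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/client-go/discovery"
	clientsetscheme "k8s.io/client-go/kubernetes/scheme"
)

func printGVK() error {
	// Fall back to the clientset scheme for typed (non-unstructured) objects.
	typer := discovery.NewUnstructuredObjectTyper(clientsetscheme.Scheme)

	u := &unstructured.Unstructured{}
	u.SetAPIVersion("apps/v1")
	u.SetKind("Deployment")

	// For unstructured objects the GVK comes from apiVersion/kind directly;
	// a missing kind or apiVersion yields a MissingKind/MissingVersion error.
	gvks, _, err := typer.ObjectKinds(u)
	if err != nil {
		return err
	}
	fmt.Println(gvks[0]) // apps/v1, Kind=Deployment
	return nil
}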
+ package kubernetes import ( - glog "github.com/golang/glog" discovery "k8s.io/client-go/discovery" + admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" + admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" + appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" + appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" - autoscalingv2alpha1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1" + autoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1" batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" + batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" + rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" + schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" + schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" + storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -43,12 +55,15 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface - CoreV1() corev1.CoreV1Interface + AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface + AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface // Deprecated: please explicitly pick a version if possible. - Core() corev1.CoreV1Interface + Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface + AppsV1beta2() appsv1beta2.AppsV1beta2Interface + AppsV1() appsv1.AppsV1Interface // Deprecated: please explicitly pick a version if possible. - Apps() appsv1beta1.AppsV1beta1Interface + Apps() appsv1.AppsV1Interface AuthenticationV1() authenticationv1.AuthenticationV1Interface // Deprecated: please explicitly pick a version if possible. Authentication() authenticationv1.AuthenticationV1Interface @@ -60,24 +75,39 @@ type Interface interface { AutoscalingV1() autoscalingv1.AutoscalingV1Interface // Deprecated: please explicitly pick a version if possible. 
Autoscaling() autoscalingv1.AutoscalingV1Interface - AutoscalingV2alpha1() autoscalingv2alpha1.AutoscalingV2alpha1Interface + AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface BatchV1() batchv1.BatchV1Interface // Deprecated: please explicitly pick a version if possible. Batch() batchv1.BatchV1Interface + BatchV1beta1() batchv1beta1.BatchV1beta1Interface BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface // Deprecated: please explicitly pick a version if possible. Certificates() certificatesv1beta1.CertificatesV1beta1Interface + CoreV1() corev1.CoreV1Interface + // Deprecated: please explicitly pick a version if possible. + Core() corev1.CoreV1Interface + EventsV1beta1() eventsv1beta1.EventsV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Events() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface // Deprecated: please explicitly pick a version if possible. Extensions() extensionsv1beta1.ExtensionsV1beta1Interface + NetworkingV1() networkingv1.NetworkingV1Interface + // Deprecated: please explicitly pick a version if possible. + Networking() networkingv1.NetworkingV1Interface PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface // Deprecated: please explicitly pick a version if possible. Policy() policyv1beta1.PolicyV1beta1Interface - RbacV1beta1() rbacv1beta1.RbacV1beta1Interface + RbacV1() rbacv1.RbacV1Interface // Deprecated: please explicitly pick a version if possible. - Rbac() rbacv1beta1.RbacV1beta1Interface + Rbac() rbacv1.RbacV1Interface + RbacV1beta1() rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface + SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface + SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Scheduling() schedulingv1beta1.SchedulingV1beta1Interface SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface // Deprecated: please explicitly pick a version if possible. Settings() settingsv1alpha1.SettingsV1alpha1Interface @@ -85,282 +115,283 @@ type Interface interface { StorageV1() storagev1.StorageV1Interface // Deprecated: please explicitly pick a version if possible. Storage() storagev1.StorageV1Interface + StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface } // Clientset contains the clients for groups. Each group has exactly one // version included in a Clientset. 
type Clientset struct { *discovery.DiscoveryClient - *corev1.CoreV1Client - *appsv1beta1.AppsV1beta1Client - *authenticationv1.AuthenticationV1Client - *authenticationv1beta1.AuthenticationV1beta1Client - *authorizationv1.AuthorizationV1Client - *authorizationv1beta1.AuthorizationV1beta1Client - *autoscalingv1.AutoscalingV1Client - *autoscalingv2alpha1.AutoscalingV2alpha1Client - *batchv1.BatchV1Client - *batchv2alpha1.BatchV2alpha1Client - *certificatesv1beta1.CertificatesV1beta1Client - *extensionsv1beta1.ExtensionsV1beta1Client - *policyv1beta1.PolicyV1beta1Client - *rbacv1beta1.RbacV1beta1Client - *rbacv1alpha1.RbacV1alpha1Client - *settingsv1alpha1.SettingsV1alpha1Client - *storagev1beta1.StorageV1beta1Client - *storagev1.StorageV1Client + admissionregistrationV1alpha1 *admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Client + admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client + appsV1beta1 *appsv1beta1.AppsV1beta1Client + appsV1beta2 *appsv1beta2.AppsV1beta2Client + appsV1 *appsv1.AppsV1Client + authenticationV1 *authenticationv1.AuthenticationV1Client + authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client + authorizationV1 *authorizationv1.AuthorizationV1Client + authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client + autoscalingV1 *autoscalingv1.AutoscalingV1Client + autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client + batchV1 *batchv1.BatchV1Client + batchV1beta1 *batchv1beta1.BatchV1beta1Client + batchV2alpha1 *batchv2alpha1.BatchV2alpha1Client + certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client + coreV1 *corev1.CoreV1Client + eventsV1beta1 *eventsv1beta1.EventsV1beta1Client + extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client + networkingV1 *networkingv1.NetworkingV1Client + policyV1beta1 *policyv1beta1.PolicyV1beta1Client + rbacV1 *rbacv1.RbacV1Client + rbacV1beta1 *rbacv1beta1.RbacV1beta1Client + rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client + schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client + schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client + settingsV1alpha1 *settingsv1alpha1.SettingsV1alpha1Client + storageV1beta1 *storagev1beta1.StorageV1beta1Client + storageV1 *storagev1.StorageV1Client + storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client } -// CoreV1 retrieves the CoreV1Client -func (c *Clientset) CoreV1() corev1.CoreV1Interface { - if c == nil { - return nil - } - return c.CoreV1Client +// AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client +func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface { + return c.admissionregistrationV1alpha1 } -// Deprecated: Core retrieves the default version of CoreClient. +// AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client +func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { + return c.admissionregistrationV1beta1 +} + +// Deprecated: Admissionregistration retrieves the default version of AdmissionregistrationClient. // Please explicitly pick a version. 
-func (c *Clientset) Core() corev1.CoreV1Interface { - if c == nil { - return nil - } - return c.CoreV1Client +func (c *Clientset) Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { + return c.admissionregistrationV1beta1 } // AppsV1beta1 retrieves the AppsV1beta1Client func (c *Clientset) AppsV1beta1() appsv1beta1.AppsV1beta1Interface { - if c == nil { - return nil - } - return c.AppsV1beta1Client + return c.appsV1beta1 +} + +// AppsV1beta2 retrieves the AppsV1beta2Client +func (c *Clientset) AppsV1beta2() appsv1beta2.AppsV1beta2Interface { + return c.appsV1beta2 +} + +// AppsV1 retrieves the AppsV1Client +func (c *Clientset) AppsV1() appsv1.AppsV1Interface { + return c.appsV1 } // Deprecated: Apps retrieves the default version of AppsClient. // Please explicitly pick a version. -func (c *Clientset) Apps() appsv1beta1.AppsV1beta1Interface { - if c == nil { - return nil - } - return c.AppsV1beta1Client +func (c *Clientset) Apps() appsv1.AppsV1Interface { + return c.appsV1 } // AuthenticationV1 retrieves the AuthenticationV1Client func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { - if c == nil { - return nil - } - return c.AuthenticationV1Client + return c.authenticationV1 } // Deprecated: Authentication retrieves the default version of AuthenticationClient. // Please explicitly pick a version. func (c *Clientset) Authentication() authenticationv1.AuthenticationV1Interface { - if c == nil { - return nil - } - return c.AuthenticationV1Client + return c.authenticationV1 } // AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { - if c == nil { - return nil - } - return c.AuthenticationV1beta1Client + return c.authenticationV1beta1 } // AuthorizationV1 retrieves the AuthorizationV1Client func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { - if c == nil { - return nil - } - return c.AuthorizationV1Client + return c.authorizationV1 } // Deprecated: Authorization retrieves the default version of AuthorizationClient. // Please explicitly pick a version. func (c *Clientset) Authorization() authorizationv1.AuthorizationV1Interface { - if c == nil { - return nil - } - return c.AuthorizationV1Client + return c.authorizationV1 } // AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface { - if c == nil { - return nil - } - return c.AuthorizationV1beta1Client + return c.authorizationV1beta1 } // AutoscalingV1 retrieves the AutoscalingV1Client func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { - if c == nil { - return nil - } - return c.AutoscalingV1Client + return c.autoscalingV1 } // Deprecated: Autoscaling retrieves the default version of AutoscalingClient. // Please explicitly pick a version. 
func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface { - if c == nil { - return nil - } - return c.AutoscalingV1Client + return c.autoscalingV1 } -// AutoscalingV2alpha1 retrieves the AutoscalingV2alpha1Client -func (c *Clientset) AutoscalingV2alpha1() autoscalingv2alpha1.AutoscalingV2alpha1Interface { - if c == nil { - return nil - } - return c.AutoscalingV2alpha1Client +// AutoscalingV2beta1 retrieves the AutoscalingV2beta1Client +func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface { + return c.autoscalingV2beta1 } // BatchV1 retrieves the BatchV1Client func (c *Clientset) BatchV1() batchv1.BatchV1Interface { - if c == nil { - return nil - } - return c.BatchV1Client + return c.batchV1 } // Deprecated: Batch retrieves the default version of BatchClient. // Please explicitly pick a version. func (c *Clientset) Batch() batchv1.BatchV1Interface { - if c == nil { - return nil - } - return c.BatchV1Client + return c.batchV1 +} + +// BatchV1beta1 retrieves the BatchV1beta1Client +func (c *Clientset) BatchV1beta1() batchv1beta1.BatchV1beta1Interface { + return c.batchV1beta1 } // BatchV2alpha1 retrieves the BatchV2alpha1Client func (c *Clientset) BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface { - if c == nil { - return nil - } - return c.BatchV2alpha1Client + return c.batchV2alpha1 } // CertificatesV1beta1 retrieves the CertificatesV1beta1Client func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface { - if c == nil { - return nil - } - return c.CertificatesV1beta1Client + return c.certificatesV1beta1 } // Deprecated: Certificates retrieves the default version of CertificatesClient. // Please explicitly pick a version. func (c *Clientset) Certificates() certificatesv1beta1.CertificatesV1beta1Interface { - if c == nil { - return nil - } - return c.CertificatesV1beta1Client + return c.certificatesV1beta1 +} + +// CoreV1 retrieves the CoreV1Client +func (c *Clientset) CoreV1() corev1.CoreV1Interface { + return c.coreV1 +} + +// Deprecated: Core retrieves the default version of CoreClient. +// Please explicitly pick a version. +func (c *Clientset) Core() corev1.CoreV1Interface { + return c.coreV1 +} + +// EventsV1beta1 retrieves the EventsV1beta1Client +func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { + return c.eventsV1beta1 +} + +// Deprecated: Events retrieves the default version of EventsClient. +// Please explicitly pick a version. +func (c *Clientset) Events() eventsv1beta1.EventsV1beta1Interface { + return c.eventsV1beta1 } // ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { - if c == nil { - return nil - } - return c.ExtensionsV1beta1Client + return c.extensionsV1beta1 } // Deprecated: Extensions retrieves the default version of ExtensionsClient. // Please explicitly pick a version. func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface { - if c == nil { - return nil - } - return c.ExtensionsV1beta1Client + return c.extensionsV1beta1 +} + +// NetworkingV1 retrieves the NetworkingV1Client +func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { + return c.networkingV1 +} + +// Deprecated: Networking retrieves the default version of NetworkingClient. +// Please explicitly pick a version. 
+func (c *Clientset) Networking() networkingv1.NetworkingV1Interface { + return c.networkingV1 } // PolicyV1beta1 retrieves the PolicyV1beta1Client func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { - if c == nil { - return nil - } - return c.PolicyV1beta1Client + return c.policyV1beta1 } // Deprecated: Policy retrieves the default version of PolicyClient. // Please explicitly pick a version. func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface { - if c == nil { - return nil - } - return c.PolicyV1beta1Client + return c.policyV1beta1 } -// RbacV1beta1 retrieves the RbacV1beta1Client -func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { - if c == nil { - return nil - } - return c.RbacV1beta1Client +// RbacV1 retrieves the RbacV1Client +func (c *Clientset) RbacV1() rbacv1.RbacV1Interface { + return c.rbacV1 } // Deprecated: Rbac retrieves the default version of RbacClient. // Please explicitly pick a version. -func (c *Clientset) Rbac() rbacv1beta1.RbacV1beta1Interface { - if c == nil { - return nil - } - return c.RbacV1beta1Client +func (c *Clientset) Rbac() rbacv1.RbacV1Interface { + return c.rbacV1 +} + +// RbacV1beta1 retrieves the RbacV1beta1Client +func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { + return c.rbacV1beta1 } // RbacV1alpha1 retrieves the RbacV1alpha1Client func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { - if c == nil { - return nil - } - return c.RbacV1alpha1Client + return c.rbacV1alpha1 +} + +// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client +func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface { + return c.schedulingV1alpha1 +} + +// SchedulingV1beta1 retrieves the SchedulingV1beta1Client +func (c *Clientset) SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface { + return c.schedulingV1beta1 +} + +// Deprecated: Scheduling retrieves the default version of SchedulingClient. +// Please explicitly pick a version. +func (c *Clientset) Scheduling() schedulingv1beta1.SchedulingV1beta1Interface { + return c.schedulingV1beta1 } // SettingsV1alpha1 retrieves the SettingsV1alpha1Client func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface { - if c == nil { - return nil - } - return c.SettingsV1alpha1Client + return c.settingsV1alpha1 } // Deprecated: Settings retrieves the default version of SettingsClient. // Please explicitly pick a version. func (c *Clientset) Settings() settingsv1alpha1.SettingsV1alpha1Interface { - if c == nil { - return nil - } - return c.SettingsV1alpha1Client + return c.settingsV1alpha1 } // StorageV1beta1 retrieves the StorageV1beta1Client func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { - if c == nil { - return nil - } - return c.StorageV1beta1Client + return c.storageV1beta1 } // StorageV1 retrieves the StorageV1Client func (c *Clientset) StorageV1() storagev1.StorageV1Interface { - if c == nil { - return nil - } - return c.StorageV1Client + return c.storageV1 } // Deprecated: Storage retrieves the default version of StorageClient. // Please explicitly pick a version. 
func (c *Clientset) Storage() storagev1.StorageV1Interface { - if c == nil { - return nil - } - return c.StorageV1Client + return c.storageV1 +} + +// StorageV1alpha1 retrieves the StorageV1alpha1Client +func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { + return c.storageV1alpha1 } // Discovery retrieves the DiscoveryClient @@ -379,82 +410,125 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { } var cs Clientset var err error - cs.CoreV1Client, err = corev1.NewForConfig(&configShallowCopy) + cs.admissionregistrationV1alpha1, err = admissionregistrationv1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.AppsV1beta1Client, err = appsv1beta1.NewForConfig(&configShallowCopy) + cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.AuthenticationV1Client, err = authenticationv1.NewForConfig(&configShallowCopy) + cs.appsV1beta1, err = appsv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.AuthenticationV1beta1Client, err = authenticationv1beta1.NewForConfig(&configShallowCopy) + cs.appsV1beta2, err = appsv1beta2.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.AuthorizationV1Client, err = authorizationv1.NewForConfig(&configShallowCopy) + cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.AuthorizationV1beta1Client, err = authorizationv1beta1.NewForConfig(&configShallowCopy) + cs.authenticationV1, err = authenticationv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.AutoscalingV1Client, err = autoscalingv1.NewForConfig(&configShallowCopy) + cs.authenticationV1beta1, err = authenticationv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.AutoscalingV2alpha1Client, err = autoscalingv2alpha1.NewForConfig(&configShallowCopy) + cs.authorizationV1, err = authorizationv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.BatchV1Client, err = batchv1.NewForConfig(&configShallowCopy) + cs.authorizationV1beta1, err = authorizationv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.BatchV2alpha1Client, err = batchv2alpha1.NewForConfig(&configShallowCopy) + cs.autoscalingV1, err = autoscalingv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.CertificatesV1beta1Client, err = certificatesv1beta1.NewForConfig(&configShallowCopy) + cs.autoscalingV2beta1, err = autoscalingv2beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.ExtensionsV1beta1Client, err = extensionsv1beta1.NewForConfig(&configShallowCopy) + cs.batchV1, err = batchv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.PolicyV1beta1Client, err = policyv1beta1.NewForConfig(&configShallowCopy) + cs.batchV1beta1, err = batchv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.RbacV1beta1Client, err = rbacv1beta1.NewForConfig(&configShallowCopy) + cs.batchV2alpha1, err = batchv2alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.RbacV1alpha1Client, err = rbacv1alpha1.NewForConfig(&configShallowCopy) + cs.certificatesV1beta1, err = certificatesv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.SettingsV1alpha1Client, err = settingsv1alpha1.NewForConfig(&configShallowCopy) + cs.coreV1, err = corev1.NewForConfig(&configShallowCopy) if err != nil { return nil, 
err } - cs.StorageV1beta1Client, err = storagev1beta1.NewForConfig(&configShallowCopy) + cs.eventsV1beta1, err = eventsv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.StorageV1Client, err = storagev1.NewForConfig(&configShallowCopy) + cs.extensionsV1beta1, err = extensionsv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.networkingV1, err = networkingv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.policyV1beta1, err = policyv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.rbacV1, err = rbacv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.rbacV1beta1, err = rbacv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.rbacV1alpha1, err = rbacv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.schedulingV1beta1, err = schedulingv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.settingsV1alpha1, err = settingsv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.storageV1beta1, err = storagev1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.storageV1, err = storagev1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.storageV1alpha1, err = storagev1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { - glog.Errorf("failed to create the DiscoveryClient: %v", err) return nil, err } return &cs, nil @@ -464,24 +538,35 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // panics if there is an error in the config. 
func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset - cs.CoreV1Client = corev1.NewForConfigOrDie(c) - cs.AppsV1beta1Client = appsv1beta1.NewForConfigOrDie(c) - cs.AuthenticationV1Client = authenticationv1.NewForConfigOrDie(c) - cs.AuthenticationV1beta1Client = authenticationv1beta1.NewForConfigOrDie(c) - cs.AuthorizationV1Client = authorizationv1.NewForConfigOrDie(c) - cs.AuthorizationV1beta1Client = authorizationv1beta1.NewForConfigOrDie(c) - cs.AutoscalingV1Client = autoscalingv1.NewForConfigOrDie(c) - cs.AutoscalingV2alpha1Client = autoscalingv2alpha1.NewForConfigOrDie(c) - cs.BatchV1Client = batchv1.NewForConfigOrDie(c) - cs.BatchV2alpha1Client = batchv2alpha1.NewForConfigOrDie(c) - cs.CertificatesV1beta1Client = certificatesv1beta1.NewForConfigOrDie(c) - cs.ExtensionsV1beta1Client = extensionsv1beta1.NewForConfigOrDie(c) - cs.PolicyV1beta1Client = policyv1beta1.NewForConfigOrDie(c) - cs.RbacV1beta1Client = rbacv1beta1.NewForConfigOrDie(c) - cs.RbacV1alpha1Client = rbacv1alpha1.NewForConfigOrDie(c) - cs.SettingsV1alpha1Client = settingsv1alpha1.NewForConfigOrDie(c) - cs.StorageV1beta1Client = storagev1beta1.NewForConfigOrDie(c) - cs.StorageV1Client = storagev1.NewForConfigOrDie(c) + cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.NewForConfigOrDie(c) + cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) + cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) + cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) + cs.appsV1 = appsv1.NewForConfigOrDie(c) + cs.authenticationV1 = authenticationv1.NewForConfigOrDie(c) + cs.authenticationV1beta1 = authenticationv1beta1.NewForConfigOrDie(c) + cs.authorizationV1 = authorizationv1.NewForConfigOrDie(c) + cs.authorizationV1beta1 = authorizationv1beta1.NewForConfigOrDie(c) + cs.autoscalingV1 = autoscalingv1.NewForConfigOrDie(c) + cs.autoscalingV2beta1 = autoscalingv2beta1.NewForConfigOrDie(c) + cs.batchV1 = batchv1.NewForConfigOrDie(c) + cs.batchV1beta1 = batchv1beta1.NewForConfigOrDie(c) + cs.batchV2alpha1 = batchv2alpha1.NewForConfigOrDie(c) + cs.certificatesV1beta1 = certificatesv1beta1.NewForConfigOrDie(c) + cs.coreV1 = corev1.NewForConfigOrDie(c) + cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) + cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) + cs.networkingV1 = networkingv1.NewForConfigOrDie(c) + cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) + cs.rbacV1 = rbacv1.NewForConfigOrDie(c) + cs.rbacV1beta1 = rbacv1beta1.NewForConfigOrDie(c) + cs.rbacV1alpha1 = rbacv1alpha1.NewForConfigOrDie(c) + cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) + cs.schedulingV1beta1 = schedulingv1beta1.NewForConfigOrDie(c) + cs.settingsV1alpha1 = settingsv1alpha1.NewForConfigOrDie(c) + cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c) + cs.storageV1 = storagev1.NewForConfigOrDie(c) + cs.storageV1alpha1 = storagev1alpha1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -490,24 +575,35 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given RESTClient. 
func New(c rest.Interface) *Clientset { var cs Clientset - cs.CoreV1Client = corev1.New(c) - cs.AppsV1beta1Client = appsv1beta1.New(c) - cs.AuthenticationV1Client = authenticationv1.New(c) - cs.AuthenticationV1beta1Client = authenticationv1beta1.New(c) - cs.AuthorizationV1Client = authorizationv1.New(c) - cs.AuthorizationV1beta1Client = authorizationv1beta1.New(c) - cs.AutoscalingV1Client = autoscalingv1.New(c) - cs.AutoscalingV2alpha1Client = autoscalingv2alpha1.New(c) - cs.BatchV1Client = batchv1.New(c) - cs.BatchV2alpha1Client = batchv2alpha1.New(c) - cs.CertificatesV1beta1Client = certificatesv1beta1.New(c) - cs.ExtensionsV1beta1Client = extensionsv1beta1.New(c) - cs.PolicyV1beta1Client = policyv1beta1.New(c) - cs.RbacV1beta1Client = rbacv1beta1.New(c) - cs.RbacV1alpha1Client = rbacv1alpha1.New(c) - cs.SettingsV1alpha1Client = settingsv1alpha1.New(c) - cs.StorageV1beta1Client = storagev1beta1.New(c) - cs.StorageV1Client = storagev1.New(c) + cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.New(c) + cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) + cs.appsV1beta1 = appsv1beta1.New(c) + cs.appsV1beta2 = appsv1beta2.New(c) + cs.appsV1 = appsv1.New(c) + cs.authenticationV1 = authenticationv1.New(c) + cs.authenticationV1beta1 = authenticationv1beta1.New(c) + cs.authorizationV1 = authorizationv1.New(c) + cs.authorizationV1beta1 = authorizationv1beta1.New(c) + cs.autoscalingV1 = autoscalingv1.New(c) + cs.autoscalingV2beta1 = autoscalingv2beta1.New(c) + cs.batchV1 = batchv1.New(c) + cs.batchV1beta1 = batchv1beta1.New(c) + cs.batchV2alpha1 = batchv2alpha1.New(c) + cs.certificatesV1beta1 = certificatesv1beta1.New(c) + cs.coreV1 = corev1.New(c) + cs.eventsV1beta1 = eventsv1beta1.New(c) + cs.extensionsV1beta1 = extensionsv1beta1.New(c) + cs.networkingV1 = networkingv1.New(c) + cs.policyV1beta1 = policyv1beta1.New(c) + cs.rbacV1 = rbacv1.New(c) + cs.rbacV1beta1 = rbacv1beta1.New(c) + cs.rbacV1alpha1 = rbacv1alpha1.New(c) + cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) + cs.schedulingV1beta1 = schedulingv1beta1.New(c) + cs.settingsV1alpha1 = settingsv1alpha1.New(c) + cs.storageV1beta1 = storagev1beta1.New(c) + cs.storageV1 = storagev1.New(c) + cs.storageV1alpha1 = storagev1alpha1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/k8s.io/client-go/kubernetes/doc.go b/vendor/k8s.io/client-go/kubernetes/doc.go index 2af84c669..b272334ad 100644 --- a/vendor/k8s.io/client-go/kubernetes/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated clientset. package kubernetes diff --git a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go index d8a4b8352..b764b21b6 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( @@ -21,9 +23,17 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" fakediscovery "k8s.io/client-go/discovery/fake" - kubernetes "k8s.io/client-go/kubernetes" + clientset "k8s.io/client-go/kubernetes" + admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" + fakeadmissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake" + admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" + fakeadmissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake" + appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + fakeappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1/fake" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" fakeappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake" + appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" + fakeappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake" authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" fakeauthenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1/fake" authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" @@ -34,28 +44,42 @@ import ( fakeauthorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake" autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" fakeautoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake" - autoscalingv2alpha1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1" - fakeautoscalingv2alpha1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake" + autoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1" + fakeautoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake" batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" fakebatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1/fake" + batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" + fakebatchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake" batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" fakebatchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" fakecertificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake" + eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" + fakeeventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1/fake" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" fakeextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake" + networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" + fakenetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1/fake" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" fakepolicyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake" + rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" + fakerbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1/fake" rbacv1alpha1 
"k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" fakerbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" fakerbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake" + schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" + fakeschedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake" + schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" + fakeschedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake" settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" fakesettingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake" storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" fakestoragev1 "k8s.io/client-go/kubernetes/typed/storage/v1/fake" + storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" + fakestoragev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake" storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" fakestoragev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake" "k8s.io/client-go/testing" @@ -66,42 +90,56 @@ import ( // without applying any validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. func NewSimpleClientset(objects ...runtime.Object) *Clientset { - o := testing.NewObjectTracker(registry, scheme, codecs.UniversalDecoder()) + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { if err := o.Add(obj); err != nil { panic(err) } } - fakePtr := testing.Fake{} - fakePtr.AddReactor("*", "*", testing.ObjectReaction(o, registry.RESTMapper())) + cs := &Clientset{} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) - - return &Clientset{fakePtr} + return cs } -// Clientset implements kubernetes.Interface. Meant to be embedded into a +// Clientset implements clientset.Interface. Meant to be embedded into a // struct to get a default implementation. This makes faking out just the method // you want to test easier. 
type Clientset struct { testing.Fake + discovery *fakediscovery.FakeDiscovery } func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return &fakediscovery.FakeDiscovery{Fake: &c.Fake} + return c.discovery } -var _ kubernetes.Interface = &Clientset{} +var _ clientset.Interface = &Clientset{} -// CoreV1 retrieves the CoreV1Client -func (c *Clientset) CoreV1() corev1.CoreV1Interface { - return &fakecorev1.FakeCoreV1{Fake: &c.Fake} +// AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client +func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface { + return &fakeadmissionregistrationv1alpha1.FakeAdmissionregistrationV1alpha1{Fake: &c.Fake} } -// Core retrieves the CoreV1Client -func (c *Clientset) Core() corev1.CoreV1Interface { - return &fakecorev1.FakeCoreV1{Fake: &c.Fake} +// AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client +func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { + return &fakeadmissionregistrationv1beta1.FakeAdmissionregistrationV1beta1{Fake: &c.Fake} +} + +// Admissionregistration retrieves the AdmissionregistrationV1beta1Client +func (c *Clientset) Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { + return &fakeadmissionregistrationv1beta1.FakeAdmissionregistrationV1beta1{Fake: &c.Fake} } // AppsV1beta1 retrieves the AppsV1beta1Client @@ -109,9 +147,19 @@ func (c *Clientset) AppsV1beta1() appsv1beta1.AppsV1beta1Interface { return &fakeappsv1beta1.FakeAppsV1beta1{Fake: &c.Fake} } -// Apps retrieves the AppsV1beta1Client -func (c *Clientset) Apps() appsv1beta1.AppsV1beta1Interface { - return &fakeappsv1beta1.FakeAppsV1beta1{Fake: &c.Fake} +// AppsV1beta2 retrieves the AppsV1beta2Client +func (c *Clientset) AppsV1beta2() appsv1beta2.AppsV1beta2Interface { + return &fakeappsv1beta2.FakeAppsV1beta2{Fake: &c.Fake} +} + +// AppsV1 retrieves the AppsV1Client +func (c *Clientset) AppsV1() appsv1.AppsV1Interface { + return &fakeappsv1.FakeAppsV1{Fake: &c.Fake} +} + +// Apps retrieves the AppsV1Client +func (c *Clientset) Apps() appsv1.AppsV1Interface { + return &fakeappsv1.FakeAppsV1{Fake: &c.Fake} } // AuthenticationV1 retrieves the AuthenticationV1Client @@ -154,9 +202,9 @@ func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface { return &fakeautoscalingv1.FakeAutoscalingV1{Fake: &c.Fake} } -// AutoscalingV2alpha1 retrieves the AutoscalingV2alpha1Client -func (c *Clientset) AutoscalingV2alpha1() autoscalingv2alpha1.AutoscalingV2alpha1Interface { - return &fakeautoscalingv2alpha1.FakeAutoscalingV2alpha1{Fake: &c.Fake} +// AutoscalingV2beta1 retrieves the AutoscalingV2beta1Client +func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface { + return &fakeautoscalingv2beta1.FakeAutoscalingV2beta1{Fake: &c.Fake} } // BatchV1 retrieves the BatchV1Client @@ -169,6 +217,11 @@ func (c *Clientset) Batch() batchv1.BatchV1Interface { return &fakebatchv1.FakeBatchV1{Fake: &c.Fake} } +// BatchV1beta1 retrieves the BatchV1beta1Client +func (c *Clientset) BatchV1beta1() batchv1beta1.BatchV1beta1Interface { + return &fakebatchv1beta1.FakeBatchV1beta1{Fake: &c.Fake} +} + // BatchV2alpha1 retrieves the BatchV2alpha1Client func (c *Clientset) BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface { return &fakebatchv2alpha1.FakeBatchV2alpha1{Fake: &c.Fake} @@ -184,6 +237,26 @@ func (c *Clientset) Certificates() 
certificatesv1beta1.CertificatesV1beta1Interf return &fakecertificatesv1beta1.FakeCertificatesV1beta1{Fake: &c.Fake} } +// CoreV1 retrieves the CoreV1Client +func (c *Clientset) CoreV1() corev1.CoreV1Interface { + return &fakecorev1.FakeCoreV1{Fake: &c.Fake} +} + +// Core retrieves the CoreV1Client +func (c *Clientset) Core() corev1.CoreV1Interface { + return &fakecorev1.FakeCoreV1{Fake: &c.Fake} +} + +// EventsV1beta1 retrieves the EventsV1beta1Client +func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { + return &fakeeventsv1beta1.FakeEventsV1beta1{Fake: &c.Fake} +} + +// Events retrieves the EventsV1beta1Client +func (c *Clientset) Events() eventsv1beta1.EventsV1beta1Interface { + return &fakeeventsv1beta1.FakeEventsV1beta1{Fake: &c.Fake} +} + // ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { return &fakeextensionsv1beta1.FakeExtensionsV1beta1{Fake: &c.Fake} @@ -194,6 +267,16 @@ func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface { return &fakeextensionsv1beta1.FakeExtensionsV1beta1{Fake: &c.Fake} } +// NetworkingV1 retrieves the NetworkingV1Client +func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { + return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake} +} + +// Networking retrieves the NetworkingV1Client +func (c *Clientset) Networking() networkingv1.NetworkingV1Interface { + return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake} +} + // PolicyV1beta1 retrieves the PolicyV1beta1Client func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { return &fakepolicyv1beta1.FakePolicyV1beta1{Fake: &c.Fake} @@ -204,13 +287,18 @@ func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface { return &fakepolicyv1beta1.FakePolicyV1beta1{Fake: &c.Fake} } -// RbacV1beta1 retrieves the RbacV1beta1Client -func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { - return &fakerbacv1beta1.FakeRbacV1beta1{Fake: &c.Fake} +// RbacV1 retrieves the RbacV1Client +func (c *Clientset) RbacV1() rbacv1.RbacV1Interface { + return &fakerbacv1.FakeRbacV1{Fake: &c.Fake} } -// Rbac retrieves the RbacV1beta1Client -func (c *Clientset) Rbac() rbacv1beta1.RbacV1beta1Interface { +// Rbac retrieves the RbacV1Client +func (c *Clientset) Rbac() rbacv1.RbacV1Interface { + return &fakerbacv1.FakeRbacV1{Fake: &c.Fake} +} + +// RbacV1beta1 retrieves the RbacV1beta1Client +func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { return &fakerbacv1beta1.FakeRbacV1beta1{Fake: &c.Fake} } @@ -219,6 +307,21 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return &fakerbacv1alpha1.FakeRbacV1alpha1{Fake: &c.Fake} } +// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client +func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface { + return &fakeschedulingv1alpha1.FakeSchedulingV1alpha1{Fake: &c.Fake} +} + +// SchedulingV1beta1 retrieves the SchedulingV1beta1Client +func (c *Clientset) SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface { + return &fakeschedulingv1beta1.FakeSchedulingV1beta1{Fake: &c.Fake} +} + +// Scheduling retrieves the SchedulingV1beta1Client +func (c *Clientset) Scheduling() schedulingv1beta1.SchedulingV1beta1Interface { + return &fakeschedulingv1beta1.FakeSchedulingV1beta1{Fake: &c.Fake} +} + // SettingsV1alpha1 retrieves the SettingsV1alpha1Client func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface 
{ return &fakesettingsv1alpha1.FakeSettingsV1alpha1{Fake: &c.Fake} @@ -243,3 +346,8 @@ func (c *Clientset) StorageV1() storagev1.StorageV1Interface { func (c *Clientset) Storage() storagev1.StorageV1Interface { return &fakestoragev1.FakeStorageV1{Fake: &c.Fake} } + +// StorageV1alpha1 retrieves the StorageV1alpha1Client +func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { + return &fakestoragev1alpha1.FakeStorageV1alpha1{Fake: &c.Fake} +} diff --git a/vendor/k8s.io/client-go/kubernetes/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/fake/doc.go index 5f565b3c8..9b99e7167 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated fake clientset. package fake diff --git a/vendor/k8s.io/client-go/kubernetes/fake/register.go b/vendor/k8s.io/client-go/kubernetes/fake/register.go index 3ad666057..88e168fa6 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/register.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,55 +14,97 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
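The regenerated fake clientset above now seeds an object tracker via NewSimpleClientset and exposes the same per-group accessors as the real Clientset, so it can stand in for kubernetes.Interface in unit tests (as the Kubernetes discovery tests in this repository do). A hedged sketch of typical test usage; listPodNames and the test data are illustrative.

package example

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// listPodNames is an illustrative function under test that depends only on
// the kubernetes.Interface abstraction, not on a live API server.
func listPodNames(c kubernetes.Interface, namespace string) ([]string, error) {
	pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(pods.Items))
	for _, p := range pods.Items {
		names = append(names, p.Name)
	}
	return names, nil
}

func TestListPodNames(t *testing.T) {
	// Seed the fake clientset's object tracker with a single Pod.
	cs := fake.NewSimpleClientset(&corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
	})
	names, err := listPodNames(cs, "default")
	if err != nil || len(names) != 1 || names[0] != "demo" {
		t.Fatalf("unexpected result: %v, %v", names, err)
	}
}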
+ package fake import ( - announced "k8s.io/apimachinery/pkg/apimachinery/announced" - registered "k8s.io/apimachinery/pkg/apimachinery/registered" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + authenticationv1 "k8s.io/api/authentication/v1" + authenticationv1beta1 "k8s.io/api/authentication/v1beta1" + authorizationv1 "k8s.io/api/authorization/v1" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + batchv2alpha1 "k8s.io/api/batch/v2alpha1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + corev1 "k8s.io/api/core/v1" + eventsv1beta1 "k8s.io/api/events/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" + settingsv1alpha1 "k8s.io/api/settings/v1alpha1" + storagev1 "k8s.io/api/storage/v1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" - core "k8s.io/client-go/pkg/api/install" - apps "k8s.io/client-go/pkg/apis/apps/install" - authentication "k8s.io/client-go/pkg/apis/authentication/install" - authorization "k8s.io/client-go/pkg/apis/authorization/install" - autoscaling "k8s.io/client-go/pkg/apis/autoscaling/install" - batch "k8s.io/client-go/pkg/apis/batch/install" - certificates "k8s.io/client-go/pkg/apis/certificates/install" - extensions "k8s.io/client-go/pkg/apis/extensions/install" - policy "k8s.io/client-go/pkg/apis/policy/install" - rbac "k8s.io/client-go/pkg/apis/rbac/install" - settings "k8s.io/client-go/pkg/apis/settings/install" - storage "k8s.io/client-go/pkg/apis/storage/install" - os "os" ) var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) var parameterCodec = runtime.NewParameterCodec(scheme) -var registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS")) -var groupFactoryRegistry = make(announced.APIGroupFactoryRegistry) - func init() { v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) - Install(groupFactoryRegistry, registry, scheme) + AddToScheme(scheme) } -// Install registers the API group and adds types to a scheme -func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { - core.Install(groupFactoryRegistry, registry, scheme) - apps.Install(groupFactoryRegistry, registry, scheme) - authentication.Install(groupFactoryRegistry, registry, scheme) - authorization.Install(groupFactoryRegistry, registry, scheme) - autoscaling.Install(groupFactoryRegistry, registry, scheme) - batch.Install(groupFactoryRegistry, registry, scheme) - certificates.Install(groupFactoryRegistry, registry, scheme) - extensions.Install(groupFactoryRegistry, registry, scheme) - policy.Install(groupFactoryRegistry, registry, scheme) - 
rbac.Install(groupFactoryRegistry, registry, scheme) - settings.Install(groupFactoryRegistry, registry, scheme) - storage.Install(groupFactoryRegistry, registry, scheme) - +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +func AddToScheme(scheme *runtime.Scheme) { + admissionregistrationv1alpha1.AddToScheme(scheme) + admissionregistrationv1beta1.AddToScheme(scheme) + appsv1beta1.AddToScheme(scheme) + appsv1beta2.AddToScheme(scheme) + appsv1.AddToScheme(scheme) + authenticationv1.AddToScheme(scheme) + authenticationv1beta1.AddToScheme(scheme) + authorizationv1.AddToScheme(scheme) + authorizationv1beta1.AddToScheme(scheme) + autoscalingv1.AddToScheme(scheme) + autoscalingv2beta1.AddToScheme(scheme) + batchv1.AddToScheme(scheme) + batchv1beta1.AddToScheme(scheme) + batchv2alpha1.AddToScheme(scheme) + certificatesv1beta1.AddToScheme(scheme) + corev1.AddToScheme(scheme) + eventsv1beta1.AddToScheme(scheme) + extensionsv1beta1.AddToScheme(scheme) + networkingv1.AddToScheme(scheme) + policyv1beta1.AddToScheme(scheme) + rbacv1.AddToScheme(scheme) + rbacv1beta1.AddToScheme(scheme) + rbacv1alpha1.AddToScheme(scheme) + schedulingv1alpha1.AddToScheme(scheme) + schedulingv1beta1.AddToScheme(scheme) + settingsv1alpha1.AddToScheme(scheme) + storagev1beta1.AddToScheme(scheme) + storagev1.AddToScheme(scheme) + storagev1alpha1.AddToScheme(scheme) } diff --git a/vendor/k8s.io/client-go/kubernetes/import.go b/vendor/k8s.io/client-go/kubernetes/import.go new file mode 100644 index 000000000..c4f9a91bc --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/import.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file exists to enforce this clientset's vanity import path. + +package kubernetes // import "k8s.io/client-go/kubernetes" diff --git a/vendor/k8s.io/client-go/kubernetes/import_known_versions.go b/vendor/k8s.io/client-go/kubernetes/import_known_versions.go deleted file mode 100644 index 297466b01..000000000 --- a/vendor/k8s.io/client-go/kubernetes/import_known_versions.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubernetes - -// These imports are the API groups the client will support. -import ( - "fmt" - - "k8s.io/client-go/pkg/api" - _ "k8s.io/client-go/pkg/api/install" - _ "k8s.io/client-go/pkg/apis/apps/install" - _ "k8s.io/client-go/pkg/apis/authentication/install" - _ "k8s.io/client-go/pkg/apis/authorization/install" - _ "k8s.io/client-go/pkg/apis/autoscaling/install" - _ "k8s.io/client-go/pkg/apis/batch/install" - _ "k8s.io/client-go/pkg/apis/certificates/install" - _ "k8s.io/client-go/pkg/apis/extensions/install" - _ "k8s.io/client-go/pkg/apis/policy/install" - _ "k8s.io/client-go/pkg/apis/rbac/install" - _ "k8s.io/client-go/pkg/apis/settings/install" - _ "k8s.io/client-go/pkg/apis/storage/install" -) - -func init() { - if missingVersions := api.Registry.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { - panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions)) - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/doc.go b/vendor/k8s.io/client-go/kubernetes/scheme/doc.go index 5d8ec824f..7dc375616 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package contains the scheme of the automatically generated clientset. package scheme diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 1fe5dbe4c..86584cf83 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,31 +14,44 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package scheme import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + authenticationv1 "k8s.io/api/authentication/v1" + authenticationv1beta1 "k8s.io/api/authentication/v1beta1" + authorizationv1 "k8s.io/api/authorization/v1" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + batchv2alpha1 "k8s.io/api/batch/v2alpha1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + corev1 "k8s.io/api/core/v1" + eventsv1beta1 "k8s.io/api/events/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" + settingsv1alpha1 "k8s.io/api/settings/v1alpha1" + storagev1 "k8s.io/api/storage/v1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" - corev1 "k8s.io/client-go/pkg/api/v1" - appsv1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" - authenticationv1 "k8s.io/client-go/pkg/apis/authentication/v1" - authenticationv1beta1 "k8s.io/client-go/pkg/apis/authentication/v1beta1" - authorizationv1 "k8s.io/client-go/pkg/apis/authorization/v1" - authorizationv1beta1 "k8s.io/client-go/pkg/apis/authorization/v1beta1" - autoscalingv1 "k8s.io/client-go/pkg/apis/autoscaling/v1" - autoscalingv2alpha1 "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1" - batchv1 "k8s.io/client-go/pkg/apis/batch/v1" - batchv2alpha1 "k8s.io/client-go/pkg/apis/batch/v2alpha1" - certificatesv1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1" - extensionsv1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" - policyv1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" - rbacv1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" - rbacv1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" - settingsv1alpha1 "k8s.io/client-go/pkg/apis/settings/v1alpha1" - storagev1 "k8s.io/client-go/pkg/apis/storage/v1" - storagev1beta1 "k8s.io/client-go/pkg/apis/storage/v1beta1" ) var Scheme = runtime.NewScheme() @@ -55,7 +68,7 @@ func init() { // // import ( // "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kuberentes/scheme" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" // aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" // ) // @@ -65,23 +78,33 @@ func init() { // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. 
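The composition described in the comment above is what lets a single runtime.Scheme serve every registered group/version. As a minimal sketch of consuming such a scheme (the manifest literal is only an illustration, not part of the generated code), decoding a YAML manifest into a typed object might look like:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/client-go/kubernetes/scheme"
)

func main() {
	manifest := []byte("apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: example")

	// scheme.Codecs is built from the Scheme that AddToScheme populates, so the
	// universal deserializer already recognizes apps/v1 Deployment.
	obj, gvk, err := scheme.Codecs.UniversalDeserializer().Decode(manifest, nil, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk.String(), obj.(*appsv1.Deployment).Name)
}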
func AddToScheme(scheme *runtime.Scheme) { - corev1.AddToScheme(scheme) + admissionregistrationv1alpha1.AddToScheme(scheme) + admissionregistrationv1beta1.AddToScheme(scheme) appsv1beta1.AddToScheme(scheme) + appsv1beta2.AddToScheme(scheme) + appsv1.AddToScheme(scheme) authenticationv1.AddToScheme(scheme) authenticationv1beta1.AddToScheme(scheme) authorizationv1.AddToScheme(scheme) authorizationv1beta1.AddToScheme(scheme) autoscalingv1.AddToScheme(scheme) - autoscalingv2alpha1.AddToScheme(scheme) + autoscalingv2beta1.AddToScheme(scheme) batchv1.AddToScheme(scheme) + batchv1beta1.AddToScheme(scheme) batchv2alpha1.AddToScheme(scheme) certificatesv1beta1.AddToScheme(scheme) + corev1.AddToScheme(scheme) + eventsv1beta1.AddToScheme(scheme) extensionsv1beta1.AddToScheme(scheme) + networkingv1.AddToScheme(scheme) policyv1beta1.AddToScheme(scheme) + rbacv1.AddToScheme(scheme) rbacv1beta1.AddToScheme(scheme) rbacv1alpha1.AddToScheme(scheme) + schedulingv1alpha1.AddToScheme(scheme) + schedulingv1beta1.AddToScheme(scheme) settingsv1alpha1.AddToScheme(scheme) storagev1beta1.AddToScheme(scheme) storagev1.AddToScheme(scheme) - + storagev1alpha1.AddToScheme(scheme) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go new file mode 100644 index 000000000..5e02f7227 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AdmissionregistrationV1alpha1Interface interface { + RESTClient() rest.Interface + InitializerConfigurationsGetter +} + +// AdmissionregistrationV1alpha1Client is used to interact with features provided by the admissionregistration.k8s.io group. +type AdmissionregistrationV1alpha1Client struct { + restClient rest.Interface +} + +func (c *AdmissionregistrationV1alpha1Client) InitializerConfigurations() InitializerConfigurationInterface { + return newInitializerConfigurations(c) +} + +// NewForConfig creates a new AdmissionregistrationV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*AdmissionregistrationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AdmissionregistrationV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new AdmissionregistrationV1alpha1Client for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AdmissionregistrationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *AdmissionregistrationV1alpha1Client { + return &AdmissionregistrationV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AdmissionregistrationV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go similarity index 83% rename from vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go index d29bd3f4e..df51baa4d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. -package v2alpha1 +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go similarity index 85% rename from vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. 
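The generated fake packages back fake.NewSimpleClientset in k8s.io/client-go/kubernetes/fake, which is the usual way to exercise client code in tests without a real API server. A hedged sketch, with an illustrative pod name and namespace:

package example

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestListPods(t *testing.T) {
	// Seed the fake clientset with one pod; all calls are served from memory.
	client := fake.NewSimpleClientset(&corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
	})

	pods, err := client.CoreV1().Pods("default").List(metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(pods.Items) != 1 {
		t.Fatalf("expected 1 pod, got %d", len(pods.Items))
	}
}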
package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go new file mode 100644 index 000000000..8457aec27 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAdmissionregistrationV1alpha1 struct { + *testing.Fake +} + +func (c *FakeAdmissionregistrationV1alpha1) InitializerConfigurations() v1alpha1.InitializerConfigurationInterface { + return &FakeInitializerConfigurations{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAdmissionregistrationV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go new file mode 100644 index 000000000..b927dae2c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeInitializerConfigurations implements InitializerConfigurationInterface +type FakeInitializerConfigurations struct { + Fake *FakeAdmissionregistrationV1alpha1 +} + +var initializerconfigurationsResource = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1alpha1", Resource: "initializerconfigurations"} + +var initializerconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "InitializerConfiguration"} + +// Get takes name of the initializerConfiguration, and returns the corresponding initializerConfiguration object, and an error if there is any. +func (c *FakeInitializerConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.InitializerConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(initializerconfigurationsResource, name), &v1alpha1.InitializerConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InitializerConfiguration), err +} + +// List takes label and field selectors, and returns the list of InitializerConfigurations that match those selectors. +func (c *FakeInitializerConfigurations) List(opts v1.ListOptions) (result *v1alpha1.InitializerConfigurationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(initializerconfigurationsResource, initializerconfigurationsKind, opts), &v1alpha1.InitializerConfigurationList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.InitializerConfigurationList{ListMeta: obj.(*v1alpha1.InitializerConfigurationList).ListMeta} + for _, item := range obj.(*v1alpha1.InitializerConfigurationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested initializerConfigurations. +func (c *FakeInitializerConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(initializerconfigurationsResource, opts)) +} + +// Create takes the representation of a initializerConfiguration and creates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. +func (c *FakeInitializerConfigurations) Create(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(initializerconfigurationsResource, initializerConfiguration), &v1alpha1.InitializerConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InitializerConfiguration), err +} + +// Update takes the representation of a initializerConfiguration and updates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. +func (c *FakeInitializerConfigurations) Update(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateAction(initializerconfigurationsResource, initializerConfiguration), &v1alpha1.InitializerConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InitializerConfiguration), err +} + +// Delete takes name of the initializerConfiguration and deletes it. Returns an error if one occurs. +func (c *FakeInitializerConfigurations) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(initializerconfigurationsResource, name), &v1alpha1.InitializerConfiguration{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeInitializerConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(initializerconfigurationsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.InitializerConfigurationList{}) + return err +} + +// Patch applies the patch and returns the patched initializerConfiguration. +func (c *FakeInitializerConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(initializerconfigurationsResource, name, data, subresources...), &v1alpha1.InitializerConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InitializerConfiguration), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..1e29b96f4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type InitializerConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go new file mode 100644 index 000000000..e014ea72b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go @@ -0,0 +1,147 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// InitializerConfigurationsGetter has a method to return a InitializerConfigurationInterface. +// A group's client should implement this interface. +type InitializerConfigurationsGetter interface { + InitializerConfigurations() InitializerConfigurationInterface +} + +// InitializerConfigurationInterface has methods to work with InitializerConfiguration resources. +type InitializerConfigurationInterface interface { + Create(*v1alpha1.InitializerConfiguration) (*v1alpha1.InitializerConfiguration, error) + Update(*v1alpha1.InitializerConfiguration) (*v1alpha1.InitializerConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.InitializerConfiguration, error) + List(opts v1.ListOptions) (*v1alpha1.InitializerConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) + InitializerConfigurationExpansion +} + +// initializerConfigurations implements InitializerConfigurationInterface +type initializerConfigurations struct { + client rest.Interface +} + +// newInitializerConfigurations returns a InitializerConfigurations +func newInitializerConfigurations(c *AdmissionregistrationV1alpha1Client) *initializerConfigurations { + return &initializerConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the initializerConfiguration, and returns the corresponding initializerConfiguration object, and an error if there is any. +func (c *initializerConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.InitializerConfiguration, err error) { + result = &v1alpha1.InitializerConfiguration{} + err = c.client.Get(). + Resource("initializerconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of InitializerConfigurations that match those selectors. +func (c *initializerConfigurations) List(opts v1.ListOptions) (result *v1alpha1.InitializerConfigurationList, err error) { + result = &v1alpha1.InitializerConfigurationList{} + err = c.client.Get(). + Resource("initializerconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested initializerConfigurations. +func (c *initializerConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("initializerconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a initializerConfiguration and creates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. +func (c *initializerConfigurations) Create(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) { + result = &v1alpha1.InitializerConfiguration{} + err = c.client.Post(). 
+ Resource("initializerconfigurations"). + Body(initializerConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a initializerConfiguration and updates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. +func (c *initializerConfigurations) Update(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) { + result = &v1alpha1.InitializerConfiguration{} + err = c.client.Put(). + Resource("initializerconfigurations"). + Name(initializerConfiguration.Name). + Body(initializerConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the initializerConfiguration and deletes it. Returns an error if one occurs. +func (c *initializerConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("initializerconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *initializerConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("initializerconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched initializerConfiguration. +func (c *initializerConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) { + result = &v1alpha1.InitializerConfiguration{} + err = c.client.Patch(pt). + Resource("initializerconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go new file mode 100644 index 000000000..b13ea7953 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -0,0 +1,95 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AdmissionregistrationV1beta1Interface interface { + RESTClient() rest.Interface + MutatingWebhookConfigurationsGetter + ValidatingWebhookConfigurationsGetter +} + +// AdmissionregistrationV1beta1Client is used to interact with features provided by the admissionregistration.k8s.io group. 
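In practice this group client is normally reached through the aggregate clientset rather than constructed directly. A minimal sketch, assuming in-cluster configuration is available:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// The aggregate clientset exposes the group/version client generated here.
	hooks, err := cs.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, h := range hooks.Items {
		fmt.Println(h.Name)
	}
}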
+type AdmissionregistrationV1beta1Client struct { + restClient rest.Interface +} + +func (c *AdmissionregistrationV1beta1Client) MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface { + return newMutatingWebhookConfigurations(c) +} + +func (c *AdmissionregistrationV1beta1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { + return newValidatingWebhookConfigurations(c) +} + +// NewForConfig creates a new AdmissionregistrationV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AdmissionregistrationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AdmissionregistrationV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AdmissionregistrationV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AdmissionregistrationV1beta1Client for the given RESTClient. +func New(c rest.Interface) *AdmissionregistrationV1beta1Client { + return &AdmissionregistrationV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AdmissionregistrationV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go new file mode 100644 index 000000000..771101956 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go new file mode 100644 index 000000000..1a988ddba --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAdmissionregistrationV1beta1 struct { + *testing.Fake +} + +func (c *FakeAdmissionregistrationV1beta1) MutatingWebhookConfigurations() v1beta1.MutatingWebhookConfigurationInterface { + return &FakeMutatingWebhookConfigurations{c} +} + +func (c *FakeAdmissionregistrationV1beta1) ValidatingWebhookConfigurations() v1beta1.ValidatingWebhookConfigurationInterface { + return &FakeValidatingWebhookConfigurations{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAdmissionregistrationV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go new file mode 100644 index 000000000..e06888cc1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeMutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface +type FakeMutatingWebhookConfigurations struct { + Fake *FakeAdmissionregistrationV1beta1 +} + +var mutatingwebhookconfigurationsResource = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1beta1", Resource: "mutatingwebhookconfigurations"} + +var mutatingwebhookconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "MutatingWebhookConfiguration"} + +// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. +func (c *FakeMutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(mutatingwebhookconfigurationsResource, name), &v1beta1.MutatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.MutatingWebhookConfiguration), err +} + +// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. +func (c *FakeMutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), &v1beta1.MutatingWebhookConfigurationList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.MutatingWebhookConfigurationList{ListMeta: obj.(*v1beta1.MutatingWebhookConfigurationList).ListMeta} + for _, item := range obj.(*v1beta1.MutatingWebhookConfigurationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. +func (c *FakeMutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(mutatingwebhookconfigurationsResource, opts)) +} + +// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *FakeMutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.MutatingWebhookConfiguration), err +} + +// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. 
+func (c *FakeMutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.MutatingWebhookConfiguration), err +} + +// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *FakeMutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(mutatingwebhookconfigurationsResource, name), &v1beta1.MutatingWebhookConfiguration{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeMutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(mutatingwebhookconfigurationsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.MutatingWebhookConfigurationList{}) + return err +} + +// Patch applies the patch and returns the patched mutatingWebhookConfiguration. +func (c *FakeMutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, data, subresources...), &v1beta1.MutatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.MutatingWebhookConfiguration), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go new file mode 100644 index 000000000..1069634e2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeValidatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface +type FakeValidatingWebhookConfigurations struct { + Fake *FakeAdmissionregistrationV1beta1 +} + +var validatingwebhookconfigurationsResource = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1beta1", Resource: "validatingwebhookconfigurations"} + +var validatingwebhookconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "ValidatingWebhookConfiguration"} + +// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. +func (c *FakeValidatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(validatingwebhookconfigurationsResource, name), &v1beta1.ValidatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ValidatingWebhookConfiguration), err +} + +// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. +func (c *FakeValidatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), &v1beta1.ValidatingWebhookConfigurationList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ValidatingWebhookConfigurationList{ListMeta: obj.(*v1beta1.ValidatingWebhookConfigurationList).ListMeta} + for _, item := range obj.(*v1beta1.ValidatingWebhookConfigurationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. +func (c *FakeValidatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(validatingwebhookconfigurationsResource, opts)) +} + +// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *FakeValidatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ValidatingWebhookConfiguration), err +} + +// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. 
+func (c *FakeValidatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ValidatingWebhookConfiguration), err +} + +// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *FakeValidatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(validatingwebhookconfigurationsResource, name), &v1beta1.ValidatingWebhookConfiguration{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeValidatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(validatingwebhookconfigurationsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ValidatingWebhookConfigurationList{}) + return err +} + +// Patch applies the patch and returns the patched validatingWebhookConfiguration. +func (c *FakeValidatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, data, subresources...), &v1beta1.ValidatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ValidatingWebhookConfiguration), err +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/defaults.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go similarity index 74% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/defaults.go rename to vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go index 1a4566479..2aeb9c98a 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/defaults.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 -import ( - "k8s.io/apimachinery/pkg/runtime" -) +type MutatingWebhookConfigurationExpansion interface{} -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return scheme.AddDefaultingFuncs() -} +type ValidatingWebhookConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go new file mode 100644 index 000000000..cb0157102 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -0,0 +1,147 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type MutatingWebhookConfigurationsGetter interface { + MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface +} + +// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. +type MutatingWebhookConfigurationInterface interface { + Create(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) + Update(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error) + List(opts v1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) + MutatingWebhookConfigurationExpansion +} + +// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface +type mutatingWebhookConfigurations struct { + client rest.Interface +} + +// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations +func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mutatingWebhookConfigurations { + return &mutatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. +func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. +func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { + result = &v1beta1.MutatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. 
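Callers consume the returned watch.Interface through its ResultChan until they call Stop. A hedged sketch, assuming a kubeconfig path supplied by the caller:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: the kubeconfig path is provided by the environment; adjust as needed.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	w, err := cs.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Watch(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	defer w.Stop()

	for ev := range w.ResultChan() {
		// Each event reports Added/Modified/Deleted plus the affected object.
		fmt.Println(ev.Type)
	}
}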
+func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Post(). + Resource("mutatingwebhookconfigurations"). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Put(). + Resource("mutatingwebhookconfigurations"). + Name(mutatingWebhookConfiguration.Name). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched mutatingWebhookConfiguration. +func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("mutatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go new file mode 100644 index 000000000..3a9339f6c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -0,0 +1,147 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type ValidatingWebhookConfigurationsGetter interface { + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface +} + +// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. +type ValidatingWebhookConfigurationInterface interface { + Create(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) + Update(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error) + List(opts v1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) + ValidatingWebhookConfigurationExpansion +} + +// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface +type validatingWebhookConfigurations struct { + client rest.Interface +} + +// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations +func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *validatingWebhookConfigurations { + return &validatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. +func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. +func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { + result = &v1beta1.ValidatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. +func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. 
+func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Post(). + Resource("validatingwebhookconfigurations"). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Put(). + Resource("validatingwebhookconfigurations"). + Name(validatingWebhookConfiguration.Name). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *validatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched validatingWebhookConfiguration. +func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("validatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go new file mode 100644 index 000000000..da19c7596 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go @@ -0,0 +1,110 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AppsV1Interface interface { + RESTClient() rest.Interface + ControllerRevisionsGetter + DaemonSetsGetter + DeploymentsGetter + ReplicaSetsGetter + StatefulSetsGetter +} + +// AppsV1Client is used to interact with features provided by the apps group. 
+type AppsV1Client struct { + restClient rest.Interface +} + +func (c *AppsV1Client) ControllerRevisions(namespace string) ControllerRevisionInterface { + return newControllerRevisions(c, namespace) +} + +func (c *AppsV1Client) DaemonSets(namespace string) DaemonSetInterface { + return newDaemonSets(c, namespace) +} + +func (c *AppsV1Client) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *AppsV1Client) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + +func (c *AppsV1Client) StatefulSets(namespace string) StatefulSetInterface { + return newStatefulSets(c, namespace) +} + +// NewForConfig creates a new AppsV1Client for the given config. +func NewForConfig(c *rest.Config) (*AppsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AppsV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AppsV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AppsV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AppsV1Client for the given RESTClient. +func New(c rest.Interface) *AppsV1Client { + return &AppsV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AppsV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go new file mode 100644 index 000000000..ac9d62790 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -0,0 +1,157 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. +// A group's client should implement this interface. +type ControllerRevisionsGetter interface { + ControllerRevisions(namespace string) ControllerRevisionInterface +} + +// ControllerRevisionInterface has methods to work with ControllerRevision resources. 
+type ControllerRevisionInterface interface { + Create(*v1.ControllerRevision) (*v1.ControllerRevision, error) + Update(*v1.ControllerRevision) (*v1.ControllerRevision, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ControllerRevision, error) + List(opts meta_v1.ListOptions) (*v1.ControllerRevisionList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) + ControllerRevisionExpansion +} + +// controllerRevisions implements ControllerRevisionInterface +type controllerRevisions struct { + client rest.Interface + ns string +} + +// newControllerRevisions returns a ControllerRevisions +func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisions { + return &controllerRevisions{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. +func (c *controllerRevisions) Get(name string, options meta_v1.GetOptions) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. +func (c *controllerRevisions) List(opts meta_v1.ListOptions) (result *v1.ControllerRevisionList, err error) { + result = &v1.ControllerRevisionList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested controllerRevisions. +func (c *controllerRevisions) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Create(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Post(). + Namespace(c.ns). + Resource("controllerrevisions"). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Update(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Put(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(controllerRevision.Name). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. +func (c *controllerRevisions) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). 
+ Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *controllerRevisions) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched controllerRevision. +func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("controllerrevisions"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go new file mode 100644 index 000000000..b50ac2557 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// DaemonSetsGetter has a method to return a DaemonSetInterface. +// A group's client should implement this interface. +type DaemonSetsGetter interface { + DaemonSets(namespace string) DaemonSetInterface +} + +// DaemonSetInterface has methods to work with DaemonSet resources. +type DaemonSetInterface interface { + Create(*v1.DaemonSet) (*v1.DaemonSet, error) + Update(*v1.DaemonSet) (*v1.DaemonSet, error) + UpdateStatus(*v1.DaemonSet) (*v1.DaemonSet, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.DaemonSet, error) + List(opts meta_v1.ListOptions) (*v1.DaemonSetList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error) + DaemonSetExpansion +} + +// daemonSets implements DaemonSetInterface +type daemonSets struct { + client rest.Interface + ns string +} + +// newDaemonSets returns a DaemonSets +func newDaemonSets(c *AppsV1Client, namespace string) *daemonSets { + return &daemonSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *daemonSets) Get(name string, options meta_v1.GetOptions) (result *v1.DaemonSet, err error) { + result = &v1.DaemonSet{} + err = c.client.Get(). + Namespace(c.ns). 
+ Resource("daemonsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. +func (c *daemonSets) List(opts meta_v1.ListOptions) (result *v1.DaemonSetList, err error) { + result = &v1.DaemonSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *daemonSets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Create(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { + result = &v1.DaemonSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("daemonsets"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Update(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { + result = &v1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + Body(daemonSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *daemonSets) UpdateStatus(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { + result = &v1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + SubResource("status"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *daemonSets) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *daemonSets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched daemonSet. +func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error) { + result = &v1.DaemonSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("daemonsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go new file mode 100644 index 000000000..e2b1b1886 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + Create(*v1.Deployment) (*v1.Deployment, error) + Update(*v1.Deployment) (*v1.Deployment, error) + UpdateStatus(*v1.Deployment) (*v1.Deployment, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Deployment, error) + List(opts meta_v1.ListOptions) (*v1.DeploymentList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *AppsV1Client, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options meta_v1.GetOptions) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts meta_v1.ListOptions) (result *v1.DeploymentList, err error) { + result = &v1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1.Deployment) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). 
+ Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1.Deployment) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *v1.Deployment) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go new file mode 100644 index 000000000..3af5d054f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go new file mode 100644 index 000000000..458df0fa3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go @@ -0,0 +1,56 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/client-go/kubernetes/typed/apps/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAppsV1 struct { + *testing.Fake +} + +func (c *FakeAppsV1) ControllerRevisions(namespace string) v1.ControllerRevisionInterface { + return &FakeControllerRevisions{c, namespace} +} + +func (c *FakeAppsV1) DaemonSets(namespace string) v1.DaemonSetInterface { + return &FakeDaemonSets{c, namespace} +} + +func (c *FakeAppsV1) Deployments(namespace string) v1.DeploymentInterface { + return &FakeDeployments{c, namespace} +} + +func (c *FakeAppsV1) ReplicaSets(namespace string) v1.ReplicaSetInterface { + return &FakeReplicaSets{c, namespace} +} + +func (c *FakeAppsV1) StatefulSets(namespace string) v1.StatefulSetInterface { + return &FakeStatefulSets{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAppsV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go new file mode 100644 index 000000000..0655803d8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go @@ -0,0 +1,128 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + apps_v1 "k8s.io/api/apps/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeControllerRevisions implements ControllerRevisionInterface +type FakeControllerRevisions struct { + Fake *FakeAppsV1 + ns string +} + +var controllerrevisionsResource = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "controllerrevisions"} + +var controllerrevisionsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ControllerRevision"} + +// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. +func (c *FakeControllerRevisions) Get(name string, options v1.GetOptions) (result *apps_v1.ControllerRevision, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &apps_v1.ControllerRevision{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.ControllerRevision), err +} + +// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. +func (c *FakeControllerRevisions) List(opts v1.ListOptions) (result *apps_v1.ControllerRevisionList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &apps_v1.ControllerRevisionList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &apps_v1.ControllerRevisionList{ListMeta: obj.(*apps_v1.ControllerRevisionList).ListMeta} + for _, item := range obj.(*apps_v1.ControllerRevisionList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested controllerRevisions. +func (c *FakeControllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) + +} + +// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *FakeControllerRevisions) Create(controllerRevision *apps_v1.ControllerRevision) (result *apps_v1.ControllerRevision, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &apps_v1.ControllerRevision{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.ControllerRevision), err +} + +// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *FakeControllerRevisions) Update(controllerRevision *apps_v1.ControllerRevision) (result *apps_v1.ControllerRevision, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &apps_v1.ControllerRevision{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.ControllerRevision), err +} + +// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. 
+func (c *FakeControllerRevisions) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(controllerrevisionsResource, c.ns, name), &apps_v1.ControllerRevision{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &apps_v1.ControllerRevisionList{}) + return err +} + +// Patch applies the patch and returns the patched controllerRevision. +func (c *FakeControllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps_v1.ControllerRevision, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, data, subresources...), &apps_v1.ControllerRevision{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.ControllerRevision), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go new file mode 100644 index 000000000..47cdca788 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go @@ -0,0 +1,140 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + apps_v1 "k8s.io/api/apps/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeDaemonSets implements DaemonSetInterface +type FakeDaemonSets struct { + Fake *FakeAppsV1 + ns string +} + +var daemonsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"} + +var daemonsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "DaemonSet"} + +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *FakeDaemonSets) Get(name string, options v1.GetOptions) (result *apps_v1.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &apps_v1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.DaemonSet), err +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. +func (c *FakeDaemonSets) List(opts v1.ListOptions) (result *apps_v1.DaemonSetList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &apps_v1.DaemonSetList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &apps_v1.DaemonSetList{ListMeta: obj.(*apps_v1.DaemonSetList).ListMeta} + for _, item := range obj.(*apps_v1.DaemonSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *FakeDaemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) + +} + +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *FakeDaemonSets) Create(daemonSet *apps_v1.DaemonSet) (result *apps_v1.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &apps_v1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.DaemonSet), err +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *FakeDaemonSets) Update(daemonSet *apps_v1.DaemonSet) (result *apps_v1.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &apps_v1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.DaemonSet), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeDaemonSets) UpdateStatus(daemonSet *apps_v1.DaemonSet) (*apps_v1.DaemonSet, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &apps_v1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.DaemonSet), err +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *FakeDaemonSets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(daemonsetsResource, c.ns, name), &apps_v1.DaemonSet{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &apps_v1.DaemonSetList{}) + return err +} + +// Patch applies the patch and returns the patched daemonSet. +func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps_v1.DaemonSet, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, data, subresources...), &apps_v1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.DaemonSet), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go new file mode 100644 index 000000000..dab4f40c3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go @@ -0,0 +1,140 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + apps_v1 "k8s.io/api/apps/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeDeployments implements DeploymentInterface +type FakeDeployments struct { + Fake *FakeAppsV1 + ns string +} + +var deploymentsResource = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} + +var deploymentsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *apps_v1.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &apps_v1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.Deployment), err +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *FakeDeployments) List(opts v1.ListOptions) (result *apps_v1.DeploymentList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &apps_v1.DeploymentList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &apps_v1.DeploymentList{ListMeta: obj.(*apps_v1.DeploymentList).ListMeta} + for _, item := range obj.(*apps_v1.DeploymentList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *FakeDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) + +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *FakeDeployments) Create(deployment *apps_v1.Deployment) (result *apps_v1.Deployment, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &apps_v1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.Deployment), err +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *FakeDeployments) Update(deployment *apps_v1.Deployment) (result *apps_v1.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &apps_v1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.Deployment), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeDeployments) UpdateStatus(deployment *apps_v1.Deployment) (*apps_v1.Deployment, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &apps_v1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.Deployment), err +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &apps_v1.Deployment{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &apps_v1.DeploymentList{}) + return err +} + +// Patch applies the patch and returns the patched deployment. +func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps_v1.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &apps_v1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.Deployment), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go new file mode 100644 index 000000000..948091bed --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go @@ -0,0 +1,140 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + apps_v1 "k8s.io/api/apps/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeReplicaSets implements ReplicaSetInterface +type FakeReplicaSets struct { + Fake *FakeAppsV1 + ns string +} + +var replicasetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"} + +var replicasetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *FakeReplicaSets) Get(name string, options v1.GetOptions) (result *apps_v1.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &apps_v1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.ReplicaSet), err +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *FakeReplicaSets) List(opts v1.ListOptions) (result *apps_v1.ReplicaSetList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &apps_v1.ReplicaSetList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &apps_v1.ReplicaSetList{ListMeta: obj.(*apps_v1.ReplicaSetList).ListMeta} + for _, item := range obj.(*apps_v1.ReplicaSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *FakeReplicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) + +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *FakeReplicaSets) Create(replicaSet *apps_v1.ReplicaSet) (result *apps_v1.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &apps_v1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.ReplicaSet), err +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *FakeReplicaSets) Update(replicaSet *apps_v1.ReplicaSet) (result *apps_v1.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &apps_v1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.ReplicaSet), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeReplicaSets) UpdateStatus(replicaSet *apps_v1.ReplicaSet) (*apps_v1.ReplicaSet, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &apps_v1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.ReplicaSet), err +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *FakeReplicaSets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(replicasetsResource, c.ns, name), &apps_v1.ReplicaSet{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &apps_v1.ReplicaSetList{}) + return err +} + +// Patch applies the patch and returns the patched replicaSet. +func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps_v1.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, data, subresources...), &apps_v1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.ReplicaSet), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go new file mode 100644 index 000000000..19cb2fad5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go @@ -0,0 +1,140 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + apps_v1 "k8s.io/api/apps/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeStatefulSets implements StatefulSetInterface +type FakeStatefulSets struct { + Fake *FakeAppsV1 + ns string +} + +var statefulsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "statefulsets"} + +var statefulsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "StatefulSet"} + +// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. +func (c *FakeStatefulSets) Get(name string, options v1.GetOptions) (result *apps_v1.StatefulSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &apps_v1.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.StatefulSet), err +} + +// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. +func (c *FakeStatefulSets) List(opts v1.ListOptions) (result *apps_v1.StatefulSetList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &apps_v1.StatefulSetList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &apps_v1.StatefulSetList{ListMeta: obj.(*apps_v1.StatefulSetList).ListMeta} + for _, item := range obj.(*apps_v1.StatefulSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested statefulSets. +func (c *FakeStatefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) + +} + +// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *FakeStatefulSets) Create(statefulSet *apps_v1.StatefulSet) (result *apps_v1.StatefulSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &apps_v1.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.StatefulSet), err +} + +// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *FakeStatefulSets) Update(statefulSet *apps_v1.StatefulSet) (result *apps_v1.StatefulSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &apps_v1.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.StatefulSet), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeStatefulSets) UpdateStatus(statefulSet *apps_v1.StatefulSet) (*apps_v1.StatefulSet, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &apps_v1.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.StatefulSet), err +} + +// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. +func (c *FakeStatefulSets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(statefulsetsResource, c.ns, name), &apps_v1.StatefulSet{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &apps_v1.StatefulSetList{}) + return err +} + +// Patch applies the patch and returns the patched statefulSet. +func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps_v1.StatefulSet, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, data, subresources...), &apps_v1.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*apps_v1.StatefulSet), err +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/defaults.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go similarity index 66% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1/defaults.go rename to vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go index d63d91754..88cfe4ecb 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/defaults.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 -import ( - "k8s.io/apimachinery/pkg/runtime" -) +type ControllerRevisionExpansion interface{} -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return scheme.AddDefaultingFuncs() -} +type DaemonSetExpansion interface{} + +type DeploymentExpansion interface{} + +type ReplicaSetExpansion interface{} + +type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go new file mode 100644 index 000000000..21614cbe1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ReplicaSetsGetter has a method to return a ReplicaSetInterface. +// A group's client should implement this interface. +type ReplicaSetsGetter interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. 
+type ReplicaSetInterface interface { + Create(*v1.ReplicaSet) (*v1.ReplicaSet, error) + Update(*v1.ReplicaSet) (*v1.ReplicaSet, error) + UpdateStatus(*v1.ReplicaSet) (*v1.ReplicaSet, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ReplicaSet, error) + List(opts meta_v1.ListOptions) (*v1.ReplicaSetList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) + ReplicaSetExpansion +} + +// replicaSets implements ReplicaSetInterface +type replicaSets struct { + client rest.Interface + ns string +} + +// newReplicaSets returns a ReplicaSets +func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets { + return &replicaSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *replicaSets) Get(name string, options meta_v1.GetOptions) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *replicaSets) List(opts meta_v1.ListOptions) (result *v1.ReplicaSetList, err error) { + result = &v1.ReplicaSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *replicaSets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Create(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + Body(replicaSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *replicaSets) UpdateStatus(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. 
+func (c *replicaSets) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched replicaSet. +func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicasets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go new file mode 100644 index 000000000..3ef5b8880 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// StatefulSetsGetter has a method to return a StatefulSetInterface. +// A group's client should implement this interface. +type StatefulSetsGetter interface { + StatefulSets(namespace string) StatefulSetInterface +} + +// StatefulSetInterface has methods to work with StatefulSet resources. +type StatefulSetInterface interface { + Create(*v1.StatefulSet) (*v1.StatefulSet, error) + Update(*v1.StatefulSet) (*v1.StatefulSet, error) + UpdateStatus(*v1.StatefulSet) (*v1.StatefulSet, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.StatefulSet, error) + List(opts meta_v1.ListOptions) (*v1.StatefulSetList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) + StatefulSetExpansion +} + +// statefulSets implements StatefulSetInterface +type statefulSets struct { + client rest.Interface + ns string +} + +// newStatefulSets returns a StatefulSets +func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets { + return &statefulSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. 
+func (c *statefulSets) Get(name string, options meta_v1.GetOptions) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. +func (c *statefulSets) List(opts meta_v1.ListOptions) (result *v1.StatefulSetList, err error) { + result = &v1.StatefulSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested statefulSets. +func (c *statefulSets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Create(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("statefulsets"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Update(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + Body(statefulSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *statefulSets) UpdateStatus(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + SubResource("status"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. +func (c *statefulSets) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *statefulSets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched statefulSet. +func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("statefulsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go index a520f87fc..4d882e26e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,17 +14,20 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/apps/v1beta1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" rest "k8s.io/client-go/rest" ) type AppsV1beta1Interface interface { RESTClient() rest.Interface + ControllerRevisionsGetter DeploymentsGetter ScalesGetter StatefulSetsGetter @@ -35,6 +38,10 @@ type AppsV1beta1Client struct { restClient rest.Interface } +func (c *AppsV1beta1Client) ControllerRevisions(namespace string) ControllerRevisionInterface { + return newControllerRevisions(c, namespace) +} + func (c *AppsV1beta1Client) Deployments(namespace string) DeploymentInterface { return newDeployments(c, namespace) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go new file mode 100644 index 000000000..ec8fa9242 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go @@ -0,0 +1,157 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. +// A group's client should implement this interface. +type ControllerRevisionsGetter interface { + ControllerRevisions(namespace string) ControllerRevisionInterface +} + +// ControllerRevisionInterface has methods to work with ControllerRevision resources. 
+type ControllerRevisionInterface interface { + Create(*v1beta1.ControllerRevision) (*v1beta1.ControllerRevision, error) + Update(*v1beta1.ControllerRevision) (*v1beta1.ControllerRevision, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ControllerRevision, error) + List(opts v1.ListOptions) (*v1beta1.ControllerRevisionList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) + ControllerRevisionExpansion +} + +// controllerRevisions implements ControllerRevisionInterface +type controllerRevisions struct { + client rest.Interface + ns string +} + +// newControllerRevisions returns a ControllerRevisions +func newControllerRevisions(c *AppsV1beta1Client, namespace string) *controllerRevisions { + return &controllerRevisions{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. +func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { + result = &v1beta1.ControllerRevision{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. +func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { + result = &v1beta1.ControllerRevisionList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested controllerRevisions. +func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Create(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { + result = &v1beta1.ControllerRevision{} + err = c.client.Post(). + Namespace(c.ns). + Resource("controllerrevisions"). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Update(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { + result = &v1beta1.ControllerRevision{} + err = c.client.Put(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(controllerRevision.Name). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. +func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). 
+ Resource("controllerrevisions"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched controllerRevision. +func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) { + result = &v1beta1.ControllerRevision{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("controllerrevisions"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go index b30965ee7..365e06f3f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" rest "k8s.io/client-go/rest" ) @@ -59,6 +61,41 @@ func newDeployments(c *AppsV1beta1Client, namespace string) *deployments { } } +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + result = &v1beta1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. 
func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} @@ -85,7 +122,7 @@ func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.De } // UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} @@ -122,41 +159,6 @@ func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1 Error() } -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - result = &v1beta1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched deployment. func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go index 11b523897..771101956 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go index 2a0bede6d..2ff602be9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( @@ -26,6 +28,10 @@ type FakeAppsV1beta1 struct { *testing.Fake } +func (c *FakeAppsV1beta1) ControllerRevisions(namespace string) v1beta1.ControllerRevisionInterface { + return &FakeControllerRevisions{c, namespace} +} + func (c *FakeAppsV1beta1) Deployments(namespace string) v1beta1.DeploymentInterface { return &FakeDeployments{c, namespace} } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go new file mode 100644 index 000000000..924194891 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go @@ -0,0 +1,128 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeControllerRevisions implements ControllerRevisionInterface +type FakeControllerRevisions struct { + Fake *FakeAppsV1beta1 + ns string +} + +var controllerrevisionsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "controllerrevisions"} + +var controllerrevisionsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta1", Kind: "ControllerRevision"} + +// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. +func (c *FakeControllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1beta1.ControllerRevision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ControllerRevision), err +} + +// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. +func (c *FakeControllerRevisions) List(opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1beta1.ControllerRevisionList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ControllerRevisionList{ListMeta: obj.(*v1beta1.ControllerRevisionList).ListMeta} + for _, item := range obj.(*v1beta1.ControllerRevisionList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested controllerRevisions. +func (c *FakeControllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) + +} + +// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *FakeControllerRevisions) Create(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ControllerRevision), err +} + +// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *FakeControllerRevisions) Update(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ControllerRevision), err +} + +// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. +func (c *FakeControllerRevisions) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(controllerrevisionsResource, c.ns, name), &v1beta1.ControllerRevision{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ControllerRevisionList{}) + return err +} + +// Patch applies the patch and returns the patched controllerRevision. +func (c *FakeControllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, data, subresources...), &v1beta1.ControllerRevision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ControllerRevision), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go index a929ac8b0..c4749c52b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" testing "k8s.io/client-go/testing" ) @@ -34,50 +36,9 @@ type FakeDeployments struct { var deploymentsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "deployments"} -func (c *FakeDeployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -func (c *FakeDeployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -func (c *FakeDeployments) UpdateStatus(deployment *v1beta1.Deployment) (*v1beta1.Deployment, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) - - return err -} - -func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) - return err -} +var deploymentsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta1", Kind: "Deployment"} +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. 
Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) @@ -88,9 +49,10 @@ func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1bet return obj.(*v1beta1.Deployment), err } +// List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(deploymentsResource, c.ns, opts), &v1beta1.DeploymentList{}) + Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta1.DeploymentList{}) if obj == nil { return nil, err @@ -100,7 +62,7 @@ func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentL if label == nil { label = labels.Everything() } - list := &v1beta1.DeploymentList{} + list := &v1beta1.DeploymentList{ListMeta: obj.(*v1beta1.DeploymentList).ListMeta} for _, item := range obj.(*v1beta1.DeploymentList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -116,6 +78,56 @@ func (c *FakeDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { } +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *FakeDeployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *FakeDeployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeDeployments) UpdateStatus(deployment *v1beta1.Deployment) (*v1beta1.Deployment, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) + return err +} + // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. 
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go index 8abb429ac..de71947e5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake // FakeScales implements ScaleInterface diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go index 430e4a224..b0f194a7d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" testing "k8s.io/client-go/testing" ) @@ -34,50 +36,9 @@ type FakeStatefulSets struct { var statefulsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "statefulsets"} -func (c *FakeStatefulSets) Create(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StatefulSet), err -} - -func (c *FakeStatefulSets) Update(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StatefulSet), err -} - -func (c *FakeStatefulSets) UpdateStatus(statefulSet *v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StatefulSet), err -} - -func (c *FakeStatefulSets) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(statefulsetsResource, c.ns, name), &v1beta1.StatefulSet{}) - - return err -} - -func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.StatefulSetList{}) - return err -} +var statefulsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta1", Kind: "StatefulSet"} +// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. func (c *FakeStatefulSets) Get(name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1beta1.StatefulSet{}) @@ -88,9 +49,10 @@ func (c *FakeStatefulSets) Get(name string, options v1.GetOptions) (result *v1be return obj.(*v1beta1.StatefulSet), err } +// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *FakeStatefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(statefulsetsResource, c.ns, opts), &v1beta1.StatefulSetList{}) + Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1beta1.StatefulSetList{}) if obj == nil { return nil, err @@ -100,7 +62,7 @@ func (c *FakeStatefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSe if label == nil { label = labels.Everything() } - list := &v1beta1.StatefulSetList{} + list := &v1beta1.StatefulSetList{ListMeta: obj.(*v1beta1.StatefulSetList).ListMeta} for _, item := range obj.(*v1beta1.StatefulSetList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -116,6 +78,56 @@ func (c *FakeStatefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { } +// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *FakeStatefulSets) Create(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StatefulSet), err +} + +// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *FakeStatefulSets) Update(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StatefulSet), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeStatefulSets) UpdateStatus(statefulSet *v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta1.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StatefulSet), err +} + +// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. 
+func (c *FakeStatefulSets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(statefulsetsResource, c.ns, name), &v1beta1.StatefulSet{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.StatefulSetList{}) + return err +} + // Patch applies the patch and returns the patched statefulSet. func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go index deca5c866..b2bfd73a7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,8 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 +type ControllerRevisionExpansion interface{} + type DeploymentExpansion interface{} type ScaleExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go index d3bf9e103..cef27bd14 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go index 8b5a7822f..651745451 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package v1beta1 import ( + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" rest "k8s.io/client-go/rest" ) @@ -59,6 +61,41 @@ func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets { } } +// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. +func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. +func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { + result = &v1beta1.StatefulSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested statefulSets. +func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. func (c *statefulSets) Create(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { result = &v1beta1.StatefulSet{} @@ -85,7 +122,7 @@ func (c *statefulSets) Update(statefulSet *v1beta1.StatefulSet) (result *v1beta1 } // UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *statefulSets) UpdateStatus(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { result = &v1beta1.StatefulSet{} @@ -122,41 +159,6 @@ func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v Error() } -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { - result = &v1beta1.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched statefulSet. func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) { result = &v1beta1.StatefulSet{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go new file mode 100644 index 000000000..27549499f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go @@ -0,0 +1,115 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AppsV1beta2Interface interface { + RESTClient() rest.Interface + ControllerRevisionsGetter + DaemonSetsGetter + DeploymentsGetter + ReplicaSetsGetter + ScalesGetter + StatefulSetsGetter +} + +// AppsV1beta2Client is used to interact with features provided by the apps group. +type AppsV1beta2Client struct { + restClient rest.Interface +} + +func (c *AppsV1beta2Client) ControllerRevisions(namespace string) ControllerRevisionInterface { + return newControllerRevisions(c, namespace) +} + +func (c *AppsV1beta2Client) DaemonSets(namespace string) DaemonSetInterface { + return newDaemonSets(c, namespace) +} + +func (c *AppsV1beta2Client) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *AppsV1beta2Client) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + +func (c *AppsV1beta2Client) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + +func (c *AppsV1beta2Client) StatefulSets(namespace string) StatefulSetInterface { + return newStatefulSets(c, namespace) +} + +// NewForConfig creates a new AppsV1beta2Client for the given config. +func NewForConfig(c *rest.Config) (*AppsV1beta2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AppsV1beta2Client{client}, nil +} + +// NewForConfigOrDie creates a new AppsV1beta2Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AppsV1beta2Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AppsV1beta2Client for the given RESTClient. 
+func New(c rest.Interface) *AppsV1beta2Client { + return &AppsV1beta2Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta2.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AppsV1beta2Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go new file mode 100644 index 000000000..1271cc623 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go @@ -0,0 +1,157 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. +// A group's client should implement this interface. +type ControllerRevisionsGetter interface { + ControllerRevisions(namespace string) ControllerRevisionInterface +} + +// ControllerRevisionInterface has methods to work with ControllerRevision resources. +type ControllerRevisionInterface interface { + Create(*v1beta2.ControllerRevision) (*v1beta2.ControllerRevision, error) + Update(*v1beta2.ControllerRevision) (*v1beta2.ControllerRevision, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta2.ControllerRevision, error) + List(opts v1.ListOptions) (*v1beta2.ControllerRevisionList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) + ControllerRevisionExpansion +} + +// controllerRevisions implements ControllerRevisionInterface +type controllerRevisions struct { + client rest.Interface + ns string +} + +// newControllerRevisions returns a ControllerRevisions +func newControllerRevisions(c *AppsV1beta2Client, namespace string) *controllerRevisions { + return &controllerRevisions{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. 
+func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { + result = &v1beta2.ControllerRevision{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. +func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { + result = &v1beta2.ControllerRevisionList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested controllerRevisions. +func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Create(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { + result = &v1beta2.ControllerRevision{} + err = c.client.Post(). + Namespace(c.ns). + Resource("controllerrevisions"). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Update(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { + result = &v1beta2.ControllerRevision{} + err = c.client.Put(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(controllerRevision.Name). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. +func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched controllerRevision. +func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) { + result = &v1beta2.ControllerRevision{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("controllerrevisions"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go new file mode 100644 index 000000000..683c06812 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// DaemonSetsGetter has a method to return a DaemonSetInterface. +// A group's client should implement this interface. +type DaemonSetsGetter interface { + DaemonSets(namespace string) DaemonSetInterface +} + +// DaemonSetInterface has methods to work with DaemonSet resources. +type DaemonSetInterface interface { + Create(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) + Update(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) + UpdateStatus(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta2.DaemonSet, error) + List(opts v1.ListOptions) (*v1beta2.DaemonSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) + DaemonSetExpansion +} + +// daemonSets implements DaemonSetInterface +type daemonSets struct { + client rest.Interface + ns string +} + +// newDaemonSets returns a DaemonSets +func newDaemonSets(c *AppsV1beta2Client, namespace string) *daemonSets { + return &daemonSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { + result = &v1beta2.DaemonSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. +func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { + result = &v1beta2.DaemonSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Create(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { + result = &v1beta2.DaemonSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("daemonsets"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Update(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { + result = &v1beta2.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + Body(daemonSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *daemonSets) UpdateStatus(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { + result = &v1beta2.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + SubResource("status"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched daemonSet. +func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) { + result = &v1beta2.DaemonSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("daemonsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go new file mode 100644 index 000000000..9a04513f1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + Create(*v1beta2.Deployment) (*v1beta2.Deployment, error) + Update(*v1beta2.Deployment) (*v1beta2.Deployment, error) + UpdateStatus(*v1beta2.Deployment) (*v1beta2.Deployment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta2.Deployment, error) + List(opts v1.ListOptions) (*v1beta2.DeploymentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *AppsV1beta2Client, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { + result = &v1beta2.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { + result = &v1beta2.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { + result = &v1beta2.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { + result = &v1beta2.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { + result = &v1beta2.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) { + result = &v1beta2.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/apimachinery/pkg/openapi/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go similarity index 76% rename from vendor/k8s.io/apimachinery/pkg/openapi/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go index 5ed572cc1..56518ef7f 100644 --- a/vendor/k8s.io/apimachinery/pkg/openapi/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// package openapi holds shared codes and types between open API code generator and spec generator. -package openapi +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta2 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go new file mode 100644 index 000000000..f7d79d352 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go @@ -0,0 +1,60 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAppsV1beta2 struct { + *testing.Fake +} + +func (c *FakeAppsV1beta2) ControllerRevisions(namespace string) v1beta2.ControllerRevisionInterface { + return &FakeControllerRevisions{c, namespace} +} + +func (c *FakeAppsV1beta2) DaemonSets(namespace string) v1beta2.DaemonSetInterface { + return &FakeDaemonSets{c, namespace} +} + +func (c *FakeAppsV1beta2) Deployments(namespace string) v1beta2.DeploymentInterface { + return &FakeDeployments{c, namespace} +} + +func (c *FakeAppsV1beta2) ReplicaSets(namespace string) v1beta2.ReplicaSetInterface { + return &FakeReplicaSets{c, namespace} +} + +func (c *FakeAppsV1beta2) Scales(namespace string) v1beta2.ScaleInterface { + return &FakeScales{c, namespace} +} + +func (c *FakeAppsV1beta2) StatefulSets(namespace string) v1beta2.StatefulSetInterface { + return &FakeStatefulSets{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAppsV1beta2) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go new file mode 100644 index 000000000..954ac35df --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go @@ -0,0 +1,128 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeControllerRevisions implements ControllerRevisionInterface +type FakeControllerRevisions struct { + Fake *FakeAppsV1beta2 + ns string +} + +var controllerrevisionsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "controllerrevisions"} + +var controllerrevisionsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ControllerRevision"} + +// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. +func (c *FakeControllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1beta2.ControllerRevision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.ControllerRevision), err +} + +// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. +func (c *FakeControllerRevisions) List(opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1beta2.ControllerRevisionList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta2.ControllerRevisionList{ListMeta: obj.(*v1beta2.ControllerRevisionList).ListMeta} + for _, item := range obj.(*v1beta2.ControllerRevisionList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested controllerRevisions. +func (c *FakeControllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) + +} + +// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *FakeControllerRevisions) Create(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.ControllerRevision), err +} + +// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *FakeControllerRevisions) Update(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.ControllerRevision), err +} + +// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. 
+func (c *FakeControllerRevisions) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(controllerrevisionsResource, c.ns, name), &v1beta2.ControllerRevision{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta2.ControllerRevisionList{}) + return err +} + +// Patch applies the patch and returns the patched controllerRevision. +func (c *FakeControllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, data, subresources...), &v1beta2.ControllerRevision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.ControllerRevision), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go new file mode 100644 index 000000000..38a147550 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go @@ -0,0 +1,140 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeDaemonSets implements DaemonSetInterface +type FakeDaemonSets struct { + Fake *FakeAppsV1beta2 + ns string +} + +var daemonsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "daemonsets"} + +var daemonsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "DaemonSet"} + +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *FakeDaemonSets) Get(name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1beta2.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.DaemonSet), err +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. +func (c *FakeDaemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1beta2.DaemonSetList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta2.DaemonSetList{ListMeta: obj.(*v1beta2.DaemonSetList).ListMeta} + for _, item := range obj.(*v1beta2.DaemonSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *FakeDaemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) + +} + +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *FakeDaemonSets) Create(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.DaemonSet), err +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *FakeDaemonSets) Update(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.DaemonSet), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeDaemonSets) UpdateStatus(daemonSet *v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta2.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.DaemonSet), err +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *FakeDaemonSets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(daemonsetsResource, c.ns, name), &v1beta2.DaemonSet{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta2.DaemonSetList{}) + return err +} + +// Patch applies the patch and returns the patched daemonSet. +func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, data, subresources...), &v1beta2.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.DaemonSet), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go new file mode 100644 index 000000000..cae232242 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go @@ -0,0 +1,140 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeDeployments implements DeploymentInterface +type FakeDeployments struct { + Fake *FakeAppsV1beta2 + ns string +} + +var deploymentsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "deployments"} + +var deploymentsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta2.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.Deployment), err +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta2.DeploymentList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta2.DeploymentList{ListMeta: obj.(*v1beta2.DeploymentList).ListMeta} + for _, item := range obj.(*v1beta2.DeploymentList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *FakeDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) + +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *FakeDeployments) Create(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.Deployment), err +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *FakeDeployments) Update(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.Deployment), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeDeployments) UpdateStatus(deployment *v1beta2.Deployment) (*v1beta2.Deployment, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta2.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.Deployment), err +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta2.Deployment{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta2.DeploymentList{}) + return err +} + +// Patch applies the patch and returns the patched deployment. +func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &v1beta2.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.Deployment), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go new file mode 100644 index 000000000..05fa78931 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go @@ -0,0 +1,140 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeReplicaSets implements ReplicaSetInterface +type FakeReplicaSets struct { + Fake *FakeAppsV1beta2 + ns string +} + +var replicasetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "replicasets"} + +var replicasetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *FakeReplicaSets) Get(name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1beta2.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.ReplicaSet), err +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *FakeReplicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1beta2.ReplicaSetList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta2.ReplicaSetList{ListMeta: obj.(*v1beta2.ReplicaSetList).ListMeta} + for _, item := range obj.(*v1beta2.ReplicaSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *FakeReplicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) + +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *FakeReplicaSets) Create(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.ReplicaSet), err +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *FakeReplicaSets) Update(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.ReplicaSet), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeReplicaSets) UpdateStatus(replicaSet *v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta2.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.ReplicaSet), err +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *FakeReplicaSets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(replicasetsResource, c.ns, name), &v1beta2.ReplicaSet{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta2.ReplicaSetList{}) + return err +} + +// Patch applies the patch and returns the patched replicaSet. +func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, data, subresources...), &v1beta2.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.ReplicaSet), err +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/defaults.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go similarity index 73% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1/defaults.go rename to vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go index d63d91754..b06b7e8e3 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/defaults.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1 +// Code generated by client-gen. DO NOT EDIT. -import ( - "k8s.io/apimachinery/pkg/runtime" -) +package fake -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return scheme.AddDefaultingFuncs() +// FakeScales implements ScaleInterface +type FakeScales struct { + Fake *FakeAppsV1beta2 + ns string } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go new file mode 100644 index 000000000..fe7851286 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go @@ -0,0 +1,162 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeStatefulSets implements StatefulSetInterface +type FakeStatefulSets struct { + Fake *FakeAppsV1beta2 + ns string +} + +var statefulsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "statefulsets"} + +var statefulsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "StatefulSet"} + +// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. +func (c *FakeStatefulSets) Get(name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1beta2.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.StatefulSet), err +} + +// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. +func (c *FakeStatefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1beta2.StatefulSetList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta2.StatefulSetList{ListMeta: obj.(*v1beta2.StatefulSetList).ListMeta} + for _, item := range obj.(*v1beta2.StatefulSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested statefulSets. +func (c *FakeStatefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) + +} + +// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *FakeStatefulSets) Create(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.StatefulSet), err +} + +// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *FakeStatefulSets) Update(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.StatefulSet), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeStatefulSets) UpdateStatus(statefulSet *v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta2.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.StatefulSet), err +} + +// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. +func (c *FakeStatefulSets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(statefulsetsResource, c.ns, name), &v1beta2.StatefulSet{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta2.StatefulSetList{}) + return err +} + +// Patch applies the patch and returns the patched statefulSet. +func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, data, subresources...), &v1beta2.StatefulSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.StatefulSet), err +} + +// GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any. +func (c *FakeStatefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(statefulsetsResource, c.ns, "scale", statefulSetName), &v1beta2.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.Scale), err +} + +// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *FakeStatefulSets) UpdateScale(statefulSetName string, scale *v1beta2.Scale) (result *v1beta2.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &v1beta2.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta2.Scale), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go new file mode 100644 index 000000000..bceae5986 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go @@ -0,0 +1,31 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta2 + +type ControllerRevisionExpansion interface{} + +type DaemonSetExpansion interface{} + +type DeploymentExpansion interface{} + +type ReplicaSetExpansion interface{} + +type ScaleExpansion interface{} + +type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go new file mode 100644 index 000000000..9fd9de930 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ReplicaSetsGetter has a method to return a ReplicaSetInterface. +// A group's client should implement this interface. +type ReplicaSetsGetter interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. +type ReplicaSetInterface interface { + Create(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) + Update(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) + UpdateStatus(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta2.ReplicaSet, error) + List(opts v1.ListOptions) (*v1beta2.ReplicaSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) + ReplicaSetExpansion +} + +// replicaSets implements ReplicaSetInterface +type replicaSets struct { + client rest.Interface + ns string +} + +// newReplicaSets returns a ReplicaSets +func newReplicaSets(c *AppsV1beta2Client, namespace string) *replicaSets { + return &replicaSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { + result = &v1beta2.ReplicaSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { + result = &v1beta2.ReplicaSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Create(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { + result = &v1beta2.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { + result = &v1beta2.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + Body(replicaSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *replicaSets) UpdateStatus(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { + result = &v1beta2.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched replicaSet. +func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) { + result = &v1beta2.ReplicaSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicasets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go new file mode 100644 index 000000000..f8d6a7fb0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + rest "k8s.io/client-go/rest" +) + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. +type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client rest.Interface + ns string +} + +// newScales returns a Scales +func newScales(c *AppsV1beta2Client, namespace string) *scales { + return &scales{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go new file mode 100644 index 000000000..095601e15 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// StatefulSetsGetter has a method to return a StatefulSetInterface. +// A group's client should implement this interface. +type StatefulSetsGetter interface { + StatefulSets(namespace string) StatefulSetInterface +} + +// StatefulSetInterface has methods to work with StatefulSet resources. 
+type StatefulSetInterface interface { + Create(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) + Update(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) + UpdateStatus(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta2.StatefulSet, error) + List(opts v1.ListOptions) (*v1beta2.StatefulSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) + GetScale(statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error) + UpdateScale(statefulSetName string, scale *v1beta2.Scale) (*v1beta2.Scale, error) + + StatefulSetExpansion +} + +// statefulSets implements StatefulSetInterface +type statefulSets struct { + client rest.Interface + ns string +} + +// newStatefulSets returns a StatefulSets +func newStatefulSets(c *AppsV1beta2Client, namespace string) *statefulSets { + return &statefulSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. +func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { + result = &v1beta2.StatefulSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. +func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { + result = &v1beta2.StatefulSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested statefulSets. +func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Create(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { + result = &v1beta2.StatefulSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("statefulsets"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Update(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { + result = &v1beta2.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + Body(statefulSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *statefulSets) UpdateStatus(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { + result = &v1beta2.StatefulSet{} + err = c.client.Put(). 
+ Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + SubResource("status"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. +func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched statefulSet. +func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) { + result = &v1beta2.StatefulSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("statefulsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} + +// GetScale takes name of the statefulSet, and returns the corresponding v1beta2.Scale object, and an error if there is any. +func (c *statefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { + result = &v1beta2.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSetName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *statefulSets) UpdateScale(statefulSetName string, scale *v1beta2.Scale) (result *v1beta2.Scale, err error) { + result = &v1beta2.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSetName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go index bd53fc249..3bdcee598 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
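The apps/v1beta2 typed client vendored above (ReplicaSets, Scales, StatefulSets) follows the usual client-gen shape: a namespaced getter on the clientset returns an interface whose methods wrap the REST verbs shown in these files. A minimal usage sketch, assuming an in-cluster rest.Config; the namespace "monitoring" and the StatefulSet name "prometheus" are illustrative only, and error handling is reduced to panics for brevity:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Build a clientset from the in-cluster configuration; any rest.Config works the same way.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// List ReplicaSets in one namespace through the apps/v1beta2 typed client.
	rsList, err := clientset.AppsV1beta2().ReplicaSets("monitoring").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, rs := range rsList.Items {
		fmt.Println(rs.Name)
	}

	// Read the scale subresource of a StatefulSet via the generated GetScale helper.
	scale, err := clientset.AppsV1beta2().StatefulSets("monitoring").GetScale("prometheus", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("replicas:", scale.Spec.Replicas)
}

The pre-1.12 method signatures used here take options by value and no context argument, matching the generated interfaces in this vendor drop.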
+ package v1 import ( + v1 "k8s.io/api/authentication/v1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/apis/authentication/v1" rest "k8s.io/client-go/rest" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go index 54673bfa7..3af5d054f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go index 85eb00ddf..ee06a6cdd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go index b1c527a71..e2a7f72b6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ +// Code generated by client-gen. DO NOT EDIT. + package fake // FakeTokenReviews implements TokenReviewInterface diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go index 7bd524598..7008c927c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package fake import ( - authenticationapi "k8s.io/client-go/pkg/apis/authentication/v1" + authenticationapi "k8s.io/api/authentication/v1" core "k8s.io/client-go/testing" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go index 42e76d5e4..177209ec6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,4 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go index 9cfef4e6a..25a8d6a17 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go index fb41782fe..ea21f1b4a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package v1 import ( - authenticationapi "k8s.io/client-go/pkg/apis/authentication/v1" + authenticationapi "k8s.io/api/authentication/v1" ) type TokenReviewExpansion interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go index 419dc2cb6..7f3334a0c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/authentication/v1beta1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/authentication/v1beta1" rest "k8s.io/client-go/rest" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go index 11b523897..771101956 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go index 8f66d8c5b..7299653ca 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go index e8c57210b..63b6b6a85 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake // FakeTokenReviews implements TokenReviewInterface diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go index e27e09290..92ef5d1a1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package fake import ( - authenticationapi "k8s.io/client-go/pkg/apis/authentication/v1beta1" + authenticationapi "k8s.io/api/authentication/v1beta1" core "k8s.io/client-go/testing" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go index 2b7e8ca0b..f6df76963 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,4 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go index 7f9f1e9fa..0ac3561e1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
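The authentication typed clients above now take their API types from k8s.io/api/authentication/... rather than the old k8s.io/client-go/pkg/apis/... tree, while the expansion's Create method is unchanged. A minimal sketch of submitting a v1 TokenReview through the upgraded client; the clientset is assumed to be constructed as in the earlier example, the function name is illustrative, and the token value is a placeholder:

package example

import (
	authv1 "k8s.io/api/authentication/v1"
	"k8s.io/client-go/kubernetes"
)

// reviewToken asks the API server to validate a bearer token using the
// Create method provided by the TokenReview expansion interface.
func reviewToken(clientset *kubernetes.Clientset, token string) (bool, error) {
	review := &authv1.TokenReview{
		Spec: authv1.TokenReviewSpec{Token: token},
	}
	result, err := clientset.AuthenticationV1().TokenReviews().Create(review)
	if err != nil {
		return false, err
	}
	return result.Status.Authenticated, nil
}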
+ package v1beta1 import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go index 375b6f637..8f186fa76 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - authenticationapi "k8s.io/client-go/pkg/apis/authentication/v1beta1" + authenticationapi "k8s.io/api/authentication/v1beta1" ) type TokenReviewExpansion interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go index af2924a30..e84b90084 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/authorization/v1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/apis/authorization/v1" rest "k8s.io/client-go/rest" ) @@ -27,6 +29,7 @@ type AuthorizationV1Interface interface { RESTClient() rest.Interface LocalSubjectAccessReviewsGetter SelfSubjectAccessReviewsGetter + SelfSubjectRulesReviewsGetter SubjectAccessReviewsGetter } @@ -43,6 +46,10 @@ func (c *AuthorizationV1Client) SelfSubjectAccessReviews() SelfSubjectAccessRevi return newSelfSubjectAccessReviews(c) } +func (c *AuthorizationV1Client) SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface { + return newSelfSubjectRulesReviews(c) +} + func (c *AuthorizationV1Client) SubjectAccessReviews() SubjectAccessReviewInterface { return newSubjectAccessReviews(c) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go index 54673bfa7..3af5d054f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. 
package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go index 5107d5fc8..f7e823450 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( @@ -34,6 +36,10 @@ func (c *FakeAuthorizationV1) SelfSubjectAccessReviews() v1.SelfSubjectAccessRev return &FakeSelfSubjectAccessReviews{c} } +func (c *FakeAuthorizationV1) SelfSubjectRulesReviews() v1.SelfSubjectRulesReviewInterface { + return &FakeSelfSubjectRulesReviews{c} +} + func (c *FakeAuthorizationV1) SubjectAccessReviews() v1.SubjectAccessReviewInterface { return &FakeSubjectAccessReviews{c} } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go index a49fc9b72..778ba9cea 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake // FakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go index e587d2032..a01e415c8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package fake import ( - authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1" + authorizationapi "k8s.io/api/authorization/v1" core "k8s.io/client-go/testing" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go index 26d9011b4..a43a980ba 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake // FakeSelfSubjectAccessReviews implements SelfSubjectAccessReviewInterface diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go index e0bbcb1ef..91acbe029 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package fake import ( - authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1" + authorizationapi "k8s.io/api/authorization/v1" core "k8s.io/client-go/testing" ) diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/conversion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go similarity index 69% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/conversion.go rename to vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go index 51f3adfc7..243f2e89e 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/conversion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,13 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +// Code generated by client-gen. DO NOT EDIT. 
-import ( - "k8s.io/apimachinery/pkg/runtime" -) +package fake -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - return scheme.AddConversionFuncs() +// FakeSelfSubjectRulesReviews implements SelfSubjectRulesReviewInterface +type FakeSelfSubjectRulesReviews struct { + Fake *FakeAuthorizationV1 } diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go similarity index 54% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.defaults.go rename to vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go index 6df448eb9..a6dc95134 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go @@ -1,5 +1,3 @@ -// +build !ignore_autogenerated - /* Copyright 2017 The Kubernetes Authors. @@ -16,17 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by defaulter-gen. Do not edit it manually! - -package v1 +package fake import ( - runtime "k8s.io/apimachinery/pkg/runtime" + authorizationapi "k8s.io/api/authorization/v1" + core "k8s.io/client-go/testing" ) -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - return nil +func (c *FakeSelfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { + obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectrulesreviews"), srr), &authorizationapi.SelfSubjectRulesReview{}) + return obj.(*authorizationapi.SelfSubjectRulesReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go index 778d06e59..d07e56254 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake // FakeSubjectAccessReviews implements SubjectAccessReviewInterface diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go index 823d1542c..7ddc58ff4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. 
package fake import ( - authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1" + authorizationapi "k8s.io/api/authorization/v1" core "k8s.io/client-go/testing" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go index 42e76d5e4..177209ec6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,4 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go index b2085bceb..0292c7861 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go index c3b487c72..0c123b07c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package v1 import ( - authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1" + authorizationapi "k8s.io/api/authorization/v1" ) type LocalSubjectAccessReviewExpansion interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go index cfb019eaa..1e3a45817 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package v1 import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go index 107615080..5b70a27dd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package v1 import ( - authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1" + authorizationapi "k8s.io/api/authorization/v1" ) type SelfSubjectAccessReviewExpansion interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go new file mode 100644 index 000000000..50a0233eb --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. +// A group's client should implement this interface. +type SelfSubjectRulesReviewsGetter interface { + SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface +} + +// SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. +type SelfSubjectRulesReviewInterface interface { + SelfSubjectRulesReviewExpansion +} + +// selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface +type selfSubjectRulesReviews struct { + client rest.Interface +} + +// newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews +func newSelfSubjectRulesReviews(c *AuthorizationV1Client) *selfSubjectRulesReviews { + return &selfSubjectRulesReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go similarity index 53% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.defaults.go rename to vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go index 6df448eb9..e2cad880e 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go @@ -1,5 +1,3 @@ -// +build !ignore_autogenerated - /* Copyright 2017 The Kubernetes Authors. @@ -16,17 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by defaulter-gen. Do not edit it manually! 
- package v1 import ( - runtime "k8s.io/apimachinery/pkg/runtime" + authorizationapi "k8s.io/api/authorization/v1" ) -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - return nil +type SelfSubjectRulesReviewExpansion interface { + Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) +} + +func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { + result = &authorizationapi.SelfSubjectRulesReview{} + err = c.client.Post(). + Resource("selfsubjectrulesreviews"). + Body(srr). + Do(). + Into(result) + return } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go index 08f6d6095..9c09008c3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go index dfdf6521a..b5ed87d30 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package v1 import ( - authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1" + authorizationapi "k8s.io/api/authorization/v1" ) // The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go index b49b3b30b..7f236f6e3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
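Besides the import-path move, this upgrade wires a new SelfSubjectRulesReview client into both the v1 (above) and v1beta1 (below) authorization groups; it is create-only and exposed through an expansion interface. A minimal sketch of querying the caller's own permitted rules with the v1 client; the function name and namespace argument are illustrative, and the clientset is assumed to exist as in the earlier examples:

package example

import (
	"fmt"

	authzv1 "k8s.io/api/authorization/v1"
	"k8s.io/client-go/kubernetes"
)

// listMyRules asks the API server which actions the current credentials may
// perform in the given namespace, via the new SelfSubjectRulesReviews client.
func listMyRules(clientset *kubernetes.Clientset, namespace string) error {
	review := &authzv1.SelfSubjectRulesReview{
		Spec: authzv1.SelfSubjectRulesReviewSpec{Namespace: namespace},
	}
	result, err := clientset.AuthorizationV1().SelfSubjectRulesReviews().Create(review)
	if err != nil {
		return err
	}
	for _, rule := range result.Status.ResourceRules {
		fmt.Println(rule.Verbs, rule.Resources)
	}
	return nil
}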
+ package v1beta1 import ( + v1beta1 "k8s.io/api/authorization/v1beta1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/authorization/v1beta1" rest "k8s.io/client-go/rest" ) @@ -27,6 +29,7 @@ type AuthorizationV1beta1Interface interface { RESTClient() rest.Interface LocalSubjectAccessReviewsGetter SelfSubjectAccessReviewsGetter + SelfSubjectRulesReviewsGetter SubjectAccessReviewsGetter } @@ -43,6 +46,10 @@ func (c *AuthorizationV1beta1Client) SelfSubjectAccessReviews() SelfSubjectAcces return newSelfSubjectAccessReviews(c) } +func (c *AuthorizationV1beta1Client) SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface { + return newSelfSubjectRulesReviews(c) +} + func (c *AuthorizationV1beta1Client) SubjectAccessReviews() SubjectAccessReviewInterface { return newSubjectAccessReviews(c) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go index 11b523897..771101956 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go index 89b0b2d78..8e328a57b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( @@ -34,6 +36,10 @@ func (c *FakeAuthorizationV1beta1) SelfSubjectAccessReviews() v1beta1.SelfSubjec return &FakeSelfSubjectAccessReviews{c} } +func (c *FakeAuthorizationV1beta1) SelfSubjectRulesReviews() v1beta1.SelfSubjectRulesReviewInterface { + return &FakeSelfSubjectRulesReviews{c} +} + func (c *FakeAuthorizationV1beta1) SubjectAccessReviews() v1beta1.SubjectAccessReviewInterface { return &FakeSubjectAccessReviews{c} } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go index 11987f125..d02d05e5d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake // FakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go index 0db666db0..5211628f2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package fake import ( - authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1beta1" + authorizationapi "k8s.io/api/authorization/v1beta1" core "k8s.io/client-go/testing" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go index aeba43895..8f98ce7a3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake // FakeSelfSubjectAccessReviews implements SelfSubjectAccessReviewInterface diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go index d1f775638..6e3af12a7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package fake import ( - authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1beta1" + authorizationapi "k8s.io/api/authorization/v1beta1" core "k8s.io/client-go/testing" ) diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go similarity index 68% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go rename to vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go index 2ff5732d6..d8466b4c8 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,13 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1 +// Code generated by client-gen. DO NOT EDIT. -import ( - "k8s.io/apimachinery/pkg/runtime" -) +package fake -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - return scheme.AddConversionFuncs() +// FakeSelfSubjectRulesReviews implements SelfSubjectRulesReviewInterface +type FakeSelfSubjectRulesReviews struct { + Fake *FakeAuthorizationV1beta1 } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go new file mode 100644 index 000000000..f92ffd717 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + authorizationapi "k8s.io/api/authorization/v1beta1" + core "k8s.io/client-go/testing" +) + +func (c *FakeSelfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { + obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectrulesreviews"), srr), &authorizationapi.SelfSubjectRulesReview{}) + return obj.(*authorizationapi.SelfSubjectRulesReview), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go index 4413c6d34..0d0abdb72 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake // FakeSubjectAccessReviews implements SubjectAccessReviewInterface diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go index 58ced8eca..b0b18b099 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package fake import ( - authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1beta1" + authorizationapi "k8s.io/api/authorization/v1beta1" core "k8s.io/client-go/testing" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go index 2b7e8ca0b..f6df76963 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,4 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go index 9b8e10341..f5e86a76a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go index d2ce4f0d7..bf1b8a5f1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1beta1" + authorizationapi "k8s.io/api/authorization/v1beta1" ) type LocalSubjectAccessReviewExpansion interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go index 1ef3e49af..906712cc3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go index d341eb14a..58fecfd85 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1beta1" + authorizationapi "k8s.io/api/authorization/v1beta1" ) type SelfSubjectAccessReviewExpansion interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go new file mode 100644 index 000000000..56c0f99d4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. +// A group's client should implement this interface. +type SelfSubjectRulesReviewsGetter interface { + SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface +} + +// SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. +type SelfSubjectRulesReviewInterface interface { + SelfSubjectRulesReviewExpansion +} + +// selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface +type selfSubjectRulesReviews struct { + client rest.Interface +} + +// newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews +func newSelfSubjectRulesReviews(c *AuthorizationV1beta1Client) *selfSubjectRulesReviews { + return &selfSubjectRulesReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go similarity index 53% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.defaults.go rename to vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go index e24e70be3..5f1f37ef7 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go @@ -1,5 +1,3 @@ -// +build !ignore_autogenerated - /* Copyright 2017 The Kubernetes Authors. @@ -16,17 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by defaulter-gen. Do not edit it manually! - package v1beta1 import ( - runtime "k8s.io/apimachinery/pkg/runtime" + authorizationapi "k8s.io/api/authorization/v1beta1" ) -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - return nil +type SelfSubjectRulesReviewExpansion interface { + Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) +} + +func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { + result = &authorizationapi.SelfSubjectRulesReview{} + err = c.client.Post(). + Resource("selfsubjectrulesreviews"). + Body(srr). + Do(). + Into(result) + return } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go index cd60e9df6..79f1ec535 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package v1beta1 import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go index 8d03b0811..4f93689e8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1beta1" + authorizationapi "k8s.io/api/authorization/v1beta1" ) // The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go index b235891c9..2bd49e2db 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/autoscaling/v1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/apis/autoscaling/v1" rest "k8s.io/client-go/rest" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go index 54673bfa7..3af5d054f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. 
package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go index c3e0d3734..99e26fcf3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go index 66ef7be49..f774fc5dc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + autoscaling_v1 "k8s.io/api/autoscaling/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/apis/autoscaling/v1" testing "k8s.io/client-go/testing" ) @@ -34,63 +36,23 @@ type FakeHorizontalPodAutoscalers struct { var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autoscaling", Version: "v1", Resource: "horizontalpodautoscalers"} -func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { +var horizontalpodautoscalersKind = schema.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "HorizontalPodAutoscaler"} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *FakeHorizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *autoscaling_v1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &autoscaling_v1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*v1.HorizontalPodAutoscaler), err + return obj.(*autoscaling_v1.HorizontalPodAutoscaler), err } -func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. 
+func (c *FakeHorizontalPodAutoscalers) List(opts v1.ListOptions) (result *autoscaling_v1.HorizontalPodAutoscalerList, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.HorizontalPodAutoscaler), err -} - -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.HorizontalPodAutoscaler), err -} - -func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &v1.HorizontalPodAutoscaler{}) - - return err -} - -func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1.HorizontalPodAutoscalerList{}) - return err -} - -func (c *FakeHorizontalPodAutoscalers) Get(name string, options meta_v1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.HorizontalPodAutoscaler), err -} - -func (c *FakeHorizontalPodAutoscalers) List(opts meta_v1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(horizontalpodautoscalersResource, c.ns, opts), &v1.HorizontalPodAutoscalerList{}) + Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &autoscaling_v1.HorizontalPodAutoscalerList{}) if obj == nil { return nil, err @@ -100,8 +62,8 @@ func (c *FakeHorizontalPodAutoscalers) List(opts meta_v1.ListOptions) (result *v if label == nil { label = labels.Everything() } - list := &v1.HorizontalPodAutoscalerList{} - for _, item := range obj.(*v1.HorizontalPodAutoscalerList).Items { + list := &autoscaling_v1.HorizontalPodAutoscalerList{ListMeta: obj.(*autoscaling_v1.HorizontalPodAutoscalerList).ListMeta} + for _, item := range obj.(*autoscaling_v1.HorizontalPodAutoscalerList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -110,19 +72,69 @@ func (c *FakeHorizontalPodAutoscalers) List(opts meta_v1.ListOptions) (result *v } // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *FakeHorizontalPodAutoscalers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakeHorizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) } -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { +// Create takes the representation of a horizontalPodAutoscaler and creates it. 
Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *autoscaling_v1.HorizontalPodAutoscaler) (result *autoscaling_v1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &autoscaling_v1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*v1.HorizontalPodAutoscaler), err + return obj.(*autoscaling_v1.HorizontalPodAutoscaler), err +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *autoscaling_v1.HorizontalPodAutoscaler) (result *autoscaling_v1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &autoscaling_v1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscaling_v1.HorizontalPodAutoscaler), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *autoscaling_v1.HorizontalPodAutoscaler) (*autoscaling_v1.HorizontalPodAutoscaler, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &autoscaling_v1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscaling_v1.HorizontalPodAutoscaler), err +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &autoscaling_v1.HorizontalPodAutoscaler{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &autoscaling_v1.HorizontalPodAutoscalerList{}) + return err +} + +// Patch applies the patch and returns the patched horizontalPodAutoscaler. +func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *autoscaling_v1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &autoscaling_v1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscaling_v1.HorizontalPodAutoscaler), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go index effefbd50..c60028b05 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go index f9c790af2..2da25e96b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/autoscaling/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/apis/autoscaling/v1" rest "k8s.io/client-go/rest" ) @@ -59,6 +61,41 @@ func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *hori } } +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *horizontalPodAutoscalers) Get(name string, options meta_v1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(opts meta_v1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { + result = &v1.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { result = &v1.HorizontalPodAutoscaler{} @@ -85,7 +122,7 @@ func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1.Horizontal } // UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { result = &v1.HorizontalPodAutoscaler{} @@ -122,41 +159,6 @@ func (c *horizontalPodAutoscalers) DeleteCollection(options *meta_v1.DeleteOptio Error() } -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(name string, options meta_v1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(opts meta_v1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { - result = &v1.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched horizontalPodAutoscaler. func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { result = &v1.HorizontalPodAutoscaler{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go similarity index 60% rename from vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/autoscaling_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go index 0d16aa3e9..3a49b26b3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,31 +14,33 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v2alpha1 +// Code generated by client-gen. DO NOT EDIT. + +package v2beta1 import ( + v2beta1 "k8s.io/api/autoscaling/v2beta1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v2alpha1 "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1" rest "k8s.io/client-go/rest" ) -type AutoscalingV2alpha1Interface interface { +type AutoscalingV2beta1Interface interface { RESTClient() rest.Interface HorizontalPodAutoscalersGetter } -// AutoscalingV2alpha1Client is used to interact with features provided by the autoscaling group. -type AutoscalingV2alpha1Client struct { +// AutoscalingV2beta1Client is used to interact with features provided by the autoscaling group. +type AutoscalingV2beta1Client struct { restClient rest.Interface } -func (c *AutoscalingV2alpha1Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { +func (c *AutoscalingV2beta1Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { return newHorizontalPodAutoscalers(c, namespace) } -// NewForConfig creates a new AutoscalingV2alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*AutoscalingV2alpha1Client, error) { +// NewForConfig creates a new AutoscalingV2beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AutoscalingV2beta1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -47,12 +49,12 @@ func NewForConfig(c *rest.Config) (*AutoscalingV2alpha1Client, error) { if err != nil { return nil, err } - return &AutoscalingV2alpha1Client{client}, nil + return &AutoscalingV2beta1Client{client}, nil } -// NewForConfigOrDie creates a new AutoscalingV2alpha1Client for the given config and +// NewForConfigOrDie creates a new AutoscalingV2beta1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AutoscalingV2alpha1Client { +func NewForConfigOrDie(c *rest.Config) *AutoscalingV2beta1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -60,13 +62,13 @@ func NewForConfigOrDie(c *rest.Config) *AutoscalingV2alpha1Client { return client } -// New creates a new AutoscalingV2alpha1Client for the given RESTClient. -func New(c rest.Interface) *AutoscalingV2alpha1Client { - return &AutoscalingV2alpha1Client{c} +// New creates a new AutoscalingV2beta1Client for the given RESTClient. +func New(c rest.Interface) *AutoscalingV2beta1Client { + return &AutoscalingV2beta1Client{c} } func setConfigDefaults(config *rest.Config) error { - gv := v2alpha1.SchemeGroupVersion + gv := v2beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} @@ -80,7 +82,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
-func (c *AutoscalingV2alpha1Client) RESTClient() rest.Interface { +func (c *AutoscalingV2beta1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go new file mode 100644 index 000000000..06fd344c0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v2beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/fake_autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go similarity index 69% rename from vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/fake_autoscaling_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go index d9518a718..be8e0f48e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/fake_autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,25 +14,27 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( - v2alpha1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1" + v2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) -type FakeAutoscalingV2alpha1 struct { +type FakeAutoscalingV2beta1 struct { *testing.Fake } -func (c *FakeAutoscalingV2alpha1) HorizontalPodAutoscalers(namespace string) v2alpha1.HorizontalPodAutoscalerInterface { +func (c *FakeAutoscalingV2beta1) HorizontalPodAutoscalers(namespace string) v2beta1.HorizontalPodAutoscalerInterface { return &FakeHorizontalPodAutoscalers{c, namespace} } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *FakeAutoscalingV2alpha1) RESTClient() rest.Interface { +func (c *FakeAutoscalingV2beta1) RESTClient() rest.Interface { var ret *rest.RESTClient return ret } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go similarity index 52% rename from vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/fake_horizontalpodautoscaler.go rename to vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go index f47662a0e..2d860341a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,83 +14,45 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v2alpha1 "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1" testing "k8s.io/client-go/testing" ) // FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type FakeHorizontalPodAutoscalers struct { - Fake *FakeAutoscalingV2alpha1 + Fake *FakeAutoscalingV2beta1 ns string } -var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autoscaling", Version: "v2alpha1", Resource: "horizontalpodautoscalers"} +var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autoscaling", Version: "v2beta1", Resource: "horizontalpodautoscalers"} -func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { +var horizontalpodautoscalersKind = schema.GroupVersionKind{Group: "autoscaling", Version: "v2beta1", Kind: "HorizontalPodAutoscaler"} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *FakeHorizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2alpha1.HorizontalPodAutoscaler{}) + Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2beta1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*v2alpha1.HorizontalPodAutoscaler), err + return obj.(*v2beta1.HorizontalPodAutoscaler), err } -func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *FakeHorizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2alpha1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2alpha1.HorizontalPodAutoscaler), err -} - -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (*v2alpha1.HorizontalPodAutoscaler, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2alpha1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2alpha1.HorizontalPodAutoscaler), err -} - -func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &v2alpha1.HorizontalPodAutoscaler{}) - - return err -} - -func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v2alpha1.HorizontalPodAutoscalerList{}) - return err -} - -func (c *FakeHorizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2alpha1.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2alpha1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2alpha1.HorizontalPodAutoscaler), err -} - -func (c *FakeHorizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2alpha1.HorizontalPodAutoscalerList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(horizontalpodautoscalersResource, c.ns, opts), &v2alpha1.HorizontalPodAutoscalerList{}) + Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2beta1.HorizontalPodAutoscalerList{}) if obj == nil { return nil, err @@ -100,8 +62,8 @@ func (c *FakeHorizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2alph if label == nil { label = labels.Everything() } - list := &v2alpha1.HorizontalPodAutoscalerList{} - for _, item := range obj.(*v2alpha1.HorizontalPodAutoscalerList).Items { + list := &v2beta1.HorizontalPodAutoscalerList{ListMeta: obj.(*v2beta1.HorizontalPodAutoscalerList).ListMeta} + for _, item := range obj.(*v2beta1.HorizontalPodAutoscalerList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -116,13 +78,63 @@ func (c *FakeHorizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interfa } -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.HorizontalPodAutoscaler, err error) { +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &v2alpha1.HorizontalPodAutoscaler{}) + Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*v2alpha1.HorizontalPodAutoscaler), err + return obj.(*v2beta1.HorizontalPodAutoscaler), err +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v2beta1.HorizontalPodAutoscaler), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v2beta1.HorizontalPodAutoscaler), err +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &v2beta1.HorizontalPodAutoscaler{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v2beta1.HorizontalPodAutoscalerList{}) + return err +} + +// Patch applies the patch and returns the patched horizontalPodAutoscaler. +func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &v2beta1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v2beta1.HorizontalPodAutoscaler), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go similarity index 85% rename from vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go index e40f2c5a1..6f1704f1e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v2alpha1 +// Code generated by client-gen. DO NOT EDIT. + +package v2beta1 type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go similarity index 76% rename from vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/horizontalpodautoscaler.go rename to vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go index c85bfd9f3..4ac8cce71 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v2alpha1 +// Code generated by client-gen. DO NOT EDIT. 
+ +package v2beta1 import ( + v2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v2alpha1 "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1" rest "k8s.io/client-go/rest" ) @@ -33,15 +35,15 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. type HorizontalPodAutoscalerInterface interface { - Create(*v2alpha1.HorizontalPodAutoscaler) (*v2alpha1.HorizontalPodAutoscaler, error) - Update(*v2alpha1.HorizontalPodAutoscaler) (*v2alpha1.HorizontalPodAutoscaler, error) - UpdateStatus(*v2alpha1.HorizontalPodAutoscaler) (*v2alpha1.HorizontalPodAutoscaler, error) + Create(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) + Update(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) + UpdateStatus(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v2alpha1.HorizontalPodAutoscaler, error) - List(opts v1.ListOptions) (*v2alpha1.HorizontalPodAutoscalerList, error) + Get(name string, options v1.GetOptions) (*v2beta1.HorizontalPodAutoscaler, error) + List(opts v1.ListOptions) (*v2beta1.HorizontalPodAutoscalerList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.HorizontalPodAutoscaler, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } @@ -52,16 +54,51 @@ type horizontalPodAutoscalers struct { } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers -func newHorizontalPodAutoscalers(c *AutoscalingV2alpha1Client, namespace string) *horizontalPodAutoscalers { +func newHorizontalPodAutoscalers(c *AutoscalingV2beta1Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ client: c.RESTClient(), ns: namespace, } } +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { + result = &v2beta1.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { + result = &v2beta1.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { - result = &v2alpha1.HorizontalPodAutoscaler{} +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { + result = &v2beta1.HorizontalPodAutoscaler{} err = c.client.Post(). Namespace(c.ns). Resource("horizontalpodautoscalers"). @@ -72,8 +109,8 @@ func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2alpha1.Hori } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { - result = &v2alpha1.HorizontalPodAutoscaler{} +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { + result = &v2beta1.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). Resource("horizontalpodautoscalers"). @@ -85,10 +122,10 @@ func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2alpha1.Hori } // UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { - result = &v2alpha1.HorizontalPodAutoscaler{} +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { + result = &v2beta1.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). Resource("horizontalpodautoscalers"). @@ -122,44 +159,9 @@ func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, l Error() } -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2alpha1.HorizontalPodAutoscaler, err error) { - result = &v2alpha1.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2alpha1.HorizontalPodAutoscalerList, err error) { - result = &v2alpha1.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
-func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.HorizontalPodAutoscaler, err error) { - result = &v2alpha1.HorizontalPodAutoscaler{} +func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { + result = &v2beta1.HorizontalPodAutoscaler{} err = c.client.Patch(pt). Namespace(c.ns). Resource("horizontalpodautoscalers"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go index c8766f5b6..d5e35e6b2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/batch/v1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/apis/batch/v1" rest "k8s.io/client-go/rest" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go index 54673bfa7..3af5d054f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. 
package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go index 4e2d361e8..c90dd7561 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go index e193a7765..6a83115ba 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + batch_v1 "k8s.io/api/batch/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/apis/batch/v1" testing "k8s.io/client-go/testing" ) @@ -34,63 +36,23 @@ type FakeJobs struct { var jobsResource = schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"} -func (c *FakeJobs) Create(job *v1.Job) (result *v1.Job, err error) { +var jobsKind = schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"} + +// Get takes name of the job, and returns the corresponding job object, and an error if there is any. +func (c *FakeJobs) Get(name string, options v1.GetOptions) (result *batch_v1.Job, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(jobsResource, c.ns, job), &v1.Job{}) + Invokes(testing.NewGetAction(jobsResource, c.ns, name), &batch_v1.Job{}) if obj == nil { return nil, err } - return obj.(*v1.Job), err + return obj.(*batch_v1.Job), err } -func (c *FakeJobs) Update(job *v1.Job) (result *v1.Job, err error) { +// List takes label and field selectors, and returns the list of Jobs that match those selectors. +func (c *FakeJobs) List(opts v1.ListOptions) (result *batch_v1.JobList, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(jobsResource, c.ns, job), &v1.Job{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Job), err -} - -func (c *FakeJobs) UpdateStatus(job *v1.Job) (*v1.Job, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &v1.Job{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Job), err -} - -func (c *FakeJobs) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(jobsResource, c.ns, name), &v1.Job{}) - - return err -} - -func (c *FakeJobs) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(jobsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1.JobList{}) - return err -} - -func (c *FakeJobs) Get(name string, options meta_v1.GetOptions) (result *v1.Job, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(jobsResource, c.ns, name), &v1.Job{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Job), err -} - -func (c *FakeJobs) List(opts meta_v1.ListOptions) (result *v1.JobList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(jobsResource, c.ns, opts), &v1.JobList{}) + Invokes(testing.NewListAction(jobsResource, jobsKind, c.ns, opts), &batch_v1.JobList{}) if obj == nil { return nil, err @@ -100,8 +62,8 @@ func (c *FakeJobs) List(opts meta_v1.ListOptions) (result *v1.JobList, err error if label == nil { label = labels.Everything() } - list := &v1.JobList{} - for _, item := range obj.(*v1.JobList).Items { + list := &batch_v1.JobList{ListMeta: obj.(*batch_v1.JobList).ListMeta} + for _, item := range obj.(*batch_v1.JobList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -110,19 +72,69 @@ func (c *FakeJobs) List(opts meta_v1.ListOptions) (result *v1.JobList, err error } // Watch returns a watch.Interface that watches the requested jobs. -func (c *FakeJobs) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakeJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(jobsResource, c.ns, opts)) } -// Patch applies the patch and returns the patched job. -func (c *FakeJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) { +// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. +func (c *FakeJobs) Create(job *batch_v1.Job) (result *batch_v1.Job, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, data, subresources...), &v1.Job{}) + Invokes(testing.NewCreateAction(jobsResource, c.ns, job), &batch_v1.Job{}) if obj == nil { return nil, err } - return obj.(*v1.Job), err + return obj.(*batch_v1.Job), err +} + +// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. +func (c *FakeJobs) Update(job *batch_v1.Job) (result *batch_v1.Job, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(jobsResource, c.ns, job), &batch_v1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*batch_v1.Job), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeJobs) UpdateStatus(job *batch_v1.Job) (*batch_v1.Job, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &batch_v1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*batch_v1.Job), err +} + +// Delete takes name of the job and deletes it. Returns an error if one occurs. +func (c *FakeJobs) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteAction(jobsResource, c.ns, name), &batch_v1.Job{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(jobsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &batch_v1.JobList{}) + return err +} + +// Patch applies the patch and returns the patched job. +func (c *FakeJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *batch_v1.Job, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, data, subresources...), &batch_v1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*batch_v1.Job), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go index 68d7741fa..dc4142934 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 type JobExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go index c8120f6d8..f714d3de5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/batch/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/apis/batch/v1" rest "k8s.io/client-go/rest" ) @@ -59,6 +61,41 @@ func newJobs(c *BatchV1Client, namespace string) *jobs { } } +// Get takes name of the job, and returns the corresponding job object, and an error if there is any. +func (c *jobs) Get(name string, options meta_v1.GetOptions) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Jobs that match those selectors. +func (c *jobs) List(opts meta_v1.ListOptions) (result *v1.JobList, err error) { + result = &v1.JobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested jobs. 
+func (c *jobs) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. func (c *jobs) Create(job *v1.Job) (result *v1.Job, err error) { result = &v1.Job{} @@ -85,7 +122,7 @@ func (c *jobs) Update(job *v1.Job) (result *v1.Job, err error) { } // UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *jobs) UpdateStatus(job *v1.Job) (result *v1.Job, err error) { result = &v1.Job{} @@ -122,41 +159,6 @@ func (c *jobs) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta Error() } -// Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *jobs) Get(name string, options meta_v1.GetOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Jobs that match those selectors. -func (c *jobs) List(opts meta_v1.ListOptions) (result *v1.JobList, err error) { - result = &v1.JobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *jobs) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched job. func (c *jobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) { result = &v1.Job{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go new file mode 100644 index 000000000..aa71ca833 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/batch/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type BatchV1beta1Interface interface { + RESTClient() rest.Interface + CronJobsGetter +} + +// BatchV1beta1Client is used to interact with features provided by the batch group. 
+type BatchV1beta1Client struct { + restClient rest.Interface +} + +func (c *BatchV1beta1Client) CronJobs(namespace string) CronJobInterface { + return newCronJobs(c, namespace) +} + +// NewForConfig creates a new BatchV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*BatchV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new BatchV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *BatchV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BatchV1beta1Client for the given RESTClient. +func New(c rest.Interface) *BatchV1beta1Client { + return &BatchV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *BatchV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go new file mode 100644 index 000000000..04637c36a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/batch/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// CronJobsGetter has a method to return a CronJobInterface. +// A group's client should implement this interface. +type CronJobsGetter interface { + CronJobs(namespace string) CronJobInterface +} + +// CronJobInterface has methods to work with CronJob resources. 
+type CronJobInterface interface { + Create(*v1beta1.CronJob) (*v1beta1.CronJob, error) + Update(*v1beta1.CronJob) (*v1beta1.CronJob, error) + UpdateStatus(*v1beta1.CronJob) (*v1beta1.CronJob, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.CronJob, error) + List(opts v1.ListOptions) (*v1beta1.CronJobList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) + CronJobExpansion +} + +// cronJobs implements CronJobInterface +type cronJobs struct { + client rest.Interface + ns string +} + +// newCronJobs returns a CronJobs +func newCronJobs(c *BatchV1beta1Client, namespace string) *cronJobs { + return &cronJobs{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. +func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { + result = &v1beta1.CronJob{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CronJobs that match those selectors. +func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { + result = &v1beta1.CronJobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cronJobs. +func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. +func (c *cronJobs) Create(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { + result = &v1beta1.CronJob{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cronjobs"). + Body(cronJob). + Do(). + Into(result) + return +} + +// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. +func (c *cronJobs) Update(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { + result = &v1beta1.CronJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cronjobs"). + Name(cronJob.Name). + Body(cronJob). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *cronJobs) UpdateStatus(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { + result = &v1beta1.CronJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cronjobs"). + Name(cronJob.Name). + SubResource("status"). + Body(cronJob). + Do(). + Into(result) + return +} + +// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. +func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cronjobs"). + Name(name). + Body(options). 
+ Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched cronJob. +func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) { + result = &v1beta1.CronJob{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cronjobs"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go new file mode 100644 index 000000000..771101956 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go new file mode 100644 index 000000000..6f350aed9 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeBatchV1beta1 struct { + *testing.Fake +} + +func (c *FakeBatchV1beta1) CronJobs(namespace string) v1beta1.CronJobInterface { + return &FakeCronJobs{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeBatchV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go new file mode 100644 index 000000000..d80ef5e67 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go @@ -0,0 +1,140 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/api/batch/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCronJobs implements CronJobInterface +type FakeCronJobs struct { + Fake *FakeBatchV1beta1 + ns string +} + +var cronjobsResource = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"} + +var cronjobsKind = schema.GroupVersionKind{Group: "batch", Version: "v1beta1", Kind: "CronJob"} + +// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. +func (c *FakeCronJobs) Get(name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v1beta1.CronJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CronJob), err +} + +// List takes label and field selectors, and returns the list of CronJobs that match those selectors. +func (c *FakeCronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &v1beta1.CronJobList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.CronJobList{ListMeta: obj.(*v1beta1.CronJobList).ListMeta} + for _, item := range obj.(*v1beta1.CronJobList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested cronJobs. 
+func (c *FakeCronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(cronjobsResource, c.ns, opts)) + +} + +// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. +func (c *FakeCronJobs) Create(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CronJob), err +} + +// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. +func (c *FakeCronJobs) Update(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CronJob), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCronJobs) UpdateStatus(cronJob *v1beta1.CronJob) (*v1beta1.CronJob, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v1beta1.CronJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CronJob), err +} + +// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. +func (c *FakeCronJobs) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(cronjobsResource, c.ns, name), &v1beta1.CronJob{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.CronJobList{}) + return err +} + +// Patch applies the patch and returns the patched cronJob. +func (c *FakeCronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, data, subresources...), &v1beta1.CronJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CronJob), err +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go similarity index 82% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go index 738b0b6d2..145e14a99 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,5 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +groupName=authorization.k8s.io +// Code generated by client-gen. DO NOT EDIT. 
+ package v1beta1 + +type CronJobExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go index e2b2dd5cd..e6c6306b8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v2alpha1 import ( + v2alpha1 "k8s.io/api/batch/v2alpha1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v2alpha1 "k8s.io/client-go/pkg/apis/batch/v2alpha1" rest "k8s.io/client-go/rest" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go index 2447f2add..4d922f9ae 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v2alpha1 import ( + v2alpha1 "k8s.io/api/batch/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v2alpha1 "k8s.io/client-go/pkg/apis/batch/v2alpha1" rest "k8s.io/client-go/rest" ) @@ -59,6 +61,41 @@ func newCronJobs(c *BatchV2alpha1Client, namespace string) *cronJobs { } } +// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. +func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CronJobs that match those selectors. +func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { + result = &v2alpha1.CronJobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cronJobs. +func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. 
func (c *cronJobs) Create(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { result = &v2alpha1.CronJob{} @@ -85,7 +122,7 @@ func (c *cronJobs) Update(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, } // UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *cronJobs) UpdateStatus(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { result = &v2alpha1.CronJob{} @@ -122,41 +159,6 @@ func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li Error() } -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) { - result = &v2alpha1.CronJob{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { - result = &v2alpha1.CronJobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched cronJob. func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) { result = &v2alpha1.CronJob{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go index d29bd3f4e..3efe0d284 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. package v2alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go index c8f5a40de..3e478cde9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go index 9e570a8a4..75c0b1733 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v2alpha1 "k8s.io/api/batch/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v2alpha1 "k8s.io/client-go/pkg/apis/batch/v2alpha1" testing "k8s.io/client-go/testing" ) @@ -34,50 +36,9 @@ type FakeCronJobs struct { var cronjobsResource = schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"} -func (c *FakeCronJobs) Create(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v2alpha1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v2alpha1.CronJob), err -} - -func (c *FakeCronJobs) Update(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v2alpha1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v2alpha1.CronJob), err -} - -func (c *FakeCronJobs) UpdateStatus(cronJob *v2alpha1.CronJob) (*v2alpha1.CronJob, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v2alpha1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v2alpha1.CronJob), err -} - -func (c *FakeCronJobs) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(cronjobsResource, c.ns, name), &v2alpha1.CronJob{}) - - return err -} - -func (c *FakeCronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v2alpha1.CronJobList{}) - return err -} +var cronjobsKind = schema.GroupVersionKind{Group: "batch", Version: "v2alpha1", Kind: "CronJob"} +// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. func (c *FakeCronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v2alpha1.CronJob{}) @@ -88,9 +49,10 @@ func (c *FakeCronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1 return obj.(*v2alpha1.CronJob), err } +// List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *FakeCronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(cronjobsResource, c.ns, opts), &v2alpha1.CronJobList{}) + Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &v2alpha1.CronJobList{}) if obj == nil { return nil, err @@ -100,7 +62,7 @@ func (c *FakeCronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, if label == nil { label = labels.Everything() } - list := &v2alpha1.CronJobList{} + list := &v2alpha1.CronJobList{ListMeta: obj.(*v2alpha1.CronJobList).ListMeta} for _, item := range obj.(*v2alpha1.CronJobList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -116,6 +78,56 @@ func (c *FakeCronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { } +// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. +func (c *FakeCronJobs) Create(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v2alpha1.CronJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CronJob), err +} + +// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. +func (c *FakeCronJobs) Update(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v2alpha1.CronJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CronJob), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCronJobs) UpdateStatus(cronJob *v2alpha1.CronJob) (*v2alpha1.CronJob, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v2alpha1.CronJob{}) + + if obj == nil { + return nil, err + } + return obj.(*v2alpha1.CronJob), err +} + +// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. +func (c *FakeCronJobs) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteAction(cronjobsResource, c.ns, name), &v2alpha1.CronJob{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v2alpha1.CronJobList{}) + return err +} + // Patch applies the patch and returns the patched cronJob. func (c *FakeCronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go index 078027ef4..34dafc464 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v2alpha1 type CronJobExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go index c9c39acb2..baac42ee2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/certificates/v1beta1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1" rest "k8s.io/client-go/rest" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go index 7407ef068..b39169a8f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package v1beta1 import ( + v1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1" rest "k8s.io/client-go/rest" ) @@ -57,6 +59,38 @@ func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSig } } +// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. +func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Get(). + Resource("certificatesigningrequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. +func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { + result = &v1beta1.CertificateSigningRequestList{} + err = c.client.Get(). + Resource("certificatesigningrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested certificateSigningRequests. +func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("certificatesigningrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. func (c *certificateSigningRequests) Create(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { result = &v1beta1.CertificateSigningRequest{} @@ -81,7 +115,7 @@ func (c *certificateSigningRequests) Update(certificateSigningRequest *v1beta1.C } // UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *certificateSigningRequests) UpdateStatus(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { result = &v1beta1.CertificateSigningRequest{} @@ -115,38 +149,6 @@ func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, Error() } -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. 
-func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { - result = &v1beta1.CertificateSigningRequestList{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched certificateSigningRequest. func (c *certificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { result = &v1beta1.CertificateSigningRequest{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go index 4765bba8a..c63b80638 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - certificates "k8s.io/client-go/pkg/apis/certificates/v1beta1" + certificates "k8s.io/api/certificates/v1beta1" ) type CertificateSigningRequestExpansion interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go index 11b523897..771101956 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. 
package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go index 550c5bba1..29d8b088e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go index 3ff1fe5a1..dfd517195 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1" testing "k8s.io/client-go/testing" ) @@ -33,46 +35,9 @@ type FakeCertificateSigningRequests struct { var certificatesigningrequestsResource = schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"} -func (c *FakeCertificateSigningRequests) Create(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} - -func (c *FakeCertificateSigningRequests) Update(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} - -func (c *FakeCertificateSigningRequests) UpdateStatus(certificateSigningRequest *v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} - -func (c *FakeCertificateSigningRequests) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(certificatesigningrequestsResource, name), &v1beta1.CertificateSigningRequest{}) - return err -} - -func (c *FakeCertificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.CertificateSigningRequestList{}) - return err -} +var certificatesigningrequestsKind = schema.GroupVersionKind{Group: "certificates.k8s.io", Version: "v1beta1", Kind: "CertificateSigningRequest"} +// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. func (c *FakeCertificateSigningRequests) Get(name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(certificatesigningrequestsResource, name), &v1beta1.CertificateSigningRequest{}) @@ -82,9 +47,10 @@ func (c *FakeCertificateSigningRequests) Get(name string, options v1.GetOptions) return obj.(*v1beta1.CertificateSigningRequest), err } +// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. func (c *FakeCertificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootListAction(certificatesigningrequestsResource, opts), &v1beta1.CertificateSigningRequestList{}) + Invokes(testing.NewRootListAction(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), &v1beta1.CertificateSigningRequestList{}) if obj == nil { return nil, err } @@ -93,7 +59,7 @@ func (c *FakeCertificateSigningRequests) List(opts v1.ListOptions) (result *v1be if label == nil { label = labels.Everything() } - list := &v1beta1.CertificateSigningRequestList{} + list := &v1beta1.CertificateSigningRequestList{ListMeta: obj.(*v1beta1.CertificateSigningRequestList).ListMeta} for _, item := range obj.(*v1beta1.CertificateSigningRequestList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -108,6 +74,52 @@ func (c *FakeCertificateSigningRequests) Watch(opts v1.ListOptions) (watch.Inter InvokesWatch(testing.NewRootWatchAction(certificatesigningrequestsResource, opts)) } +// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. +func (c *FakeCertificateSigningRequests) Create(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CertificateSigningRequest), err +} + +// Update takes the representation of a certificateSigningRequest and updates it. 
Returns the server's representation of the certificateSigningRequest, and an error, if there is any. +func (c *FakeCertificateSigningRequests) Update(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CertificateSigningRequest), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCertificateSigningRequests) UpdateStatus(certificateSigningRequest *v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CertificateSigningRequest), err +} + +// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. +func (c *FakeCertificateSigningRequests) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(certificatesigningrequestsResource, name), &v1beta1.CertificateSigningRequest{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCertificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.CertificateSigningRequestList{}) + return err +} + // Patch applies the patch and returns the patched certificateSigningRequest. func (c *FakeCertificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go index 2333c0f3e..8af33e62a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package fake import ( - certificates "k8s.io/client-go/pkg/apis/certificates/v1beta1" + certificates "k8s.io/api/certificates/v1beta1" core "k8s.io/client-go/testing" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go index 2b7e8ca0b..f6df76963 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,4 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go index 50671976a..6ae5a6681 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -56,6 +58,38 @@ func newComponentStatuses(c *CoreV1Client) *componentStatuses { } } +// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. +func (c *componentStatuses) Get(name string, options meta_v1.GetOptions) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Get(). + Resource("componentstatuses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. +func (c *componentStatuses) List(opts meta_v1.ListOptions) (result *v1.ComponentStatusList, err error) { + result = &v1.ComponentStatusList{} + err = c.client.Get(). + Resource("componentstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested componentStatuses. +func (c *componentStatuses) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("componentstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. func (c *componentStatuses) Create(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { result = &v1.ComponentStatus{} @@ -99,38 +133,6 @@ func (c *componentStatuses) DeleteCollection(options *meta_v1.DeleteOptions, lis Error() } -// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. -func (c *componentStatuses) Get(name string, options meta_v1.GetOptions) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Get(). - Resource("componentstatuses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. 
-func (c *componentStatuses) List(opts meta_v1.ListOptions) (result *v1.ComponentStatusList, err error) { - result = &v1.ComponentStatusList{} - err = c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested componentStatuses. -func (c *componentStatuses) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched componentStatus. func (c *componentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) { result = &v1.ComponentStatus{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go index bb21636e2..3ae229f42 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -58,6 +60,41 @@ func newConfigMaps(c *CoreV1Client, namespace string) *configMaps { } } +// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. +func (c *configMaps) Get(name string, options meta_v1.GetOptions) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. +func (c *configMaps) List(opts meta_v1.ListOptions) (result *v1.ConfigMapList, err error) { + result = &v1.ConfigMapList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested configMaps. +func (c *configMaps) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. func (c *configMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { result = &v1.ConfigMap{} @@ -105,41 +142,6 @@ func (c *configMaps) DeleteCollection(options *meta_v1.DeleteOptions, listOption Error() } -// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. 
-func (c *configMaps) Get(name string, options meta_v1.GetOptions) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *configMaps) List(opts meta_v1.ListOptions) (result *v1.ConfigMapList, err error) { - result = &v1.ConfigMapList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested configMaps. -func (c *configMaps) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched configMap. func (c *configMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) { result = &v1.ConfigMap{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go index 0972960d1..044a28ebd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/core/v1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go index 54673bfa7..3af5d054f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go index 3580742a8..2868baaa1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -58,6 +60,41 @@ func newEndpoints(c *CoreV1Client, namespace string) *endpoints { } } +// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. +func (c *endpoints) Get(name string, options meta_v1.GetOptions) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Endpoints that match those selectors. +func (c *endpoints) List(opts meta_v1.ListOptions) (result *v1.EndpointsList, err error) { + result = &v1.EndpointsList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpoints. +func (c *endpoints) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. func (c *endpoints) Create(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { result = &v1.Endpoints{} @@ -105,41 +142,6 @@ func (c *endpoints) DeleteCollection(options *meta_v1.DeleteOptions, listOptions Error() } -// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. -func (c *endpoints) Get(name string, options meta_v1.GetOptions) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Endpoints that match those selectors. -func (c *endpoints) List(opts meta_v1.ListOptions) (result *v1.EndpointsList, err error) { - result = &v1.EndpointsList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpoints. -func (c *endpoints) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched endpoints. 
func (c *endpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) { result = &v1.Endpoints{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go index c4ac11006..565062345 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -58,6 +60,41 @@ func newEvents(c *CoreV1Client, namespace string) *events { } } +// Get takes name of the event, and returns the corresponding event object, and an error if there is any. +func (c *events) Get(name string, options meta_v1.GetOptions) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Events that match those selectors. +func (c *events) List(opts meta_v1.ListOptions) (result *v1.EventList, err error) { + result = &v1.EventList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested events. +func (c *events) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. func (c *events) Create(event *v1.Event) (result *v1.Event, err error) { result = &v1.Event{} @@ -105,41 +142,6 @@ func (c *events) DeleteCollection(options *meta_v1.DeleteOptions, listOptions me Error() } -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(name string, options meta_v1.GetOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(opts meta_v1.ListOptions) (result *v1.EventList, err error) { - result = &v1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. 
-func (c *events) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched event. func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) { result = &v1.Event{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go index 9b4490ea9..6929ade1d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go @@ -19,11 +19,12 @@ package v1 import ( "fmt" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/pkg/api/v1" + ref "k8s.io/client-go/tools/reference" ) // The EventExpansion interface allows manually adding extra methods to the EventInterface. @@ -99,7 +100,7 @@ func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) // object must match this event's client namespace unless the event client // was made with the "" namespace. func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) { - ref, err := v1.GetReference(scheme, objOrRef) + ref, err := ref.GetReference(scheme, objOrRef) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go index 6e2111083..2f2420730 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -33,49 +35,22 @@ type FakeComponentStatuses struct { var componentstatusesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "componentstatuses"} -func (c *FakeComponentStatuses) Create(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { +var componentstatusesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ComponentStatus"} + +// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. +func (c *FakeComponentStatuses) Get(name string, options v1.GetOptions) (result *core_v1.ComponentStatus, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) + Invokes(testing.NewRootGetAction(componentstatusesResource, name), &core_v1.ComponentStatus{}) if obj == nil { return nil, err } - return obj.(*v1.ComponentStatus), err + return obj.(*core_v1.ComponentStatus), err } -func (c *FakeComponentStatuses) Update(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { +// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. +func (c *FakeComponentStatuses) List(opts v1.ListOptions) (result *core_v1.ComponentStatusList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ComponentStatus), err -} - -func (c *FakeComponentStatuses) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(componentstatusesResource, name), &v1.ComponentStatus{}) - return err -} - -func (c *FakeComponentStatuses) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(componentstatusesResource, listOptions) - - _, err := c.Fake.Invokes(action, &v1.ComponentStatusList{}) - return err -} - -func (c *FakeComponentStatuses) Get(name string, options meta_v1.GetOptions) (result *v1.ComponentStatus, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(componentstatusesResource, name), &v1.ComponentStatus{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ComponentStatus), err -} - -func (c *FakeComponentStatuses) List(opts meta_v1.ListOptions) (result *v1.ComponentStatusList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(componentstatusesResource, opts), &v1.ComponentStatusList{}) + Invokes(testing.NewRootListAction(componentstatusesResource, componentstatusesKind, opts), &core_v1.ComponentStatusList{}) if obj == nil { return nil, err } @@ -84,8 +59,8 @@ func (c *FakeComponentStatuses) List(opts meta_v1.ListOptions) (result *v1.Compo if label == nil { label = labels.Everything() } - list := &v1.ComponentStatusList{} - for _, item := range obj.(*v1.ComponentStatusList).Items { + list := &core_v1.ComponentStatusList{ListMeta: obj.(*core_v1.ComponentStatusList).ListMeta} + for _, item := range obj.(*core_v1.ComponentStatusList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -94,17 +69,52 @@ func (c *FakeComponentStatuses) List(opts meta_v1.ListOptions) (result *v1.Compo } // Watch returns a watch.Interface that watches the requested componentStatuses. -func (c *FakeComponentStatuses) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakeComponentStatuses) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(componentstatusesResource, opts)) } -// Patch applies the patch and returns the patched componentStatus. -func (c *FakeComponentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) { +// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *FakeComponentStatuses) Create(componentStatus *core_v1.ComponentStatus) (result *core_v1.ComponentStatus, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, data, subresources...), &v1.ComponentStatus{}) + Invokes(testing.NewRootCreateAction(componentstatusesResource, componentStatus), &core_v1.ComponentStatus{}) if obj == nil { return nil, err } - return obj.(*v1.ComponentStatus), err + return obj.(*core_v1.ComponentStatus), err +} + +// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *FakeComponentStatuses) Update(componentStatus *core_v1.ComponentStatus) (result *core_v1.ComponentStatus, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(componentstatusesResource, componentStatus), &core_v1.ComponentStatus{}) + if obj == nil { + return nil, err + } + return obj.(*core_v1.ComponentStatus), err +} + +// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. +func (c *FakeComponentStatuses) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(componentstatusesResource, name), &core_v1.ComponentStatus{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeComponentStatuses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(componentstatusesResource, listOptions) + + _, err := c.Fake.Invokes(action, &core_v1.ComponentStatusList{}) + return err +} + +// Patch applies the patch and returns the patched componentStatus. +func (c *FakeComponentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.ComponentStatus, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, data, subresources...), &core_v1.ComponentStatus{}) + if obj == nil { + return nil, err + } + return obj.(*core_v1.ComponentStatus), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go index 560ad7a40..1bc7757fb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -34,53 +36,23 @@ type FakeConfigMaps struct { var configmapsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"} -func (c *FakeConfigMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { +var configmapsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"} + +// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. +func (c *FakeConfigMaps) Get(name string, options v1.GetOptions) (result *core_v1.ConfigMap, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) + Invokes(testing.NewGetAction(configmapsResource, c.ns, name), &core_v1.ConfigMap{}) if obj == nil { return nil, err } - return obj.(*v1.ConfigMap), err + return obj.(*core_v1.ConfigMap), err } -func (c *FakeConfigMaps) Update(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { +// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. +func (c *FakeConfigMaps) List(opts v1.ListOptions) (result *core_v1.ConfigMapList, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ConfigMap), err -} - -func (c *FakeConfigMaps) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(configmapsResource, c.ns, name), &v1.ConfigMap{}) - - return err -} - -func (c *FakeConfigMaps) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(configmapsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1.ConfigMapList{}) - return err -} - -func (c *FakeConfigMaps) Get(name string, options meta_v1.GetOptions) (result *v1.ConfigMap, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewGetAction(configmapsResource, c.ns, name), &v1.ConfigMap{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ConfigMap), err -} - -func (c *FakeConfigMaps) List(opts meta_v1.ListOptions) (result *v1.ConfigMapList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(configmapsResource, c.ns, opts), &v1.ConfigMapList{}) + Invokes(testing.NewListAction(configmapsResource, configmapsKind, c.ns, opts), &core_v1.ConfigMapList{}) if obj == nil { return nil, err @@ -90,8 +62,8 @@ func (c *FakeConfigMaps) List(opts meta_v1.ListOptions) (result *v1.ConfigMapLis if label == nil { label = labels.Everything() } - list := &v1.ConfigMapList{} - for _, item := range obj.(*v1.ConfigMapList).Items { + list := &core_v1.ConfigMapList{ListMeta: obj.(*core_v1.ConfigMapList).ListMeta} + for _, item := range obj.(*core_v1.ConfigMapList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -100,19 +72,57 @@ func (c *FakeConfigMaps) List(opts meta_v1.ListOptions) (result *v1.ConfigMapLis } // Watch returns a watch.Interface that watches the requested configMaps. -func (c *FakeConfigMaps) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakeConfigMaps) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(configmapsResource, c.ns, opts)) } -// Patch applies the patch and returns the patched configMap. -func (c *FakeConfigMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) { +// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *FakeConfigMaps) Create(configMap *core_v1.ConfigMap) (result *core_v1.ConfigMap, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, data, subresources...), &v1.ConfigMap{}) + Invokes(testing.NewCreateAction(configmapsResource, c.ns, configMap), &core_v1.ConfigMap{}) if obj == nil { return nil, err } - return obj.(*v1.ConfigMap), err + return obj.(*core_v1.ConfigMap), err +} + +// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *FakeConfigMaps) Update(configMap *core_v1.ConfigMap) (result *core_v1.ConfigMap, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(configmapsResource, c.ns, configMap), &core_v1.ConfigMap{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.ConfigMap), err +} + +// Delete takes name of the configMap and deletes it. Returns an error if one occurs. +func (c *FakeConfigMaps) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(configmapsResource, c.ns, name), &core_v1.ConfigMap{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeConfigMaps) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(configmapsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &core_v1.ConfigMapList{}) + return err +} + +// Patch applies the patch and returns the patched configMap. +func (c *FakeConfigMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.ConfigMap, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, data, subresources...), &core_v1.ConfigMap{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.ConfigMap), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go index 0956b4c63..5ad90943c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go index d4b7ad068..eb5eac981 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -34,53 +36,23 @@ type FakeEndpoints struct { var endpointsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "endpoints"} -func (c *FakeEndpoints) Create(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { +var endpointsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Endpoints"} + +// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. +func (c *FakeEndpoints) Get(name string, options v1.GetOptions) (result *core_v1.Endpoints, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) + Invokes(testing.NewGetAction(endpointsResource, c.ns, name), &core_v1.Endpoints{}) if obj == nil { return nil, err } - return obj.(*v1.Endpoints), err + return obj.(*core_v1.Endpoints), err } -func (c *FakeEndpoints) Update(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { +// List takes label and field selectors, and returns the list of Endpoints that match those selectors. +func (c *FakeEndpoints) List(opts v1.ListOptions) (result *core_v1.EndpointsList, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Endpoints), err -} - -func (c *FakeEndpoints) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(endpointsResource, c.ns, name), &v1.Endpoints{}) - - return err -} - -func (c *FakeEndpoints) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(endpointsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1.EndpointsList{}) - return err -} - -func (c *FakeEndpoints) Get(name string, options meta_v1.GetOptions) (result *v1.Endpoints, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(endpointsResource, c.ns, name), &v1.Endpoints{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Endpoints), err -} - -func (c *FakeEndpoints) List(opts meta_v1.ListOptions) (result *v1.EndpointsList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(endpointsResource, c.ns, opts), &v1.EndpointsList{}) + Invokes(testing.NewListAction(endpointsResource, endpointsKind, c.ns, opts), &core_v1.EndpointsList{}) if obj == nil { return nil, err @@ -90,8 +62,8 @@ func (c *FakeEndpoints) List(opts meta_v1.ListOptions) (result *v1.EndpointsList if label == nil { label = labels.Everything() } - list := &v1.EndpointsList{} - for _, item := range obj.(*v1.EndpointsList).Items { + list := &core_v1.EndpointsList{ListMeta: obj.(*core_v1.EndpointsList).ListMeta} + for _, item := range obj.(*core_v1.EndpointsList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -100,19 +72,57 @@ func (c *FakeEndpoints) List(opts meta_v1.ListOptions) (result *v1.EndpointsList } // Watch returns a watch.Interface that watches the requested endpoints. -func (c *FakeEndpoints) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakeEndpoints) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(endpointsResource, c.ns, opts)) } -// Patch applies the patch and returns the patched endpoints. -func (c *FakeEndpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) { +// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *FakeEndpoints) Create(endpoints *core_v1.Endpoints) (result *core_v1.Endpoints, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, data, subresources...), &v1.Endpoints{}) + Invokes(testing.NewCreateAction(endpointsResource, c.ns, endpoints), &core_v1.Endpoints{}) if obj == nil { return nil, err } - return obj.(*v1.Endpoints), err + return obj.(*core_v1.Endpoints), err +} + +// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *FakeEndpoints) Update(endpoints *core_v1.Endpoints) (result *core_v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(endpointsResource, c.ns, endpoints), &core_v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.Endpoints), err +} + +// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. +func (c *FakeEndpoints) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(endpointsResource, c.ns, name), &core_v1.Endpoints{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeEndpoints) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(endpointsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &core_v1.EndpointsList{}) + return err +} + +// Patch applies the patch and returns the patched endpoints. +func (c *FakeEndpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, data, subresources...), &core_v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.Endpoints), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go index 7abe81587..458b8c153 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -34,53 +36,23 @@ type FakeEvents struct { var eventsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "events"} -func (c *FakeEvents) Create(event *v1.Event) (result *v1.Event, err error) { +var eventsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Event"} + +// Get takes name of the event, and returns the corresponding event object, and an error if there is any. +func (c *FakeEvents) Get(name string, options v1.GetOptions) (result *core_v1.Event, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1.Event{}) + Invokes(testing.NewGetAction(eventsResource, c.ns, name), &core_v1.Event{}) if obj == nil { return nil, err } - return obj.(*v1.Event), err + return obj.(*core_v1.Event), err } -func (c *FakeEvents) Update(event *v1.Event) (result *v1.Event, err error) { +// List takes label and field selectors, and returns the list of Events that match those selectors. +func (c *FakeEvents) List(opts v1.ListOptions) (result *core_v1.EventList, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Event), err -} - -func (c *FakeEvents) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(eventsResource, c.ns, name), &v1.Event{}) - - return err -} - -func (c *FakeEvents) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1.EventList{}) - return err -} - -func (c *FakeEvents) Get(name string, options meta_v1.GetOptions) (result *v1.Event, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Event), err -} - -func (c *FakeEvents) List(opts meta_v1.ListOptions) (result *v1.EventList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(eventsResource, c.ns, opts), &v1.EventList{}) + Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &core_v1.EventList{}) if obj == nil { return nil, err @@ -90,8 +62,8 @@ func (c *FakeEvents) List(opts meta_v1.ListOptions) (result *v1.EventList, err e if label == nil { label = labels.Everything() } - list := &v1.EventList{} - for _, item := range obj.(*v1.EventList).Items { + list := &core_v1.EventList{ListMeta: obj.(*core_v1.EventList).ListMeta} + for _, item := range obj.(*core_v1.EventList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -100,19 +72,57 @@ func (c *FakeEvents) List(opts meta_v1.ListOptions) (result *v1.EventList, err e } // Watch returns a watch.Interface that watches the requested events. -func (c *FakeEvents) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakeEvents) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) } -// Patch applies the patch and returns the patched event. -func (c *FakeEvents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) { +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. +func (c *FakeEvents) Create(event *core_v1.Event) (result *core_v1.Event, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, data, subresources...), &v1.Event{}) + Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &core_v1.Event{}) if obj == nil { return nil, err } - return obj.(*v1.Event), err + return obj.(*core_v1.Event), err +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. +func (c *FakeEvents) Update(event *core_v1.Event) (result *core_v1.Event, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &core_v1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.Event), err +} + +// Delete takes name of the event and deletes it. Returns an error if one occurs. +func (c *FakeEvents) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(eventsResource, c.ns, name), &core_v1.Event{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeEvents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &core_v1.EventList{}) + return err +} + +// Patch applies the patch and returns the patched event. +func (c *FakeEvents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.Event, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, data, subresources...), &core_v1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.Event), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go index b4de7bcda..dd3fb8392 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go @@ -17,10 +17,10 @@ limitations under the License. package fake import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/api/v1" core "k8s.io/client-go/testing" ) @@ -67,9 +67,9 @@ func (c *FakeEvents) PatchWithEventNamespace(event *v1.Event, data []byte) (*v1. // Search returns a list of events matching the specified object. func (c *FakeEvents) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) { - action := core.NewRootListAction(eventsResource, api.ListOptions{}) + action := core.NewRootListAction(eventsResource, eventsKind, metav1.ListOptions{}) if c.ns != "" { - action = core.NewListAction(eventsResource, c.ns, api.ListOptions{}) + action = core.NewListAction(eventsResource, eventsKind, c.ns, metav1.ListOptions{}) } obj, err := c.Fake.Invokes(action, &v1.EventList{}) if obj == nil { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go index 278635838..c5156c7c0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -34,53 +36,23 @@ type FakeLimitRanges struct { var limitrangesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "limitranges"} -func (c *FakeLimitRanges) Create(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { +var limitrangesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "LimitRange"} + +// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. +func (c *FakeLimitRanges) Get(name string, options v1.GetOptions) (result *core_v1.LimitRange, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) + Invokes(testing.NewGetAction(limitrangesResource, c.ns, name), &core_v1.LimitRange{}) if obj == nil { return nil, err } - return obj.(*v1.LimitRange), err + return obj.(*core_v1.LimitRange), err } -func (c *FakeLimitRanges) Update(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { +// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. +func (c *FakeLimitRanges) List(opts v1.ListOptions) (result *core_v1.LimitRangeList, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.LimitRange), err -} - -func (c *FakeLimitRanges) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(limitrangesResource, c.ns, name), &v1.LimitRange{}) - - return err -} - -func (c *FakeLimitRanges) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(limitrangesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1.LimitRangeList{}) - return err -} - -func (c *FakeLimitRanges) Get(name string, options meta_v1.GetOptions) (result *v1.LimitRange, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(limitrangesResource, c.ns, name), &v1.LimitRange{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.LimitRange), err -} - -func (c *FakeLimitRanges) List(opts meta_v1.ListOptions) (result *v1.LimitRangeList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(limitrangesResource, c.ns, opts), &v1.LimitRangeList{}) + Invokes(testing.NewListAction(limitrangesResource, limitrangesKind, c.ns, opts), &core_v1.LimitRangeList{}) if obj == nil { return nil, err @@ -90,8 +62,8 @@ func (c *FakeLimitRanges) List(opts meta_v1.ListOptions) (result *v1.LimitRangeL if label == nil { label = labels.Everything() } - list := &v1.LimitRangeList{} - for _, item := range obj.(*v1.LimitRangeList).Items { + list := &core_v1.LimitRangeList{ListMeta: obj.(*core_v1.LimitRangeList).ListMeta} + for _, item := range obj.(*core_v1.LimitRangeList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -100,19 +72,57 @@ func (c *FakeLimitRanges) List(opts meta_v1.ListOptions) (result *v1.LimitRangeL } // Watch returns a watch.Interface that watches the requested limitRanges. 
-func (c *FakeLimitRanges) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakeLimitRanges) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(limitrangesResource, c.ns, opts)) } -// Patch applies the patch and returns the patched limitRange. -func (c *FakeLimitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) { +// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. +func (c *FakeLimitRanges) Create(limitRange *core_v1.LimitRange) (result *core_v1.LimitRange, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, data, subresources...), &v1.LimitRange{}) + Invokes(testing.NewCreateAction(limitrangesResource, c.ns, limitRange), &core_v1.LimitRange{}) if obj == nil { return nil, err } - return obj.(*v1.LimitRange), err + return obj.(*core_v1.LimitRange), err +} + +// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. +func (c *FakeLimitRanges) Update(limitRange *core_v1.LimitRange) (result *core_v1.LimitRange, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(limitrangesResource, c.ns, limitRange), &core_v1.LimitRange{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.LimitRange), err +} + +// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. +func (c *FakeLimitRanges) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(limitrangesResource, c.ns, name), &core_v1.LimitRange{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeLimitRanges) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(limitrangesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &core_v1.LimitRangeList{}) + return err +} + +// Patch applies the patch and returns the patched limitRange. +func (c *FakeLimitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.LimitRange, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, data, subresources...), &core_v1.LimitRange{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.LimitRange), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go index 2ec737676..32aae1389 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -33,58 +35,22 @@ type FakeNamespaces struct { var namespacesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"} -func (c *FakeNamespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) { +var namespacesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"} + +// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. +func (c *FakeNamespaces) Get(name string, options v1.GetOptions) (result *core_v1.Namespace, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(namespacesResource, namespace), &v1.Namespace{}) + Invokes(testing.NewRootGetAction(namespacesResource, name), &core_v1.Namespace{}) if obj == nil { return nil, err } - return obj.(*v1.Namespace), err + return obj.(*core_v1.Namespace), err } -func (c *FakeNamespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) { +// List takes label and field selectors, and returns the list of Namespaces that match those selectors. +func (c *FakeNamespaces) List(opts v1.ListOptions) (result *core_v1.NamespaceList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(namespacesResource, namespace), &v1.Namespace{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Namespace), err -} - -func (c *FakeNamespaces) UpdateStatus(namespace *v1.Namespace) (*v1.Namespace, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &v1.Namespace{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Namespace), err -} - -func (c *FakeNamespaces) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(namespacesResource, name), &v1.Namespace{}) - return err -} - -func (c *FakeNamespaces) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(namespacesResource, listOptions) - - _, err := c.Fake.Invokes(action, &v1.NamespaceList{}) - return err -} - -func (c *FakeNamespaces) Get(name string, options meta_v1.GetOptions) (result *v1.Namespace, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(namespacesResource, name), &v1.Namespace{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Namespace), err -} - -func (c *FakeNamespaces) List(opts meta_v1.ListOptions) (result *v1.NamespaceList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(namespacesResource, opts), &v1.NamespaceList{}) + Invokes(testing.NewRootListAction(namespacesResource, namespacesKind, opts), &core_v1.NamespaceList{}) if obj == nil { return nil, err } @@ -93,8 +59,8 @@ func (c *FakeNamespaces) List(opts meta_v1.ListOptions) (result *v1.NamespaceLis if label == nil { label = labels.Everything() } - list := &v1.NamespaceList{} - for _, item := range obj.(*v1.NamespaceList).Items { + list := &core_v1.NamespaceList{ListMeta: obj.(*core_v1.NamespaceList).ListMeta} + for _, item := range obj.(*core_v1.NamespaceList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -103,17 +69,55 @@ func (c *FakeNamespaces) List(opts meta_v1.ListOptions) (result *v1.NamespaceLis } // Watch returns a watch.Interface that watches the requested namespaces. -func (c *FakeNamespaces) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakeNamespaces) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(namespacesResource, opts)) } -// Patch applies the patch and returns the patched namespace. -func (c *FakeNamespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) { +// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *FakeNamespaces) Create(namespace *core_v1.Namespace) (result *core_v1.Namespace, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, data, subresources...), &v1.Namespace{}) + Invokes(testing.NewRootCreateAction(namespacesResource, namespace), &core_v1.Namespace{}) if obj == nil { return nil, err } - return obj.(*v1.Namespace), err + return obj.(*core_v1.Namespace), err +} + +// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *FakeNamespaces) Update(namespace *core_v1.Namespace) (result *core_v1.Namespace, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(namespacesResource, namespace), &core_v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*core_v1.Namespace), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeNamespaces) UpdateStatus(namespace *core_v1.Namespace) (*core_v1.Namespace, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &core_v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*core_v1.Namespace), err +} + +// Delete takes name of the namespace and deletes it. Returns an error if one occurs. +func (c *FakeNamespaces) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(namespacesResource, name), &core_v1.Namespace{}) + return err +} + +// Patch applies the patch and returns the patched namespace. +func (c *FakeNamespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.Namespace, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, data, subresources...), &core_v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*core_v1.Namespace), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go index 1ac7e75d7..a0eae3490 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package fake import ( - "k8s.io/client-go/pkg/api/v1" + "k8s.io/api/core/v1" core "k8s.io/client-go/testing" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go index 357297228..8b99bb8e4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -33,58 +35,22 @@ type FakeNodes struct { var nodesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"} -func (c *FakeNodes) Create(node *v1.Node) (result *v1.Node, err error) { +var nodesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"} + +// Get takes name of the node, and returns the corresponding node object, and an error if there is any. +func (c *FakeNodes) Get(name string, options v1.GetOptions) (result *core_v1.Node, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(nodesResource, node), &v1.Node{}) + Invokes(testing.NewRootGetAction(nodesResource, name), &core_v1.Node{}) if obj == nil { return nil, err } - return obj.(*v1.Node), err + return obj.(*core_v1.Node), err } -func (c *FakeNodes) Update(node *v1.Node) (result *v1.Node, err error) { +// List takes label and field selectors, and returns the list of Nodes that match those selectors. +func (c *FakeNodes) List(opts v1.ListOptions) (result *core_v1.NodeList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(nodesResource, node), &v1.Node{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Node), err -} - -func (c *FakeNodes) UpdateStatus(node *v1.Node) (*v1.Node, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(nodesResource, "status", node), &v1.Node{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Node), err -} - -func (c *FakeNodes) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewRootDeleteAction(nodesResource, name), &v1.Node{}) - return err -} - -func (c *FakeNodes) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(nodesResource, listOptions) - - _, err := c.Fake.Invokes(action, &v1.NodeList{}) - return err -} - -func (c *FakeNodes) Get(name string, options meta_v1.GetOptions) (result *v1.Node, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(nodesResource, name), &v1.Node{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Node), err -} - -func (c *FakeNodes) List(opts meta_v1.ListOptions) (result *v1.NodeList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(nodesResource, opts), &v1.NodeList{}) + Invokes(testing.NewRootListAction(nodesResource, nodesKind, opts), &core_v1.NodeList{}) if obj == nil { return nil, err } @@ -93,8 +59,8 @@ func (c *FakeNodes) List(opts meta_v1.ListOptions) (result *v1.NodeList, err err if label == nil { label = labels.Everything() } - list := &v1.NodeList{} - for _, item := range obj.(*v1.NodeList).Items { + list := &core_v1.NodeList{ListMeta: obj.(*core_v1.NodeList).ListMeta} + for _, item := range obj.(*core_v1.NodeList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -103,17 +69,63 @@ func (c *FakeNodes) List(opts meta_v1.ListOptions) (result *v1.NodeList, err err } // Watch returns a watch.Interface that watches the requested nodes. -func (c *FakeNodes) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakeNodes) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(nodesResource, opts)) } -// Patch applies the patch and returns the patched node. -func (c *FakeNodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) { +// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. +func (c *FakeNodes) Create(node *core_v1.Node) (result *core_v1.Node, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, data, subresources...), &v1.Node{}) + Invokes(testing.NewRootCreateAction(nodesResource, node), &core_v1.Node{}) if obj == nil { return nil, err } - return obj.(*v1.Node), err + return obj.(*core_v1.Node), err +} + +// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. +func (c *FakeNodes) Update(node *core_v1.Node) (result *core_v1.Node, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(nodesResource, node), &core_v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*core_v1.Node), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeNodes) UpdateStatus(node *core_v1.Node) (*core_v1.Node, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(nodesResource, "status", node), &core_v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*core_v1.Node), err +} + +// Delete takes name of the node and deletes it. Returns an error if one occurs. +func (c *FakeNodes) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewRootDeleteAction(nodesResource, name), &core_v1.Node{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeNodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(nodesResource, listOptions) + + _, err := c.Fake.Invokes(action, &core_v1.NodeList{}) + return err +} + +// Patch applies the patch and returns the patched node. +func (c *FakeNodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.Node, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, data, subresources...), &core_v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*core_v1.Node), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go index 597334582..eb684fd29 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package fake import ( - "k8s.io/client-go/pkg/api/v1" + "k8s.io/api/core/v1" core "k8s.io/client-go/testing" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go index 8c3bed76e..5742936d2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -33,58 +35,22 @@ type FakePersistentVolumes struct { var persistentvolumesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumes"} -func (c *FakePersistentVolumes) Create(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { +var persistentvolumesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PersistentVolume"} + +// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. +func (c *FakePersistentVolumes) Get(name string, options v1.GetOptions) (result *core_v1.PersistentVolume, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) + Invokes(testing.NewRootGetAction(persistentvolumesResource, name), &core_v1.PersistentVolume{}) if obj == nil { return nil, err } - return obj.(*v1.PersistentVolume), err + return obj.(*core_v1.PersistentVolume), err } -func (c *FakePersistentVolumes) Update(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { +// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. +func (c *FakePersistentVolumes) List(opts v1.ListOptions) (result *core_v1.PersistentVolumeList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolume), err -} - -func (c *FakePersistentVolumes) UpdateStatus(persistentVolume *v1.PersistentVolume) (*v1.PersistentVolume, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &v1.PersistentVolume{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolume), err -} - -func (c *FakePersistentVolumes) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(persistentvolumesResource, name), &v1.PersistentVolume{}) - return err -} - -func (c *FakePersistentVolumes) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(persistentvolumesResource, listOptions) - - _, err := c.Fake.Invokes(action, &v1.PersistentVolumeList{}) - return err -} - -func (c *FakePersistentVolumes) Get(name string, options meta_v1.GetOptions) (result *v1.PersistentVolume, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(persistentvolumesResource, name), &v1.PersistentVolume{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolume), err -} - -func (c *FakePersistentVolumes) List(opts meta_v1.ListOptions) (result *v1.PersistentVolumeList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(persistentvolumesResource, opts), &v1.PersistentVolumeList{}) + Invokes(testing.NewRootListAction(persistentvolumesResource, persistentvolumesKind, opts), &core_v1.PersistentVolumeList{}) if obj == nil { return nil, err } @@ -93,8 +59,8 @@ func (c *FakePersistentVolumes) List(opts meta_v1.ListOptions) (result *v1.Persi if label == nil { label = labels.Everything() } - list := &v1.PersistentVolumeList{} - for _, item := range obj.(*v1.PersistentVolumeList).Items { + list := &core_v1.PersistentVolumeList{ListMeta: obj.(*core_v1.PersistentVolumeList).ListMeta} + for _, item := range obj.(*core_v1.PersistentVolumeList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -103,17 +69,63 @@ func (c *FakePersistentVolumes) List(opts meta_v1.ListOptions) (result *v1.Persi } // Watch returns a watch.Interface that watches the requested persistentVolumes. -func (c *FakePersistentVolumes) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakePersistentVolumes) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(persistentvolumesResource, opts)) } -// Patch applies the patch and returns the patched persistentVolume. 
-func (c *FakePersistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) { +// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *FakePersistentVolumes) Create(persistentVolume *core_v1.PersistentVolume) (result *core_v1.PersistentVolume, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, data, subresources...), &v1.PersistentVolume{}) + Invokes(testing.NewRootCreateAction(persistentvolumesResource, persistentVolume), &core_v1.PersistentVolume{}) if obj == nil { return nil, err } - return obj.(*v1.PersistentVolume), err + return obj.(*core_v1.PersistentVolume), err +} + +// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *FakePersistentVolumes) Update(persistentVolume *core_v1.PersistentVolume) (result *core_v1.PersistentVolume, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &core_v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*core_v1.PersistentVolume), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakePersistentVolumes) UpdateStatus(persistentVolume *core_v1.PersistentVolume) (*core_v1.PersistentVolume, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &core_v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*core_v1.PersistentVolume), err +} + +// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. +func (c *FakePersistentVolumes) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(persistentvolumesResource, name), &core_v1.PersistentVolume{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePersistentVolumes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(persistentvolumesResource, listOptions) + + _, err := c.Fake.Invokes(action, &core_v1.PersistentVolumeList{}) + return err +} + +// Patch applies the patch and returns the patched persistentVolume. +func (c *FakePersistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.PersistentVolume, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, data, subresources...), &core_v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*core_v1.PersistentVolume), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go index f909a7f08..d2d17984c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -34,63 +36,23 @@ type FakePersistentVolumeClaims struct { var persistentvolumeclaimsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"} -func (c *FakePersistentVolumeClaims) Create(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { +var persistentvolumeclaimsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PersistentVolumeClaim"} + +// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. +func (c *FakePersistentVolumeClaims) Get(name string, options v1.GetOptions) (result *core_v1.PersistentVolumeClaim, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewGetAction(persistentvolumeclaimsResource, c.ns, name), &core_v1.PersistentVolumeClaim{}) if obj == nil { return nil, err } - return obj.(*v1.PersistentVolumeClaim), err + return obj.(*core_v1.PersistentVolumeClaim), err } -func (c *FakePersistentVolumeClaims) Update(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { +// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. +func (c *FakePersistentVolumeClaims) List(opts v1.ListOptions) (result *core_v1.PersistentVolumeClaimList, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolumeClaim), err -} - -func (c *FakePersistentVolumeClaims) UpdateStatus(persistentVolumeClaim *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolumeClaim), err -} - -func (c *FakePersistentVolumeClaims) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(persistentvolumeclaimsResource, c.ns, name), &v1.PersistentVolumeClaim{}) - - return err -} - -func (c *FakePersistentVolumeClaims) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(persistentvolumeclaimsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1.PersistentVolumeClaimList{}) - return err -} - -func (c *FakePersistentVolumeClaims) Get(name string, options meta_v1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewGetAction(persistentvolumeclaimsResource, c.ns, name), &v1.PersistentVolumeClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolumeClaim), err -} - -func (c *FakePersistentVolumeClaims) List(opts meta_v1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(persistentvolumeclaimsResource, c.ns, opts), &v1.PersistentVolumeClaimList{}) + Invokes(testing.NewListAction(persistentvolumeclaimsResource, persistentvolumeclaimsKind, c.ns, opts), &core_v1.PersistentVolumeClaimList{}) if obj == nil { return nil, err @@ -100,8 +62,8 @@ func (c *FakePersistentVolumeClaims) List(opts meta_v1.ListOptions) (result *v1. if label == nil { label = labels.Everything() } - list := &v1.PersistentVolumeClaimList{} - for _, item := range obj.(*v1.PersistentVolumeClaimList).Items { + list := &core_v1.PersistentVolumeClaimList{ListMeta: obj.(*core_v1.PersistentVolumeClaimList).ListMeta} + for _, item := range obj.(*core_v1.PersistentVolumeClaimList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -110,19 +72,69 @@ func (c *FakePersistentVolumeClaims) List(opts meta_v1.ListOptions) (result *v1. } // Watch returns a watch.Interface that watches the requested persistentVolumeClaims. -func (c *FakePersistentVolumeClaims) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakePersistentVolumeClaims) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(persistentvolumeclaimsResource, c.ns, opts)) } -// Patch applies the patch and returns the patched persistentVolumeClaim. -func (c *FakePersistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { +// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. +func (c *FakePersistentVolumeClaims) Create(persistentVolumeClaim *core_v1.PersistentVolumeClaim) (result *core_v1.PersistentVolumeClaim, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, data, subresources...), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewCreateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &core_v1.PersistentVolumeClaim{}) if obj == nil { return nil, err } - return obj.(*v1.PersistentVolumeClaim), err + return obj.(*core_v1.PersistentVolumeClaim), err +} + +// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. +func (c *FakePersistentVolumeClaims) Update(persistentVolumeClaim *core_v1.PersistentVolumeClaim) (result *core_v1.PersistentVolumeClaim, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &core_v1.PersistentVolumeClaim{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.PersistentVolumeClaim), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakePersistentVolumeClaims) UpdateStatus(persistentVolumeClaim *core_v1.PersistentVolumeClaim) (*core_v1.PersistentVolumeClaim, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim), &core_v1.PersistentVolumeClaim{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.PersistentVolumeClaim), err +} + +// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. +func (c *FakePersistentVolumeClaims) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(persistentvolumeclaimsResource, c.ns, name), &core_v1.PersistentVolumeClaim{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePersistentVolumeClaims) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(persistentvolumeclaimsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &core_v1.PersistentVolumeClaimList{}) + return err +} + +// Patch applies the patch and returns the patched persistentVolumeClaim. +func (c *FakePersistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.PersistentVolumeClaim, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, data, subresources...), &core_v1.PersistentVolumeClaim{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.PersistentVolumeClaim), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go index 3f921fe02..2960b12ae 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -34,63 +36,23 @@ type FakePods struct { var podsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} -func (c *FakePods) Create(pod *v1.Pod) (result *v1.Pod, err error) { +var podsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} + +// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. +func (c *FakePods) Get(name string, options v1.GetOptions) (result *core_v1.Pod, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(podsResource, c.ns, pod), &v1.Pod{}) + Invokes(testing.NewGetAction(podsResource, c.ns, name), &core_v1.Pod{}) if obj == nil { return nil, err } - return obj.(*v1.Pod), err + return obj.(*core_v1.Pod), err } -func (c *FakePods) Update(pod *v1.Pod) (result *v1.Pod, err error) { +// List takes label and field selectors, and returns the list of Pods that match those selectors. 
+func (c *FakePods) List(opts v1.ListOptions) (result *core_v1.PodList, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podsResource, c.ns, pod), &v1.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Pod), err -} - -func (c *FakePods) UpdateStatus(pod *v1.Pod) (*v1.Pod, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &v1.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Pod), err -} - -func (c *FakePods) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(podsResource, c.ns, name), &v1.Pod{}) - - return err -} - -func (c *FakePods) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(podsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1.PodList{}) - return err -} - -func (c *FakePods) Get(name string, options meta_v1.GetOptions) (result *v1.Pod, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(podsResource, c.ns, name), &v1.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Pod), err -} - -func (c *FakePods) List(opts meta_v1.ListOptions) (result *v1.PodList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(podsResource, c.ns, opts), &v1.PodList{}) + Invokes(testing.NewListAction(podsResource, podsKind, c.ns, opts), &core_v1.PodList{}) if obj == nil { return nil, err @@ -100,8 +62,8 @@ func (c *FakePods) List(opts meta_v1.ListOptions) (result *v1.PodList, err error if label == nil { label = labels.Everything() } - list := &v1.PodList{} - for _, item := range obj.(*v1.PodList).Items { + list := &core_v1.PodList{ListMeta: obj.(*core_v1.PodList).ListMeta} + for _, item := range obj.(*core_v1.PodList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -110,19 +72,69 @@ func (c *FakePods) List(opts meta_v1.ListOptions) (result *v1.PodList, err error } // Watch returns a watch.Interface that watches the requested pods. -func (c *FakePods) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakePods) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(podsResource, c.ns, opts)) } -// Patch applies the patch and returns the patched pod. -func (c *FakePods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) { +// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *FakePods) Create(pod *core_v1.Pod) (result *core_v1.Pod, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, data, subresources...), &v1.Pod{}) + Invokes(testing.NewCreateAction(podsResource, c.ns, pod), &core_v1.Pod{}) if obj == nil { return nil, err } - return obj.(*v1.Pod), err + return obj.(*core_v1.Pod), err +} + +// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *FakePods) Update(pod *core_v1.Pod) (result *core_v1.Pod, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(podsResource, c.ns, pod), &core_v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.Pod), err +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakePods) UpdateStatus(pod *core_v1.Pod) (*core_v1.Pod, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &core_v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.Pod), err +} + +// Delete takes name of the pod and deletes it. Returns an error if one occurs. +func (c *FakePods) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(podsResource, c.ns, name), &core_v1.Pod{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePods) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(podsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &core_v1.PodList{}) + return err +} + +// Patch applies the patch and returns the patched pod. +func (c *FakePods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.Pod, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, data, subresources...), &core_v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.Pod), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go index c9d404e16..497cc7857 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go @@ -17,8 +17,8 @@ limitations under the License. package fake import ( - "k8s.io/client-go/pkg/api/v1" - policy "k8s.io/client-go/pkg/apis/policy/v1beta1" + "k8s.io/api/core/v1" + policy "k8s.io/api/policy/v1beta1" restclient "k8s.io/client-go/rest" core "k8s.io/client-go/testing" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go index 86091e412..3ae6ca551 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -34,53 +36,23 @@ type FakePodTemplates struct { var podtemplatesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "podtemplates"} -func (c *FakePodTemplates) Create(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { +var podtemplatesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PodTemplate"} + +// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. +func (c *FakePodTemplates) Get(name string, options v1.GetOptions) (result *core_v1.PodTemplate, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) + Invokes(testing.NewGetAction(podtemplatesResource, c.ns, name), &core_v1.PodTemplate{}) if obj == nil { return nil, err } - return obj.(*v1.PodTemplate), err + return obj.(*core_v1.PodTemplate), err } -func (c *FakePodTemplates) Update(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { +// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. +func (c *FakePodTemplates) List(opts v1.ListOptions) (result *core_v1.PodTemplateList, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PodTemplate), err -} - -func (c *FakePodTemplates) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(podtemplatesResource, c.ns, name), &v1.PodTemplate{}) - - return err -} - -func (c *FakePodTemplates) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(podtemplatesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1.PodTemplateList{}) - return err -} - -func (c *FakePodTemplates) Get(name string, options meta_v1.GetOptions) (result *v1.PodTemplate, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(podtemplatesResource, c.ns, name), &v1.PodTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PodTemplate), err -} - -func (c *FakePodTemplates) List(opts meta_v1.ListOptions) (result *v1.PodTemplateList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(podtemplatesResource, c.ns, opts), &v1.PodTemplateList{}) + Invokes(testing.NewListAction(podtemplatesResource, podtemplatesKind, c.ns, opts), &core_v1.PodTemplateList{}) if obj == nil { return nil, err @@ -90,8 +62,8 @@ func (c *FakePodTemplates) List(opts meta_v1.ListOptions) (result *v1.PodTemplat if label == nil { label = labels.Everything() } - list := &v1.PodTemplateList{} - for _, item := range obj.(*v1.PodTemplateList).Items { + list := &core_v1.PodTemplateList{ListMeta: obj.(*core_v1.PodTemplateList).ListMeta} + for _, item := range obj.(*core_v1.PodTemplateList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -100,19 +72,57 @@ func (c *FakePodTemplates) List(opts meta_v1.ListOptions) (result *v1.PodTemplat } // Watch returns a watch.Interface that watches the requested podTemplates. -func (c *FakePodTemplates) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakePodTemplates) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(podtemplatesResource, c.ns, opts)) } -// Patch applies the patch and returns the patched podTemplate. -func (c *FakePodTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) { +// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. +func (c *FakePodTemplates) Create(podTemplate *core_v1.PodTemplate) (result *core_v1.PodTemplate, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, data, subresources...), &v1.PodTemplate{}) + Invokes(testing.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &core_v1.PodTemplate{}) if obj == nil { return nil, err } - return obj.(*v1.PodTemplate), err + return obj.(*core_v1.PodTemplate), err +} + +// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. +func (c *FakePodTemplates) Update(podTemplate *core_v1.PodTemplate) (result *core_v1.PodTemplate, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &core_v1.PodTemplate{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.PodTemplate), err +} + +// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. +func (c *FakePodTemplates) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(podtemplatesResource, c.ns, name), &core_v1.PodTemplate{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePodTemplates) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(podtemplatesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &core_v1.PodTemplateList{}) + return err +} + +// Patch applies the patch and returns the patched podTemplate. +func (c *FakePodTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.PodTemplate, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, data, subresources...), &core_v1.PodTemplate{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.PodTemplate), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go index f2bb8cef9..f25235ff2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -34,63 +37,23 @@ type FakeReplicationControllers struct { var replicationcontrollersResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"} -func (c *FakeReplicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { +var replicationcontrollersKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ReplicationController"} + +// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. +func (c *FakeReplicationControllers) Get(name string, options v1.GetOptions) (result *core_v1.ReplicationController, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) + Invokes(testing.NewGetAction(replicationcontrollersResource, c.ns, name), &core_v1.ReplicationController{}) if obj == nil { return nil, err } - return obj.(*v1.ReplicationController), err + return obj.(*core_v1.ReplicationController), err } -func (c *FakeReplicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { +// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. +func (c *FakeReplicationControllers) List(opts v1.ListOptions) (result *core_v1.ReplicationControllerList, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicationController), err -} - -func (c *FakeReplicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (*v1.ReplicationController, error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &v1.ReplicationController{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicationController), err -} - -func (c *FakeReplicationControllers) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(replicationcontrollersResource, c.ns, name), &v1.ReplicationController{}) - - return err -} - -func (c *FakeReplicationControllers) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(replicationcontrollersResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1.ReplicationControllerList{}) - return err -} - -func (c *FakeReplicationControllers) Get(name string, options meta_v1.GetOptions) (result *v1.ReplicationController, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(replicationcontrollersResource, c.ns, name), &v1.ReplicationController{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicationController), err -} - -func (c *FakeReplicationControllers) List(opts meta_v1.ListOptions) (result *v1.ReplicationControllerList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(replicationcontrollersResource, c.ns, opts), &v1.ReplicationControllerList{}) + Invokes(testing.NewListAction(replicationcontrollersResource, replicationcontrollersKind, c.ns, opts), &core_v1.ReplicationControllerList{}) if obj == nil { return nil, err @@ -100,8 +63,8 @@ func (c *FakeReplicationControllers) List(opts meta_v1.ListOptions) (result *v1. if label == nil { label = labels.Everything() } - list := &v1.ReplicationControllerList{} - for _, item := range obj.(*v1.ReplicationControllerList).Items { + list := &core_v1.ReplicationControllerList{ListMeta: obj.(*core_v1.ReplicationControllerList).ListMeta} + for _, item := range obj.(*core_v1.ReplicationControllerList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -110,19 +73,91 @@ func (c *FakeReplicationControllers) List(opts meta_v1.ListOptions) (result *v1. } // Watch returns a watch.Interface that watches the requested replicationControllers. -func (c *FakeReplicationControllers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakeReplicationControllers) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(replicationcontrollersResource, c.ns, opts)) } -// Patch applies the patch and returns the patched replicationController. -func (c *FakeReplicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) { +// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *FakeReplicationControllers) Create(replicationController *core_v1.ReplicationController) (result *core_v1.ReplicationController, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, data, subresources...), &v1.ReplicationController{}) + Invokes(testing.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &core_v1.ReplicationController{}) if obj == nil { return nil, err } - return obj.(*v1.ReplicationController), err + return obj.(*core_v1.ReplicationController), err +} + +// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *FakeReplicationControllers) Update(replicationController *core_v1.ReplicationController) (result *core_v1.ReplicationController, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &core_v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.ReplicationController), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeReplicationControllers) UpdateStatus(replicationController *core_v1.ReplicationController) (*core_v1.ReplicationController, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &core_v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.ReplicationController), err +} + +// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. +func (c *FakeReplicationControllers) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(replicationcontrollersResource, c.ns, name), &core_v1.ReplicationController{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeReplicationControllers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(replicationcontrollersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &core_v1.ReplicationControllerList{}) + return err +} + +// Patch applies the patch and returns the patched replicationController. +func (c *FakeReplicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.ReplicationController, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, data, subresources...), &core_v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.ReplicationController), err +} + +// GetScale takes name of the replicationController, and returns the corresponding scale object, and an error if there is any. +func (c *FakeReplicationControllers) GetScale(replicationControllerName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(replicationcontrollersResource, c.ns, "scale", replicationControllerName), &v1beta1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Scale), err +} + +// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
+func (c *FakeReplicationControllers) UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "scale", c.ns, scale), &v1beta1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go index 0389f8e87..9c6ae20a2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -34,63 +36,23 @@ type FakeResourceQuotas struct { var resourcequotasResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "resourcequotas"} -func (c *FakeResourceQuotas) Create(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { +var resourcequotasKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ResourceQuota"} + +// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. +func (c *FakeResourceQuotas) Get(name string, options v1.GetOptions) (result *core_v1.ResourceQuota, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) + Invokes(testing.NewGetAction(resourcequotasResource, c.ns, name), &core_v1.ResourceQuota{}) if obj == nil { return nil, err } - return obj.(*v1.ResourceQuota), err + return obj.(*core_v1.ResourceQuota), err } -func (c *FakeResourceQuotas) Update(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { +// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. +func (c *FakeResourceQuotas) List(opts v1.ListOptions) (result *core_v1.ResourceQuotaList, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ResourceQuota), err -} - -func (c *FakeResourceQuotas) UpdateStatus(resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &v1.ResourceQuota{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ResourceQuota), err -} - -func (c *FakeResourceQuotas) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(resourcequotasResource, c.ns, name), &v1.ResourceQuota{}) - - return err -} - -func (c *FakeResourceQuotas) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(resourcequotasResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1.ResourceQuotaList{}) - return err -} - -func (c *FakeResourceQuotas) Get(name string, options meta_v1.GetOptions) (result *v1.ResourceQuota, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(resourcequotasResource, c.ns, name), &v1.ResourceQuota{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ResourceQuota), err -} - -func (c *FakeResourceQuotas) List(opts meta_v1.ListOptions) (result *v1.ResourceQuotaList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(resourcequotasResource, c.ns, opts), &v1.ResourceQuotaList{}) + Invokes(testing.NewListAction(resourcequotasResource, resourcequotasKind, c.ns, opts), &core_v1.ResourceQuotaList{}) if obj == nil { return nil, err @@ -100,8 +62,8 @@ func (c *FakeResourceQuotas) List(opts meta_v1.ListOptions) (result *v1.Resource if label == nil { label = labels.Everything() } - list := &v1.ResourceQuotaList{} - for _, item := range obj.(*v1.ResourceQuotaList).Items { + list := &core_v1.ResourceQuotaList{ListMeta: obj.(*core_v1.ResourceQuotaList).ListMeta} + for _, item := range obj.(*core_v1.ResourceQuotaList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -110,19 +72,69 @@ func (c *FakeResourceQuotas) List(opts meta_v1.ListOptions) (result *v1.Resource } // Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *FakeResourceQuotas) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakeResourceQuotas) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(resourcequotasResource, c.ns, opts)) } -// Patch applies the patch and returns the patched resourceQuota. -func (c *FakeResourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) { +// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *FakeResourceQuotas) Create(resourceQuota *core_v1.ResourceQuota) (result *core_v1.ResourceQuota, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, data, subresources...), &v1.ResourceQuota{}) + Invokes(testing.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &core_v1.ResourceQuota{}) if obj == nil { return nil, err } - return obj.(*v1.ResourceQuota), err + return obj.(*core_v1.ResourceQuota), err +} + +// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *FakeResourceQuotas) Update(resourceQuota *core_v1.ResourceQuota) (result *core_v1.ResourceQuota, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &core_v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.ResourceQuota), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakeResourceQuotas) UpdateStatus(resourceQuota *core_v1.ResourceQuota) (*core_v1.ResourceQuota, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &core_v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.ResourceQuota), err +} + +// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. +func (c *FakeResourceQuotas) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(resourcequotasResource, c.ns, name), &core_v1.ResourceQuota{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeResourceQuotas) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(resourcequotasResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &core_v1.ResourceQuotaList{}) + return err +} + +// Patch applies the patch and returns the patched resourceQuota. +func (c *FakeResourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.ResourceQuota, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, data, subresources...), &core_v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.ResourceQuota), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go index a0b305504..e1eeea5f8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -34,53 +36,23 @@ type FakeSecrets struct { var secretsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"} -func (c *FakeSecrets) Create(secret *v1.Secret) (result *v1.Secret, err error) { +var secretsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"} + +// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. +func (c *FakeSecrets) Get(name string, options v1.GetOptions) (result *core_v1.Secret, err error) { obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(secretsResource, c.ns, secret), &v1.Secret{}) + Invokes(testing.NewGetAction(secretsResource, c.ns, name), &core_v1.Secret{}) if obj == nil { return nil, err } - return obj.(*v1.Secret), err + return obj.(*core_v1.Secret), err } -func (c *FakeSecrets) Update(secret *v1.Secret) (result *v1.Secret, err error) { +// List takes label and field selectors, and returns the list of Secrets that match those selectors. +func (c *FakeSecrets) List(opts v1.ListOptions) (result *core_v1.SecretList, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(secretsResource, c.ns, secret), &v1.Secret{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Secret), err -} - -func (c *FakeSecrets) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(secretsResource, c.ns, name), &v1.Secret{}) - - return err -} - -func (c *FakeSecrets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(secretsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1.SecretList{}) - return err -} - -func (c *FakeSecrets) Get(name string, options meta_v1.GetOptions) (result *v1.Secret, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(secretsResource, c.ns, name), &v1.Secret{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Secret), err -} - -func (c *FakeSecrets) List(opts meta_v1.ListOptions) (result *v1.SecretList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(secretsResource, c.ns, opts), &v1.SecretList{}) + Invokes(testing.NewListAction(secretsResource, secretsKind, c.ns, opts), &core_v1.SecretList{}) if obj == nil { return nil, err @@ -90,8 +62,8 @@ func (c *FakeSecrets) List(opts meta_v1.ListOptions) (result *v1.SecretList, err if label == nil { label = labels.Everything() } - list := &v1.SecretList{} - for _, item := range obj.(*v1.SecretList).Items { + list := &core_v1.SecretList{ListMeta: obj.(*core_v1.SecretList).ListMeta} + for _, item := range obj.(*core_v1.SecretList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -100,19 +72,57 @@ func (c *FakeSecrets) List(opts meta_v1.ListOptions) (result *v1.SecretList, err } // Watch returns a watch.Interface that watches the requested secrets. -func (c *FakeSecrets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakeSecrets) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(secretsResource, c.ns, opts)) } -// Patch applies the patch and returns the patched secret. -func (c *FakeSecrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) { +// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *FakeSecrets) Create(secret *core_v1.Secret) (result *core_v1.Secret, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, data, subresources...), &v1.Secret{}) + Invokes(testing.NewCreateAction(secretsResource, c.ns, secret), &core_v1.Secret{}) if obj == nil { return nil, err } - return obj.(*v1.Secret), err + return obj.(*core_v1.Secret), err +} + +// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. 
+func (c *FakeSecrets) Update(secret *core_v1.Secret) (result *core_v1.Secret, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(secretsResource, c.ns, secret), &core_v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.Secret), err +} + +// Delete takes name of the secret and deletes it. Returns an error if one occurs. +func (c *FakeSecrets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(secretsResource, c.ns, name), &core_v1.Secret{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeSecrets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(secretsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &core_v1.SecretList{}) + return err +} + +// Patch applies the patch and returns the patched secret. +func (c *FakeSecrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.Secret, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, data, subresources...), &core_v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.Secret), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go index 4cc8ed0e9..aa23e0efa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -34,63 +36,23 @@ type FakeServices struct { var servicesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"} -func (c *FakeServices) Create(service *v1.Service) (result *v1.Service, err error) { +var servicesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Service"} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. +func (c *FakeServices) Get(name string, options v1.GetOptions) (result *core_v1.Service, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &v1.Service{}) + Invokes(testing.NewGetAction(servicesResource, c.ns, name), &core_v1.Service{}) if obj == nil { return nil, err } - return obj.(*v1.Service), err + return obj.(*core_v1.Service), err } -func (c *FakeServices) Update(service *v1.Service) (result *v1.Service, err error) { +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *FakeServices) List(opts v1.ListOptions) (result *core_v1.ServiceList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &v1.Service{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Service), err -} - -func (c *FakeServices) UpdateStatus(service *v1.Service) (*v1.Service, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &v1.Service{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Service), err -} - -func (c *FakeServices) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(servicesResource, c.ns, name), &v1.Service{}) - - return err -} - -func (c *FakeServices) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(servicesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1.ServiceList{}) - return err -} - -func (c *FakeServices) Get(name string, options meta_v1.GetOptions) (result *v1.Service, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(servicesResource, c.ns, name), &v1.Service{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Service), err -} - -func (c *FakeServices) List(opts meta_v1.ListOptions) (result *v1.ServiceList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(servicesResource, c.ns, opts), &v1.ServiceList{}) + Invokes(testing.NewListAction(servicesResource, servicesKind, c.ns, opts), &core_v1.ServiceList{}) if obj == nil { return nil, err @@ -100,8 +62,8 @@ func (c *FakeServices) List(opts meta_v1.ListOptions) (result *v1.ServiceList, e if label == nil { label = labels.Everything() } - list := &v1.ServiceList{} - for _, item := range obj.(*v1.ServiceList).Items { + list := &core_v1.ServiceList{ListMeta: obj.(*core_v1.ServiceList).ListMeta} + for _, item := range obj.(*core_v1.ServiceList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -110,19 +72,61 @@ func (c *FakeServices) List(opts meta_v1.ListOptions) (result *v1.ServiceList, e } // Watch returns a watch.Interface that watches the requested services. -func (c *FakeServices) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakeServices) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(servicesResource, c.ns, opts)) } -// Patch applies the patch and returns the patched service. -func (c *FakeServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) { +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *FakeServices) Create(service *core_v1.Service) (result *core_v1.Service, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, data, subresources...), &v1.Service{}) + Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &core_v1.Service{}) if obj == nil { return nil, err } - return obj.(*v1.Service), err + return obj.(*core_v1.Service), err +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *FakeServices) Update(service *core_v1.Service) (result *core_v1.Service, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &core_v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.Service), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeServices) UpdateStatus(service *core_v1.Service) (*core_v1.Service, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &core_v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.Service), err +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *FakeServices) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(servicesResource, c.ns, name), &core_v1.Service{}) + + return err +} + +// Patch applies the patch and returns the patched service. +func (c *FakeServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, data, subresources...), &core_v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.Service), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go index c049d7dec..1d5837f8b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/api/v1" testing "k8s.io/client-go/testing" ) @@ -34,53 +36,23 @@ type FakeServiceAccounts struct { var serviceaccountsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "serviceaccounts"} -func (c *FakeServiceAccounts) Create(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { +var serviceaccountsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ServiceAccount"} + +// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. +func (c *FakeServiceAccounts) Get(name string, options v1.GetOptions) (result *core_v1.ServiceAccount, err error) { obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) + Invokes(testing.NewGetAction(serviceaccountsResource, c.ns, name), &core_v1.ServiceAccount{}) if obj == nil { return nil, err } - return obj.(*v1.ServiceAccount), err + return obj.(*core_v1.ServiceAccount), err } -func (c *FakeServiceAccounts) Update(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { +// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. +func (c *FakeServiceAccounts) List(opts v1.ListOptions) (result *core_v1.ServiceAccountList, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ServiceAccount), err -} - -func (c *FakeServiceAccounts) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(serviceaccountsResource, c.ns, name), &v1.ServiceAccount{}) - - return err -} - -func (c *FakeServiceAccounts) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(serviceaccountsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1.ServiceAccountList{}) - return err -} - -func (c *FakeServiceAccounts) Get(name string, options meta_v1.GetOptions) (result *v1.ServiceAccount, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(serviceaccountsResource, c.ns, name), &v1.ServiceAccount{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ServiceAccount), err -} - -func (c *FakeServiceAccounts) List(opts meta_v1.ListOptions) (result *v1.ServiceAccountList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(serviceaccountsResource, c.ns, opts), &v1.ServiceAccountList{}) + Invokes(testing.NewListAction(serviceaccountsResource, serviceaccountsKind, c.ns, opts), &core_v1.ServiceAccountList{}) if obj == nil { return nil, err @@ -90,8 +62,8 @@ func (c *FakeServiceAccounts) List(opts meta_v1.ListOptions) (result *v1.Service if label == nil { label = labels.Everything() } - list := &v1.ServiceAccountList{} - for _, item := range obj.(*v1.ServiceAccountList).Items { + list := &core_v1.ServiceAccountList{ListMeta: obj.(*core_v1.ServiceAccountList).ListMeta} + for _, item := range obj.(*core_v1.ServiceAccountList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -100,19 +72,57 @@ func (c *FakeServiceAccounts) List(opts meta_v1.ListOptions) (result *v1.Service } // Watch returns a watch.Interface that watches the requested serviceAccounts. -func (c *FakeServiceAccounts) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakeServiceAccounts) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(serviceaccountsResource, c.ns, opts)) } -// Patch applies the patch and returns the patched serviceAccount. -func (c *FakeServiceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) { +// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. +func (c *FakeServiceAccounts) Create(serviceAccount *core_v1.ServiceAccount) (result *core_v1.ServiceAccount, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, data, subresources...), &v1.ServiceAccount{}) + Invokes(testing.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &core_v1.ServiceAccount{}) if obj == nil { return nil, err } - return obj.(*v1.ServiceAccount), err + return obj.(*core_v1.ServiceAccount), err +} + +// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. +func (c *FakeServiceAccounts) Update(serviceAccount *core_v1.ServiceAccount) (result *core_v1.ServiceAccount, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &core_v1.ServiceAccount{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.ServiceAccount), err +} + +// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. +func (c *FakeServiceAccounts) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(serviceaccountsResource, c.ns, name), &core_v1.ServiceAccount{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeServiceAccounts) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(serviceaccountsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &core_v1.ServiceAccountList{}) + return err +} + +// Patch applies the patch and returns the patched serviceAccount. +func (c *FakeServiceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core_v1.ServiceAccount, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, data, subresources...), &core_v1.ServiceAccount{}) + + if obj == nil { + return nil, err + } + return obj.(*core_v1.ServiceAccount), err } diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/defaults.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount_expansion.go similarity index 52% rename from vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/defaults.go rename to vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount_expansion.go index 6da07cc7d..a0efbcc2f 100644 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/defaults.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,24 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v2alpha1 +package fake import ( - "k8s.io/apimachinery/pkg/runtime" + authenticationv1 "k8s.io/api/authentication/v1" + core "k8s.io/client-go/testing" ) -func addDefaultingFuncs(scheme *runtime.Scheme) error { - RegisterDefaults(scheme) - return scheme.AddDefaultingFuncs( - SetDefaults_CronJob, - ) -} +func (c *FakeServiceAccounts) CreateToken(name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) { + obj, err := c.Fake.Invokes(core.NewCreateSubresourceAction(serviceaccountsResource, name, "token", c.ns, tr), &authenticationv1.TokenRequest{}) -func SetDefaults_CronJob(obj *CronJob) { - if obj.Spec.ConcurrencyPolicy == "" { - obj.Spec.ConcurrencyPolicy = AllowConcurrent - } - if obj.Spec.Suspend == nil { - obj.Spec.Suspend = new(bool) + if obj == nil { + return nil, err } + return obj.(*authenticationv1.TokenRequest), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go index 5fe0585b4..6e8591b12 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 type ComponentStatusExpansion interface{} @@ -35,5 +37,3 @@ type ReplicationControllerExpansion interface{} type ResourceQuotaExpansion interface{} type SecretExpansion interface{} - -type ServiceAccountExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go index 998f03452..396e5ca4b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -58,6 +60,41 @@ func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges { } } +// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. +func (c *limitRanges) Get(name string, options meta_v1.GetOptions) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. 
+func (c *limitRanges) List(opts meta_v1.ListOptions) (result *v1.LimitRangeList, err error) { + result = &v1.LimitRangeList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested limitRanges. +func (c *limitRanges) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. func (c *limitRanges) Create(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { result = &v1.LimitRange{} @@ -105,41 +142,6 @@ func (c *limitRanges) DeleteCollection(options *meta_v1.DeleteOptions, listOptio Error() } -// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. -func (c *limitRanges) Get(name string, options meta_v1.GetOptions) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. -func (c *limitRanges) List(opts meta_v1.ListOptions) (result *v1.LimitRangeList, err error) { - result = &v1.LimitRangeList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested limitRanges. -func (c *limitRanges) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched limitRange. func (c *limitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) { result = &v1.LimitRange{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go index 3092bd8e1..707b3e971 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package v1 import ( + v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -37,7 +39,6 @@ type NamespaceInterface interface { Update(*v1.Namespace) (*v1.Namespace, error) UpdateStatus(*v1.Namespace) (*v1.Namespace, error) Delete(name string, options *meta_v1.DeleteOptions) error - DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error Get(name string, options meta_v1.GetOptions) (*v1.Namespace, error) List(opts meta_v1.ListOptions) (*v1.NamespaceList, error) Watch(opts meta_v1.ListOptions) (watch.Interface, error) @@ -57,64 +58,6 @@ func newNamespaces(c *CoreV1Client) *namespaces { } } -// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Post(). - Resource("namespaces"). - Body(namespace). - Do(). - Into(result) - return -} - -// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - Body(namespace). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). - -func (c *namespaces) UpdateStatus(namespace *v1.Namespace) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - SubResource("status"). - Body(namespace). - Do(). - Into(result) - return -} - -// Delete takes name of the namespace and deletes it. Returns an error if one occurs. -func (c *namespaces) Delete(name string, options *meta_v1.DeleteOptions) error { - return c.client.Delete(). - Resource("namespaces"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *namespaces) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - return c.client.Delete(). - Resource("namespaces"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - // Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. func (c *namespaces) Get(name string, options meta_v1.GetOptions) (result *v1.Namespace, err error) { result = &v1.Namespace{} @@ -147,6 +90,54 @@ func (c *namespaces) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { Watch() } +// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Post(). + Resource("namespaces"). + Body(namespace). + Do(). + Into(result) + return +} + +// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. 
+func (c *namespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + Body(namespace). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *namespaces) UpdateStatus(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + SubResource("status"). + Body(namespace). + Do(). + Into(result) + return +} + +// Delete takes name of the namespace and deletes it. Returns an error if one occurs. +func (c *namespaces) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("namespaces"). + Name(name). + Body(options). + Do(). + Error() +} + // Patch applies the patch and returns the patched namespace. func (c *namespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) { result = &v1.Namespace{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go index 203430000..17effe29c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go @@ -16,7 +16,7 @@ limitations under the License. package v1 -import "k8s.io/client-go/pkg/api/v1" +import "k8s.io/api/core/v1" // The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface. type NamespaceExpansion interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go index 6b82d4fab..1462d2549 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -57,6 +59,38 @@ func newNodes(c *CoreV1Client) *nodes { } } +// Get takes name of the node, and returns the corresponding node object, and an error if there is any. +func (c *nodes) Get(name string, options meta_v1.GetOptions) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Get(). + Resource("nodes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Nodes that match those selectors. +func (c *nodes) List(opts meta_v1.ListOptions) (result *v1.NodeList, err error) { + result = &v1.NodeList{} + err = c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested nodes. +func (c *nodes) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. func (c *nodes) Create(node *v1.Node) (result *v1.Node, err error) { result = &v1.Node{} @@ -81,7 +115,7 @@ func (c *nodes) Update(node *v1.Node) (result *v1.Node, err error) { } // UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *nodes) UpdateStatus(node *v1.Node) (result *v1.Node, err error) { result = &v1.Node{} @@ -115,38 +149,6 @@ func (c *nodes) DeleteCollection(options *meta_v1.DeleteOptions, listOptions met Error() } -// Get takes name of the node, and returns the corresponding node object, and an error if there is any. -func (c *nodes) Get(name string, options meta_v1.GetOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Get(). - Resource("nodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Nodes that match those selectors. -func (c *nodes) List(opts meta_v1.ListOptions) (result *v1.NodeList, err error) { - result = &v1.NodeList{} - err = c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nodes. -func (c *nodes) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched node. func (c *nodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) { result = &v1.Node{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go index 29c12aabc..5db29c3f7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go @@ -17,8 +17,8 @@ limitations under the License. package v1 import ( + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/pkg/api/v1" ) // The NodeExpansion interface allows manually adding extra methods to the NodeInterface. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go index 16a4b7316..f9010119b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -57,6 +59,38 @@ func newPersistentVolumes(c *CoreV1Client) *persistentVolumes { } } +// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. +func (c *persistentVolumes) Get(name string, options meta_v1.GetOptions) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Get(). + Resource("persistentvolumes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. +func (c *persistentVolumes) List(opts meta_v1.ListOptions) (result *v1.PersistentVolumeList, err error) { + result = &v1.PersistentVolumeList{} + err = c.client.Get(). + Resource("persistentvolumes"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested persistentVolumes. +func (c *persistentVolumes) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("persistentvolumes"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. func (c *persistentVolumes) Create(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { result = &v1.PersistentVolume{} @@ -81,7 +115,7 @@ func (c *persistentVolumes) Update(persistentVolume *v1.PersistentVolume) (resul } // UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *persistentVolumes) UpdateStatus(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { result = &v1.PersistentVolume{} @@ -115,38 +149,6 @@ func (c *persistentVolumes) DeleteCollection(options *meta_v1.DeleteOptions, lis Error() } -// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. -func (c *persistentVolumes) Get(name string, options meta_v1.GetOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Get(). - Resource("persistentvolumes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. -func (c *persistentVolumes) List(opts meta_v1.ListOptions) (result *v1.PersistentVolumeList, err error) { - result = &v1.PersistentVolumeList{} - err = c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumes. 
-func (c *persistentVolumes) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched persistentVolume. func (c *persistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) { result = &v1.PersistentVolume{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go index ae7cc4a4f..59253d5ef 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -59,6 +61,41 @@ func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVol } } +// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. +func (c *persistentVolumeClaims) Get(name string, options meta_v1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. +func (c *persistentVolumeClaims) List(opts meta_v1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { + result = &v1.PersistentVolumeClaimList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. +func (c *persistentVolumeClaims) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. func (c *persistentVolumeClaims) Create(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { result = &v1.PersistentVolumeClaim{} @@ -85,7 +122,7 @@ func (c *persistentVolumeClaims) Update(persistentVolumeClaim *v1.PersistentVolu } // UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *persistentVolumeClaims) UpdateStatus(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { result = &v1.PersistentVolumeClaim{} @@ -122,41 +159,6 @@ func (c *persistentVolumeClaims) DeleteCollection(options *meta_v1.DeleteOptions Error() } -// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. -func (c *persistentVolumeClaims) Get(name string, options meta_v1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. -func (c *persistentVolumeClaims) List(opts meta_v1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { - result = &v1.PersistentVolumeClaimList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. -func (c *persistentVolumeClaims) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched persistentVolumeClaim. func (c *persistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { result = &v1.PersistentVolumeClaim{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index 5648750ea..8eb7ba8bd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -59,6 +61,41 @@ func newPods(c *CoreV1Client, namespace string) *pods { } } +// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. +func (c *pods) Get(name string, options meta_v1.GetOptions) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Pods that match those selectors. 
+func (c *pods) List(opts meta_v1.ListOptions) (result *v1.PodList, err error) { + result = &v1.PodList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pods. +func (c *pods) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. func (c *pods) Create(pod *v1.Pod) (result *v1.Pod, err error) { result = &v1.Pod{} @@ -85,7 +122,7 @@ func (c *pods) Update(pod *v1.Pod) (result *v1.Pod, err error) { } // UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *pods) UpdateStatus(pod *v1.Pod) (result *v1.Pod, err error) { result = &v1.Pod{} @@ -122,41 +159,6 @@ func (c *pods) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta Error() } -// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. -func (c *pods) Get(name string, options meta_v1.GetOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Pods that match those selectors. -func (c *pods) List(opts meta_v1.ListOptions) (result *v1.PodList, err error) { - result = &v1.PodList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested pods. -func (c *pods) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched pod. func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) { result = &v1.Pod{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go index 82f3b7fba..ed876be8b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go @@ -17,9 +17,9 @@ limitations under the License. 
package v1 import ( - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/api/v1" - policy "k8s.io/client-go/pkg/apis/policy/v1beta1" + "k8s.io/api/core/v1" + policy "k8s.io/api/policy/v1beta1" + "k8s.io/client-go/kubernetes/scheme" restclient "k8s.io/client-go/rest" ) @@ -41,5 +41,5 @@ func (c *pods) Evict(eviction *policy.Eviction) error { // Get constructs a request for getting the logs for a pod func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { - return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, api.ParameterCodec) + return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, scheme.ParameterCodec) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go index 19c82f17b..d99d8c3b5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -58,6 +60,41 @@ func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates { } } +// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. +func (c *podTemplates) Get(name string, options meta_v1.GetOptions) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. +func (c *podTemplates) List(opts meta_v1.ListOptions) (result *v1.PodTemplateList, err error) { + result = &v1.PodTemplateList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podTemplates. +func (c *podTemplates) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. func (c *podTemplates) Create(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { result = &v1.PodTemplate{} @@ -105,41 +142,6 @@ func (c *podTemplates) DeleteCollection(options *meta_v1.DeleteOptions, listOpti Error() } -// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. 
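The GetLogs hunk above switches the PodLogOptions encoding to scheme.ParameterCodec; a small sketch of how callers typically stream the result, with the clientset, namespace, pod, and container names assumed for illustration.

package main

import (
	"io"
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// streamLogs copies one container's logs to stdout; all names are placeholders.
func streamLogs(clientset *kubernetes.Clientset) error {
	req := clientset.CoreV1().Pods("default").GetLogs("prometheus-0", &corev1.PodLogOptions{
		Container: "prometheus",
		Follow:    false,
	})
	rc, err := req.Stream()
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(os.Stdout, rc)
	return err
}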
-func (c *podTemplates) Get(name string, options meta_v1.GetOptions) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. -func (c *podTemplates) List(opts meta_v1.ListOptions) (result *v1.PodTemplateList, err error) { - result = &v1.PodTemplateList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podTemplates. -func (c *podTemplates) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched podTemplate. func (c *podTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) { result = &v1.PodTemplate{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go index 2f4f4fa9e..7741f970a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/core/v1" + v1beta1 "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -42,6 +45,9 @@ type ReplicationControllerInterface interface { List(opts meta_v1.ListOptions) (*v1.ReplicationControllerList, error) Watch(opts meta_v1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) + GetScale(replicationControllerName string, options meta_v1.GetOptions) (*v1beta1.Scale, error) + UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) + ReplicationControllerExpansion } @@ -59,69 +65,6 @@ func newReplicationControllers(c *CoreV1Client, namespace string) *replicationCo } } -// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Body(replicationController). - Do(). - Into(result) - return -} - -// Update takes the representation of a replicationController and updates it. 
Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - Body(replicationController). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). - -func (c *replicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - SubResource("status"). - Body(replicationController). - Do(). - Into(result) - return -} - -// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. -func (c *replicationControllers) Delete(name string, options *meta_v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicationControllers) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - // Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. func (c *replicationControllers) Get(name string, options meta_v1.GetOptions) (result *v1.ReplicationController, err error) { result = &v1.ReplicationController{} @@ -157,6 +100,69 @@ func (c *replicationControllers) Watch(opts meta_v1.ListOptions) (watch.Interfac Watch() } +// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Body(replicationController). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + Body(replicationController). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *replicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Put(). 
+ Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + SubResource("status"). + Body(replicationController). + Do(). + Into(result) + return +} + +// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. +func (c *replicationControllers) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicationControllers) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + // Patch applies the patch and returns the patched replicationController. func (c *replicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) { result = &v1.ReplicationController{} @@ -170,3 +176,31 @@ func (c *replicationControllers) Patch(name string, pt types.PatchType, data []b Into(result) return } + +// GetScale takes name of the replicationController, and returns the corresponding v1beta1.Scale object, and an error if there is any. +func (c *replicationControllers) GetScale(replicationControllerName string, options meta_v1.GetOptions) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationControllerName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationControllerName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go index 565fe1e6d..7662a0280 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
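GetScale and UpdateScale above expose the ReplicationController scale subresource through the typed client; a hedged sketch of the read-modify-write resize pattern, with the clientset, namespace, and controller name assumed.

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleRC resizes a ReplicationController via the scale subresource.
func scaleRC(clientset *kubernetes.Clientset, replicas int32) error {
	rcs := clientset.CoreV1().ReplicationControllers("default") // namespace is illustrative
	scale, err := rcs.GetScale("my-rc", metav1.GetOptions{})    // "my-rc" is a placeholder
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = rcs.UpdateScale("my-rc", scale)
	return err
}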
+ package v1 import ( + v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -59,6 +61,41 @@ func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas { } } +// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. +func (c *resourceQuotas) Get(name string, options meta_v1.GetOptions) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. +func (c *resourceQuotas) List(opts meta_v1.ListOptions) (result *v1.ResourceQuotaList, err error) { + result = &v1.ResourceQuotaList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceQuotas. +func (c *resourceQuotas) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. func (c *resourceQuotas) Create(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { result = &v1.ResourceQuota{} @@ -85,7 +122,7 @@ func (c *resourceQuotas) Update(resourceQuota *v1.ResourceQuota) (result *v1.Res } // UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *resourceQuotas) UpdateStatus(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { result = &v1.ResourceQuota{} @@ -122,41 +159,6 @@ func (c *resourceQuotas) DeleteCollection(options *meta_v1.DeleteOptions, listOp Error() } -// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. -func (c *resourceQuotas) Get(name string, options meta_v1.GetOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. -func (c *resourceQuotas) List(opts meta_v1.ListOptions) (result *v1.ResourceQuotaList, err error) { - result = &v1.ResourceQuotaList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *resourceQuotas) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched resourceQuota. func (c *resourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) { result = &v1.ResourceQuota{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go index fbcede818..5d149f8cc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -58,6 +60,41 @@ func newSecrets(c *CoreV1Client, namespace string) *secrets { } } +// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. +func (c *secrets) Get(name string, options meta_v1.GetOptions) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Secrets that match those selectors. +func (c *secrets) List(opts meta_v1.ListOptions) (result *v1.SecretList, err error) { + result = &v1.SecretList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested secrets. +func (c *secrets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. func (c *secrets) Create(secret *v1.Secret) (result *v1.Secret, err error) { result = &v1.Secret{} @@ -105,41 +142,6 @@ func (c *secrets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions m Error() } -// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *secrets) Get(name string, options meta_v1.GetOptions) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *secrets) List(opts meta_v1.ListOptions) (result *v1.SecretList, err error) { - result = &v1.SecretList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested secrets. -func (c *secrets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched secret. func (c *secrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) { result = &v1.Secret{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go index 0eccf79a8..ec8646a27 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -37,7 +39,6 @@ type ServiceInterface interface { Update(*v1.Service) (*v1.Service, error) UpdateStatus(*v1.Service) (*v1.Service, error) Delete(name string, options *meta_v1.DeleteOptions) error - DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error Get(name string, options meta_v1.GetOptions) (*v1.Service, error) List(opts meta_v1.ListOptions) (*v1.ServiceList, error) Watch(opts meta_v1.ListOptions) (watch.Interface, error) @@ -59,69 +60,6 @@ func newServices(c *CoreV1Client, namespace string) *services { } } -// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Create(service *v1.Service) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Post(). - Namespace(c.ns). - Resource("services"). - Body(service). - Do(). - Into(result) - return -} - -// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Update(service *v1.Service) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - Body(service). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). - -func (c *services) UpdateStatus(service *v1.Service) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - SubResource("status"). - Body(service). - Do(). - Into(result) - return -} - -// Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *services) Delete(name string, options *meta_v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - Name(name). 
- Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *services) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - // Get takes name of the service, and returns the corresponding service object, and an error if there is any. func (c *services) Get(name string, options meta_v1.GetOptions) (result *v1.Service, err error) { result = &v1.Service{} @@ -157,6 +95,58 @@ func (c *services) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { Watch() } +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Create(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Post(). + Namespace(c.ns). + Resource("services"). + Body(service). + Do(). + Into(result) + return +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Update(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + Body(service). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *services) UpdateStatus(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + SubResource("status"). + Body(service). + Do(). + Into(result) + return +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *services) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + Name(name). + Body(options). + Do(). + Error() +} + // Patch applies the patch and returns the patched service. func (c *services) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) { result = &v1.Service{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go index f71789f6a..ba2a27948 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
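DeleteCollection is dropped from ServiceInterface in this version, so callers that relied on it have to delete matching Services one at a time; a sketch under an assumed namespace and selector.

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteServicesBySelector lists Services matching a label selector and deletes each one.
func deleteServicesBySelector(clientset *kubernetes.Clientset, selector string) error {
	svcs := clientset.CoreV1().Services("default") // namespace is illustrative
	list, err := svcs.List(metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return err
	}
	for _, svc := range list.Items {
		if err := svcs.Delete(svc.Name, &metav1.DeleteOptions{}); err != nil {
			return err
		}
	}
	return nil
}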
+ package v1 import ( + v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/api/v1" rest "k8s.io/client-go/rest" ) @@ -58,6 +60,41 @@ func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts { } } +// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. +func (c *serviceAccounts) Get(name string, options meta_v1.GetOptions) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. +func (c *serviceAccounts) List(opts meta_v1.ListOptions) (result *v1.ServiceAccountList, err error) { + result = &v1.ServiceAccountList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serviceAccounts. +func (c *serviceAccounts) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. func (c *serviceAccounts) Create(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { result = &v1.ServiceAccount{} @@ -105,41 +142,6 @@ func (c *serviceAccounts) DeleteCollection(options *meta_v1.DeleteOptions, listO Error() } -// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. -func (c *serviceAccounts) Get(name string, options meta_v1.GetOptions) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. -func (c *serviceAccounts) List(opts meta_v1.ListOptions) (result *v1.ServiceAccountList, err error) { - result = &v1.ServiceAccountList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested serviceAccounts. -func (c *serviceAccounts) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched serviceAccount. 
func (c *serviceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) { result = &v1.ServiceAccount{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go new file mode 100644 index 000000000..eaf643f15 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go @@ -0,0 +1,41 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authenticationv1 "k8s.io/api/authentication/v1" +) + +// The ServiceAccountExpansion interface allows manually adding extra methods +// to the ServiceAccountInterface. +type ServiceAccountExpansion interface { + CreateToken(name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) +} + +// CreateToken creates a new token for a serviceaccount. +func (c *serviceAccounts) CreateToken(name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) { + result := &authenticationv1.TokenRequest{} + err := c.client.Post(). + Namespace(c.ns). + Resource("serviceaccounts"). + SubResource("token"). + Name(name). + Body(tr). + Do(). + Into(result) + return result, err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go new file mode 100644 index 000000000..771101956 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go new file mode 100644 index 000000000..af7d060d5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -0,0 +1,157 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
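The new serviceaccount_expansion.go above adds CreateToken against the TokenRequest subresource; a sketch of requesting a short-lived bound token, assuming the cluster serves TokenRequest and using placeholder service account name, audience, and expiry.

package main

import (
	authenticationv1 "k8s.io/api/authentication/v1"
	"k8s.io/client-go/kubernetes"
)

// requestToken returns a bound token for a service account; names and values are placeholders.
func requestToken(clientset *kubernetes.Clientset) (string, error) {
	expiry := int64(3600)
	tr, err := clientset.CoreV1().ServiceAccounts("default").CreateToken("prometheus", &authenticationv1.TokenRequest{
		Spec: authenticationv1.TokenRequestSpec{
			Audiences:         []string{"https://kubernetes.default.svc"},
			ExpirationSeconds: &expiry,
		},
	})
	if err != nil {
		return "", err
	}
	return tr.Status.Token, nil
}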
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/events/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// EventsGetter has a method to return a EventInterface. +// A group's client should implement this interface. +type EventsGetter interface { + Events(namespace string) EventInterface +} + +// EventInterface has methods to work with Event resources. +type EventInterface interface { + Create(*v1beta1.Event) (*v1beta1.Event, error) + Update(*v1beta1.Event) (*v1beta1.Event, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Event, error) + List(opts v1.ListOptions) (*v1beta1.EventList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) + EventExpansion +} + +// events implements EventInterface +type events struct { + client rest.Interface + ns string +} + +// newEvents returns a Events +func newEvents(c *EventsV1beta1Client, namespace string) *events { + return &events{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the event, and returns the corresponding event object, and an error if there is any. +func (c *events) Get(name string, options v1.GetOptions) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Events that match those selectors. +func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) { + result = &v1beta1.EventList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested events. +func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Create(event *v1beta1.Event) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Post(). + Namespace(c.ns). + Resource("events"). + Body(event). + Do(). + Into(result) + return +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Update(event *v1beta1.Event) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Put(). 
+ Namespace(c.ns). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return +} + +// Delete takes name of the event and deletes it. Returns an error if one occurs. +func (c *events) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched event. +func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("events"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go new file mode 100644 index 000000000..fb59635bb --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/events/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type EventsV1beta1Interface interface { + RESTClient() rest.Interface + EventsGetter +} + +// EventsV1beta1Client is used to interact with features provided by the events.k8s.io group. +type EventsV1beta1Client struct { + restClient rest.Interface +} + +func (c *EventsV1beta1Client) Events(namespace string) EventInterface { + return newEvents(c, namespace) +} + +// NewForConfig creates a new EventsV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*EventsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &EventsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new EventsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *EventsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new EventsV1beta1Client for the given RESTClient. 
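The events.k8s.io/v1beta1 client being added here can also be built directly from a rest.Config via NewForConfig; a sketch that lists events in one namespace, with the in-cluster config and the namespace as assumptions.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1"
	"k8s.io/client-go/rest"
)

// listRecentEvents prints the reason and note for each event in an illustrative namespace.
func listRecentEvents() error {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return err
	}
	client, err := eventsv1beta1.NewForConfig(cfg)
	if err != nil {
		return err
	}
	evs, err := client.Events("default").List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, e := range evs.Items {
		fmt.Println(e.Reason, e.Note)
	}
	return nil
}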
+func New(c rest.Interface) *EventsV1beta1Client { + return &EventsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *EventsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go new file mode 100644 index 000000000..b210e40a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go @@ -0,0 +1,128 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/api/events/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeEvents implements EventInterface +type FakeEvents struct { + Fake *FakeEventsV1beta1 + ns string +} + +var eventsResource = schema.GroupVersionResource{Group: "events.k8s.io", Version: "v1beta1", Resource: "events"} + +var eventsKind = schema.GroupVersionKind{Group: "events.k8s.io", Version: "v1beta1", Kind: "Event"} + +// Get takes name of the event, and returns the corresponding event object, and an error if there is any. +func (c *FakeEvents) Get(name string, options v1.GetOptions) (result *v1beta1.Event, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1beta1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Event), err +} + +// List takes label and field selectors, and returns the list of Events that match those selectors. +func (c *FakeEvents) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1beta1.EventList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.EventList{ListMeta: obj.(*v1beta1.EventList).ListMeta} + for _, item := range obj.(*v1beta1.EventList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested events. +func (c *FakeEvents) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) + +} + +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. +func (c *FakeEvents) Create(event *v1beta1.Event) (result *v1beta1.Event, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1beta1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Event), err +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. +func (c *FakeEvents) Update(event *v1beta1.Event) (result *v1beta1.Event, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1beta1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Event), err +} + +// Delete takes name of the event and deletes it. Returns an error if one occurs. +func (c *FakeEvents) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(eventsResource, c.ns, name), &v1beta1.Event{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeEvents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.EventList{}) + return err +} + +// Patch applies the patch and returns the patched event. +func (c *FakeEvents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, data, subresources...), &v1beta1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Event), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go new file mode 100644 index 000000000..875c774e3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
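FakeEventsV1beta1 above is what the fake clientset wires in for EventsV1beta1(), which is the usual way to exercise event-handling code in unit tests; a sketch with an illustrative seeded event.

package main

import (
	"testing"

	eventsv1beta1 "k8s.io/api/events/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestListEvents seeds the in-memory tracker and lists through the fake typed client.
func TestListEvents(t *testing.T) {
	clientset := fake.NewSimpleClientset(&eventsv1beta1.Event{
		ObjectMeta: metav1.ObjectMeta{Name: "evt-1", Namespace: "default"},
		Reason:     "Scheduled",
	})
	evs, err := clientset.EventsV1beta1().Events("default").List(metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(evs.Items) != 1 {
		t.Fatalf("expected 1 event, got %d", len(evs.Items))
	}
}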
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeEventsV1beta1 struct { + *testing.Fake +} + +func (c *FakeEventsV1beta1) Events(namespace string) v1beta1.EventInterface { + return &FakeEvents{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeEventsV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go similarity index 82% rename from vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go index 6f257909d..e27f693f8 100644 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,5 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +groupName=certificates.k8s.io +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 + +type EventExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go index 8c132db9b..85294be4b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" rest "k8s.io/client-go/rest" ) @@ -59,6 +61,41 @@ func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets { } } +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). 
+ VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. +func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { + result = &v1beta1.DaemonSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. func (c *daemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { result = &v1beta1.DaemonSet{} @@ -85,7 +122,7 @@ func (c *daemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.Daemo } // UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *daemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { result = &v1beta1.DaemonSet{} @@ -122,41 +159,6 @@ func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1. Error() } -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { - result = &v1beta1.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched daemonSet. func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) { result = &v1beta1.DaemonSet{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go index 7d0122c2a..89183d285 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" rest "k8s.io/client-go/rest" ) @@ -42,6 +44,9 @@ type DeploymentInterface interface { List(opts v1.ListOptions) (*v1beta1.DeploymentList, error) Watch(opts v1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) + GetScale(deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error) + UpdateScale(deploymentName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) + DeploymentExpansion } @@ -59,69 +64,6 @@ func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments { } } -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - Body(deployment). - Do(). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - Body(deployment). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). - -func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - Body(deployment). - Do(). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. 
func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} @@ -157,6 +99,69 @@ func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { Watch() } +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + // Patch applies the patch and returns the patched deployment. func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { result = &v1beta1.Deployment{} @@ -170,3 +175,31 @@ func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subres Into(result) return } + +// GetScale takes name of the deployment, and returns the corresponding v1beta1.Scale object, and an error if there is any. +func (c *deployments) GetScale(deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(deploymentName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *deployments) UpdateScale(deploymentName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deploymentName). + SubResource("scale"). + Body(scale). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go index e737f09ae..24734be6a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go @@ -16,7 +16,7 @@ limitations under the License. package v1beta1 -import "k8s.io/client-go/pkg/apis/extensions/v1beta1" +import "k8s.io/api/extensions/v1beta1" // The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface. type DeploymentExpansion interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go index 11b523897..771101956 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index 5284346e1..1961ffc7c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/extensions/v1beta1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" rest "k8s.io/client-go/rest" ) @@ -31,7 +33,6 @@ type ExtensionsV1beta1Interface interface { PodSecurityPoliciesGetter ReplicaSetsGetter ScalesGetter - ThirdPartyResourcesGetter } // ExtensionsV1beta1Client is used to interact with features provided by the extensions group. @@ -63,10 +64,6 @@ func (c *ExtensionsV1beta1Client) Scales(namespace string) ScaleInterface { return newScales(c, namespace) } -func (c *ExtensionsV1beta1Client) ThirdPartyResources() ThirdPartyResourceInterface { - return newThirdPartyResources(c) -} - // NewForConfig creates a new ExtensionsV1beta1Client for the given config. 
func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { config := *c diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go index e7413703b..3a760b317 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" testing "k8s.io/client-go/testing" ) @@ -34,50 +36,9 @@ type FakeDaemonSets struct { var daemonsetsResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "daemonsets"} -func (c *FakeDaemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.DaemonSet), err -} - -func (c *FakeDaemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.DaemonSet), err -} - -func (c *FakeDaemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.DaemonSet), err -} - -func (c *FakeDaemonSets) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{}) - - return err -} - -func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.DaemonSetList{}) - return err -} +var daemonsetsKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "DaemonSet"} +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. func (c *FakeDaemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{}) @@ -88,9 +49,10 @@ func (c *FakeDaemonSets) Get(name string, options v1.GetOptions) (result *v1beta return obj.(*v1beta1.DaemonSet), err } +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *FakeDaemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(daemonsetsResource, c.ns, opts), &v1beta1.DaemonSetList{}) + Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1beta1.DaemonSetList{}) if obj == nil { return nil, err @@ -100,7 +62,7 @@ func (c *FakeDaemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetLis if label == nil { label = labels.Everything() } - list := &v1beta1.DaemonSetList{} + list := &v1beta1.DaemonSetList{ListMeta: obj.(*v1beta1.DaemonSetList).ListMeta} for _, item := range obj.(*v1beta1.DaemonSetList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -116,6 +78,56 @@ func (c *FakeDaemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { } +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *FakeDaemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *FakeDaemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeDaemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *FakeDaemonSets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.DaemonSetList{}) + return err +} + // Patch applies the patch and returns the patched daemonSet. func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go index e4a2a0305..f032a5563 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" testing "k8s.io/client-go/testing" ) @@ -34,50 +36,9 @@ type FakeDeployments struct { var deploymentsResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"} -func (c *FakeDeployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -func (c *FakeDeployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -func (c *FakeDeployments) UpdateStatus(deployment *v1beta1.Deployment) (*v1beta1.Deployment, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) - - return err -} - -func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) - return err -} +var deploymentsKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Deployment"} +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) @@ -88,9 +49,10 @@ func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1bet return obj.(*v1beta1.Deployment), err } +// List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(deploymentsResource, c.ns, opts), &v1beta1.DeploymentList{}) + Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta1.DeploymentList{}) if obj == nil { return nil, err @@ -100,7 +62,7 @@ func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentL if label == nil { label = labels.Everything() } - list := &v1beta1.DeploymentList{} + list := &v1beta1.DeploymentList{ListMeta: obj.(*v1beta1.DeploymentList).ListMeta} for _, item := range obj.(*v1beta1.DeploymentList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -116,6 +78,56 @@ func (c *FakeDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { } +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *FakeDeployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *FakeDeployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeDeployments) UpdateStatus(deployment *v1beta1.Deployment) (*v1beta1.Deployment, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) + return err +} + // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. @@ -126,3 +138,25 @@ func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, su } return obj.(*v1beta1.Deployment), err } + +// GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any. +func (c *FakeDeployments) GetScale(deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(deploymentsResource, c.ns, "scale", deploymentName), &v1beta1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Scale), err +} + +// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *FakeDeployments) UpdateScale(deploymentName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &v1beta1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Scale), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go index f49c33666..af2bc0f71 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package fake import ( - "k8s.io/client-go/pkg/apis/extensions/v1beta1" + "k8s.io/api/extensions/v1beta1" core "k8s.io/client-go/testing" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go index c0489371a..1aba34f9d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( @@ -50,10 +52,6 @@ func (c *FakeExtensionsV1beta1) Scales(namespace string) v1beta1.ScaleInterface return &FakeScales{c, namespace} } -func (c *FakeExtensionsV1beta1) ThirdPartyResources() v1beta1.ThirdPartyResourceInterface { - return &FakeThirdPartyResources{c} -} - // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeExtensionsV1beta1) RESTClient() rest.Interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go index 7ad797e38..55257a88a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" testing "k8s.io/client-go/testing" ) @@ -34,50 +36,9 @@ type FakeIngresses struct { var ingressesResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"} -func (c *FakeIngresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} - -func (c *FakeIngresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} - -func (c *FakeIngresses) UpdateStatus(ingress *v1beta1.Ingress) (*v1beta1.Ingress, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} - -func (c *FakeIngresses) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) - - return err -} - -func (c *FakeIngresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.IngressList{}) - return err -} +var ingressesKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Ingress"} +// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. func (c *FakeIngresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. 
Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) @@ -88,9 +49,10 @@ func (c *FakeIngresses) Get(name string, options v1.GetOptions) (result *v1beta1 return obj.(*v1beta1.Ingress), err } +// List takes label and field selectors, and returns the list of Ingresses that match those selectors. func (c *FakeIngresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(ingressesResource, c.ns, opts), &v1beta1.IngressList{}) + Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1beta1.IngressList{}) if obj == nil { return nil, err @@ -100,7 +62,7 @@ func (c *FakeIngresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, if label == nil { label = labels.Everything() } - list := &v1beta1.IngressList{} + list := &v1beta1.IngressList{ListMeta: obj.(*v1beta1.IngressList).ListMeta} for _, item := range obj.(*v1beta1.IngressList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -116,6 +78,56 @@ func (c *FakeIngresses) Watch(opts v1.ListOptions) (watch.Interface, error) { } +// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *FakeIngresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} + +// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *FakeIngresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeIngresses) UpdateStatus(ingress *v1beta1.Ingress) (*v1beta1.Ingress, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} + +// Delete takes name of the ingress and deletes it. Returns an error if one occurs. +func (c *FakeIngresses) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeIngresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.IngressList{}) + return err +} + // Patch applies the patch and returns the patched ingress. func (c *FakeIngresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. 
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go index 78b0868cb..70b5dac28 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" testing "k8s.io/client-go/testing" ) @@ -33,37 +35,9 @@ type FakePodSecurityPolicies struct { var podsecuritypoliciesResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "podsecuritypolicies"} -func (c *FakePodSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} - -func (c *FakePodSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} - -func (c *FakePodSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) - return err -} - -func (c *FakePodSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(podsecuritypoliciesResource, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.PodSecurityPolicyList{}) - return err -} +var podsecuritypoliciesKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "PodSecurityPolicy"} +// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. func (c *FakePodSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) @@ -73,9 +47,10 @@ func (c *FakePodSecurityPolicies) Get(name string, options v1.GetOptions) (resul return obj.(*v1beta1.PodSecurityPolicy), err } +// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. func (c *FakePodSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(podsecuritypoliciesResource, opts), &v1beta1.PodSecurityPolicyList{}) + Invokes(testing.NewRootListAction(podsecuritypoliciesResource, podsecuritypoliciesKind, opts), &v1beta1.PodSecurityPolicyList{}) if obj == nil { return nil, err } @@ -84,7 +59,7 @@ func (c *FakePodSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.Pod if label == nil { label = labels.Everything() } - list := &v1beta1.PodSecurityPolicyList{} + list := &v1beta1.PodSecurityPolicyList{ListMeta: obj.(*v1beta1.PodSecurityPolicyList).ListMeta} for _, item := range obj.(*v1beta1.PodSecurityPolicyList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -99,6 +74,41 @@ func (c *FakePodSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, e InvokesWatch(testing.NewRootWatchAction(podsecuritypoliciesResource, opts)) } +// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *FakePodSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodSecurityPolicy), err +} + +// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *FakePodSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodSecurityPolicy), err +} + +// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. +func (c *FakePodSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePodSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(podsecuritypoliciesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.PodSecurityPolicyList{}) + return err +} + // Patch applies the patch and returns the patched podSecurityPolicy. func (c *FakePodSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go index b5fbc6b5a..2ab8f244f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" testing "k8s.io/client-go/testing" ) @@ -34,50 +36,9 @@ type FakeReplicaSets struct { var replicasetsResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "replicasets"} -func (c *FakeReplicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -func (c *FakeReplicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -func (c *FakeReplicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -func (c *FakeReplicaSets) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{}) - - return err -} - -func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.ReplicaSetList{}) - return err -} +var replicasetsKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "ReplicaSet"} +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. func (c *FakeReplicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{}) @@ -88,9 +49,10 @@ func (c *FakeReplicaSets) Get(name string, options v1.GetOptions) (result *v1bet return obj.(*v1beta1.ReplicaSet), err } +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *FakeReplicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(replicasetsResource, c.ns, opts), &v1beta1.ReplicaSetList{}) + Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1beta1.ReplicaSetList{}) if obj == nil { return nil, err @@ -100,7 +62,7 @@ func (c *FakeReplicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetL if label == nil { label = labels.Everything() } - list := &v1beta1.ReplicaSetList{} + list := &v1beta1.ReplicaSetList{ListMeta: obj.(*v1beta1.ReplicaSetList).ListMeta} for _, item := range obj.(*v1beta1.ReplicaSetList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -116,6 +78,56 @@ func (c *FakeReplicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { } +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *FakeReplicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *FakeReplicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeReplicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *FakeReplicaSets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ReplicaSetList{}) + return err +} + // Patch applies the patch and returns the patched replicaSet. func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) { obj, err := c.Fake. @@ -126,3 +138,25 @@ func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, su } return obj.(*v1beta1.ReplicaSet), err } + +// GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any. +func (c *FakeReplicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetSubresourceAction(replicasetsResource, c.ns, "scale", replicaSetName), &v1beta1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Scale), err +} + +// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *FakeReplicaSets) UpdateScale(replicaSetName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &v1beta1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Scale), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go index 77f8c61a7..02c4d0bab 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake // FakeScales implements ScaleInterface diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go index db13b4c37..1f1d49ba1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go @@ -17,8 +17,8 @@ limitations under the License. package fake import ( + "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/pkg/apis/extensions/v1beta1" core "k8s.io/client-go/testing" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go deleted file mode 100644 index cb625070a..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeThirdPartyResources implements ThirdPartyResourceInterface -type FakeThirdPartyResources struct { - Fake *FakeExtensionsV1beta1 -} - -var thirdpartyresourcesResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "thirdpartyresources"} - -func (c *FakeThirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(thirdpartyresourcesResource, thirdPartyResource), &v1beta1.ThirdPartyResource{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ThirdPartyResource), err -} - -func (c *FakeThirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(thirdpartyresourcesResource, thirdPartyResource), &v1beta1.ThirdPartyResource{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ThirdPartyResource), err -} - -func (c *FakeThirdPartyResources) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(thirdpartyresourcesResource, name), &v1beta1.ThirdPartyResource{}) - return err -} - -func (c *FakeThirdPartyResources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(thirdpartyresourcesResource, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.ThirdPartyResourceList{}) - return err -} - -func (c *FakeThirdPartyResources) Get(name string, options v1.GetOptions) (result *v1beta1.ThirdPartyResource, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(thirdpartyresourcesResource, name), &v1beta1.ThirdPartyResource{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ThirdPartyResource), err -} - -func (c *FakeThirdPartyResources) List(opts v1.ListOptions) (result *v1beta1.ThirdPartyResourceList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(thirdpartyresourcesResource, opts), &v1beta1.ThirdPartyResourceList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ThirdPartyResourceList{} - for _, item := range obj.(*v1beta1.ThirdPartyResourceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested thirdPartyResources. -func (c *FakeThirdPartyResources) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(thirdpartyresourcesResource, opts)) -} - -// Patch applies the patch and returns the patched thirdPartyResource. -func (c *FakeThirdPartyResources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(thirdpartyresourcesResource, name, data, subresources...), &v1beta1.ThirdPartyResource{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ThirdPartyResource), err -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go index d0a3d64bc..cfaeebd05 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 type DaemonSetExpansion interface{} @@ -23,5 +25,3 @@ type IngressExpansion interface{} type PodSecurityPolicyExpansion interface{} type ReplicaSetExpansion interface{} - -type ThirdPartyResourceExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go index 86bf65b8f..f8b664cbd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" rest "k8s.io/client-go/rest" ) @@ -59,6 +61,41 @@ func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses { } } +// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. +func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Ingresses that match those selectors. +func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { + result = &v1beta1.IngressList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ingresses. +func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Watch() +} + // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} @@ -85,7 +122,7 @@ func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, e } // UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} @@ -122,41 +159,6 @@ func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L Error() } -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - result = &v1beta1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched ingress. func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { result = &v1beta1.Ingress{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go index ad57a6cab..8099d7730 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" rest "k8s.io/client-go/rest" ) @@ -56,6 +58,38 @@ func newPodSecurityPolicies(c *ExtensionsV1beta1Client) *podSecurityPolicies { } } +// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. 
+func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. +func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + result = &v1beta1.PodSecurityPolicyList{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podSecurityPolicies. +func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} @@ -99,38 +133,6 @@ func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOp Error() } -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - result = &v1beta1.PodSecurityPolicyList{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched podSecurityPolicy. func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { result = &v1beta1.PodSecurityPolicy{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go index aa6f505a2..7e61fa2d1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" rest "k8s.io/client-go/rest" ) @@ -42,6 +44,9 @@ type ReplicaSetInterface interface { List(opts v1.ListOptions) (*v1beta1.ReplicaSetList, error) Watch(opts v1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) + GetScale(replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error) + UpdateScale(replicaSetName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) + ReplicaSetExpansion } @@ -59,69 +64,6 @@ func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets { } } -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - Body(replicaSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). - -func (c *replicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { result = &v1beta1.ReplicaSet{} @@ -157,6 +99,69 @@ func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { Watch() } +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. 
+func (c *replicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + Body(replicaSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *replicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + // Patch applies the patch and returns the patched replicaSet. func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) { result = &v1beta1.ReplicaSet{} @@ -170,3 +175,31 @@ func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subres Into(result) return } + +// GetScale takes name of the replicaSet, and returns the corresponding v1beta1.Scale object, and an error if there is any. +func (c *replicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSetName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *replicaSets) UpdateScale(replicaSetName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSetName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go index 733012ade..6ee677acd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
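Illustrative aside (not part of the vendored diff): a minimal sketch of using the GetScale/UpdateScale methods generated above for extensions/v1beta1 ReplicaSets. The kubeconfig path, namespace, and ReplicaSet name are hypothetical placeholders; any *rest.Config would work.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig path; substitute whatever produces a *rest.Config.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	rs := clientset.ExtensionsV1beta1().ReplicaSets("default")

	// Read the scale subresource via the newly generated GetScale.
	scale, err := rs.GetScale("example-rs", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("current replicas:", scale.Spec.Replicas)

	// Adjust the replica count and write it back via UpdateScale.
	scale.Spec.Replicas = 3
	if _, err := rs.UpdateScale("example-rs", scale); err != nil {
		panic(err)
	}
}

As in the rest of this client generation, the methods take the resource name and options directly rather than a context.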
+Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go index efeae4b01..c9733cb28 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go @@ -17,9 +17,9 @@ limitations under the License. package v1beta1 import ( + "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) // The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. @@ -34,7 +34,7 @@ func (c *scales) Get(kind string, name string) (result *v1beta1.Scale, err error // TODO this method needs to take a proper unambiguous kind fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} - resource, _ := meta.KindToResource(fullyQualifiedKind) + resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) err = c.client.Get(). Namespace(c.ns). @@ -51,7 +51,7 @@ func (c *scales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scal // TODO this method needs to take a proper unambiguous kind fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} - resource, _ := meta.KindToResource(fullyQualifiedKind) + resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) err = c.client.Put(). Namespace(scale.Namespace). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go deleted file mode 100644 index 617c2069b..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" - rest "k8s.io/client-go/rest" -) - -// ThirdPartyResourcesGetter has a method to return a ThirdPartyResourceInterface. -// A group's client should implement this interface. -type ThirdPartyResourcesGetter interface { - ThirdPartyResources() ThirdPartyResourceInterface -} - -// ThirdPartyResourceInterface has methods to work with ThirdPartyResource resources. 
-type ThirdPartyResourceInterface interface { - Create(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) - Update(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.ThirdPartyResource, error) - List(opts v1.ListOptions) (*v1beta1.ThirdPartyResourceList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) - ThirdPartyResourceExpansion -} - -// thirdPartyResources implements ThirdPartyResourceInterface -type thirdPartyResources struct { - client rest.Interface -} - -// newThirdPartyResources returns a ThirdPartyResources -func newThirdPartyResources(c *ExtensionsV1beta1Client) *thirdPartyResources { - return &thirdPartyResources{ - client: c.RESTClient(), - } -} - -// Create takes the representation of a thirdPartyResource and creates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. -func (c *thirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Post(). - Resource("thirdpartyresources"). - Body(thirdPartyResource). - Do(). - Into(result) - return -} - -// Update takes the representation of a thirdPartyResource and updates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. -func (c *thirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Put(). - Resource("thirdpartyresources"). - Name(thirdPartyResource.Name). - Body(thirdPartyResource). - Do(). - Into(result) - return -} - -// Delete takes name of the thirdPartyResource and deletes it. Returns an error if one occurs. -func (c *thirdPartyResources) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("thirdpartyresources"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *thirdPartyResources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("thirdpartyresources"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the thirdPartyResource, and returns the corresponding thirdPartyResource object, and an error if there is any. -func (c *thirdPartyResources) Get(name string, options v1.GetOptions) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Get(). - Resource("thirdpartyresources"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ThirdPartyResources that match those selectors. -func (c *thirdPartyResources) List(opts v1.ListOptions) (result *v1beta1.ThirdPartyResourceList, err error) { - result = &v1beta1.ThirdPartyResourceList{} - err = c.client.Get(). - Resource("thirdpartyresources"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested thirdPartyResources. -func (c *thirdPartyResources) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("thirdpartyresources"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Patch applies the patch and returns the patched thirdPartyResource. -func (c *thirdPartyResources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Patch(pt). - Resource("thirdpartyresources"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go new file mode 100644 index 000000000..3af5d054f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go new file mode 100644 index 000000000..6b135c636 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/client-go/kubernetes/typed/networking/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeNetworkingV1 struct { + *testing.Fake +} + +func (c *FakeNetworkingV1) NetworkPolicies(namespace string) v1.NetworkPolicyInterface { + return &FakeNetworkPolicies{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeNetworkingV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go new file mode 100644 index 000000000..4bf7ef7ca --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go @@ -0,0 +1,128 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + networking_v1 "k8s.io/api/networking/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeNetworkPolicies implements NetworkPolicyInterface +type FakeNetworkPolicies struct { + Fake *FakeNetworkingV1 + ns string +} + +var networkpoliciesResource = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "networkpolicies"} + +var networkpoliciesKind = schema.GroupVersionKind{Group: "networking.k8s.io", Version: "v1", Kind: "NetworkPolicy"} + +// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. +func (c *FakeNetworkPolicies) Get(name string, options v1.GetOptions) (result *networking_v1.NetworkPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &networking_v1.NetworkPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*networking_v1.NetworkPolicy), err +} + +// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. +func (c *FakeNetworkPolicies) List(opts v1.ListOptions) (result *networking_v1.NetworkPolicyList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &networking_v1.NetworkPolicyList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &networking_v1.NetworkPolicyList{ListMeta: obj.(*networking_v1.NetworkPolicyList).ListMeta} + for _, item := range obj.(*networking_v1.NetworkPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested networkPolicies. +func (c *FakeNetworkPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(networkpoliciesResource, c.ns, opts)) + +} + +// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. +func (c *FakeNetworkPolicies) Create(networkPolicy *networking_v1.NetworkPolicy) (result *networking_v1.NetworkPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &networking_v1.NetworkPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*networking_v1.NetworkPolicy), err +} + +// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. +func (c *FakeNetworkPolicies) Update(networkPolicy *networking_v1.NetworkPolicy) (result *networking_v1.NetworkPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &networking_v1.NetworkPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*networking_v1.NetworkPolicy), err +} + +// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. +func (c *FakeNetworkPolicies) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(networkpoliciesResource, c.ns, name), &networking_v1.NetworkPolicy{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeNetworkPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &networking_v1.NetworkPolicyList{}) + return err +} + +// Patch applies the patch and returns the patched networkPolicy. +func (c *FakeNetworkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *networking_v1.NetworkPolicy, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, data, subresources...), &networking_v1.NetworkPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*networking_v1.NetworkPolicy), err +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go similarity index 81% rename from vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go index c7be42d5a..7d77495fa 100644 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,4 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 + +type NetworkPolicyExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go new file mode 100644 index 000000000..8684db456 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/networking/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NetworkingV1Interface interface { + RESTClient() rest.Interface + NetworkPoliciesGetter +} + +// NetworkingV1Client is used to interact with features provided by the networking.k8s.io group. +type NetworkingV1Client struct { + restClient rest.Interface +} + +func (c *NetworkingV1Client) NetworkPolicies(namespace string) NetworkPolicyInterface { + return newNetworkPolicies(c, namespace) +} + +// NewForConfig creates a new NetworkingV1Client for the given config. +func NewForConfig(c *rest.Config) (*NetworkingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NetworkingV1Client{client}, nil +} + +// NewForConfigOrDie creates a new NetworkingV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NetworkingV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NetworkingV1Client for the given RESTClient. 
+func New(c rest.Interface) *NetworkingV1Client { + return &NetworkingV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NetworkingV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go new file mode 100644 index 000000000..c2eb2e792 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -0,0 +1,157 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/networking/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. +// A group's client should implement this interface. +type NetworkPoliciesGetter interface { + NetworkPolicies(namespace string) NetworkPolicyInterface +} + +// NetworkPolicyInterface has methods to work with NetworkPolicy resources. +type NetworkPolicyInterface interface { + Create(*v1.NetworkPolicy) (*v1.NetworkPolicy, error) + Update(*v1.NetworkPolicy) (*v1.NetworkPolicy, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.NetworkPolicy, error) + List(opts meta_v1.ListOptions) (*v1.NetworkPolicyList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error) + NetworkPolicyExpansion +} + +// networkPolicies implements NetworkPolicyInterface +type networkPolicies struct { + client rest.Interface + ns string +} + +// newNetworkPolicies returns a NetworkPolicies +func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicies { + return &networkPolicies{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. +func (c *networkPolicies) Get(name string, options meta_v1.GetOptions) (result *v1.NetworkPolicy, err error) { + result = &v1.NetworkPolicy{} + err = c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(name). 
+ VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. +func (c *networkPolicies) List(opts meta_v1.ListOptions) (result *v1.NetworkPolicyList, err error) { + result = &v1.NetworkPolicyList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested networkPolicies. +func (c *networkPolicies) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. +func (c *networkPolicies) Create(networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) { + result = &v1.NetworkPolicy{} + err = c.client.Post(). + Namespace(c.ns). + Resource("networkpolicies"). + Body(networkPolicy). + Do(). + Into(result) + return +} + +// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. +func (c *networkPolicies) Update(networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) { + result = &v1.NetworkPolicy{} + err = c.client.Put(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(networkPolicy.Name). + Body(networkPolicy). + Do(). + Into(result) + return +} + +// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. +func (c *networkPolicies) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *networkPolicies) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched networkPolicy. +func (c *networkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error) { + result = &v1.NetworkPolicy{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("networkpolicies"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go index 11b523897..771101956 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. 
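Illustrative aside (not part of the vendored diff): the networking/v1 typed client added above is reached like any other group client on the generated clientset. A minimal sketch, assuming the program runs in-cluster and targets the hypothetical "default" namespace.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// In-cluster config is just one way to obtain a *rest.Config.
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// List NetworkPolicies through the networking.k8s.io/v1 typed client.
	policies, err := clientset.NetworkingV1().NetworkPolicies("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, p := range policies.Items {
		fmt.Printf("%s/%s\n", p.Namespace, p.Name)
	}
}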
// This package has the automatically generated typed clients. package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go index 9c4133e36..12e8e76ed 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go index bde1baca6..40bad265f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - policy "k8s.io/client-go/pkg/apis/policy/v1beta1" + policy "k8s.io/api/policy/v1beta1" ) // The EvictionExpansion interface allows manually adding extra methods to the ScaleInterface. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go index a091d8de3..b8f6f3eae 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake // FakeEvictions implements EvictionInterface diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go index 35c40713c..2f0d8e953 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go @@ -17,8 +17,8 @@ limitations under the License. package fake import ( + policy "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/runtime/schema" - policy "k8s.io/client-go/pkg/apis/policy/v1beta1" core "k8s.io/client-go/testing" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go index 309686585..3f2e78b31 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" testing "k8s.io/client-go/testing" ) @@ -34,50 +36,9 @@ type FakePodDisruptionBudgets struct { var poddisruptionbudgetsResource = schema.GroupVersionResource{Group: "policy", Version: "v1beta1", Resource: "poddisruptionbudgets"} -func (c *FakePodDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodDisruptionBudget), err -} - -func (c *FakePodDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodDisruptionBudget), err -} - -func (c *FakePodDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodDisruptionBudget), err -} - -func (c *FakePodDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{}) - - return err -} - -func (c *FakePodDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.PodDisruptionBudgetList{}) - return err -} +var poddisruptionbudgetsKind = schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodDisruptionBudget"} +// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. func (c *FakePodDisruptionBudgets) Get(name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{}) @@ -88,9 +49,10 @@ func (c *FakePodDisruptionBudgets) Get(name string, options v1.GetOptions) (resu return obj.(*v1beta1.PodDisruptionBudget), err } +// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. func (c *FakePodDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(poddisruptionbudgetsResource, c.ns, opts), &v1beta1.PodDisruptionBudgetList{}) + Invokes(testing.NewListAction(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), &v1beta1.PodDisruptionBudgetList{}) if obj == nil { return nil, err @@ -100,7 +62,7 @@ func (c *FakePodDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.Po if label == nil { label = labels.Everything() } - list := &v1beta1.PodDisruptionBudgetList{} + list := &v1beta1.PodDisruptionBudgetList{ListMeta: obj.(*v1beta1.PodDisruptionBudgetList).ListMeta} for _, item := range obj.(*v1beta1.PodDisruptionBudgetList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -116,6 +78,56 @@ func (c *FakePodDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, } +// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. +func (c *FakePodDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodDisruptionBudget), err +} + +// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. +func (c *FakePodDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodDisruptionBudget), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakePodDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodDisruptionBudget), err +} + +// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. +func (c *FakePodDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePodDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.PodDisruptionBudgetList{}) + return err +} + // Patch applies the patch and returns the patched podDisruptionBudget. func (c *FakePodDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go new file mode 100644 index 000000000..0df9aa15f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/api/policy/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePodSecurityPolicies implements PodSecurityPolicyInterface +type FakePodSecurityPolicies struct { + Fake *FakePolicyV1beta1 +} + +var podsecuritypoliciesResource = schema.GroupVersionResource{Group: "policy", Version: "v1beta1", Resource: "podsecuritypolicies"} + +var podsecuritypoliciesKind = schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicy"} + +// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. +func (c *FakePodSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootGetAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodSecurityPolicy), err +} + +// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. +func (c *FakePodSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(podsecuritypoliciesResource, podsecuritypoliciesKind, opts), &v1beta1.PodSecurityPolicyList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.PodSecurityPolicyList{ListMeta: obj.(*v1beta1.PodSecurityPolicyList).ListMeta} + for _, item := range obj.(*v1beta1.PodSecurityPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podSecurityPolicies. +func (c *FakePodSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(podsecuritypoliciesResource, opts)) +} + +// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *FakePodSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodSecurityPolicy), err +} + +// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *FakePodSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodSecurityPolicy), err +} + +// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. +func (c *FakePodSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePodSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(podsecuritypoliciesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.PodSecurityPolicyList{}) + return err +} + +// Patch applies the patch and returns the patched podSecurityPolicy. +func (c *FakePodSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, data, subresources...), &v1beta1.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodSecurityPolicy), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go index c9039b519..9c780bf1f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( @@ -34,6 +36,10 @@ func (c *FakePolicyV1beta1) PodDisruptionBudgets(namespace string) v1beta1.PodDi return &FakePodDisruptionBudgets{c, namespace} } +func (c *FakePolicyV1beta1) PodSecurityPolicies() v1beta1.PodSecurityPolicyInterface { + return &FakePodSecurityPolicies{c} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakePolicyV1beta1) RESTClient() rest.Interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go index 511adc6ef..078c16d5c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 type PodDisruptionBudgetExpansion interface{} + +type PodSecurityPolicyExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go index 8088dd019..a11f27eb2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package v1beta1 import ( + v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" rest "k8s.io/client-go/rest" ) @@ -59,6 +61,41 @@ func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisru } } +// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. +func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Get(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. +func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { + result = &v1beta1.PodDisruptionBudgetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. +func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. func (c *podDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { result = &v1beta1.PodDisruptionBudget{} @@ -85,7 +122,7 @@ func (c *podDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisruption } // UpdateStatus was generated because the type contains a Status member. -// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *podDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { result = &v1beta1.PodDisruptionBudget{} @@ -122,41 +159,6 @@ func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listO Error() } -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { - result = &v1beta1.PodDisruptionBudgetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched podDisruptionBudget. func (c *podDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { result = &v1beta1.PodDisruptionBudget{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go new file mode 100644 index 000000000..355be1e9c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go @@ -0,0 +1,147 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/policy/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. +// A group's client should implement this interface. +type PodSecurityPoliciesGetter interface { + PodSecurityPolicies() PodSecurityPolicyInterface +} + +// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. +type PodSecurityPolicyInterface interface { + Create(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) + Update(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) + List(opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) + PodSecurityPolicyExpansion +} + +// podSecurityPolicies implements PodSecurityPolicyInterface +type podSecurityPolicies struct { + client rest.Interface +} + +// newPodSecurityPolicies returns a PodSecurityPolicies +func newPodSecurityPolicies(c *PolicyV1beta1Client) *podSecurityPolicies { + return &podSecurityPolicies{ + client: c.RESTClient(), + } +} + +// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. 
+func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. +func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + result = &v1beta1.PodSecurityPolicyList{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podSecurityPolicies. +func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Post(). + Resource("podsecuritypolicies"). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Put(). + Resource("podsecuritypolicies"). + Name(podSecurityPolicy.Name). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. +func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched podSecurityPolicy. +func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Patch(pt). + Resource("podsecuritypolicies"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go index 9d62aad04..020e185e6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/policy/v1beta1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" rest "k8s.io/client-go/rest" ) @@ -27,6 +29,7 @@ type PolicyV1beta1Interface interface { RESTClient() rest.Interface EvictionsGetter PodDisruptionBudgetsGetter + PodSecurityPoliciesGetter } // PolicyV1beta1Client is used to interact with features provided by the policy group. @@ -42,6 +45,10 @@ func (c *PolicyV1beta1Client) PodDisruptionBudgets(namespace string) PodDisrupti return newPodDisruptionBudgets(c, namespace) } +func (c *PolicyV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { + return newPodSecurityPolicies(c) +} + // NewForConfig creates a new PolicyV1beta1Client for the given config. func NewForConfig(c *rest.Config) (*PolicyV1beta1Client, error) { config := *c diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go new file mode 100644 index 000000000..f3db3beb5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go @@ -0,0 +1,147 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/rbac/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ClusterRolesGetter has a method to return a ClusterRoleInterface. +// A group's client should implement this interface. +type ClusterRolesGetter interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. 
+type ClusterRoleInterface interface { + Create(*v1.ClusterRole) (*v1.ClusterRole, error) + Update(*v1.ClusterRole) (*v1.ClusterRole, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ClusterRole, error) + List(opts meta_v1.ListOptions) (*v1.ClusterRoleList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRole, err error) + ClusterRoleExpansion +} + +// clusterRoles implements ClusterRoleInterface +type clusterRoles struct { + client rest.Interface +} + +// newClusterRoles returns a ClusterRoles +func newClusterRoles(c *RbacV1Client) *clusterRoles { + return &clusterRoles{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. +func (c *clusterRoles) Get(name string, options meta_v1.GetOptions) (result *v1.ClusterRole, err error) { + result = &v1.ClusterRole{} + err = c.client.Get(). + Resource("clusterroles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. +func (c *clusterRoles) List(opts meta_v1.ListOptions) (result *v1.ClusterRoleList, err error) { + result = &v1.ClusterRoleList{} + err = c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Create(clusterRole *v1.ClusterRole) (result *v1.ClusterRole, err error) { + result = &v1.ClusterRole{} + err = c.client.Post(). + Resource("clusterroles"). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Update(clusterRole *v1.ClusterRole) (result *v1.ClusterRole, err error) { + result = &v1.ClusterRole{} + err = c.client.Put(). + Resource("clusterroles"). + Name(clusterRole.Name). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *clusterRoles) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoles) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched clusterRole. 
+func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRole, err error) { + result = &v1.ClusterRole{} + err = c.client.Patch(pt). + Resource("clusterroles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go new file mode 100644 index 000000000..21dda4980 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go @@ -0,0 +1,147 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/rbac/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. +// A group's client should implement this interface. +type ClusterRoleBindingsGetter interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. +type ClusterRoleBindingInterface interface { + Create(*v1.ClusterRoleBinding) (*v1.ClusterRoleBinding, error) + Update(*v1.ClusterRoleBinding) (*v1.ClusterRoleBinding, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ClusterRoleBinding, error) + List(opts meta_v1.ListOptions) (*v1.ClusterRoleBindingList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRoleBinding, err error) + ClusterRoleBindingExpansion +} + +// clusterRoleBindings implements ClusterRoleBindingInterface +type clusterRoleBindings struct { + client rest.Interface +} + +// newClusterRoleBindings returns a ClusterRoleBindings +func newClusterRoleBindings(c *RbacV1Client) *clusterRoleBindings { + return &clusterRoleBindings{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *clusterRoleBindings) Get(name string, options meta_v1.GetOptions) (result *v1.ClusterRoleBinding, err error) { + result = &v1.ClusterRoleBinding{} + err = c.client.Get(). + Resource("clusterrolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. 
+func (c *clusterRoleBindings) List(opts meta_v1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { + result = &v1.ClusterRoleBindingList{} + err = c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Create(clusterRoleBinding *v1.ClusterRoleBinding) (result *v1.ClusterRoleBinding, err error) { + result = &v1.ClusterRoleBinding{} + err = c.client.Post(). + Resource("clusterrolebindings"). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Update(clusterRoleBinding *v1.ClusterRoleBinding) (result *v1.ClusterRoleBinding, err error) { + result = &v1.ClusterRoleBinding{} + err = c.client.Put(). + Resource("clusterrolebindings"). + Name(clusterRoleBinding.Name). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *clusterRoleBindings) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoleBindings) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched clusterRoleBinding. +func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRoleBinding, err error) { + result = &v1.ClusterRoleBinding{} + err = c.client.Patch(pt). + Resource("clusterrolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go new file mode 100644 index 000000000..3af5d054f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go new file mode 100644 index 000000000..01e62aeb5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rbac_v1 "k8s.io/api/rbac/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterRoles implements ClusterRoleInterface +type FakeClusterRoles struct { + Fake *FakeRbacV1 +} + +var clusterrolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"} + +var clusterrolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole"} + +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. +func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *rbac_v1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clusterrolesResource, name), &rbac_v1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.ClusterRole), err +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. +func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *rbac_v1.ClusterRoleList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &rbac_v1.ClusterRoleList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &rbac_v1.ClusterRoleList{ListMeta: obj.(*rbac_v1.ClusterRoleList).ListMeta} + for _, item := range obj.(*rbac_v1.ClusterRoleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *FakeClusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) +} + +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *FakeClusterRoles) Create(clusterRole *rbac_v1.ClusterRole) (result *rbac_v1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &rbac_v1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.ClusterRole), err +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *FakeClusterRoles) Update(clusterRole *rbac_v1.ClusterRole) (result *rbac_v1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &rbac_v1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.ClusterRole), err +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *FakeClusterRoles) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &rbac_v1.ClusterRole{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOptions) + + _, err := c.Fake.Invokes(action, &rbac_v1.ClusterRoleList{}) + return err +} + +// Patch applies the patch and returns the patched clusterRole. +func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbac_v1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, data, subresources...), &rbac_v1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.ClusterRole), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go new file mode 100644 index 000000000..e5c054c9c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rbac_v1 "k8s.io/api/rbac/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterRoleBindings implements ClusterRoleBindingInterface +type FakeClusterRoleBindings struct { + Fake *FakeRbacV1 +} + +var clusterrolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterrolebindings"} + +var clusterrolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRoleBinding"} + +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (result *rbac_v1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &rbac_v1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.ClusterRoleBinding), err +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. +func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *rbac_v1.ClusterRoleBindingList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &rbac_v1.ClusterRoleBindingList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &rbac_v1.ClusterRoleBindingList{ListMeta: obj.(*rbac_v1.ClusterRoleBindingList).ListMeta} + for _, item := range obj.(*rbac_v1.ClusterRoleBindingList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *FakeClusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *rbac_v1.ClusterRoleBinding) (result *rbac_v1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &rbac_v1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.ClusterRoleBinding), err +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. 
+func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *rbac_v1.ClusterRoleBinding) (result *rbac_v1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &rbac_v1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.ClusterRoleBinding), err +} + +// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *FakeClusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &rbac_v1.ClusterRoleBinding{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions) + + _, err := c.Fake.Invokes(action, &rbac_v1.ClusterRoleBindingList{}) + return err +} + +// Patch applies the patch and returns the patched clusterRoleBinding. +func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbac_v1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, data, subresources...), &rbac_v1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.ClusterRoleBinding), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go new file mode 100644 index 000000000..426fd70d6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/client-go/kubernetes/typed/rbac/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeRbacV1 struct { + *testing.Fake +} + +func (c *FakeRbacV1) ClusterRoles() v1.ClusterRoleInterface { + return &FakeClusterRoles{c} +} + +func (c *FakeRbacV1) ClusterRoleBindings() v1.ClusterRoleBindingInterface { + return &FakeClusterRoleBindings{c} +} + +func (c *FakeRbacV1) Roles(namespace string) v1.RoleInterface { + return &FakeRoles{c, namespace} +} + +func (c *FakeRbacV1) RoleBindings(namespace string) v1.RoleBindingInterface { + return &FakeRoleBindings{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeRbacV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go new file mode 100644 index 000000000..413c70c09 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go @@ -0,0 +1,128 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rbac_v1 "k8s.io/api/rbac/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeRoles implements RoleInterface +type FakeRoles struct { + Fake *FakeRbacV1 + ns string +} + +var rolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "roles"} + +var rolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "Role"} + +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *rbac_v1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(rolesResource, c.ns, name), &rbac_v1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.Role), err +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *FakeRoles) List(opts v1.ListOptions) (result *rbac_v1.RoleList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &rbac_v1.RoleList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &rbac_v1.RoleList{ListMeta: obj.(*rbac_v1.RoleList).ListMeta} + for _, item := range obj.(*rbac_v1.RoleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *FakeRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) + +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *FakeRoles) Create(role *rbac_v1.Role) (result *rbac_v1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &rbac_v1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.Role), err +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. 
+func (c *FakeRoles) Update(role *rbac_v1.Role) (result *rbac_v1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &rbac_v1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.Role), err +} + +// Delete takes name of the role and deletes it. Returns an error if one occurs. +func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &rbac_v1.Role{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &rbac_v1.RoleList{}) + return err +} + +// Patch applies the patch and returns the patched role. +func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbac_v1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, data, subresources...), &rbac_v1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.Role), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go new file mode 100644 index 000000000..536377b30 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go @@ -0,0 +1,128 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rbac_v1 "k8s.io/api/rbac/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeRoleBindings implements RoleBindingInterface +type FakeRoleBindings struct { + Fake *FakeRbacV1 + ns string +} + +var rolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "rolebindings"} + +var rolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "RoleBinding"} + +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *rbac_v1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &rbac_v1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.RoleBinding), err +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. 
+func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *rbac_v1.RoleBindingList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &rbac_v1.RoleBindingList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &rbac_v1.RoleBindingList{ListMeta: obj.(*rbac_v1.RoleBindingList).ListMeta} + for _, item := range obj.(*rbac_v1.RoleBindingList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *FakeRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) + +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *FakeRoleBindings) Create(roleBinding *rbac_v1.RoleBinding) (result *rbac_v1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &rbac_v1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.RoleBinding), err +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *FakeRoleBindings) Update(roleBinding *rbac_v1.RoleBinding) (result *rbac_v1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &rbac_v1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.RoleBinding), err +} + +// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &rbac_v1.RoleBinding{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &rbac_v1.RoleBindingList{}) + return err +} + +// Patch applies the patch and returns the patched roleBinding. +func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbac_v1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, data, subresources...), &rbac_v1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*rbac_v1.RoleBinding), err +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/conversion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go similarity index 69% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1/conversion.go rename to vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go index 2ff5732d6..e3f1b02e3 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/conversion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,13 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 -import ( - "k8s.io/apimachinery/pkg/runtime" -) +type ClusterRoleExpansion interface{} -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - return scheme.AddConversionFuncs() -} +type ClusterRoleBindingExpansion interface{} + +type RoleExpansion interface{} + +type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go new file mode 100644 index 000000000..e3855bb9b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go @@ -0,0 +1,105 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/rbac/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type RbacV1Interface interface { + RESTClient() rest.Interface + ClusterRolesGetter + ClusterRoleBindingsGetter + RolesGetter + RoleBindingsGetter +} + +// RbacV1Client is used to interact with features provided by the rbac.authorization.k8s.io group. +type RbacV1Client struct { + restClient rest.Interface +} + +func (c *RbacV1Client) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +func (c *RbacV1Client) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *RbacV1Client) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *RbacV1Client) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +// NewForConfig creates a new RbacV1Client for the given config. +func NewForConfig(c *rest.Config) (*RbacV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &RbacV1Client{client}, nil +} + +// NewForConfigOrDie creates a new RbacV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *RbacV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RbacV1Client for the given RESTClient. 
+func New(c rest.Interface) *RbacV1Client { + return &RbacV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *RbacV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go new file mode 100644 index 000000000..cb7c5c4e8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go @@ -0,0 +1,157 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/rbac/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RolesGetter has a method to return a RoleInterface. +// A group's client should implement this interface. +type RolesGetter interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. +type RoleInterface interface { + Create(*v1.Role) (*v1.Role, error) + Update(*v1.Role) (*v1.Role, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Role, error) + List(opts meta_v1.ListOptions) (*v1.RoleList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Role, err error) + RoleExpansion +} + +// roles implements RoleInterface +type roles struct { + client rest.Interface + ns string +} + +// newRoles returns a Roles +func newRoles(c *RbacV1Client, namespace string) *roles { + return &roles{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *roles) Get(name string, options meta_v1.GetOptions) (result *v1.Role, err error) { + result = &v1.Role{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *roles) List(opts meta_v1.ListOptions) (result *v1.RoleList, err error) { + result = &v1.RoleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *roles) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Create(role *v1.Role) (result *v1.Role, err error) { + result = &v1.Role{} + err = c.client.Post(). + Namespace(c.ns). + Resource("roles"). + Body(role). + Do(). + Into(result) + return +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Update(role *v1.Role) (result *v1.Role, err error) { + result = &v1.Role{} + err = c.client.Put(). + Namespace(c.ns). + Resource("roles"). + Name(role.Name). + Body(role). + Do(). + Into(result) + return +} + +// Delete takes name of the role and deletes it. Returns an error if one occurs. +func (c *roles) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roles) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched role. +func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Role, err error) { + result = &v1.Role{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("roles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go new file mode 100644 index 000000000..a19010fa8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go @@ -0,0 +1,157 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/rbac/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RoleBindingsGetter has a method to return a RoleBindingInterface. +// A group's client should implement this interface. +type RoleBindingsGetter interface { + RoleBindings(namespace string) RoleBindingInterface +} + +// RoleBindingInterface has methods to work with RoleBinding resources. 
+type RoleBindingInterface interface { + Create(*v1.RoleBinding) (*v1.RoleBinding, error) + Update(*v1.RoleBinding) (*v1.RoleBinding, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.RoleBinding, error) + List(opts meta_v1.ListOptions) (*v1.RoleBindingList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RoleBinding, err error) + RoleBindingExpansion +} + +// roleBindings implements RoleBindingInterface +type roleBindings struct { + client rest.Interface + ns string +} + +// newRoleBindings returns a RoleBindings +func newRoleBindings(c *RbacV1Client, namespace string) *roleBindings { + return &roleBindings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *roleBindings) Get(name string, options meta_v1.GetOptions) (result *v1.RoleBinding, err error) { + result = &v1.RoleBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. +func (c *roleBindings) List(opts meta_v1.ListOptions) (result *v1.RoleBindingList, err error) { + result = &v1.RoleBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Create(roleBinding *v1.RoleBinding) (result *v1.RoleBinding, err error) { + result = &v1.RoleBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("rolebindings"). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Update(roleBinding *v1.RoleBinding) (result *v1.RoleBinding, err error) { + result = &v1.RoleBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("rolebindings"). + Name(roleBinding.Name). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *roleBindings) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roleBindings) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). 
+ Error() +} + +// Patch applies the patch and returns the patched roleBinding. +func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RoleBinding, err error) { + result = &v1.RoleBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("rolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go index 241061025..37a545762 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" rest "k8s.io/client-go/rest" ) @@ -56,6 +58,38 @@ func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles { } } +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. +func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Get(). + Resource("clusterroles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. +func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { + result = &v1alpha1.ClusterRoleList{} + err = c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. func (c *clusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { result = &v1alpha1.ClusterRole{} @@ -99,38 +133,6 @@ func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v Error() } -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { - result = &v1alpha1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched clusterRole. func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) { result = &v1alpha1.ClusterRole{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go index bc48873a5..605078906 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" rest "k8s.io/client-go/rest" ) @@ -56,6 +58,38 @@ func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings { } } +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Get(). + Resource("clusterrolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { + result = &v1alpha1.ClusterRoleBindingList{} + err = c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. 
func (c *clusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { result = &v1alpha1.ClusterRoleBinding{} @@ -99,38 +133,6 @@ func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOp Error() } -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { - result = &v1alpha1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched clusterRoleBinding. func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { result = &v1alpha1.ClusterRoleBinding{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go index ba8d10d3b..df51baa4d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. 
package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go index fed2d4e8c..13fbce4e7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" testing "k8s.io/client-go/testing" ) @@ -33,37 +35,9 @@ type FakeClusterRoles struct { var clusterrolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "clusterroles"} -func (c *FakeClusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterRole), err -} - -func (c *FakeClusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterRole), err -} - -func (c *FakeClusterRoles) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &v1alpha1.ClusterRole{}) - return err -} - -func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleList{}) - return err -} +var clusterrolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "ClusterRole"} +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1alpha1.ClusterRole{}) @@ -73,9 +47,10 @@ func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *v1al return obj.(*v1alpha1.ClusterRole), err } +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(clusterrolesResource, opts), &v1alpha1.ClusterRoleList{}) + Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1alpha1.ClusterRoleList{}) if obj == nil { return nil, err } @@ -84,7 +59,7 @@ func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRo if label == nil { label = labels.Everything() } - list := &v1alpha1.ClusterRoleList{} + list := &v1alpha1.ClusterRoleList{ListMeta: obj.(*v1alpha1.ClusterRoleList).ListMeta} for _, item := range obj.(*v1alpha1.ClusterRoleList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -99,6 +74,41 @@ func (c *FakeClusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) } +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *FakeClusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRole), err +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *FakeClusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRole), err +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *FakeClusterRoles) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &v1alpha1.ClusterRole{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleList{}) + return err +} + // Patch applies the patch and returns the patched clusterRole. func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go index 9e577cdf0..5076543d9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" testing "k8s.io/client-go/testing" ) @@ -33,37 +35,9 @@ type FakeClusterRoleBindings struct { var clusterrolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "clusterrolebindings"} -func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterRoleBinding), err -} - -func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterRoleBinding), err -} - -func (c *FakeClusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{}) - return err -} - -func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleBindingList{}) - return err -} +var clusterrolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "ClusterRoleBinding"} +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{}) @@ -73,9 +47,10 @@ func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (resul return obj.(*v1alpha1.ClusterRoleBinding), err } +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(clusterrolebindingsResource, opts), &v1alpha1.ClusterRoleBindingList{}) + Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1alpha1.ClusterRoleBindingList{}) if obj == nil { return nil, err } @@ -84,7 +59,7 @@ func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.Cl if label == nil { label = labels.Everything() } - list := &v1alpha1.ClusterRoleBindingList{} + list := &v1alpha1.ClusterRoleBindingList{ListMeta: obj.(*v1alpha1.ClusterRoleBindingList).ListMeta} for _, item := range obj.(*v1alpha1.ClusterRoleBindingList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -99,6 +74,41 @@ func (c *FakeClusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, e InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) } +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRoleBinding), err +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRoleBinding), err +} + +// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *FakeClusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleBindingList{}) + return err +} + // Patch applies the patch and returns the patched clusterRoleBinding. func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go index 11470696d..3447e9be8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go index 511b21049..24d8efee3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" testing "k8s.io/client-go/testing" ) @@ -34,40 +36,9 @@ type FakeRoles struct { var rolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "roles"} -func (c *FakeRoles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Role), err -} - -func (c *FakeRoles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Role), err -} - -func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &v1alpha1.Role{}) - - return err -} - -func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.RoleList{}) - return err -} +var rolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "Role"} +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1alpha1.Role{}) @@ -78,9 +49,10 @@ func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.Ro return obj.(*v1alpha1.Role), err } +// List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *FakeRoles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(rolesResource, c.ns, opts), &v1alpha1.RoleList{}) + Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1alpha1.RoleList{}) if obj == nil { return nil, err @@ -90,7 +62,7 @@ func (c *FakeRoles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err er if label == nil { label = labels.Everything() } - list := &v1alpha1.RoleList{} + list := &v1alpha1.RoleList{ListMeta: obj.(*v1alpha1.RoleList).ListMeta} for _, item := range obj.(*v1alpha1.RoleList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -106,6 +78,44 @@ func (c *FakeRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { } +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *FakeRoles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Role), err +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. +func (c *FakeRoles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Role), err +} + +// Delete takes name of the role and deletes it. Returns an error if one occurs. +func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &v1alpha1.Role{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.RoleList{}) + return err +} + // Patch applies the patch and returns the patched role. func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go index 85d6c7edb..cb01ef99d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" testing "k8s.io/client-go/testing" ) @@ -34,40 +36,9 @@ type FakeRoleBindings struct { var rolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "rolebindings"} -func (c *FakeRoleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RoleBinding), err -} - -func (c *FakeRoleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RoleBinding), err -} - -func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{}) - - return err -} - -func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.RoleBindingList{}) - return err -} +var rolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "RoleBinding"} +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{}) @@ -78,9 +49,10 @@ func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *v1al return obj.(*v1alpha1.RoleBinding), err } +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(rolebindingsResource, c.ns, opts), &v1alpha1.RoleBindingList{}) + Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1alpha1.RoleBindingList{}) if obj == nil { return nil, err @@ -90,7 +62,7 @@ func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindi if label == nil { label = labels.Everything() } - list := &v1alpha1.RoleBindingList{} + list := &v1alpha1.RoleBindingList{ListMeta: obj.(*v1alpha1.RoleBindingList).ListMeta} for _, item := range obj.(*v1alpha1.RoleBindingList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -106,6 +78,44 @@ func (c *FakeRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { } +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. 
+func (c *FakeRoleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RoleBinding), err +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *FakeRoleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RoleBinding), err +} + +// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.RoleBindingList{}) + return err +} + // Patch applies the patch and returns the patched roleBinding. func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go index f506fc346..b8b5c7869 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 type ClusterRoleExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go index 67256d50f..de83531ed 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package v1alpha1 import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" rest "k8s.io/client-go/rest" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go index 9c4fa915e..aa6954bb5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" rest "k8s.io/client-go/rest" ) @@ -58,6 +60,41 @@ func newRoles(c *RbacV1alpha1Client, namespace string) *roles { } } +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *roles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { + result = &v1alpha1.RoleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. func (c *roles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { result = &v1alpha1.Role{} @@ -105,41 +142,6 @@ func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListO Error() } -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { - result = &v1alpha1.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched role. func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) { result = &v1alpha1.Role{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go index 5e81967f1..0941b8e86 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" rest "k8s.io/client-go/rest" ) @@ -58,6 +60,41 @@ func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings { } } +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. +func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { + result = &v1alpha1.RoleBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. func (c *roleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { result = &v1alpha1.RoleBinding{} @@ -105,41 +142,6 @@ func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v Error() } -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). 
- Resource("rolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { - result = &v1alpha1.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched roleBinding. func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) { result = &v1alpha1.RoleBinding{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go index 6a63571f3..bac951c87 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" rest "k8s.io/client-go/rest" ) @@ -56,6 +58,38 @@ func newClusterRoles(c *RbacV1beta1Client) *clusterRoles { } } +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. +func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Get(). + Resource("clusterroles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. +func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { + result = &v1beta1.ClusterRoleList{} + err = c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. 
func (c *clusterRoles) Create(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { result = &v1beta1.ClusterRole{} @@ -99,38 +133,6 @@ func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v Error() } -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { - result = &v1beta1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched clusterRole. func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) { result = &v1beta1.ClusterRole{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go index a2f387509..96c91de6e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" rest "k8s.io/client-go/rest" ) @@ -56,6 +58,38 @@ func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings { } } +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Get(). + Resource("clusterrolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { + result = &v1beta1.ClusterRoleBindingList{} + err = c.client.Get(). + Resource("clusterrolebindings"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. func (c *clusterRoleBindings) Create(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { result = &v1beta1.ClusterRoleBinding{} @@ -99,38 +133,6 @@ func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOp Error() } -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { - result = &v1beta1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched clusterRoleBinding. func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { result = &v1beta1.ClusterRoleBinding{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go index 11b523897..771101956 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go index 25e04d15a..62a832197 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" testing "k8s.io/client-go/testing" ) @@ -33,37 +35,9 @@ type FakeClusterRoles struct { var clusterrolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "clusterroles"} -func (c *FakeClusterRoles) Create(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ClusterRole), err -} - -func (c *FakeClusterRoles) Update(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ClusterRole), err -} - -func (c *FakeClusterRoles) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &v1beta1.ClusterRole{}) - return err -} - -func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleList{}) - return err -} +var clusterrolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "ClusterRole"} +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { obj, err := c.Fake. 
Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1beta1.ClusterRole{}) @@ -73,9 +47,10 @@ func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *v1be return obj.(*v1beta1.ClusterRole), err } +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolesResource, opts), &v1beta1.ClusterRoleList{}) + Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1beta1.ClusterRoleList{}) if obj == nil { return nil, err } @@ -84,7 +59,7 @@ func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRol if label == nil { label = labels.Everything() } - list := &v1beta1.ClusterRoleList{} + list := &v1beta1.ClusterRoleList{ListMeta: obj.(*v1beta1.ClusterRoleList).ListMeta} for _, item := range obj.(*v1beta1.ClusterRoleList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -99,6 +74,41 @@ func (c *FakeClusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) } +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *FakeClusterRoles) Create(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ClusterRole), err +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *FakeClusterRoles) Update(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ClusterRole), err +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *FakeClusterRoles) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &v1beta1.ClusterRole{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleList{}) + return err +} + // Patch applies the patch and returns the patched clusterRole. func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go index 38fb39c89..c9ab47269 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. 
+Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" testing "k8s.io/client-go/testing" ) @@ -33,37 +35,9 @@ type FakeClusterRoleBindings struct { var clusterrolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "clusterrolebindings"} -func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ClusterRoleBinding), err -} - -func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ClusterRoleBinding), err -} - -func (c *FakeClusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &v1beta1.ClusterRoleBinding{}) - return err -} - -func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleBindingList{}) - return err -} +var clusterrolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "ClusterRoleBinding"} +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1beta1.ClusterRoleBinding{}) @@ -73,9 +47,10 @@ func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (resul return obj.(*v1beta1.ClusterRoleBinding), err } +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(clusterrolebindingsResource, opts), &v1beta1.ClusterRoleBindingList{}) + Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1beta1.ClusterRoleBindingList{}) if obj == nil { return nil, err } @@ -84,7 +59,7 @@ func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.Clu if label == nil { label = labels.Everything() } - list := &v1beta1.ClusterRoleBindingList{} + list := &v1beta1.ClusterRoleBindingList{ListMeta: obj.(*v1beta1.ClusterRoleBindingList).ListMeta} for _, item := range obj.(*v1beta1.ClusterRoleBindingList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -99,6 +74,41 @@ func (c *FakeClusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, e InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) } +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ClusterRoleBinding), err +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ClusterRoleBinding), err +} + +// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *FakeClusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &v1beta1.ClusterRoleBinding{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleBindingList{}) + return err +} + // Patch applies the patch and returns the patched clusterRoleBinding. func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go index 929d03529..bdbc246b7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go index fd7637613..45b07a001 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" testing "k8s.io/client-go/testing" ) @@ -34,40 +36,9 @@ type FakeRoles struct { var rolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "roles"} -func (c *FakeRoles) Create(role *v1beta1.Role) (result *v1beta1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1beta1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Role), err -} - -func (c *FakeRoles) Update(role *v1beta1.Role) (result *v1beta1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1beta1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Role), err -} - -func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &v1beta1.Role{}) - - return err -} - -func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.RoleList{}) - return err -} +var rolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "Role"} +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1beta1.Role{}) @@ -78,9 +49,10 @@ func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *v1beta1.Rol return obj.(*v1beta1.Role), err } +// List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *FakeRoles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(rolesResource, c.ns, opts), &v1beta1.RoleList{}) + Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1beta1.RoleList{}) if obj == nil { return nil, err @@ -90,7 +62,7 @@ func (c *FakeRoles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err err if label == nil { label = labels.Everything() } - list := &v1beta1.RoleList{} + list := &v1beta1.RoleList{ListMeta: obj.(*v1beta1.RoleList).ListMeta} for _, item := range obj.(*v1beta1.RoleList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -106,6 +78,44 @@ func (c *FakeRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { } +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *FakeRoles) Create(role *v1beta1.Role) (result *v1beta1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1beta1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Role), err +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. +func (c *FakeRoles) Update(role *v1beta1.Role) (result *v1beta1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1beta1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Role), err +} + +// Delete takes name of the role and deletes it. Returns an error if one occurs. +func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &v1beta1.Role{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.RoleList{}) + return err +} + // Patch applies the patch and returns the patched role. func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go index 5e55996fb..1efd40005 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" testing "k8s.io/client-go/testing" ) @@ -34,40 +36,9 @@ type FakeRoleBindings struct { var rolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "rolebindings"} -func (c *FakeRoleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.RoleBinding), err -} - -func (c *FakeRoleBindings) Update(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.RoleBinding), err -} - -func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &v1beta1.RoleBinding{}) - - return err -} - -func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.RoleBindingList{}) - return err -} +var rolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "RoleBinding"} +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1beta1.RoleBinding{}) @@ -78,9 +49,10 @@ func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *v1be return obj.(*v1beta1.RoleBinding), err } +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(rolebindingsResource, c.ns, opts), &v1beta1.RoleBindingList{}) + Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1beta1.RoleBindingList{}) if obj == nil { return nil, err @@ -90,7 +62,7 @@ func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindin if label == nil { label = labels.Everything() } - list := &v1beta1.RoleBindingList{} + list := &v1beta1.RoleBindingList{ListMeta: obj.(*v1beta1.RoleBindingList).ListMeta} for _, item := range obj.(*v1beta1.RoleBindingList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -106,6 +78,44 @@ func (c *FakeRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { } +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *FakeRoleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.RoleBinding), err +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *FakeRoleBindings) Update(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.RoleBinding), err +} + +// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &v1beta1.RoleBinding{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.RoleBindingList{}) + return err +} + // Patch applies the patch and returns the patched roleBinding. func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go index d7f80c004..e7be79f8d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 type ClusterRoleExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go index a24ef4f9a..46718d731 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package v1beta1 import ( + v1beta1 "k8s.io/api/rbac/v1beta1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" rest "k8s.io/client-go/rest" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go index aa57ac319..66f382c07 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" rest "k8s.io/client-go/rest" ) @@ -58,6 +60,41 @@ func newRoles(c *RbacV1beta1Client, namespace string) *roles { } } +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *roles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) { + result = &v1beta1.RoleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. func (c *roles) Create(role *v1beta1.Role) (result *v1beta1.Role, err error) { result = &v1beta1.Role{} @@ -105,41 +142,6 @@ func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListO Error() } -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) { - result = &v1beta1.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched role. func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) { result = &v1beta1.Role{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go index 3dee9107e..67d3d331b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" rest "k8s.io/client-go/rest" ) @@ -58,6 +60,41 @@ func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings { } } +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. +func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { + result = &v1beta1.RoleBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. func (c *roleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { result = &v1beta1.RoleBinding{} @@ -105,41 +142,6 @@ func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v Error() } -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). 
- Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { - result = &v1beta1.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched roleBinding. func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) { result = &v1beta1.RoleBinding{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go new file mode 100644 index 000000000..df51baa4d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go new file mode 100644 index 000000000..8ab4421a9 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/api/scheduling/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePriorityClasses implements PriorityClassInterface +type FakePriorityClasses struct { + Fake *FakeSchedulingV1alpha1 +} + +var priorityclassesResource = schema.GroupVersionResource{Group: "scheduling.k8s.io", Version: "v1alpha1", Resource: "priorityclasses"} + +var priorityclassesKind = schema.GroupVersionKind{Group: "scheduling.k8s.io", Version: "v1alpha1", Kind: "PriorityClass"} + +// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. +func (c *FakePriorityClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1alpha1.PriorityClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PriorityClass), err +} + +// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. +func (c *FakePriorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1alpha1.PriorityClassList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.PriorityClassList{ListMeta: obj.(*v1alpha1.PriorityClassList).ListMeta} + for _, item := range obj.(*v1alpha1.PriorityClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested priorityClasses. +func (c *FakePriorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts)) +} + +// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *FakePriorityClasses) Create(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PriorityClass), err +} + +// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. 
+func (c *FakePriorityClasses) Update(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PriorityClass), err +} + +// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. +func (c *FakePriorityClasses) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(priorityclassesResource, name), &v1alpha1.PriorityClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePriorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.PriorityClassList{}) + return err +} + +// Patch applies the patch and returns the patched priorityClass. +func (c *FakePriorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, data, subresources...), &v1alpha1.PriorityClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PriorityClass), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go new file mode 100644 index 000000000..974ba193f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeSchedulingV1alpha1 struct { + *testing.Fake +} + +func (c *FakeSchedulingV1alpha1) PriorityClasses() v1alpha1.PriorityClassInterface { + return &FakePriorityClasses{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeSchedulingV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go similarity index 81% rename from vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go index f43f7bc08..52f81d881 100644 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,5 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +groupName=rbac.authorization.k8s.io +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 + +type PriorityClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go new file mode 100644 index 000000000..6845d25c3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go @@ -0,0 +1,147 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/scheduling/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PriorityClassesGetter has a method to return a PriorityClassInterface. +// A group's client should implement this interface. +type PriorityClassesGetter interface { + PriorityClasses() PriorityClassInterface +} + +// PriorityClassInterface has methods to work with PriorityClass resources. 
+type PriorityClassInterface interface { + Create(*v1alpha1.PriorityClass) (*v1alpha1.PriorityClass, error) + Update(*v1alpha1.PriorityClass) (*v1alpha1.PriorityClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.PriorityClass, error) + List(opts v1.ListOptions) (*v1alpha1.PriorityClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) + PriorityClassExpansion +} + +// priorityClasses implements PriorityClassInterface +type priorityClasses struct { + client rest.Interface +} + +// newPriorityClasses returns a PriorityClasses +func newPriorityClasses(c *SchedulingV1alpha1Client) *priorityClasses { + return &priorityClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. +func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { + result = &v1alpha1.PriorityClass{} + err = c.client.Get(). + Resource("priorityclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. +func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { + result = &v1alpha1.PriorityClassList{} + err = c.client.Get(). + Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested priorityClasses. +func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *priorityClasses) Create(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) { + result = &v1alpha1.PriorityClass{} + err = c.client.Post(). + Resource("priorityclasses"). + Body(priorityClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *priorityClasses) Update(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) { + result = &v1alpha1.PriorityClass{} + err = c.client.Put(). + Resource("priorityclasses"). + Name(priorityClass.Name). + Body(priorityClass). + Do(). + Into(result) + return +} + +// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. +func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("priorityclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("priorityclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). 
+ Do(). + Error() +} + +// Patch applies the patch and returns the patched priorityClass. +func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) { + result = &v1alpha1.PriorityClass{} + err = c.client.Patch(pt). + Resource("priorityclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go new file mode 100644 index 000000000..375f41b8d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/scheduling/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type SchedulingV1alpha1Interface interface { + RESTClient() rest.Interface + PriorityClassesGetter +} + +// SchedulingV1alpha1Client is used to interact with features provided by the scheduling.k8s.io group. +type SchedulingV1alpha1Client struct { + restClient rest.Interface +} + +func (c *SchedulingV1alpha1Client) PriorityClasses() PriorityClassInterface { + return newPriorityClasses(c) +} + +// NewForConfig creates a new SchedulingV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*SchedulingV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SchedulingV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new SchedulingV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SchedulingV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SchedulingV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *SchedulingV1alpha1Client { + return &SchedulingV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *SchedulingV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go new file mode 100644 index 000000000..771101956 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go new file mode 100644 index 000000000..e234fec66 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1beta1 "k8s.io/api/scheduling/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePriorityClasses implements PriorityClassInterface +type FakePriorityClasses struct { + Fake *FakeSchedulingV1beta1 +} + +var priorityclassesResource = schema.GroupVersionResource{Group: "scheduling.k8s.io", Version: "v1beta1", Resource: "priorityclasses"} + +var priorityclassesKind = schema.GroupVersionKind{Group: "scheduling.k8s.io", Version: "v1beta1", Kind: "PriorityClass"} + +// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. +func (c *FakePriorityClasses) Get(name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1beta1.PriorityClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PriorityClass), err +} + +// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. +func (c *FakePriorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1beta1.PriorityClassList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.PriorityClassList{ListMeta: obj.(*v1beta1.PriorityClassList).ListMeta} + for _, item := range obj.(*v1beta1.PriorityClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested priorityClasses. +func (c *FakePriorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts)) +} + +// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *FakePriorityClasses) Create(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PriorityClass), err +} + +// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *FakePriorityClasses) Update(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PriorityClass), err +} + +// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. +func (c *FakePriorityClasses) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewRootDeleteAction(priorityclassesResource, name), &v1beta1.PriorityClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePriorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.PriorityClassList{}) + return err +} + +// Patch applies the patch and returns the patched priorityClass. +func (c *FakePriorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, data, subresources...), &v1beta1.PriorityClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PriorityClass), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go new file mode 100644 index 000000000..4a6878a45 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeSchedulingV1beta1 struct { + *testing.Fake +} + +func (c *FakeSchedulingV1beta1) PriorityClasses() v1beta1.PriorityClassInterface { + return &FakePriorityClasses{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeSchedulingV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go new file mode 100644 index 000000000..3bab873e6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +type PriorityClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go new file mode 100644 index 000000000..57b9766e4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go @@ -0,0 +1,147 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/scheduling/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PriorityClassesGetter has a method to return a PriorityClassInterface. +// A group's client should implement this interface. +type PriorityClassesGetter interface { + PriorityClasses() PriorityClassInterface +} + +// PriorityClassInterface has methods to work with PriorityClass resources. +type PriorityClassInterface interface { + Create(*v1beta1.PriorityClass) (*v1beta1.PriorityClass, error) + Update(*v1beta1.PriorityClass) (*v1beta1.PriorityClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.PriorityClass, error) + List(opts v1.ListOptions) (*v1beta1.PriorityClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) + PriorityClassExpansion +} + +// priorityClasses implements PriorityClassInterface +type priorityClasses struct { + client rest.Interface +} + +// newPriorityClasses returns a PriorityClasses +func newPriorityClasses(c *SchedulingV1beta1Client) *priorityClasses { + return &priorityClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. +func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { + result = &v1beta1.PriorityClass{} + err = c.client.Get(). + Resource("priorityclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. +func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { + result = &v1beta1.PriorityClassList{} + err = c.client.Get(). + Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested priorityClasses. 
+func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *priorityClasses) Create(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) { + result = &v1beta1.PriorityClass{} + err = c.client.Post(). + Resource("priorityclasses"). + Body(priorityClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *priorityClasses) Update(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) { + result = &v1beta1.PriorityClass{} + err = c.client.Put(). + Resource("priorityclasses"). + Name(priorityClass.Name). + Body(priorityClass). + Do(). + Into(result) + return +} + +// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. +func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("priorityclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("priorityclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched priorityClass. +func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) { + result = &v1beta1.PriorityClass{} + err = c.client.Patch(pt). + Resource("priorityclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go new file mode 100644 index 000000000..6feec4aec --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/scheduling/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type SchedulingV1beta1Interface interface { + RESTClient() rest.Interface + PriorityClassesGetter +} + +// SchedulingV1beta1Client is used to interact with features provided by the scheduling.k8s.io group. 
+type SchedulingV1beta1Client struct { + restClient rest.Interface +} + +func (c *SchedulingV1beta1Client) PriorityClasses() PriorityClassInterface { + return newPriorityClasses(c) +} + +// NewForConfig creates a new SchedulingV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*SchedulingV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SchedulingV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new SchedulingV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SchedulingV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SchedulingV1beta1Client for the given RESTClient. +func New(c rest.Interface) *SchedulingV1beta1Client { + return &SchedulingV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *SchedulingV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go index ba8d10d3b..df51baa4d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. 
package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go index aa2fd101b..90eaccec5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1alpha1 "k8s.io/api/settings/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1alpha1 "k8s.io/client-go/pkg/apis/settings/v1alpha1" testing "k8s.io/client-go/testing" ) @@ -34,40 +36,9 @@ type FakePodPresets struct { var podpresetsResource = schema.GroupVersionResource{Group: "settings.k8s.io", Version: "v1alpha1", Resource: "podpresets"} -func (c *FakePodPresets) Create(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PodPreset), err -} - -func (c *FakePodPresets) Update(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PodPreset), err -} - -func (c *FakePodPresets) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(podpresetsResource, c.ns, name), &v1alpha1.PodPreset{}) - - return err -} - -func (c *FakePodPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(podpresetsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.PodPresetList{}) - return err -} +var podpresetsKind = schema.GroupVersionKind{Group: "settings.k8s.io", Version: "v1alpha1", Kind: "PodPreset"} +// Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. func (c *FakePodPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(podpresetsResource, c.ns, name), &v1alpha1.PodPreset{}) @@ -78,9 +49,10 @@ func (c *FakePodPresets) Get(name string, options v1.GetOptions) (result *v1alph return obj.(*v1alpha1.PodPreset), err } +// List takes label and field selectors, and returns the list of PodPresets that match those selectors. func (c *FakePodPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(podpresetsResource, c.ns, opts), &v1alpha1.PodPresetList{}) + Invokes(testing.NewListAction(podpresetsResource, podpresetsKind, c.ns, opts), &v1alpha1.PodPresetList{}) if obj == nil { return nil, err @@ -90,7 +62,7 @@ func (c *FakePodPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetLi if label == nil { label = labels.Everything() } - list := &v1alpha1.PodPresetList{} + list := &v1alpha1.PodPresetList{ListMeta: obj.(*v1alpha1.PodPresetList).ListMeta} for _, item := range obj.(*v1alpha1.PodPresetList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -106,6 +78,44 @@ func (c *FakePodPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { } +// Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any. +func (c *FakePodPresets) Create(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodPreset), err +} + +// Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any. +func (c *FakePodPresets) Update(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodPreset), err +} + +// Delete takes name of the podPreset and deletes it. Returns an error if one occurs. +func (c *FakePodPresets) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(podpresetsResource, c.ns, name), &v1alpha1.PodPreset{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePodPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(podpresetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.PodPresetList{}) + return err +} + // Patch applies the patch and returns the patched podPreset. func (c *FakePodPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go index 6feb9b218..a142edfed 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go index d599b2935..23d9f94d5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 type PodPresetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go index b8fc0a022..f000ae486 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 import ( + v1alpha1 "k8s.io/api/settings/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1alpha1 "k8s.io/client-go/pkg/apis/settings/v1alpha1" rest "k8s.io/client-go/rest" ) @@ -58,6 +60,41 @@ func newPodPresets(c *SettingsV1alpha1Client, namespace string) *podPresets { } } +// Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. +func (c *podPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podpresets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodPresets that match those selectors. +func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { + result = &v1alpha1.PodPresetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podpresets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podPresets. +func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podpresets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any. func (c *podPresets) Create(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { result = &v1alpha1.PodPreset{} @@ -105,41 +142,6 @@ func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1. 
Error() } -// Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. -func (c *podPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodPresets that match those selectors. -func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { - result = &v1alpha1.PodPresetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podPresets. -func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched podPreset. func (c *podPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) { result = &v1alpha1.PodPreset{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go index 1a90034bc..c2a03b960 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 import ( + v1alpha1 "k8s.io/api/settings/v1alpha1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1alpha1 "k8s.io/client-go/pkg/apis/settings/v1alpha1" rest "k8s.io/client-go/rest" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go index 54673bfa7..3af5d054f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. 
package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go index 97972f0f7..fc6f98cf6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go index 3f3a7c077..2b0e279bb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + storage_v1 "k8s.io/api/storage/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1 "k8s.io/client-go/pkg/apis/storage/v1" testing "k8s.io/client-go/testing" ) @@ -33,49 +35,22 @@ type FakeStorageClasses struct { var storageclassesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1", Resource: "storageclasses"} -func (c *FakeStorageClasses) Create(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { +var storageclassesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "StorageClass"} + +// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. +func (c *FakeStorageClasses) Get(name string, options v1.GetOptions) (result *storage_v1.StorageClass, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1.StorageClass{}) + Invokes(testing.NewRootGetAction(storageclassesResource, name), &storage_v1.StorageClass{}) if obj == nil { return nil, err } - return obj.(*v1.StorageClass), err + return obj.(*storage_v1.StorageClass), err } -func (c *FakeStorageClasses) Update(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { +// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. +func (c *FakeStorageClasses) List(opts v1.ListOptions) (result *storage_v1.StorageClassList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.StorageClass), err -} - -func (c *FakeStorageClasses) Delete(name string, options *meta_v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(storageclassesResource, name), &v1.StorageClass{}) - return err -} - -func (c *FakeStorageClasses) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOptions) - - _, err := c.Fake.Invokes(action, &v1.StorageClassList{}) - return err -} - -func (c *FakeStorageClasses) Get(name string, options meta_v1.GetOptions) (result *v1.StorageClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.StorageClass), err -} - -func (c *FakeStorageClasses) List(opts meta_v1.ListOptions) (result *v1.StorageClassList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(storageclassesResource, opts), &v1.StorageClassList{}) + Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &storage_v1.StorageClassList{}) if obj == nil { return nil, err } @@ -84,8 +59,8 @@ func (c *FakeStorageClasses) List(opts meta_v1.ListOptions) (result *v1.StorageC if label == nil { label = labels.Everything() } - list := &v1.StorageClassList{} - for _, item := range obj.(*v1.StorageClassList).Items { + list := &storage_v1.StorageClassList{ListMeta: obj.(*storage_v1.StorageClassList).ListMeta} + for _, item := range obj.(*storage_v1.StorageClassList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -94,17 +69,52 @@ func (c *FakeStorageClasses) List(opts meta_v1.ListOptions) (result *v1.StorageC } // Watch returns a watch.Interface that watches the requested storageClasses. -func (c *FakeStorageClasses) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { +func (c *FakeStorageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts)) } -// Patch applies the patch and returns the patched storageClass. -func (c *FakeStorageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) { +// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *FakeStorageClasses) Create(storageClass *storage_v1.StorageClass) (result *storage_v1.StorageClass, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, data, subresources...), &v1.StorageClass{}) + Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &storage_v1.StorageClass{}) if obj == nil { return nil, err } - return obj.(*v1.StorageClass), err + return obj.(*storage_v1.StorageClass), err +} + +// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *FakeStorageClasses) Update(storageClass *storage_v1.StorageClass) (result *storage_v1.StorageClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &storage_v1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*storage_v1.StorageClass), err +} + +// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. +func (c *FakeStorageClasses) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(storageclassesResource, name), &storage_v1.StorageClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeStorageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOptions) + + _, err := c.Fake.Invokes(action, &storage_v1.StorageClassList{}) + return err +} + +// Patch applies the patch and returns the patched storageClass. +func (c *FakeStorageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storage_v1.StorageClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, data, subresources...), &storage_v1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*storage_v1.StorageClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go index 39df9fb87..2bea7ec7f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 type StorageClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go index a95d30b2d..ac48f4916 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package v1 import ( + v1 "k8s.io/api/storage/v1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/apis/storage/v1" rest "k8s.io/client-go/rest" ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go index 0283c9970..74410b245 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1 import ( + v1 "k8s.io/api/storage/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/pkg/apis/storage/v1" rest "k8s.io/client-go/rest" ) @@ -56,6 +58,38 @@ func newStorageClasses(c *StorageV1Client) *storageClasses { } } +// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. +func (c *storageClasses) Get(name string, options meta_v1.GetOptions) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Get(). + Resource("storageclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. +func (c *storageClasses) List(opts meta_v1.ListOptions) (result *v1.StorageClassList, err error) { + result = &v1.StorageClassList{} + err = c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *storageClasses) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. func (c *storageClasses) Create(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { result = &v1.StorageClass{} @@ -99,38 +133,6 @@ func (c *storageClasses) DeleteCollection(options *meta_v1.DeleteOptions, listOp Error() } -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *storageClasses) Get(name string, options meta_v1.GetOptions) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Get(). - Resource("storageclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *storageClasses) List(opts meta_v1.ListOptions) (result *v1.StorageClassList, err error) { - result = &v1.StorageClassList{} - err = c.client.Get(). - Resource("storageclasses"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *storageClasses) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched storageClass. func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) { result = &v1.StorageClass{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go new file mode 100644 index 000000000..df51baa4d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go new file mode 100644 index 000000000..1a4d9f56f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeStorageV1alpha1 struct { + *testing.Fake +} + +func (c *FakeStorageV1alpha1) VolumeAttachments() v1alpha1.VolumeAttachmentInterface { + return &FakeVolumeAttachments{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeStorageV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go new file mode 100644 index 000000000..af04b681c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go @@ -0,0 +1,131 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeVolumeAttachments implements VolumeAttachmentInterface +type FakeVolumeAttachments struct { + Fake *FakeStorageV1alpha1 +} + +var volumeattachmentsResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1alpha1", Resource: "volumeattachments"} + +var volumeattachmentsKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1alpha1", Kind: "VolumeAttachment"} + +// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. +func (c *FakeVolumeAttachments) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1alpha1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeAttachment), err +} + +// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. +func (c *FakeVolumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1alpha1.VolumeAttachmentList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.VolumeAttachmentList{ListMeta: obj.(*v1alpha1.VolumeAttachmentList).ListMeta} + for _, item := range obj.(*v1alpha1.VolumeAttachmentList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested volumeAttachments. +func (c *FakeVolumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) +} + +// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *FakeVolumeAttachments) Create(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeAttachment), err +} + +// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *FakeVolumeAttachments) Update(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeAttachment), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeVolumeAttachments) UpdateStatus(volumeAttachment *v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1alpha1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeAttachment), err +} + +// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. +func (c *FakeVolumeAttachments) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(volumeattachmentsResource, name), &v1alpha1.VolumeAttachment{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.VolumeAttachmentList{}) + return err +} + +// Patch applies the patch and returns the patched volumeAttachment. +func (c *FakeVolumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, data, subresources...), &v1alpha1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeAttachment), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..cdb7ab2f8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go new file mode 100644 index 000000000..c52f630ac --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type StorageV1alpha1Interface interface { + RESTClient() rest.Interface + VolumeAttachmentsGetter +} + +// StorageV1alpha1Client is used to interact with features provided by the storage.k8s.io group. +type StorageV1alpha1Client struct { + restClient rest.Interface +} + +func (c *StorageV1alpha1Client) VolumeAttachments() VolumeAttachmentInterface { + return newVolumeAttachments(c) +} + +// NewForConfig creates a new StorageV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*StorageV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &StorageV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new StorageV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *StorageV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StorageV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *StorageV1alpha1Client { + return &StorageV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *StorageV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go new file mode 100644 index 000000000..e6af00185 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -0,0 +1,163 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. +// A group's client should implement this interface. +type VolumeAttachmentsGetter interface { + VolumeAttachments() VolumeAttachmentInterface +} + +// VolumeAttachmentInterface has methods to work with VolumeAttachment resources. +type VolumeAttachmentInterface interface { + Create(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) + Update(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) + UpdateStatus(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.VolumeAttachment, error) + List(opts v1.ListOptions) (*v1alpha1.VolumeAttachmentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) + VolumeAttachmentExpansion +} + +// volumeAttachments implements VolumeAttachmentInterface +type volumeAttachments struct { + client rest.Interface +} + +// newVolumeAttachments returns a VolumeAttachments +func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments { + return &volumeAttachments{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. 
+func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Get(). + Resource("volumeattachments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. +func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { + result = &v1alpha1.VolumeAttachmentList{} + err = c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeAttachments. +func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Create(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Post(). + Resource("volumeattachments"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Update(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + SubResource("status"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. +func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeAttachment. +func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Patch(pt). + Resource("volumeattachments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go index 11b523897..771101956 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go index c6548330a..16f443990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. +// Code generated by client-gen. DO NOT EDIT. // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go index a178091d9..e0e3f1d78 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( @@ -30,6 +32,10 @@ func (c *FakeStorageV1beta1) StorageClasses() v1beta1.StorageClassInterface { return &FakeStorageClasses{c} } +func (c *FakeStorageV1beta1) VolumeAttachments() v1beta1.VolumeAttachmentInterface { + return &FakeVolumeAttachments{c} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeStorageV1beta1) RESTClient() rest.Interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go index b8c937d46..cbfbab1a3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( + v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - v1beta1 "k8s.io/client-go/pkg/apis/storage/v1beta1" testing "k8s.io/client-go/testing" ) @@ -33,37 +35,9 @@ type FakeStorageClasses struct { var storageclassesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1beta1", Resource: "storageclasses"} -func (c *FakeStorageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StorageClass), err -} - -func (c *FakeStorageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StorageClass), err -} - -func (c *FakeStorageClasses) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(storageclassesResource, name), &v1beta1.StorageClass{}) - return err -} - -func (c *FakeStorageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.StorageClassList{}) - return err -} +var storageclassesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1beta1", Kind: "StorageClass"} +// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. func (c *FakeStorageClasses) Get(name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { obj, err := c.Fake. Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1beta1.StorageClass{}) @@ -73,9 +47,10 @@ func (c *FakeStorageClasses) Get(name string, options v1.GetOptions) (result *v1 return obj.(*v1beta1.StorageClass), err } +// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. func (c *FakeStorageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(storageclassesResource, opts), &v1beta1.StorageClassList{}) + Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &v1beta1.StorageClassList{}) if obj == nil { return nil, err } @@ -84,7 +59,7 @@ func (c *FakeStorageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageC if label == nil { label = labels.Everything() } - list := &v1beta1.StorageClassList{} + list := &v1beta1.StorageClassList{ListMeta: obj.(*v1beta1.StorageClassList).ListMeta} for _, item := range obj.(*v1beta1.StorageClassList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) @@ -99,6 +74,41 @@ func (c *FakeStorageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts)) } +// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *FakeStorageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StorageClass), err +} + +// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *FakeStorageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.StorageClass), err +} + +// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. +func (c *FakeStorageClasses) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(storageclassesResource, name), &v1beta1.StorageClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeStorageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.StorageClassList{}) + return err +} + // Patch applies the patch and returns the patched storageClass. func (c *FakeStorageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go new file mode 100644 index 000000000..04c0c463a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go @@ -0,0 +1,131 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeVolumeAttachments implements VolumeAttachmentInterface +type FakeVolumeAttachments struct { + Fake *FakeStorageV1beta1 +} + +var volumeattachmentsResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1beta1", Resource: "volumeattachments"} + +var volumeattachmentsKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1beta1", Kind: "VolumeAttachment"} + +// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. +func (c *FakeVolumeAttachments) Get(name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1beta1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.VolumeAttachment), err +} + +// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. +func (c *FakeVolumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1beta1.VolumeAttachmentList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.VolumeAttachmentList{ListMeta: obj.(*v1beta1.VolumeAttachmentList).ListMeta} + for _, item := range obj.(*v1beta1.VolumeAttachmentList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested volumeAttachments. +func (c *FakeVolumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) +} + +// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *FakeVolumeAttachments) Create(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.VolumeAttachment), err +} + +// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *FakeVolumeAttachments) Update(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.VolumeAttachment), err +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeVolumeAttachments) UpdateStatus(volumeAttachment *v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1beta1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.VolumeAttachment), err +} + +// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. +func (c *FakeVolumeAttachments) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(volumeattachmentsResource, name), &v1beta1.VolumeAttachment{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.VolumeAttachmentList{}) + return err +} + +// Patch applies the patch and returns the patched volumeAttachment. +func (c *FakeVolumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, data, subresources...), &v1beta1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.VolumeAttachment), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go index 6f3f0c55e..559f88f67 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 type StorageClassExpansion interface{} + +type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go index 7abb99e57..4bdebb878 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,18 +14,21 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package v1beta1 import ( + v1beta1 "k8s.io/api/storage/v1beta1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/storage/v1beta1" rest "k8s.io/client-go/rest" ) type StorageV1beta1Interface interface { RESTClient() rest.Interface StorageClassesGetter + VolumeAttachmentsGetter } // StorageV1beta1Client is used to interact with features provided by the storage.k8s.io group. @@ -37,6 +40,10 @@ func (c *StorageV1beta1Client) StorageClasses() StorageClassInterface { return newStorageClasses(c) } +func (c *StorageV1beta1Client) VolumeAttachments() VolumeAttachmentInterface { + return newVolumeAttachments(c) +} + // NewForConfig creates a new StorageV1beta1Client for the given config. func NewForConfig(c *rest.Config) (*StorageV1beta1Client, error) { config := *c diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go index 6980fc78b..fbe1fd4c2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1beta1 import ( + v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" - v1beta1 "k8s.io/client-go/pkg/apis/storage/v1beta1" rest "k8s.io/client-go/rest" ) @@ -56,6 +58,38 @@ func newStorageClasses(c *StorageV1beta1Client) *storageClasses { } } +// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. +func (c *storageClasses) Get(name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Get(). + Resource("storageclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. +func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { + result = &v1beta1.StorageClassList{} + err = c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + // Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. 
func (c *storageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { result = &v1beta1.StorageClass{} @@ -99,38 +133,6 @@ func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions Error() } -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *storageClasses) Get(name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Get(). - Resource("storageclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { - result = &v1beta1.StorageClassList{} - err = c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - // Patch applies the patch and returns the patched storageClass. func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { result = &v1beta1.StorageClass{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go new file mode 100644 index 000000000..5cd2d3919 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go @@ -0,0 +1,163 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. +// A group's client should implement this interface. +type VolumeAttachmentsGetter interface { + VolumeAttachments() VolumeAttachmentInterface +} + +// VolumeAttachmentInterface has methods to work with VolumeAttachment resources. 
+type VolumeAttachmentInterface interface { + Create(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) + Update(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) + UpdateStatus(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.VolumeAttachment, error) + List(opts v1.ListOptions) (*v1beta1.VolumeAttachmentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) + VolumeAttachmentExpansion +} + +// volumeAttachments implements VolumeAttachmentInterface +type volumeAttachments struct { + client rest.Interface +} + +// newVolumeAttachments returns a VolumeAttachments +func newVolumeAttachments(c *StorageV1beta1Client) *volumeAttachments { + return &volumeAttachments{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. +func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { + result = &v1beta1.VolumeAttachment{} + err = c.client.Get(). + Resource("volumeattachments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. +func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { + result = &v1beta1.VolumeAttachmentList{} + err = c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeAttachments. +func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Create(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { + result = &v1beta1.VolumeAttachment{} + err = c.client.Post(). + Resource("volumeattachments"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Update(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { + result = &v1beta1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { + result = &v1beta1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). 
+ Name(volumeAttachment.Name). + SubResource("status"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. +func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeAttachment. +func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { + result = &v1beta1.VolumeAttachment{} + err = c.client.Patch(pt). + Resource("volumeattachments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/pkg/api/OWNERS b/vendor/k8s.io/client-go/pkg/api/OWNERS deleted file mode 100644 index 3a9b0c6d1..000000000 --- a/vendor/k8s.io/client-go/pkg/api/OWNERS +++ /dev/null @@ -1,44 +0,0 @@ -approvers: -- erictune -- lavalamp -- smarterclayton -- thockin -reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- yujuhong -- brendandburns -- derekwaynecarr -- caesarxuchao -- vishh -- mikedanese -- liggitt -- nikhiljindal -- bprashanth -- gmarek -- erictune -- davidopp -- pmorie -- sttts -- kargakis -- dchen1107 -- saad-ali -- zmerlynn -- luxas -- janetkuo -- justinsb -- pwittrock -- roberthbailey -- ncdc -- timstclair -- yifan-gu -- eparis -- mwielgus -- timothysc -- soltysh -- piosz -- jsafrane -- jbeda diff --git a/vendor/k8s.io/client-go/pkg/api/defaults.go b/vendor/k8s.io/client-go/pkg/api/defaults.go deleted file mode 100644 index baa49a8d7..000000000 --- a/vendor/k8s.io/client-go/pkg/api/defaults.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return scheme.AddDefaultingFuncs( - func(obj *ListOptions) { - if obj.LabelSelector == nil { - obj.LabelSelector = labels.Everything() - } - if obj.FieldSelector == nil { - obj.FieldSelector = fields.Everything() - } - }, - ) -} diff --git a/vendor/k8s.io/client-go/pkg/api/doc.go b/vendor/k8s.io/client-go/pkg/api/doc.go deleted file mode 100644 index a4262ab36..000000000 --- a/vendor/k8s.io/client-go/pkg/api/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package api contains the latest (or "internal") version of the -// Kubernetes API objects. This is the API objects as represented in memory. -// The contract presented to clients is located in the versioned packages, -// which are sub-directories. The first one is "v1". Those packages -// describe how a particular version is serialized to storage/network. -package api diff --git a/vendor/k8s.io/client-go/pkg/api/field_constants.go b/vendor/k8s.io/client-go/pkg/api/field_constants.go deleted file mode 100644 index 5ead0f13f..000000000 --- a/vendor/k8s.io/client-go/pkg/api/field_constants.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -// Field path constants that are specific to the internal API -// representation. -const ( - NodeUnschedulableField = "spec.unschedulable" - ObjectNameField = "metadata.name" - PodHostField = "spec.nodeName" - PodStatusField = "status.phase" - SecretTypeField = "type" - - EventReasonField = "reason" - EventSourceField = "source" - EventTypeField = "type" - EventInvolvedKindField = "involvedObject.kind" - EventInvolvedNamespaceField = "involvedObject.namespace" - EventInvolvedNameField = "involvedObject.name" - EventInvolvedUIDField = "involvedObject.uid" - EventInvolvedAPIVersionField = "involvedObject.apiVersion" - EventInvolvedResourceVersionField = "involvedObject.resourceVersion" - EventInvolvedFieldPathField = "involvedObject.fieldPath" -) diff --git a/vendor/k8s.io/client-go/pkg/api/helpers.go b/vendor/k8s.io/client-go/pkg/api/helpers.go deleted file mode 100644 index faa7df7c0..000000000 --- a/vendor/k8s.io/client-go/pkg/api/helpers.go +++ /dev/null @@ -1,691 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package api - -import ( - "crypto/md5" - "encoding/json" - "fmt" - "reflect" - "strings" - "time" - - "github.com/davecgh/go-spew/spew" - - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/apimachinery/pkg/util/sets" -) - -// Conversion error conveniently packages up errors in conversions. -type ConversionError struct { - In, Out interface{} - Message string -} - -// Return a helpful string about the error -func (c *ConversionError) Error() string { - return spew.Sprintf( - "Conversion error: %s. (in: %v(%+v) out: %v)", - c.Message, reflect.TypeOf(c.In), c.In, reflect.TypeOf(c.Out), - ) -} - -const ( - // annotation key prefix used to identify non-convertible json paths. - NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" -) - -// NonConvertibleFields iterates over the provided map and filters out all but -// any keys with the "non-convertible.kubernetes.io" prefix. -func NonConvertibleFields(annotations map[string]string) map[string]string { - nonConvertibleKeys := map[string]string{} - for key, value := range annotations { - if strings.HasPrefix(key, NonConvertibleAnnotationPrefix) { - nonConvertibleKeys[key] = value - } - } - return nonConvertibleKeys -} - -// Semantic can do semantic deep equality checks for api objects. -// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true -var Semantic = conversion.EqualitiesOrDie( - func(a, b resource.Quantity) bool { - // Ignore formatting, only care that numeric value stayed the same. - // TODO: if we decide it's important, it should be safe to start comparing the format. - // - // Uninitialized quantities are equivalent to 0 quantities. 
- return a.Cmp(b) == 0 - }, - func(a, b metav1.Time) bool { - return a.UTC() == b.UTC() - }, - func(a, b labels.Selector) bool { - return a.String() == b.String() - }, - func(a, b fields.Selector) bool { - return a.String() == b.String() - }, -) - -var standardResourceQuotaScopes = sets.NewString( - string(ResourceQuotaScopeTerminating), - string(ResourceQuotaScopeNotTerminating), - string(ResourceQuotaScopeBestEffort), - string(ResourceQuotaScopeNotBestEffort), -) - -// IsStandardResourceQuotaScope returns true if the scope is a standard value -func IsStandardResourceQuotaScope(str string) bool { - return standardResourceQuotaScopes.Has(str) -} - -var podObjectCountQuotaResources = sets.NewString( - string(ResourcePods), -) - -var podComputeQuotaResources = sets.NewString( - string(ResourceCPU), - string(ResourceMemory), - string(ResourceLimitsCPU), - string(ResourceLimitsMemory), - string(ResourceRequestsCPU), - string(ResourceRequestsMemory), -) - -// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope -func IsResourceQuotaScopeValidForResource(scope ResourceQuotaScope, resource string) bool { - switch scope { - case ResourceQuotaScopeTerminating, ResourceQuotaScopeNotTerminating, ResourceQuotaScopeNotBestEffort: - return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource) - case ResourceQuotaScopeBestEffort: - return podObjectCountQuotaResources.Has(resource) - default: - return true - } -} - -var standardContainerResources = sets.NewString( - string(ResourceCPU), - string(ResourceMemory), -) - -// IsStandardContainerResourceName returns true if the container can make a resource request -// for the specified resource -func IsStandardContainerResourceName(str string) bool { - return standardContainerResources.Has(str) -} - -// IsOpaqueIntResourceName returns true if the resource name has the opaque -// integer resource prefix. -func IsOpaqueIntResourceName(name ResourceName) bool { - return strings.HasPrefix(string(name), ResourceOpaqueIntPrefix) -} - -// OpaqueIntResourceName returns a ResourceName with the canonical opaque -// integer prefix prepended. If the argument already has the prefix, it is -// returned unmodified. 
-func OpaqueIntResourceName(name string) ResourceName { - if IsOpaqueIntResourceName(ResourceName(name)) { - return ResourceName(name) - } - return ResourceName(fmt.Sprintf("%s%s", ResourceOpaqueIntPrefix, name)) -} - -var standardLimitRangeTypes = sets.NewString( - string(LimitTypePod), - string(LimitTypeContainer), - string(LimitTypePersistentVolumeClaim), -) - -// IsStandardLimitRangeType returns true if the type is Pod or Container -func IsStandardLimitRangeType(str string) bool { - return standardLimitRangeTypes.Has(str) -} - -var standardQuotaResources = sets.NewString( - string(ResourceCPU), - string(ResourceMemory), - string(ResourceRequestsCPU), - string(ResourceRequestsMemory), - string(ResourceRequestsStorage), - string(ResourceLimitsCPU), - string(ResourceLimitsMemory), - string(ResourcePods), - string(ResourceQuotas), - string(ResourceServices), - string(ResourceReplicationControllers), - string(ResourceSecrets), - string(ResourcePersistentVolumeClaims), - string(ResourceConfigMaps), - string(ResourceServicesNodePorts), - string(ResourceServicesLoadBalancers), -) - -// IsStandardQuotaResourceName returns true if the resource is known to -// the quota tracking system -func IsStandardQuotaResourceName(str string) bool { - return standardQuotaResources.Has(str) -} - -var standardResources = sets.NewString( - string(ResourceCPU), - string(ResourceMemory), - string(ResourceRequestsCPU), - string(ResourceRequestsMemory), - string(ResourceLimitsCPU), - string(ResourceLimitsMemory), - string(ResourcePods), - string(ResourceQuotas), - string(ResourceServices), - string(ResourceReplicationControllers), - string(ResourceSecrets), - string(ResourceConfigMaps), - string(ResourcePersistentVolumeClaims), - string(ResourceStorage), - string(ResourceRequestsStorage), -) - -// IsStandardResourceName returns true if the resource is known to the system -func IsStandardResourceName(str string) bool { - return standardResources.Has(str) -} - -var integerResources = sets.NewString( - string(ResourcePods), - string(ResourceQuotas), - string(ResourceServices), - string(ResourceReplicationControllers), - string(ResourceSecrets), - string(ResourceConfigMaps), - string(ResourcePersistentVolumeClaims), - string(ResourceServicesNodePorts), - string(ResourceServicesLoadBalancers), -) - -// IsIntegerResourceName returns true if the resource is measured in integer values -func IsIntegerResourceName(str string) bool { - return integerResources.Has(str) || IsOpaqueIntResourceName(ResourceName(str)) -} - -// this function aims to check if the service's ClusterIP is set or not -// the objective is not to perform validation here -func IsServiceIPSet(service *Service) bool { - return service.Spec.ClusterIP != ClusterIPNone && service.Spec.ClusterIP != "" -} - -// this function aims to check if the service's cluster IP is requested or not -func IsServiceIPRequested(service *Service) bool { - // ExternalName services are CNAME aliases to external ones. Ignore the IP. 
- if service.Spec.Type == ServiceTypeExternalName { - return false - } - return service.Spec.ClusterIP == "" -} - -var standardFinalizers = sets.NewString( - string(FinalizerKubernetes), - metav1.FinalizerOrphanDependents, -) - -// HasAnnotation returns a bool if passed in annotation exists -func HasAnnotation(obj ObjectMeta, ann string) bool { - _, found := obj.Annotations[ann] - return found -} - -// SetMetaDataAnnotation sets the annotation and value -func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) { - if obj.Annotations == nil { - obj.Annotations = make(map[string]string) - } - obj.Annotations[ann] = value -} - -func IsStandardFinalizerName(str string) bool { - return standardFinalizers.Has(str) -} - -// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, -// only if they do not already exist -func AddToNodeAddresses(addresses *[]NodeAddress, addAddresses ...NodeAddress) { - for _, add := range addAddresses { - exists := false - for _, existing := range *addresses { - if existing.Address == add.Address && existing.Type == add.Type { - exists = true - break - } - } - if !exists { - *addresses = append(*addresses, add) - } - } -} - -func HashObject(obj runtime.Object, codec runtime.Codec) (string, error) { - data, err := runtime.Encode(codec, obj) - if err != nil { - return "", err - } - return fmt.Sprintf("%x", md5.Sum(data)), nil -} - -// TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusEqual(l, r *LoadBalancerStatus) bool { - return ingressSliceEqual(l.Ingress, r.Ingress) -} - -func ingressSliceEqual(lhs, rhs []LoadBalancerIngress) bool { - if len(lhs) != len(rhs) { - return false - } - for i := range lhs { - if !ingressEqual(&lhs[i], &rhs[i]) { - return false - } - } - return true -} - -func ingressEqual(lhs, rhs *LoadBalancerIngress) bool { - if lhs.IP != rhs.IP { - return false - } - if lhs.Hostname != rhs.Hostname { - return false - } - return true -} - -// TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusDeepCopy(lb *LoadBalancerStatus) *LoadBalancerStatus { - c := &LoadBalancerStatus{} - c.Ingress = make([]LoadBalancerIngress, len(lb.Ingress)) - for i := range lb.Ingress { - c.Ingress[i] = lb.Ingress[i] - } - return c -} - -// GetAccessModesAsString returns a string representation of an array of access modes. -// modes, when present, are always in the same order: RWO,ROX,RWX. 
-func GetAccessModesAsString(modes []PersistentVolumeAccessMode) string { - modes = removeDuplicateAccessModes(modes) - modesStr := []string{} - if containsAccessMode(modes, ReadWriteOnce) { - modesStr = append(modesStr, "RWO") - } - if containsAccessMode(modes, ReadOnlyMany) { - modesStr = append(modesStr, "ROX") - } - if containsAccessMode(modes, ReadWriteMany) { - modesStr = append(modesStr, "RWX") - } - return strings.Join(modesStr, ",") -} - -// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString -func GetAccessModesFromString(modes string) []PersistentVolumeAccessMode { - strmodes := strings.Split(modes, ",") - accessModes := []PersistentVolumeAccessMode{} - for _, s := range strmodes { - s = strings.Trim(s, " ") - switch { - case s == "RWO": - accessModes = append(accessModes, ReadWriteOnce) - case s == "ROX": - accessModes = append(accessModes, ReadOnlyMany) - case s == "RWX": - accessModes = append(accessModes, ReadWriteMany) - } - } - return accessModes -} - -// removeDuplicateAccessModes returns an array of access modes without any duplicates -func removeDuplicateAccessModes(modes []PersistentVolumeAccessMode) []PersistentVolumeAccessMode { - accessModes := []PersistentVolumeAccessMode{} - for _, m := range modes { - if !containsAccessMode(accessModes, m) { - accessModes = append(accessModes, m) - } - } - return accessModes -} - -func containsAccessMode(modes []PersistentVolumeAccessMode, mode PersistentVolumeAccessMode) bool { - for _, m := range modes { - if m == mode { - return true - } - } - return false -} - -// ParseRFC3339 parses an RFC3339 date in either RFC3339Nano or RFC3339 format. -func ParseRFC3339(s string, nowFn func() metav1.Time) (metav1.Time, error) { - if t, timeErr := time.Parse(time.RFC3339Nano, s); timeErr == nil { - return metav1.Time{Time: t}, nil - } - t, err := time.Parse(time.RFC3339, s) - if err != nil { - return metav1.Time{}, err - } - return metav1.Time{Time: t}, nil -} - -// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements -// labels.Selector. -func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.Selector, error) { - if len(nsm) == 0 { - return labels.Nothing(), nil - } - selector := labels.NewSelector() - for _, expr := range nsm { - var op selection.Operator - switch expr.Operator { - case NodeSelectorOpIn: - op = selection.In - case NodeSelectorOpNotIn: - op = selection.NotIn - case NodeSelectorOpExists: - op = selection.Exists - case NodeSelectorOpDoesNotExist: - op = selection.DoesNotExist - case NodeSelectorOpGt: - op = selection.GreaterThan - case NodeSelectorOpLt: - op = selection.LessThan - default: - return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) - } - r, err := labels.NewRequirement(expr.Key, op, expr.Values) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - } - return selector, nil -} - -const ( - // TolerationsAnnotationKey represents the key of tolerations data (json serialized) - // in the Annotations of a Pod. - TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" - - // TaintsAnnotationKey represents the key of taints data (json serialized) - // in the Annotations of a Node. - TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" - - // SeccompPodAnnotationKey represents the key of a seccomp profile applied - // to all containers of a pod. 
- SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" - - // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied - // to one container of a pod. - SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" - - // CreatedByAnnotation represents the key used to store the spec(json) - // used to create the resource. - CreatedByAnnotation = "kubernetes.io/created-by" - - // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) - // in the Annotations of a Node. - PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" - - // SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure - // container of a pod. The annotation value is a comma separated list of sysctl_name=value - // key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by - // the kubelet. Pods with other sysctls will fail to launch. - SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls" - - // UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure - // container of a pod. The annotation value is a comma separated list of sysctl_name=value - // key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly - // namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use - // is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet - // will fail to launch. - UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls" - - // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache - // an object (e.g. secret, config map) before fetching it again from apiserver. - // This annotation can be attached to node. - ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" - - // AffinityAnnotationKey represents the key of affinity data (json serialized) - // in the Annotations of a Pod. - // TODO: remove when alpha support for affinity is removed - AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" -) - -// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations -// and converts it to the []Toleration type in api. -func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]Toleration, error) { - var tolerations []Toleration - if len(annotations) > 0 && annotations[TolerationsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[TolerationsAnnotationKey]), &tolerations) - if err != nil { - return tolerations, err - } - } - return tolerations, nil -} - -// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. -// Returns true if something was updated, false otherwise. 
-func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) { - podTolerations := pod.Spec.Tolerations - - var newTolerations []Toleration - updated := false - for i := range podTolerations { - if toleration.MatchToleration(&podTolerations[i]) { - if Semantic.DeepEqual(toleration, podTolerations[i]) { - return false, nil - } - newTolerations = append(newTolerations, *toleration) - updated = true - continue - } - - newTolerations = append(newTolerations, podTolerations[i]) - } - - if !updated { - newTolerations = append(newTolerations, *toleration) - } - - pod.Spec.Tolerations = newTolerations - return true, nil -} - -// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by , -// if the two tolerations have same combination, regard as they match. -// TODO: uniqueness check for tolerations in api validations. -func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { - return t.Key == tolerationToMatch.Key && - t.Effect == tolerationToMatch.Effect && - t.Operator == tolerationToMatch.Operator && - t.Value == tolerationToMatch.Value -} - -// TolerationToleratesTaint checks if the toleration tolerates the taint. -func TolerationToleratesTaint(toleration *Toleration, taint *Taint) bool { - if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect { - return false - } - - if toleration.Key != taint.Key { - return false - } - // TODO: Use proper defaulting when Toleration becomes a field of PodSpec - if (len(toleration.Operator) == 0 || toleration.Operator == TolerationOpEqual) && toleration.Value == taint.Value { - return true - } - if toleration.Operator == TolerationOpExists { - return true - } - return false -} - -// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations. -func TaintToleratedByTolerations(taint *Taint, tolerations []Toleration) bool { - tolerated := false - for i := range tolerations { - if TolerationToleratesTaint(&tolerations[i], taint) { - tolerated = true - break - } - } - return tolerated -} - -// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, -// if the two taints have same key:effect, regard as they match. -func (t *Taint) MatchTaint(taintToMatch Taint) bool { - return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect -} - -// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. -func (t *Taint) ToString() string { - if len(t.Value) == 0 { - return fmt.Sprintf("%v:%v", t.Key, t.Effect) - } - return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) -} - -// GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations -// and converts it to the []Taint type in api. -func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]Taint, error) { - var taints []Taint - if len(annotations) > 0 && annotations[TaintsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[TaintsAnnotationKey]), &taints) - if err != nil { - return []Taint{}, err - } - } - return taints, nil -} - -// SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls -// and a slice of unsafe Sysctls. This is only a convenience wrapper around -// SysctlsFromPodAnnotation. 
-func SysctlsFromPodAnnotations(a map[string]string) ([]Sysctl, []Sysctl, error) { - safe, err := SysctlsFromPodAnnotation(a[SysctlsPodAnnotationKey]) - if err != nil { - return nil, nil, err - } - unsafe, err := SysctlsFromPodAnnotation(a[UnsafeSysctlsPodAnnotationKey]) - if err != nil { - return nil, nil, err - } - - return safe, unsafe, nil -} - -// SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls. -func SysctlsFromPodAnnotation(annotation string) ([]Sysctl, error) { - if len(annotation) == 0 { - return nil, nil - } - - kvs := strings.Split(annotation, ",") - sysctls := make([]Sysctl, len(kvs)) - for i, kv := range kvs { - cs := strings.Split(kv, "=") - if len(cs) != 2 || len(cs[0]) == 0 { - return nil, fmt.Errorf("sysctl %q not of the format sysctl_name=value", kv) - } - sysctls[i].Name = cs[0] - sysctls[i].Value = cs[1] - } - return sysctls, nil -} - -// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. -func PodAnnotationsFromSysctls(sysctls []Sysctl) string { - if len(sysctls) == 0 { - return "" - } - - kvs := make([]string, len(sysctls)) - for i := range sysctls { - kvs[i] = fmt.Sprintf("%s=%s", sysctls[i].Name, sysctls[i].Value) - } - return strings.Join(kvs, ",") -} - -// GetAffinityFromPodAnnotations gets the json serialized affinity data from Pod.Annotations -// and converts it to the Affinity type in api. -// TODO: remove when alpha support for affinity is removed -func GetAffinityFromPodAnnotations(annotations map[string]string) (*Affinity, error) { - if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" { - var affinity Affinity - err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity) - if err != nil { - return nil, err - } - return &affinity, nil - } - return nil, nil -} - -// GetPersistentVolumeClass returns StorageClassName. -func GetPersistentVolumeClass(volume *PersistentVolume) string { - // Use beta annotation first - if class, found := volume.Annotations[BetaStorageClassAnnotation]; found { - return class - } - - return volume.Spec.StorageClassName -} - -// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was -// requested, it returns "". -func GetPersistentVolumeClaimClass(claim *PersistentVolumeClaim) string { - // Use beta annotation first - if class, found := claim.Annotations[BetaStorageClassAnnotation]; found { - return class - } - - if claim.Spec.StorageClassName != nil { - return *claim.Spec.StorageClassName - } - - return "" -} - -// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. 
-func PersistentVolumeClaimHasClass(claim *PersistentVolumeClaim) bool { - // Use beta annotation first - if _, found := claim.Annotations[BetaStorageClassAnnotation]; found { - return true - } - - if claim.Spec.StorageClassName != nil { - return true - } - - return false -} diff --git a/vendor/k8s.io/client-go/pkg/api/install/OWNERS b/vendor/k8s.io/client-go/pkg/api/install/OWNERS deleted file mode 100755 index 01d6b3702..000000000 --- a/vendor/k8s.io/client-go/pkg/api/install/OWNERS +++ /dev/null @@ -1,11 +0,0 @@ -reviewers: -- lavalamp -- smarterclayton -- deads2k -- caesarxuchao -- liggitt -- nikhiljindal -- dims -- krousey -- david-mcmahon -- feihujiang diff --git a/vendor/k8s.io/client-go/pkg/api/install/install.go b/vendor/k8s.io/client-go/pkg/api/install/install.go deleted file mode 100644 index 1b9553b70..000000000 --- a/vendor/k8s.io/client-go/pkg/api/install/install.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the v1 monolithic api, making it available as an -// option to all of the API encoding/decoding machinery. -package install - -import ( - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/api/v1" -) - -func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: api.GroupName, - VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version}, - ImportPrefix: "k8s.io/client-go/pkg/api", - AddInternalObjectsToScheme: api.AddToScheme, - RootScopedKinds: sets.NewString( - "Node", - "Namespace", - "PersistentVolume", - "ComponentStatus", - ), - IgnoredKinds: sets.NewString( - "ListOptions", - "DeleteOptions", - "Status", - "PodLogOptions", - "PodExecOptions", - "PodAttachOptions", - "PodPortForwardOptions", - "PodProxyOptions", - "NodeProxyOptions", - "ServiceProxyOptions", - "ThirdPartyResource", - "ThirdPartyResourceData", - "ThirdPartyResourceList", - ), - }, - announced.VersionToSchemeFunc{ - v1.SchemeGroupVersion.Version: v1.AddToScheme, - }, - ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/client-go/pkg/api/json.go b/vendor/k8s.io/client-go/pkg/api/json.go deleted file mode 100644 index 3a6e04c18..000000000 --- a/vendor/k8s.io/client-go/pkg/api/json.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import "encoding/json" - -// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations -// to prevent anyone from marshaling these internal structs. - -var _ = json.Marshaler(&AvoidPods{}) -var _ = json.Unmarshaler(&AvoidPods{}) - -func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") } -func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } diff --git a/vendor/k8s.io/client-go/pkg/api/ref.go b/vendor/k8s.io/client-go/pkg/api/ref.go deleted file mode 100644 index 370cf5513..000000000 --- a/vendor/k8s.io/client-go/pkg/api/ref.go +++ /dev/null @@ -1,132 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "errors" - "fmt" - "net/url" - "strings" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - // Errors that could be returned by GetReference. - ErrNilObject = errors.New("can't reference a nil object") - ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") -) - -// GetReference returns an ObjectReference which refers to the given -// object, or an error if the object doesn't follow the conventions -// that would allow this. -// TODO: should take a meta.Interface see http://issue.k8s.io/7127 -func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, error) { - if obj == nil { - return nil, ErrNilObject - } - if ref, ok := obj.(*ObjectReference); ok { - // Don't make a reference to a reference. 
- return ref, nil - } - - gvk := obj.GetObjectKind().GroupVersionKind() - - // if the object referenced is actually persisted, we can just get kind from meta - // if we are building an object reference to something not yet persisted, we should fallback to scheme - kind := gvk.Kind - if len(kind) == 0 { - // TODO: this is wrong - gvks, _, err := scheme.ObjectKinds(obj) - if err != nil { - return nil, err - } - kind = gvks[0].Kind - } - - // An object that implements only List has enough metadata to build a reference - var listMeta meta.List - objectMeta, err := meta.Accessor(obj) - if err != nil { - listMeta, err = meta.ListAccessor(obj) - if err != nil { - return nil, err - } - } else { - listMeta = objectMeta - } - - // if the object referenced is actually persisted, we can also get version from meta - version := gvk.GroupVersion().String() - if len(version) == 0 { - selfLink := listMeta.GetSelfLink() - if len(selfLink) == 0 { - return nil, ErrNoSelfLink - } - selfLinkUrl, err := url.Parse(selfLink) - if err != nil { - return nil, err - } - // example paths: ///* - parts := strings.Split(selfLinkUrl.Path, "/") - if len(parts) < 3 { - return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) - } - version = parts[2] - } - - // only has list metadata - if objectMeta == nil { - return &ObjectReference{ - Kind: kind, - APIVersion: version, - ResourceVersion: listMeta.GetResourceVersion(), - }, nil - } - - return &ObjectReference{ - Kind: kind, - APIVersion: version, - Name: objectMeta.GetName(), - Namespace: objectMeta.GetNamespace(), - UID: objectMeta.GetUID(), - ResourceVersion: objectMeta.GetResourceVersion(), - }, nil -} - -// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath. -func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*ObjectReference, error) { - ref, err := GetReference(scheme, obj) - if err != nil { - return nil, err - } - ref.FieldPath = fieldPath - return ref, nil -} - -// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that -// intend only to get a reference to that object. This simplifies the event recording interface. -func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} -func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { - return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} - -func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/client-go/pkg/api/register.go b/vendor/k8s.io/client-go/pkg/api/register.go deleted file mode 100644 index bd842b182..000000000 --- a/vendor/k8s.io/client-go/pkg/api/register.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
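When the referenced object carries no GroupVersionKind, the removed GetReference falls back to parsing the API version out of the object's selfLink, taking the third path segment. A small sketch of just that fallback; the selfLink in main is a hypothetical example, not taken from the patch:

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    // versionFromSelfLink mirrors the selfLink fallback in the removed GetReference:
    // for a path like /api/v1/namespaces/default/pods/foo the segments are
    // "", "api", "v1", ... and the version is the third one.
    func versionFromSelfLink(selfLink string) (string, error) {
        u, err := url.Parse(selfLink)
        if err != nil {
            return "", err
        }
        parts := strings.Split(u.Path, "/")
        if len(parts) < 3 {
            return "", fmt.Errorf("unexpected self link format: %q", selfLink)
        }
        return parts[2], nil
    }

    func main() {
        v, err := versionFromSelfLink("/api/v1/namespaces/default/pods/foo")
        fmt.Println(v, err) // v1 <nil>
    }
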
-*/ - -package api - -import ( - "os" - - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" -) - -// GroupFactoryRegistry is the APIGroupFactoryRegistry (overlaps a bit with Registry, see comments in package for details) -var GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry) - -// Registry is an instance of an API registry. This is an interim step to start removing the idea of a global -// API registry. -var Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS")) - -// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. -// NOTE: If you are copying this file to start a new api group, STOP! Copy the -// extensions group instead. This Scheme is special and should appear ONLY in -// the api group, unless you really know what you're doing. -// TODO(lavalamp): make the above error impossible. -var Scheme = runtime.NewScheme() - -// Codecs provides access to encoding and decoding for the scheme -var Codecs = serializer.NewCodecFactory(Scheme) - -// GroupName is the group name use in this package -const GroupName = "" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Unversioned is group version for unversioned API objects -// TODO: this should be v1 probably -var Unversioned = schema.GroupVersion{Group: "", Version: "v1"} - -// ParameterCodec handles versioning of objects that are converted to query parameters. -var ParameterCodec = runtime.NewParameterCodec(Scheme) - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { - return err - } - scheme.AddKnownTypes(SchemeGroupVersion, - &Pod{}, - &PodList{}, - &PodStatusResult{}, - &PodTemplate{}, - &PodTemplateList{}, - &ReplicationControllerList{}, - &ReplicationController{}, - &ServiceList{}, - &Service{}, - &ServiceProxyOptions{}, - &NodeList{}, - &Node{}, - &NodeProxyOptions{}, - &Endpoints{}, - &EndpointsList{}, - &Binding{}, - &Event{}, - &EventList{}, - &List{}, - &LimitRange{}, - &LimitRangeList{}, - &ResourceQuota{}, - &ResourceQuotaList{}, - &Namespace{}, - &NamespaceList{}, - &ServiceAccount{}, - &ServiceAccountList{}, - &Secret{}, - &SecretList{}, - &PersistentVolume{}, - &PersistentVolumeList{}, - &PersistentVolumeClaim{}, - &PersistentVolumeClaimList{}, - &PodAttachOptions{}, - &PodLogOptions{}, - &PodExecOptions{}, - &PodPortForwardOptions{}, - &PodProxyOptions{}, - &ComponentStatus{}, - &ComponentStatusList{}, - &SerializedReference{}, - &RangeAllocation{}, - &ConfigMap{}, - &ConfigMapList{}, - ) - - // Register Unversioned types under their own special group - scheme.AddUnversionedTypes(Unversioned, 
- &metav1.Status{}, - &metav1.APIVersions{}, - &metav1.APIGroupList{}, - &metav1.APIGroup{}, - &metav1.APIResourceList{}, - ) - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/api/resource_helpers.go b/vendor/k8s.io/client-go/pkg/api/resource_helpers.go deleted file mode 100644 index 88d0f80d7..000000000 --- a/vendor/k8s.io/client-go/pkg/api/resource_helpers.go +++ /dev/null @@ -1,229 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "time" - - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Returns string version of ResourceName. -func (self ResourceName) String() string { - return string(self) -} - -// Returns the CPU limit if specified. -func (self *ResourceList) Cpu() *resource.Quantity { - if val, ok := (*self)[ResourceCPU]; ok { - return &val - } - return &resource.Quantity{Format: resource.DecimalSI} -} - -// Returns the Memory limit if specified. -func (self *ResourceList) Memory() *resource.Quantity { - if val, ok := (*self)[ResourceMemory]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} -} - -func (self *ResourceList) Pods() *resource.Quantity { - if val, ok := (*self)[ResourcePods]; ok { - return &val - } - return &resource.Quantity{} -} - -func (self *ResourceList) NvidiaGPU() *resource.Quantity { - if val, ok := (*self)[ResourceNvidiaGPU]; ok { - return &val - } - return &resource.Quantity{} -} - -func GetContainerStatus(statuses []ContainerStatus, name string) (ContainerStatus, bool) { - for i := range statuses { - if statuses[i].Name == name { - return statuses[i], true - } - } - return ContainerStatus{}, false -} - -func GetExistingContainerStatus(statuses []ContainerStatus, name string) ContainerStatus { - for i := range statuses { - if statuses[i].Name == name { - return statuses[i] - } - } - return ContainerStatus{} -} - -// IsPodAvailable returns true if a pod is available; false otherwise. -// Precondition for an available pod is that it must be ready. On top -// of that, there are two cases when a pod can be considered available: -// 1. minReadySeconds == 0, or -// 2. LastTransitionTime (is set) + minReadySeconds < current time -func IsPodAvailable(pod *Pod, minReadySeconds int32, now metav1.Time) bool { - if !IsPodReady(pod) { - return false - } - - c := GetPodReadyCondition(pod.Status) - minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second - if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { - return true - } - return false -} - -// IsPodReady returns true if a pod is ready; false otherwise. -func IsPodReady(pod *Pod) bool { - return IsPodReadyConditionTrue(pod.Status) -} - -// IsPodReady retruns true if a pod is ready; false otherwise. 
-func IsPodReadyConditionTrue(status PodStatus) bool { - condition := GetPodReadyCondition(status) - return condition != nil && condition.Status == ConditionTrue -} - -// Extracts the pod ready condition from the given status and returns that. -// Returns nil if the condition is not present. -func GetPodReadyCondition(status PodStatus) *PodCondition { - _, condition := GetPodCondition(&status, PodReady) - return condition -} - -// GetPodCondition extracts the provided condition from the given status and returns that. -// Returns nil and -1 if the condition is not present, and the index of the located condition. -func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) { - if status == nil { - return -1, nil - } - for i := range status.Conditions { - if status.Conditions[i].Type == conditionType { - return i, &status.Conditions[i] - } - } - return -1, nil -} - -// GetNodeCondition extracts the provided condition from the given status and returns that. -// Returns nil and -1 if the condition is not present, and the index of the located condition. -func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, *NodeCondition) { - if status == nil { - return -1, nil - } - for i := range status.Conditions { - if status.Conditions[i].Type == conditionType { - return i, &status.Conditions[i] - } - } - return -1, nil -} - -// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the -// status has changed. -// Returns true if pod condition has changed or has been added. -func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool { - condition.LastTransitionTime = metav1.Now() - // Try to find this pod condition. - conditionIndex, oldCondition := GetPodCondition(status, condition.Type) - - if oldCondition == nil { - // We are adding new pod condition. - status.Conditions = append(status.Conditions, *condition) - return true - } else { - // We are updating an existing condition, so we need to check if it has changed. - if condition.Status == oldCondition.Status { - condition.LastTransitionTime = oldCondition.LastTransitionTime - } - - isEqual := condition.Status == oldCondition.Status && - condition.Reason == oldCondition.Reason && - condition.Message == oldCondition.Message && - condition.LastProbeTime.Equal(oldCondition.LastProbeTime) && - condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime) - - status.Conditions[conditionIndex] = *condition - // Return true if one of the fields have changed. - return !isEqual - } -} - -// IsNodeReady returns true if a node is ready; false otherwise. -func IsNodeReady(node *Node) bool { - for _, c := range node.Status.Conditions { - if c.Type == NodeReady { - return c.Status == ConditionTrue - } - } - return false -} - -// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all -// containers of the pod. 
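UpdatePodCondition, removed above, only reports a change when Status, Reason, Message, or one of the timestamps differs, and it keeps the old LastTransitionTime when the Status itself has not flipped. A stripped-down sketch of that decision with plain types (time.Time in place of metav1.Time, LastProbeTime omitted for brevity):

    package main

    import (
        "fmt"
        "time"
    )

    // PodCondition is a simplified stand-in for the removed internal api type.
    type PodCondition struct {
        Type, Status, Reason, Message string
        LastTransitionTime            time.Time
    }

    // updateCondition mirrors the removed UpdatePodCondition: it returns true when
    // the condition was added or any of its compared fields actually changed.
    func updateCondition(conditions []PodCondition, cond PodCondition) ([]PodCondition, bool) {
        cond.LastTransitionTime = time.Now()
        for i, old := range conditions {
            if old.Type != cond.Type {
                continue
            }
            if cond.Status == old.Status {
                // Status did not flip, so the transition time is preserved.
                cond.LastTransitionTime = old.LastTransitionTime
            }
            changed := cond.Status != old.Status ||
                cond.Reason != old.Reason ||
                cond.Message != old.Message ||
                !cond.LastTransitionTime.Equal(old.LastTransitionTime)
            conditions[i] = cond
            return conditions, changed
        }
        // Condition was not present yet: append it and report a change.
        return append(conditions, cond), true
    }

    func main() {
        conds, changed := updateCondition(nil, PodCondition{Type: "Ready", Status: "True"})
        fmt.Println(changed) // true
        _, changed = updateCondition(conds, PodCondition{Type: "Ready", Status: "True"})
        fmt.Println(changed) // false
    }
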
-func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, limits map[ResourceName]resource.Quantity, err error) { - reqs, limits = map[ResourceName]resource.Quantity{}, map[ResourceName]resource.Quantity{} - for _, container := range pod.Spec.Containers { - for name, quantity := range container.Resources.Requests { - if value, ok := reqs[name]; !ok { - reqs[name] = *quantity.Copy() - } else { - value.Add(quantity) - reqs[name] = value - } - } - for name, quantity := range container.Resources.Limits { - if value, ok := limits[name]; !ok { - limits[name] = *quantity.Copy() - } else { - value.Add(quantity) - limits[name] = value - } - } - } - // init containers define the minimum of any resource - for _, container := range pod.Spec.InitContainers { - for name, quantity := range container.Resources.Requests { - value, ok := reqs[name] - if !ok { - reqs[name] = *quantity.Copy() - continue - } - if quantity.Cmp(value) > 0 { - reqs[name] = *quantity.Copy() - } - } - for name, quantity := range container.Resources.Limits { - value, ok := limits[name] - if !ok { - limits[name] = *quantity.Copy() - continue - } - if quantity.Cmp(value) > 0 { - limits[name] = *quantity.Copy() - } - } - } - return -} diff --git a/vendor/k8s.io/client-go/pkg/api/types.go b/vendor/k8s.io/client-go/pkg/api/types.go deleted file mode 100644 index 4f4236950..000000000 --- a/vendor/k8s.io/client-go/pkg/api/types.go +++ /dev/null @@ -1,3822 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// Common string formats -// --------------------- -// Many fields in this API have formatting requirements. The commonly used -// formats are defined here. -// -// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier" -// in the C language. This is captured by the following regex: -// [A-Za-z_][A-Za-z0-9_]* -// This defines the format, but not the length restriction, which should be -// specified at the definition of any field of this type. -// -// DNS_LABEL: This is a string, no more than 63 characters long, that conforms -// to the definition of a "label" in RFCs 1035 and 1123. This is captured -// by the following regex: -// [a-z0-9]([-a-z0-9]*[a-z0-9])? -// -// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms -// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured -// by the following regex: -// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* -// or more simply: -// DNS_LABEL(\.DNS_LABEL)* -// -// IANA_SVC_NAME: This is a string, no more than 15 characters long, that -// conforms to the definition of IANA service name in RFC 6335. 
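PodRequestsAndLimits, removed above, sums each resource over the regular containers and then raises the result to the maximum demanded by any single init container (init containers run one at a time, so only the largest one matters). A sketch of that aggregation using plain int64 milli-units in place of resource.Quantity:

    package main

    import "fmt"

    // podRequests mirrors the aggregation in the removed PodRequestsAndLimits,
    // with resources modeled as plain int64 values keyed by name.
    func podRequests(containers, initContainers []map[string]int64) map[string]int64 {
        reqs := map[string]int64{}
        // Regular containers run concurrently, so their requests add up.
        for _, c := range containers {
            for name, v := range c {
                reqs[name] += v
            }
        }
        // Init containers define a per-resource floor: take the max of any one of them.
        for _, c := range initContainers {
            for name, v := range c {
                if v > reqs[name] {
                    reqs[name] = v
                }
            }
        }
        return reqs
    }

    func main() {
        containers := []map[string]int64{{"cpu": 250, "memory": 64}, {"cpu": 500, "memory": 128}}
        init := []map[string]int64{{"cpu": 1000}}
        fmt.Println(podRequests(containers, init)) // map[cpu:1000 memory:192]
    }
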
-// It must contains at least one letter [a-z] and it must contains only [a-z0-9-]. -// Hypens ('-') cannot be leading or trailing character of the string -// and cannot be adjacent to other hyphens. - -// ObjectMeta is metadata that all persisted resources must have, which includes all objects -// users must create. -// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. -type ObjectMeta struct { - // Name is unique within a namespace. Name is required when creating resources, although - // some resources may allow a client to request the generation of an appropriate name - // automatically. Name is primarily intended for creation idempotence and configuration - // definition. - // +optional - Name string - - // GenerateName indicates that the name should be made unique by the server prior to persisting - // it. A non-empty value for the field indicates the name will be made unique (and the name - // returned to the client will be different than the name passed). The value of this field will - // be combined with a unique suffix on the server if the Name field has not been provided. - // The provided value must be valid within the rules for Name, and may be truncated by the length - // of the suffix required to make the value unique on the server. - // - // If this field is specified, and Name is not present, the server will NOT return a 409 if the - // generated name exists - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). - // +optional - GenerateName string - - // Namespace defines the space within which name must be unique. An empty namespace is - // equivalent to the "default" namespace, but "default" is the canonical representation. - // Not all objects are required to be scoped to a namespace - the value of this field for - // those objects will be empty. - // +optional - Namespace string - - // SelfLink is a URL representing this object. - // +optional - SelfLink string - - // UID is the unique in time and space value for this object. It is typically generated by - // the server on successful creation of a resource and is not allowed to change on PUT - // operations. - // +optional - UID types.UID - - // An opaque value that represents the version of this resource. May be used for optimistic - // concurrency, change detection, and the watch operation on a resource or set of resources. - // Clients must treat these values as opaque and values may only be valid for a particular - // resource or set of resources. Only servers will generate resource versions. - // +optional - ResourceVersion string - - // A sequence number representing a specific generation of the desired state. - // Populated by the system. Read-only. - // +optional - Generation int64 - - // CreationTimestamp is a timestamp representing the server time when this object was - // created. It is not guaranteed to be set in happens-before order across separate operations. - // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // +optional - CreationTimestamp metav1.Time - - // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This - // field is set by the server when a graceful deletion is requested by the user, and is not - // directly settable by a client. 
The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field. Once set, - // this value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard - // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the - // API. In the presence of network partitions, this object may still exist after this - // timestamp, until an administrator or automated process can determine the resource is - // fully terminated. - // If not set, graceful deletion of the object has not been requested. - // - // Populated by the system when a graceful deletion is requested. - // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - DeletionTimestamp *metav1.Time - - // DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion - // was requested. Represents the most recent grace period, and may only be shortened once set. - // +optional - DeletionGracePeriodSeconds *int64 - - // Labels are key value pairs that may be used to scope and select individual resources. - // Label keys are of the form: - // label-key ::= prefixed-name | name - // prefixed-name ::= prefix '/' name - // prefix ::= DNS_SUBDOMAIN - // name ::= DNS_LABEL - // The prefix is optional. If the prefix is not specified, the key is assumed to be private - // to the user. Other system components that wish to use labels must specify a prefix. The - // "kubernetes.io/" prefix is reserved for use by kubernetes components. - // +optional - Labels map[string]string - - // Annotations are unstructured key value data stored with a resource that may be set by - // external tooling. They are not queryable and should be preserved when modifying - // objects. Annotation keys have the same formatting restrictions as Label keys. See the - // comments on Labels for details. - // +optional - Annotations map[string]string - - // List of objects depended by this object. If ALL objects in the list have - // been deleted, this object will be garbage collected. If this object is managed by a controller, - // then an entry in this list will point to this controller, with the controller field set to true. - // There cannot be more than one managing controller. - // +optional - OwnerReferences []metav1.OwnerReference - - // Must be empty before the object is deleted from the registry. Each entry - // is an identifier for the responsible component that will remove the entry - // from the list. If the deletionTimestamp of the object is non-nil, entries - // in this list can only be removed. - // +optional - Finalizers []string - - // The name of the cluster which the object belongs to. - // This is used to distinguish resources with same name and namespace in different clusters. - // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. 
- // +optional - ClusterName string -} - -const ( - // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients - NamespaceDefault string = "default" - // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces - NamespaceAll string = "" - // NamespaceNone is the argument for a context when there is no namespace. - NamespaceNone string = "" - // NamespaceSystem is the system namespace where we place system components. - NamespaceSystem string = "kube-system" - // NamespacePublic is the namespace where we place public info (ConfigMaps) - NamespacePublic string = "kube-public" - // TerminationMessagePathDefault means the default path to capture the application termination message running in a container - TerminationMessagePathDefault string = "/dev/termination-log" -) - -// Volume represents a named volume in a pod that may be accessed by any containers in the pod. -type Volume struct { - // Required: This must be a DNS_LABEL. Each volume in a pod must have - // a unique name. - Name string - // The VolumeSource represents the location and type of a volume to mount. - // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. - // This implied behavior is deprecated and will be removed in a future version. - // +optional - VolumeSource -} - -// VolumeSource represents the source location of a volume to mount. -// Only one of its members may be specified. -type VolumeSource struct { - // HostPath represents file or directory on the host machine that is - // directly exposed to the container. This is generally used for system - // agents or other privileged things that are allowed to see the host - // machine. Most containers will NOT need this. - // --- - // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - // mount host directories as read/write. - // +optional - HostPath *HostPathVolumeSource - // EmptyDir represents a temporary directory that shares a pod's lifetime. - // +optional - EmptyDir *EmptyDirVolumeSource - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource - // GitRepo represents a git repository at a particular revision. - // +optional - GitRepo *GitRepoVolumeSource - // Secret represents a secret that should populate this volume. - // +optional - Secret *SecretVolumeSource - // NFS represents an NFS mount on the host that shares a pod's lifetime - // +optional - NFS *NFSVolumeSource - // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. 
- // +optional - ISCSI *ISCSIVolumeSource - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime - // +optional - Glusterfs *GlusterfsVolumeSource - // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace - // +optional - PersistentVolumeClaim *PersistentVolumeClaimVolumeSource - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime - // +optional - RBD *RBDVolumeSource - - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - Quobyte *QuobyteVolumeSource - - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. - // +optional - FlexVolume *FlexVolumeSource - - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // +optional - Cinder *CinderVolumeSource - - // CephFS represents a Cephfs mount on the host that shares a pod's lifetime - // +optional - CephFS *CephFSVolumeSource - - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - // +optional - Flocker *FlockerVolumeSource - - // DownwardAPI represents metadata about the pod that should populate this volume - // +optional - DownwardAPI *DownwardAPIVolumeSource - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - FC *FCVolumeSource - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - // +optional - AzureFile *AzureFileVolumeSource - // ConfigMap represents a configMap that should populate this volume - // +optional - ConfigMap *ConfigMapVolumeSource - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - // +optional - AzureDisk *AzureDiskVolumeSource - // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource - // Items for all in one resources secrets, configmaps, and downward API - Projected *ProjectedVolumeSource - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine - // +optional - PortworxVolume *PortworxVolumeSource - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - ScaleIO *ScaleIOVolumeSource -} - -// Similar to VolumeSource but meant for the administrator who creates PVs. -// Exactly one of its members must be set. -type PersistentVolumeSource struct { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource - // HostPath represents a directory on the host. - // Provisioned by a developer or tester. - // This is useful for single-node development and testing only! - // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
- // +optional - HostPath *HostPathVolumeSource - // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod - // +optional - Glusterfs *GlusterfsVolumeSource - // NFS represents an NFS mount on the host that shares a pod's lifetime - // +optional - NFS *NFSVolumeSource - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime - // +optional - RBD *RBDVolumeSource - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - Quobyte *QuobyteVolumeSource - // ISCSIVolumeSource represents an ISCSI resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - ISCSI *ISCSIVolumeSource - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. - // +optional - FlexVolume *FlexVolumeSource - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // +optional - Cinder *CinderVolumeSource - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - // +optional - CephFS *CephFSVolumeSource - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - FC *FCVolumeSource - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - // +optional - Flocker *FlockerVolumeSource - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - // +optional - AzureFile *AzureFileVolumeSource - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - // +optional - AzureDisk *AzureDiskVolumeSource - // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine - // +optional - PortworxVolume *PortworxVolumeSource - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - ScaleIO *ScaleIOVolumeSource -} - -type PersistentVolumeClaimVolumeSource struct { - // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume - ClaimName string - // Optional: Defaults to false (read/write). ReadOnly here - // will force the ReadOnly setting in VolumeMounts - // +optional - ReadOnly bool -} - -const ( - // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. - // It's currently still used and will be held for backwards compatibility - BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" -) - -// +genclient=true -// +nonNamespaced=true - -type PersistentVolume struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - //Spec defines a persistent volume owned by the cluster - // +optional - Spec PersistentVolumeSpec - - // Status represents the current information about persistent volume. 
- // +optional - Status PersistentVolumeStatus -} - -type PersistentVolumeSpec struct { - // Resources represents the actual resources of the volume - Capacity ResourceList - // Source represents the location and type of a volume to mount. - PersistentVolumeSource - // AccessModes contains all ways the volume can be mounted - // +optional - AccessModes []PersistentVolumeAccessMode - // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. - // ClaimRef is expected to be non-nil when bound. - // claim.VolumeName is the authoritative bind between PV and PVC. - // When set to non-nil value, PVC.Spec.Selector of the referenced PVC is - // ignored, i.e. labels of this PV do not need to match PVC selector. - // +optional - ClaimRef *ObjectReference - // Optional: what happens to a persistent volume when released from its claim. - // +optional - PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy - // Name of StorageClass to which this persistent volume belongs. Empty value - // means that this volume does not belong to any StorageClass. - // +optional - StorageClassName string -} - -// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes -type PersistentVolumeReclaimPolicy string - -const ( - // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. - // The volume plugin must support Recycling. - PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" - // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. - // The volume plugin must support Deletion. - PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" - // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. - // The default policy is Retain. - PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" -) - -type PersistentVolumeStatus struct { - // Phase indicates if a volume is available, bound to a claim, or released by a claim - // +optional - Phase PersistentVolumePhase - // A human-readable message indicating details about why the volume is in this state. - // +optional - Message string - // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI - // +optional - Reason string -} - -type PersistentVolumeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - Items []PersistentVolume -} - -// +genclient=true - -// PersistentVolumeClaim is a user's request for and claim to a persistent volume -type PersistentVolumeClaim struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the volume requested by a pod author - // +optional - Spec PersistentVolumeClaimSpec - - // Status represents the current information about a claim - // +optional - Status PersistentVolumeClaimStatus -} - -type PersistentVolumeClaimList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - Items []PersistentVolumeClaim -} - -// PersistentVolumeClaimSpec describes the common attributes of storage devices -// and allows a Source for provider-specific attributes -type PersistentVolumeClaimSpec struct { - // Contains the types of access modes required - // +optional - AccessModes []PersistentVolumeAccessMode - // A label query over volumes to consider for binding. 
This selector is - // ignored when VolumeName is set - // +optional - Selector *metav1.LabelSelector - // Resources represents the minimum resources required - // +optional - Resources ResourceRequirements - // VolumeName is the binding reference to the PersistentVolume backing this - // claim. When set to non-empty value Selector is not evaluated - // +optional - VolumeName string - // Name of the StorageClass required by the claim. - // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1 - // +optional - StorageClassName *string -} - -type PersistentVolumeClaimStatus struct { - // Phase represents the current phase of PersistentVolumeClaim - // +optional - Phase PersistentVolumeClaimPhase - // AccessModes contains all ways the volume backing the PVC can be mounted - // +optional - AccessModes []PersistentVolumeAccessMode - // Represents the actual resources of the underlying volume - // +optional - Capacity ResourceList -} - -type PersistentVolumeAccessMode string - -const ( - // can be mounted read/write mode to exactly 1 host - ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" - // can be mounted in read-only mode to many hosts - ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" - // can be mounted in read/write mode to many hosts - ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" -) - -type PersistentVolumePhase string - -const ( - // used for PersistentVolumes that are not available - VolumePending PersistentVolumePhase = "Pending" - // used for PersistentVolumes that are not yet bound - // Available volumes are held by the binder and matched to PersistentVolumeClaims - VolumeAvailable PersistentVolumePhase = "Available" - // used for PersistentVolumes that are bound - VolumeBound PersistentVolumePhase = "Bound" - // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted - // released volumes must be recycled before becoming available again - // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource - VolumeReleased PersistentVolumePhase = "Released" - // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim - VolumeFailed PersistentVolumePhase = "Failed" -) - -type PersistentVolumeClaimPhase string - -const ( - // used for PersistentVolumeClaims that are not yet bound - ClaimPending PersistentVolumeClaimPhase = "Pending" - // used for PersistentVolumeClaims that are bound - ClaimBound PersistentVolumeClaimPhase = "Bound" - // used for PersistentVolumeClaims that lost their underlying - // PersistentVolume. The claim was bound to a PersistentVolume and this - // volume does not exist any longer and all data on it was lost. - ClaimLost PersistentVolumeClaimPhase = "Lost" -) - -// Represents a host path mapped into a pod. -// Host path volumes do not support ownership management or SELinux relabeling. -type HostPathVolumeSource struct { - Path string -} - -// Represents an empty directory for a pod. -// Empty directory volumes support ownership management and SELinux relabeling. -type EmptyDirVolumeSource struct { - // TODO: Longer term we want to represent the selection of underlying - // media more like a scheduling problem - user says what traits they - // need, we give them a backing store that satisfies that. For now - // this will cover the most common needs. - // Optional: what type of storage medium should back this directory. - // The default is "" which means to use the node's default medium. 
- // +optional - Medium StorageMedium -} - -// StorageMedium defines ways that storage can be allocated to a volume. -type StorageMedium string - -const ( - StorageMediumDefault StorageMedium = "" // use whatever the default is for the node - StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) -) - -// Protocol defines network protocols supported for things like container ports. -type Protocol string - -const ( - // ProtocolTCP is the TCP protocol. - ProtocolTCP Protocol = "TCP" - // ProtocolUDP is the UDP protocol. - ProtocolUDP Protocol = "UDP" -) - -// Represents a Persistent Disk resource in Google Compute Engine. -// -// A GCE PD must exist before mounting to a container. The disk must -// also be in the same GCE project and zone as the kubelet. A GCE PD -// can only be mounted as read/write once or read-only many times. GCE -// PDs support ownership management and SELinux relabeling. -type GCEPersistentDiskVolumeSource struct { - // Unique name of the PD resource. Used to identify the disk in GCE - PDName string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Partition on the disk to mount. - // If omitted, kubelet will attempt to mount the device name. - // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - // +optional - Partition int32 - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. -type ISCSIVolumeSource struct { - // Required: iSCSI target portal - // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) - // +optional - TargetPortal string - // Required: target iSCSI Qualified Name - // +optional - IQN string - // Required: iSCSI target lun number - // +optional - Lun int32 - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. - // +optional - ISCSIInterface string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Required: list of iSCSI target portal ips for high availability. - // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) - // +optional - Portals []string -} - -// Represents a Fibre Channel volume. -// Fibre Channel volumes can only be mounted as read/write once. -// Fibre Channel volumes support ownership management and SELinux relabeling. -type FCVolumeSource struct { - // Required: FC target worldwide names (WWNs) - TargetWWNs []string - // Required: FC target lun number - Lun *int32 - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. -type FlexVolumeSource struct { - // Driver is the name of the driver to use for this volume. - Driver string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - // +optional - FSType string - // Optional: SecretRef is reference to the secret object containing - // sensitive information to pass to the plugin scripts. This may be - // empty if no secret object is specified. If the secret object - // contains more than one secret, all secrets are passed to the plugin - // scripts. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: Extra driver options if any. - // +optional - Options map[string]string -} - -// Represents a Persistent Disk resource in AWS. -// -// An AWS EBS disk must exist before mounting to a container. The disk -// must also be in the same AWS zone as the kubelet. An AWS EBS disk -// can only be mounted as read/write once. AWS EBS volumes support -// ownership management and SELinux relabeling. -type AWSElasticBlockStoreVolumeSource struct { - // Unique id of the persistent disk resource. Used to identify the disk in AWS - VolumeID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Partition on the disk to mount. - // If omitted, kubelet will attempt to mount the device name. - // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - // +optional - Partition int32 - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a volume that is populated with the contents of a git repository. -// Git repo volumes do not support ownership management. -// Git repo volumes support SELinux relabeling. -type GitRepoVolumeSource struct { - // Repository URL - Repository string - // Commit hash, this is optional - // +optional - Revision string - // Clone target, this is optional - // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - // git repository. Otherwise, if specified, the volume will contain the git repository in - // the subdirectory with the given name. - // +optional - Directory string - // TODO: Consider credentials here. -} - -// Adapts a Secret into a volume. -// -// The contents of the target Secret's Data field will be presented in a volume -// as files using the keys in the Data field as the file names. -// Secret volumes support ownership management and SELinux relabeling. -type SecretVolumeSource struct { - // Name of the secret in the pod's namespace to use. 
- // +optional - SecretName string - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 - // Specify whether the Secret or its key must be defined - // +optional - Optional *bool -} - -// Adapts a secret into a projected volume. -// -// The contents of the target Secret's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names. -// Note that this is identical to a secret volume source without the default -// mode. -type SecretProjection struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Specify whether the Secret or its key must be defined - // +optional - Optional *bool -} - -// Represents an NFS mount that lasts the lifetime of a pod. -// NFS volumes do not support ownership management or SELinux relabeling. -type NFSVolumeSource struct { - // Server is the hostname or IP address of the NFS server - Server string - - // Path is the exported NFS share - Path string - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the NFS export to be mounted with read-only permissions - // +optional - ReadOnly bool -} - -// Represents a Quobyte mount that lasts the lifetime of a pod. -// Quobyte volumes do not support ownership management or SELinux relabeling. -type QuobyteVolumeSource struct { - // Registry represents a single or multiple Quobyte Registry services - // specified as a string as host:port pair (multiple entries are separated with commas) - // which acts as the central registry for volumes - Registry string - - // Volume is a string that references an already created Quobyte volume by name. - Volume string - - // Defaults to false (read/write). ReadOnly here will force - // the Quobyte to be mounted with read-only permissions - // +optional - ReadOnly bool - - // User to map volume access to - // Defaults to the root user - // +optional - User string - - // Group to map volume access to - // Default is no group - // +optional - Group string -} - -// Represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. 
-type GlusterfsVolumeSource struct { - // Required: EndpointsName is the endpoint name that details Glusterfs topology - EndpointsName string - - // Required: Path is the Glusterfs volume path - Path string - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the Glusterfs to be mounted with read-only permissions - // +optional - ReadOnly bool -} - -// Represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. -type RBDVolumeSource struct { - // Required: CephMonitors is a collection of Ceph monitors - CephMonitors []string - // Required: RBDImage is the rados image name - RBDImage string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: RadosPool is the rados pool name,default is rbd - // +optional - RBDPool string - // Optional: RBDUser is the rados user name, default is admin - // +optional - RadosUser string - // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring - // +optional - Keyring string - // Optional: SecretRef is name of the authentication secret for RBDUser, default is nil. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a cinder volume resource in Openstack. A Cinder volume -// must exist before mounting to a container. The volume must also be -// in the same region as the kubelet. Cinder volumes support ownership -// management and SELinux relabeling. -type CinderVolumeSource struct { - // Unique id of the volume used to identify the cinder volume - VolumeID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. -type CephFSVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors - Monitors []string - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - // +optional - Path string - // Optional: User is the rados user name, default is admin - // +optional - User string - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // +optional - SecretFile string - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a Flocker volume mounted by the Flocker agent. -// One and only one of datasetName and datasetUUID should be set. -// Flocker volumes do not support ownership management or SELinux relabeling. 
-type FlockerVolumeSource struct { - // Name of the dataset stored as metadata -> name on the dataset for Flocker - // should be considered as deprecated - // +optional - DatasetName string - // UUID of the dataset. This is unique identifier of a Flocker dataset - // +optional - DatasetUUID string -} - -// Represents a volume containing downward API info. -// Downward API volumes support ownership management and SELinux relabeling. -type DownwardAPIVolumeSource struct { - // Items is a list of DownwardAPIVolume file - // +optional - Items []DownwardAPIVolumeFile - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 -} - -// Represents a single file containing information from the downward API -type DownwardAPIVolumeFile struct { - // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - Path string - // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. - // +optional - FieldRef *ObjectFieldSelector - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - // +optional - ResourceFieldRef *ResourceFieldSelector - // Optional: mode bits to use on this file, must be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - Mode *int32 -} - -// Represents downward API info for projecting into a projected volume. -// Note that this is identical to a downwardAPI volume source without the default -// mode. -type DownwardAPIProjection struct { - // Items is a list of DownwardAPIVolume file - // +optional - Items []DownwardAPIVolumeFile -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -type AzureFileVolumeSource struct { - // the name of secret that contains Azure Storage Account Name and Key - SecretName string - // Share Name - ShareName string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a vSphere volume resource. -type VsphereVirtualDiskVolumeSource struct { - // Path that identifies vSphere volume vmdk - VolumePath string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string -} - -// Represents a Photon Controller persistent disk resource. -type PhotonPersistentDiskVolumeSource struct { - // ID that identifies Photon Controller persistent disk - PdID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - FSType string -} - -// PortworxVolumeSource represents a Portworx volume resource. 
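For the downward-API volume fields above, a hedged sketch in the versioned core/v1 API: it exposes pod labels and one container's CPU limit as files. The container name "app" is an assumption; as the ResourceFieldSelector comment notes, ContainerName is required when the selector is used from a volume.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// downwardAPIVolume publishes pod metadata and a container resource limit
// into the volume as the files "labels" and "cpu_limit".
func downwardAPIVolume() v1.Volume {
	return v1.Volume{
		Name: "podinfo",
		VolumeSource: v1.VolumeSource{
			DownwardAPI: &v1.DownwardAPIVolumeSource{
				Items: []v1.DownwardAPIVolumeFile{
					{
						Path:     "labels",
						FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.labels"},
					},
					{
						Path: "cpu_limit",
						ResourceFieldRef: &v1.ResourceFieldSelector{
							ContainerName: "app", // assumed container name
							Resource:      "limits.cpu",
							Divisor:       resource.MustParse("1m"),
						},
					},
				},
			},
		},
	}
}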
-type PortworxVolumeSource struct { - // VolumeID uniquely identifies a Portworx volume - VolumeID string - // FSType represents the filesystem type to mount - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -type AzureDataDiskCachingMode string - -const ( - AzureDataDiskCachingNone AzureDataDiskCachingMode = "None" - AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly" - AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite" -) - -// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. -type AzureDiskVolumeSource struct { - // The Name of the data disk in the blob storage - DiskName string - // The URI the the data disk in the blob storage - DataDiskURI string - // Host Caching mode: None, Read Only, Read Write. - // +optional - CachingMode *AzureDataDiskCachingMode - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType *string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly *bool -} - -// ScaleIOVolumeSource represents a persistent ScaleIO volume -type ScaleIOVolumeSource struct { - // The host address of the ScaleIO API Gateway. - Gateway string - // The name of the storage system as configured in ScaleIO. - System string - // SecretRef references to the secret for ScaleIO user and other - // sensitive information. If this is not provided, Login operation will fail. - SecretRef *LocalObjectReference - // Flag to enable/disable SSL communication with Gateway, default false - // +optional - SSLEnabled bool - // The name of the Protection Domain for the configured storage (defaults to "default"). - // +optional - ProtectionDomain string - // The Storage Pool associated with the protection domain (defaults to "default"). - // +optional - StoragePool string - // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). - // +optional - StorageMode string - // The name of a volume already created in the ScaleIO system - // that is associated with this volume source. - VolumeName string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Adapts a ConfigMap into a volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// volume as files using the keys in the Data field as the file names, unless -// the items element is populated with specific mappings of keys to paths. -// ConfigMap volumes support ownership management and SELinux relabeling. -type ConfigMapVolumeSource struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. 
If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 - // Specify whether the ConfigMap or it's keys must be defined - // +optional - Optional *bool -} - -// Adapts a ConfigMap into a projected volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names, -// unless the items element is populated with specific mappings of keys to paths. -// Note that this is identical to a configmap volume source without the default -// mode. -type ConfigMapProjection struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Specify whether the ConfigMap or it's keys must be defined - // +optional - Optional *bool -} - -// Represents a projected volume source -type ProjectedVolumeSource struct { - // list of volume projections - Sources []VolumeProjection - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 -} - -// Projection that may be projected along with other supported volume types -type VolumeProjection struct { - // all types below are the supported types for projection into the same volume - - // information about the secret data to project - Secret *SecretProjection - // information about the downwardAPI data to project - DownwardAPI *DownwardAPIProjection - // information about the configMap data to project - ConfigMap *ConfigMapProjection -} - -// Maps a string key to a path within a volume. -type KeyToPath struct { - // The key to project. - Key string - - // The relative path of the file to map the key to. - // May not be an absolute path. - // May not contain the path element '..'. - // May not start with the string '..'. - Path string - // Optional: mode bits to use on this file, should be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. 
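As an illustration of the projected-volume types above (ProjectedVolumeSource, VolumeProjection and the per-source projections), a sketch using the versioned core/v1 API; the object names "app-tls" and "app-config" are invented for the example.

package example

import v1 "k8s.io/api/core/v1"

func int32Ptr(i int32) *int32 { return &i }

// projectedVolume bundles a Secret, a ConfigMap and downward API metadata
// into a single mount with one shared default file mode.
func projectedVolume() v1.Volume {
	return v1.Volume{
		Name: "all-in-one",
		VolumeSource: v1.VolumeSource{
			Projected: &v1.ProjectedVolumeSource{
				DefaultMode: int32Ptr(0444),
				Sources: []v1.VolumeProjection{
					{Secret: &v1.SecretProjection{
						LocalObjectReference: v1.LocalObjectReference{Name: "app-tls"},
						Items:                []v1.KeyToPath{{Key: "tls.crt", Path: "tls/tls.crt"}},
					}},
					{ConfigMap: &v1.ConfigMapProjection{
						LocalObjectReference: v1.LocalObjectReference{Name: "app-config"},
					}},
					{DownwardAPI: &v1.DownwardAPIProjection{
						Items: []v1.DownwardAPIVolumeFile{
							{Path: "namespace", FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.namespace"}},
						},
					}},
				},
			},
		},
	}
}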
- // +optional - Mode *int32 -} - -// ContainerPort represents a network port in a single container -type ContainerPort struct { - // Optional: If specified, this must be an IANA_SVC_NAME Each named port - // in a pod must have a unique name. - // +optional - Name string - // Optional: If specified, this must be a valid port number, 0 < x < 65536. - // If HostNetwork is specified, this must match ContainerPort. - // +optional - HostPort int32 - // Required: This must be a valid port number, 0 < x < 65536. - ContainerPort int32 - // Required: Supports "TCP" and "UDP". - // +optional - Protocol Protocol - // Optional: What host IP to bind the external port to. - // +optional - HostIP string -} - -// VolumeMount describes a mounting of a Volume within a container. -type VolumeMount struct { - // Required: This must match the Name of a Volume [above]. - Name string - // Optional: Defaults to false (read-write). - // +optional - ReadOnly bool - // Required. Must not contain ':'. - MountPath string - // Path within the volume from which the container's volume should be mounted. - // Defaults to "" (volume's root). - // +optional - SubPath string -} - -// EnvVar represents an environment variable present in a Container. -type EnvVar struct { - // Required: This must be a C_IDENTIFIER. - Name string - // Optional: no more than one of the following may be specified. - // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded - // using the previous defined environment variables in the container and - // any service environment variables. If a variable cannot be resolved, - // the reference in the input string will be unchanged. The $(VAR_NAME) - // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped - // references will never be expanded, regardless of whether the variable - // exists or not. - // +optional - Value string - // Optional: Specifies a source the value of this var should come from. - // +optional - ValueFrom *EnvVarSource -} - -// EnvVarSource represents a source for the value of an EnvVar. -// Only one of its fields may be set. -type EnvVarSource struct { - // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // spec.nodeName, spec.serviceAccountName, status.podIP. - // +optional - FieldRef *ObjectFieldSelector - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - // +optional - ResourceFieldRef *ResourceFieldSelector - // Selects a key of a ConfigMap. - // +optional - ConfigMapKeyRef *ConfigMapKeySelector - // Selects a key of a secret in the pod's namespace. - // +optional - SecretKeyRef *SecretKeySelector -} - -// ObjectFieldSelector selects an APIVersioned field of an object. -type ObjectFieldSelector struct { - // Required: Version of the schema the FieldPath is written in terms of. - // If no value is specified, it will be defaulted to the APIVersion of the - // enclosing object. 
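The ContainerPort, VolumeMount and EnvVar/EnvVarSource fields above are easiest to read in combination; a sketch on a container fragment, assuming a volume named "config" is defined in the enclosing PodSpec and using the downward API for the pod name.

package example

import v1 "k8s.io/api/core/v1"

// webContainer combines a named port, a read-only subpath mount and both a
// literal and a fieldRef-resolved environment variable.
func webContainer() v1.Container {
	return v1.Container{
		Name:  "web",
		Image: "nginx:1.15",
		Ports: []v1.ContainerPort{
			{Name: "http", ContainerPort: 8080, Protocol: v1.ProtocolTCP},
		},
		VolumeMounts: []v1.VolumeMount{
			{Name: "config", MountPath: "/etc/app", ReadOnly: true, SubPath: "web"},
		},
		Env: []v1.EnvVar{
			{Name: "LISTEN_ADDR", Value: ":8080"},
			{Name: "MY_POD_NAME", ValueFrom: &v1.EnvVarSource{
				FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"},
			}},
		},
	}
}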
- APIVersion string - // Required: Path of the field to select in the specified API version - FieldPath string -} - -// ResourceFieldSelector represents container resources (cpu, memory) and their output format -type ResourceFieldSelector struct { - // Container name: required for volumes, optional for env vars - // +optional - ContainerName string - // Required: resource to select - Resource string - // Specifies the output format of the exposed resources, defaults to "1" - // +optional - Divisor resource.Quantity -} - -// Selects a key from a ConfigMap. -type ConfigMapKeySelector struct { - // The ConfigMap to select from. - LocalObjectReference - // The key to select. - Key string - // Specify whether the ConfigMap or it's key must be defined - // +optional - Optional *bool -} - -// SecretKeySelector selects a key of a Secret. -type SecretKeySelector struct { - // The name of the secret in the pod's namespace to select from. - LocalObjectReference - // The key of the secret to select from. Must be a valid secret key. - Key string - // Specify whether the Secret or it's key must be defined - // +optional - Optional *bool -} - -// EnvFromSource represents the source of a set of ConfigMaps -type EnvFromSource struct { - // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. - // +optional - Prefix string - // The ConfigMap to select from. - //+optional - ConfigMapRef *ConfigMapEnvSource - // The Secret to select from. - //+optional - SecretRef *SecretEnvSource -} - -// ConfigMapEnvSource selects a ConfigMap to populate the environment -// variables with. -// -// The contents of the target ConfigMap's Data field will represent the -// key-value pairs as environment variables. -type ConfigMapEnvSource struct { - // The ConfigMap to select from. - LocalObjectReference - // Specify whether the ConfigMap must be defined - // +optional - Optional *bool -} - -// SecretEnvSource selects a Secret to populate the environment -// variables with. -// -// The contents of the target Secret's Data field will represent the -// key-value pairs as environment variables. -type SecretEnvSource struct { - // The Secret to select from. - LocalObjectReference - // Specify whether the Secret must be defined - // +optional - Optional *bool -} - -// HTTPHeader describes a custom header to be used in HTTP probes -type HTTPHeader struct { - // The header field name - Name string - // The header field value - Value string -} - -// HTTPGetAction describes an action based on HTTP Get requests. -type HTTPGetAction struct { - // Optional: Path to access on the HTTP server. - // +optional - Path string - // Required: Name or number of the port to access on the container. - // +optional - Port intstr.IntOrString - // Optional: Host name to connect to, defaults to the pod IP. You - // probably want to set "Host" in httpHeaders instead. - // +optional - Host string - // Optional: Scheme to use for connecting to the host, defaults to HTTP. - // +optional - Scheme URIScheme - // Optional: Custom headers to set in the request. HTTP allows repeated headers. 
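To contrast bulk injection (EnvFromSource) with selecting a single key (SecretKeySelector), a sketch against the versioned API; "app-settings" and "app-secrets" are assumed object names.

package example

import v1 "k8s.io/api/core/v1"

func boolPtr(b bool) *bool { return &b }

// envFromAndKeyRefs injects every key of a ConfigMap with a CFG_ prefix,
// optionally injects a whole Secret, and additionally maps one Secret key to
// a specific variable name.
func envFromAndKeyRefs(c *v1.Container) {
	c.EnvFrom = []v1.EnvFromSource{
		{
			Prefix:       "CFG_",
			ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "app-settings"}},
		},
		{
			SecretRef: &v1.SecretEnvSource{
				LocalObjectReference: v1.LocalObjectReference{Name: "app-secrets"},
				Optional:             boolPtr(true),
			},
		},
	}
	c.Env = append(c.Env, v1.EnvVar{
		Name: "API_TOKEN",
		ValueFrom: &v1.EnvVarSource{
			SecretKeyRef: &v1.SecretKeySelector{
				LocalObjectReference: v1.LocalObjectReference{Name: "app-secrets"},
				Key:                  "token",
			},
		},
	})
}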
- // +optional - HTTPHeaders []HTTPHeader -} - -// URIScheme identifies the scheme used for connection to a host for Get actions -type URIScheme string - -const ( - // URISchemeHTTP means that the scheme used will be http:// - URISchemeHTTP URIScheme = "HTTP" - // URISchemeHTTPS means that the scheme used will be https:// - URISchemeHTTPS URIScheme = "HTTPS" -) - -// TCPSocketAction describes an action based on opening a socket -type TCPSocketAction struct { - // Required: Port to connect to. - // +optional - Port intstr.IntOrString -} - -// ExecAction describes a "run in container" action. -type ExecAction struct { - // Command is the command line to execute inside the container, the working directory for the - // command is root ('/') in the container's filesystem. The command is simply exec'd, it is - // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - // a shell, you need to explicitly call out to that shell. - // +optional - Command []string -} - -// Probe describes a health check to be performed against a container to determine whether it is -// alive or ready to receive traffic. -type Probe struct { - // The action taken to determine the health of a container - Handler - // Length of time before health checking is activated. In seconds. - // +optional - InitialDelaySeconds int32 - // Length of time before health checking times out. In seconds. - // +optional - TimeoutSeconds int32 - // How often (in seconds) to perform the probe. - // +optional - PeriodSeconds int32 - // Minimum consecutive successes for the probe to be considered successful after having failed. - // Must be 1 for liveness. - // +optional - SuccessThreshold int32 - // Minimum consecutive failures for the probe to be considered failed after having succeeded. - // +optional - FailureThreshold int32 -} - -// PullPolicy describes a policy for if/when to pull a container image -type PullPolicy string - -const ( - // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. - PullAlways PullPolicy = "Always" - // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present - PullNever PullPolicy = "Never" - // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. - PullIfNotPresent PullPolicy = "IfNotPresent" -) - -// TerminationMessagePolicy describes how termination messages are retrieved from a container. -type TerminationMessagePolicy string - -const ( - // TerminationMessageReadFile is the default behavior and will set the container status message to - // the contents of the container's terminationMessagePath when the container exits. - TerminationMessageReadFile TerminationMessagePolicy = "File" - // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs - // for the container status message when the container exits with an error and the - // terminationMessagePath has no contents. - TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" -) - -// Capability represent POSIX capabilities type -type Capability string - -// Capabilities represent POSIX capabilities that can be added or removed to a running container. 
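A sketch of the Probe machinery above, using the 1.11-era versioned API in which the action is the embedded Handler struct; paths, ports and thresholds are illustrative.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// probes returns a liveness probe (HTTP GET with a custom header, addressing
// the container port by its name) and a readiness probe (plain TCP connect).
func probes() (liveness, readiness *v1.Probe) {
	liveness = &v1.Probe{
		Handler: v1.Handler{
			HTTPGet: &v1.HTTPGetAction{
				Path:        "/healthz",
				Port:        intstr.FromString("http"),
				Scheme:      v1.URISchemeHTTP,
				HTTPHeaders: []v1.HTTPHeader{{Name: "X-Probe", Value: "liveness"}},
			},
		},
		InitialDelaySeconds: 10,
		PeriodSeconds:       15,
		TimeoutSeconds:      2,
		FailureThreshold:    3,
	}
	readiness = &v1.Probe{
		Handler: v1.Handler{
			TCPSocket: &v1.TCPSocketAction{Port: intstr.FromInt(8080)},
		},
		PeriodSeconds:    5,
		SuccessThreshold: 1,
	}
	return liveness, readiness
}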
-type Capabilities struct { - // Added capabilities - // +optional - Add []Capability - // Removed capabilities - // +optional - Drop []Capability -} - -// ResourceRequirements describes the compute resource requirements. -type ResourceRequirements struct { - // Limits describes the maximum amount of compute resources allowed. - // +optional - Limits ResourceList - // Requests describes the minimum amount of compute resources required. - // If Request is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value - // +optional - Requests ResourceList -} - -// Container represents a single container that is expected to be run on the host. -type Container struct { - // Required: This must be a DNS_LABEL. Each container in a pod must - // have a unique name. - Name string - // Required. - Image string - // Optional: The docker image's entrypoint is used if this is not provided; cannot be updated. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // +optional - Command []string - // Optional: The docker image's cmd is used if this is not provided; cannot be updated. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // +optional - Args []string - // Optional: Defaults to Docker's default. - // +optional - WorkingDir string - // +optional - Ports []ContainerPort - // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple - // sources, the value associated with the last source will take precedence. - // Values defined by an Env with a duplicate key will take precedence. - // Cannot be updated. - // +optional - EnvFrom []EnvFromSource - // +optional - Env []EnvVar - // Compute resource requirements. - // +optional - Resources ResourceRequirements - // +optional - VolumeMounts []VolumeMount - // +optional - LivenessProbe *Probe - // +optional - ReadinessProbe *Probe - // +optional - Lifecycle *Lifecycle - // Required. - // +optional - TerminationMessagePath string - // +optional - TerminationMessagePolicy TerminationMessagePolicy - // Required: Policy for pulling images for this container - ImagePullPolicy PullPolicy - // Optional: SecurityContext defines the security options the container should be run with. - // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - // +optional - SecurityContext *SecurityContext - - // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) - // and shouldn't be used for general purpose containers. - // +optional - Stdin bool - // +optional - StdinOnce bool - // +optional - TTY bool -} - -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. 
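Pulling the Container, ResourceRequirements and Capabilities types above together, a hedged core/v1 sketch; the image tag and capability choices are examples, not recommendations.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// appContainer sets requests and limits, a pull policy, log-fallback
// termination messages, and adjusts POSIX capabilities via the container
// security context.
func appContainer() v1.Container {
	return v1.Container{
		Name:            "app",
		Image:           "example/app:1.2.3",
		ImagePullPolicy: v1.PullIfNotPresent,
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("100m"),
				v1.ResourceMemory: resource.MustParse("128Mi"),
			},
			Limits: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("500m"),
				v1.ResourceMemory: resource.MustParse("256Mi"),
			},
		},
		TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
		SecurityContext: &v1.SecurityContext{
			Capabilities: &v1.Capabilities{
				Drop: []v1.Capability{"NET_RAW"},
				Add:  []v1.Capability{"NET_BIND_SERVICE"},
			},
		},
	}
}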
-type Handler struct { - // One and only one of the following should be specified. - // Exec specifies the action to take. - // +optional - Exec *ExecAction - // HTTPGet specifies the http request to perform. - // +optional - HTTPGet *HTTPGetAction - // TCPSocket specifies an action involving a TCP port. - // TODO: implement a realistic TCP lifecycle hook - // +optional - TCPSocket *TCPSocketAction -} - -// Lifecycle describes actions that the management system should take in response to container lifecycle -// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks -// until the action is complete, unless the container process fails, in which case the handler is aborted. -type Lifecycle struct { - // PostStart is called immediately after a container is created. If the handler fails, the container - // is terminated and restarted. - // +optional - PostStart *Handler - // PreStop is called immediately before a container is terminated. The reason for termination is - // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. - // +optional - PreStop *Handler -} - -// The below types are used by kube_client and api_server. - -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition; -// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -type ContainerStateWaiting struct { - // A brief CamelCase string indicating details about why the container is in waiting state. - // +optional - Reason string - // A human-readable message indicating details about why the container is in waiting state. - // +optional - Message string -} - -type ContainerStateRunning struct { - // +optional - StartedAt metav1.Time -} - -type ContainerStateTerminated struct { - ExitCode int32 - // +optional - Signal int32 - // +optional - Reason string - // +optional - Message string - // +optional - StartedAt metav1.Time - // +optional - FinishedAt metav1.Time - // +optional - ContainerID string -} - -// ContainerState holds a possible state of container. -// Only one of its members may be specified. -// If none of them is specified, the default one is ContainerStateWaiting. -type ContainerState struct { - // +optional - Waiting *ContainerStateWaiting - // +optional - Running *ContainerStateRunning - // +optional - Terminated *ContainerStateTerminated -} - -type ContainerStatus struct { - // Each container in a pod must have a unique name. - Name string - // +optional - State ContainerState - // +optional - LastTerminationState ContainerState - // Ready specifies whether the container has passed its readiness check. - Ready bool - // Note that this is calculated from dead containers. But those containers are subject to - // garbage collection. This value will get capped at 5 by GC. - RestartCount int32 - Image string - ImageID string - // +optional - ContainerID string -} - -// PodPhase is a label for the condition of a pod at the current time. -type PodPhase string - -// These are the valid statuses of pods. -const ( - // PodPending means the pod has been accepted by the system, but one or more of the containers - // has not been started. 
This includes time before being bound to a node, as well as time spent - // pulling images onto the host. - PodPending PodPhase = "Pending" - // PodRunning means the pod has been bound to a node and all of the containers have been started. - // At least one container is still running or is in the process of being restarted. - PodRunning PodPhase = "Running" - // PodSucceeded means that all containers in the pod have voluntarily terminated - // with a container exit code of 0, and the system is not going to restart any of these containers. - PodSucceeded PodPhase = "Succeeded" - // PodFailed means that all containers in the pod have terminated, and at least one container has - // terminated in a failure (exited with a non-zero exit code or was stopped by the system). - PodFailed PodPhase = "Failed" - // PodUnknown means that for some reason the state of the pod could not be obtained, typically due - // to an error in communicating with the host of the pod. - PodUnknown PodPhase = "Unknown" -) - -type PodConditionType string - -// These are valid conditions of pod. -const ( - // PodScheduled represents status of the scheduling process for this pod. - PodScheduled PodConditionType = "PodScheduled" - // PodReady means the pod is able to service requests and should be added to the - // load balancing pools of all matching services. - PodReady PodConditionType = "Ready" - // PodInitialized means that all init containers in the pod have started successfully. - PodInitialized PodConditionType = "Initialized" - // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler - // can't schedule the pod right now, for example due to insufficient resources in the cluster. - PodReasonUnschedulable = "Unschedulable" -) - -type PodCondition struct { - Type PodConditionType - Status ConditionStatus - // +optional - LastProbeTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -// RestartPolicy describes how the container should be restarted. -// Only one of the following restart policies may be specified. -// If none of the following policies is specified, the default one -// is RestartPolicyAlways. -type RestartPolicy string - -const ( - RestartPolicyAlways RestartPolicy = "Always" - RestartPolicyOnFailure RestartPolicy = "OnFailure" - RestartPolicyNever RestartPolicy = "Never" -) - -// PodList is a list of Pods. -type PodList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Pod -} - -// DNSPolicy defines how a pod's DNS will be configured. -type DNSPolicy string - -const ( - // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS - // first, if it is available, then fall back on the default - // (as determined by kubelet) DNS settings. - DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" - - // DNSClusterFirst indicates that the pod should use cluster DNS - // first unless hostNetwork is true, if it is available, then - // fall back on the default (as determined by kubelet) DNS settings. - DNSClusterFirst DNSPolicy = "ClusterFirst" - - // DNSDefault indicates that the pod should use the default (as - // determined by kubelet) DNS settings. - DNSDefault DNSPolicy = "Default" -) - -// A node selector represents the union of the results of one or more label queries -// over a set of nodes; that is, it represents the OR of the selectors represented -// by the node selector terms. -type NodeSelector struct { - //Required. 
A list of node selector terms. The terms are ORed. - NodeSelectorTerms []NodeSelectorTerm -} - -// A null or empty node selector term matches no objects. -type NodeSelectorTerm struct { - //Required. A list of node selector requirements. The requirements are ANDed. - MatchExpressions []NodeSelectorRequirement -} - -// A node selector requirement is a selector that contains values, a key, and an operator -// that relates the key and values. -type NodeSelectorRequirement struct { - // The label key that the selector applies to. - Key string - // Represents a key's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - Operator NodeSelectorOperator - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. If the operator is Gt or Lt, the values - // array must have a single element, which will be interpreted as an integer. - // This array is replaced during a strategic merge patch. - // +optional - Values []string -} - -// A node selector operator is the set of operators that can be used in -// a node selector requirement. -type NodeSelectorOperator string - -const ( - NodeSelectorOpIn NodeSelectorOperator = "In" - NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" - NodeSelectorOpExists NodeSelectorOperator = "Exists" - NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" - NodeSelectorOpGt NodeSelectorOperator = "Gt" - NodeSelectorOpLt NodeSelectorOperator = "Lt" -) - -// Affinity is a group of affinity scheduling rules. -type Affinity struct { - // Describes node affinity scheduling rules for the pod. - // +optional - NodeAffinity *NodeAffinity - // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - // +optional - PodAffinity *PodAffinity - // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - // +optional - PodAntiAffinity *PodAntiAffinity -} - -// Pod affinity is a group of inter pod affinity scheduling rules. -type PodAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm -} - -// Pod anti affinity is a group of inter pod anti affinity scheduling rules. -type PodAntiAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm - // The scheduler will prefer to schedule pods to nodes that satisfy - // the anti-affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm -} - -// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) -type WeightedPodAffinityTerm struct { - // weight associated with matching the corresponding podAffinityTerm, - // in the range 1-100. - Weight int32 - // Required. A pod affinity term, associated with the corresponding weight. 
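A short sketch of the pod anti-affinity types above (PodAntiAffinity plus WeightedPodAffinityTerm), using the versioned API; the app=frontend label is an assumption.

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// spreadReplicas prefers (weight 100) that pods labelled app=frontend do not
// share a node, keyed on the per-node hostname topology label. Because the
// term is only preferred, the scheduler may still co-locate pods if needed.
func spreadReplicas() *v1.PodAntiAffinity {
	return &v1.PodAntiAffinity{
		PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
			{
				Weight: 100,
				PodAffinityTerm: v1.PodAffinityTerm{
					LabelSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{"app": "frontend"},
					},
					TopologyKey: "kubernetes.io/hostname",
				},
			},
		},
	}
}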
- PodAffinityTerm PodAffinityTerm -} - -// Defines a set of pods (namely those matching the labelSelector -// relative to the given namespace(s)) that this pod should be -// co-located (affinity) or not co-located (anti-affinity) with, -// where co-located is defined as running on a node whose value of -// the label with key matches that of any node on which -// a pod of the set of pods is running. -type PodAffinityTerm struct { - // A label query over a set of resources, in this case pods. - // +optional - LabelSelector *metav1.LabelSelector - // namespaces specifies which namespaces the labelSelector applies to (matches against); - // null or empty list means "this pod's namespace" - Namespaces []string - // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - // the labelSelector in the specified namespaces, where co-located is defined as running on a node - // whose value of the label with key topologyKey matches that of any node on which any of the - // selected pods is running. - // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" - // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); - // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. - // +optional - TopologyKey string -} - -// Node affinity is a group of node affinity scheduling rules. -type NodeAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // will try to eventually evict the pod from its node. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector - - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // may or may not try to eventually evict the pod from its node. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node matches the corresponding matchExpressions; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm -} - -// An empty preferred scheduling term matches all objects with implicit weight 0 -// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). -type PreferredSchedulingTerm struct { - // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
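For the node-affinity side (NodeAffinity, NodeSelector terms and PreferredSchedulingTerm), a hedged sketch; the "disktype" label is invented, and only the required term is a hard constraint.

package example

import v1 "k8s.io/api/core/v1"

// linuxSSDAffinity requires nodes labelled as linux and prefers (weight 50)
// nodes labelled disktype=ssd.
func linuxSSDAffinity() *v1.NodeAffinity {
	return &v1.NodeAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{
				{MatchExpressions: []v1.NodeSelectorRequirement{
					{Key: "beta.kubernetes.io/os", Operator: v1.NodeSelectorOpIn, Values: []string{"linux"}},
				}},
			},
		},
		PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
			{
				Weight: 50,
				Preference: v1.NodeSelectorTerm{
					MatchExpressions: []v1.NodeSelectorRequirement{
						{Key: "disktype", Operator: v1.NodeSelectorOpIn, Values: []string{"ssd"}},
					},
				},
			},
		},
	}
}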
- Weight int32 - // A node selector term, associated with the corresponding weight. - Preference NodeSelectorTerm -} - -// The node this Taint is attached to has the effect "effect" on -// any pod that that does not tolerate the Taint. -type Taint struct { - // Required. The taint key to be applied to a node. - Key string - // Required. The taint value corresponding to the taint key. - // +optional - Value string - // Required. The effect of the taint on pods - // that do not tolerate the taint. - // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. - Effect TaintEffect - // TimeAdded represents the time at which the taint was added. - // It is only written for NoExecute taints. - // +optional - TimeAdded metav1.Time -} - -type TaintEffect string - -const ( - // Do not allow new pods to schedule onto the node unless they tolerate the taint, - // but allow all pods submitted to Kubelet without going through the scheduler - // to start, and allow all already-running pods to continue running. - // Enforced by the scheduler. - TaintEffectNoSchedule TaintEffect = "NoSchedule" - // Like TaintEffectNoSchedule, but the scheduler tries not to schedule - // new pods onto the node, rather than prohibiting new pods from scheduling - // onto the node entirely. Enforced by the scheduler. - TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to - // Kubelet without going through the scheduler to start. - // Enforced by Kubelet and the scheduler. - // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" - // Evict any already-running pods that do not tolerate the taint. - // Currently enforced by NodeController. - TaintEffectNoExecute TaintEffect = "NoExecute" -) - -// The pod this Toleration is attached to tolerates any taint that matches -// the triple using the matching operator . -type Toleration struct { - // Key is the taint key that the toleration applies to. Empty means match all taint keys. - // If the key is empty, operator must be Exists; this combination means to match all values and all keys. - // +optional - Key string - // Operator represents a key's relationship to the value. - // Valid operators are Exists and Equal. Defaults to Equal. - // Exists is equivalent to wildcard for value, so that a pod can - // tolerate all taints of a particular category. - // +optional - Operator TolerationOperator - // Value is the taint value the toleration matches to. - // If the operator is Exists, the value should be empty, otherwise just a regular string. - // +optional - Value string - // Effect indicates the taint effect to match. Empty means match all taint effects. - // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - // +optional - Effect TaintEffect - // TolerationSeconds represents the period of time the toleration (which must be - // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - // it is not set, which means tolerate the taint forever (do not evict). Zero and - // negative values will be treated as 0 (evict immediately) by the system. - // +optional - TolerationSeconds *int64 -} - -// A toleration operator is the set of operators that can be used in a toleration. 
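The Taint/Toleration pairing above in practice, as a sketch against the versioned API; the "dedicated=monitoring" taint is an assumed example, while the unreachable-node key is the well-known NoExecute taint applied by the node controller.

package example

import v1 "k8s.io/api/core/v1"

func int64Ptr(i int64) *int64 { return &i }

// tolerations lets a pod (a) schedule onto nodes carrying a
// dedicated=monitoring NoSchedule taint and (b) remain for five minutes on a
// node that becomes unreachable before being evicted.
func tolerations() []v1.Toleration {
	return []v1.Toleration{
		{
			Key:      "dedicated",
			Operator: v1.TolerationOpEqual,
			Value:    "monitoring",
			Effect:   v1.TaintEffectNoSchedule,
		},
		{
			Key:               "node.kubernetes.io/unreachable",
			Operator:          v1.TolerationOpExists,
			Effect:            v1.TaintEffectNoExecute,
			TolerationSeconds: int64Ptr(300),
		},
	}
}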
-type TolerationOperator string - -const ( - TolerationOpExists TolerationOperator = "Exists" - TolerationOpEqual TolerationOperator = "Equal" -) - -// PodSpec is a description of a pod -type PodSpec struct { - Volumes []Volume - // List of initialization containers belonging to the pod. - InitContainers []Container - // List of containers belonging to the pod. - Containers []Container - // +optional - RestartPolicy RestartPolicy - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates delete immediately. - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // +optional - TerminationGracePeriodSeconds *int64 - // Optional duration in seconds relative to the StartTime that the pod may be active on a node - // before the system actively tries to terminate the pod; value must be positive integer - // +optional - ActiveDeadlineSeconds *int64 - // Required: Set DNS policy. - // +optional - DNSPolicy DNSPolicy - // NodeSelector is a selector which must be true for the pod to fit on a node - // +optional - NodeSelector map[string]string - - // ServiceAccountName is the name of the ServiceAccount to use to run this pod - // The pod will be allowed to use secrets referenced by the ServiceAccount - ServiceAccountName string - // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. - // +optional - AutomountServiceAccountToken *bool - - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - // +optional - NodeName string - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. See type description for default values of each field. - // +optional - SecurityContext *PodSecurityContext - // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. For example, - // in the case of docker, only DockerConfig type secrets are honored. - // +optional - ImagePullSecrets []LocalObjectReference - // Specifies the hostname of the Pod. - // If not specified, the pod's hostname will be set to a system-defined value. - // +optional - Hostname string - // If specified, the fully qualified Pod hostname will be "...svc.". - // If not specified, the pod will not have a domainname at all. - // +optional - Subdomain string - // If specified, the pod's scheduling constraints - // +optional - Affinity *Affinity - // If specified, the pod will be dispatched by specified scheduler. - // If not specified, the pod will be dispatched by default scheduler. - // +optional - SchedulerName string - // If specified, the pod's tolerations. 
- // +optional - Tolerations []Toleration -} - -// Sysctl defines a kernel parameter to be set -type Sysctl struct { - // Name of a property to set - Name string - // Value of a property to set - Value string -} - -// PodSecurityContext holds pod-level security attributes and common container settings. -// Some fields are also present in container.securityContext. Field values of -// container.securityContext take precedence over field values of PodSecurityContext. -type PodSecurityContext struct { - // Use the host's network namespace. If this option is set, the ports that will be - // used must be specified. - // Optional: Default to false - // +k8s:conversion-gen=false - // +optional - HostNetwork bool - // Use the host's pid namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - HostPID bool - // Use the host's ipc namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - HostIPC bool - // The SELinux context to be applied to all containers. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in SecurityContext. If set in - // both SecurityContext and PodSecurityContext, the value specified in SecurityContext - // takes precedence for that container. - // +optional - SELinuxOptions *SELinuxOptions - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. - // +optional - RunAsUser *int64 - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsNonRoot *bool - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. - // +optional - SupplementalGroups []int64 - // A special supplemental group that applies to all containers in a pod. - // Some volume types allow the Kubelet to change the ownership of that volume - // to be owned by the pod: - // - // 1. The owning GID will be the FSGroup - // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - // 3. The permission bits are OR'd with rw-rw---- - // - // If unset, the Kubelet will not modify the ownership and permissions of any volume. - // +optional - FSGroup *int64 -} - -// PodQOSClass defines the supported qos classes of Pods. -type PodQOSClass string - -const ( - // PodQOSGuaranteed is the Guaranteed qos class. - PodQOSGuaranteed PodQOSClass = "Guaranteed" - // PodQOSBurstable is the Burstable qos class. - PodQOSBurstable PodQOSClass = "Burstable" - // PodQOSBestEffort is the BestEffort qos class. - PodQOSBestEffort PodQOSClass = "BestEffort" -) - -// PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system. 
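A sketch of a restrictive PodSecurityContext per the fields above. One caveat worth noting: in the versioned core/v1 API the Host* namespace switches (HostNetwork, HostPID, HostIPC) live on PodSpec rather than on PodSecurityContext as in this internal type, so they are omitted here.

package example

import v1 "k8s.io/api/core/v1"

func int64Ptr(i int64) *int64 { return &i }
func boolPtr(b bool) *bool    { return &b }

// restrictedPodSecurity runs every container as UID 1000, rejects images that
// would run as root, and hands volume ownership to FSGroup 2000 so that
// supporting volume types become group-writable for the pod.
func restrictedPodSecurity() *v1.PodSecurityContext {
	return &v1.PodSecurityContext{
		RunAsUser:          int64Ptr(1000),
		RunAsNonRoot:       boolPtr(true),
		FSGroup:            int64Ptr(2000),
		SupplementalGroups: []int64{2000},
	}
}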
-type PodStatus struct { - // +optional - Phase PodPhase - // +optional - Conditions []PodCondition - // A human readable message indicating details about why the pod is in this state. - // +optional - Message string - // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk' - // +optional - Reason string - - // +optional - HostIP string - // +optional - PodIP string - - // Date and time at which the object was acknowledged by the Kubelet. - // This is before the Kubelet pulled the container image(s) for the pod. - // +optional - StartTime *metav1.Time - // +optional - QOSClass PodQOSClass - - // The list has one entry per init container in the manifest. The most recent successful - // init container will have ready = true, the most recently started container will have - // startTime set. - // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses - InitContainerStatuses []ContainerStatus - // The list has one entry per container in the manifest. Each entry is - // currently the output of `docker inspect`. This output format is *not* - // final and should not be relied upon. - // TODO: Make real decisions about what our info should look like. Re-enable fuzz test - // when we have done this. - // +optional - ContainerStatuses []ContainerStatus -} - -// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded -type PodStatusResult struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - // Status represents the current information about a pod. This data may not be up - // to date. - // +optional - Status PodStatus -} - -// +genclient=true - -// Pod is a collection of containers, used as either input (create, update) or as output (list, get). -type Pod struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a pod. - // +optional - Spec PodSpec - - // Status represents the current information about a pod. This data may not be up - // to date. - // +optional - Status PodStatus -} - -// PodTemplateSpec describes the data a pod should have when created from a template -type PodTemplateSpec struct { - // Metadata of the pods created from this template. - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a pod. - // +optional - Spec PodSpec -} - -// +genclient=true - -// PodTemplate describes a template for creating copies of a predefined pod. -type PodTemplate struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Template defines the pods that will be created from this pod template - // +optional - Template PodTemplateSpec -} - -// PodTemplateList is a list of PodTemplates. -type PodTemplateList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []PodTemplate -} - -// ReplicationControllerSpec is the specification of a replication controller. -// As the internal representation of a replication controller, it may have either -// a TemplateRef or a Template set. -type ReplicationControllerSpec struct { - // Replicas is the number of desired replicas. - Replicas int32 - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 - - // Selector is a label query over pods that should match the Replicas count. 
- Selector map[string]string - - // TemplateRef is a reference to an object that describes the pod that will be created if - // insufficient replicas are detected. This reference is ignored if a Template is set. - // Must be set before converting to a versioned API object - // +optional - //TemplateRef *ObjectReference - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. Internally, this takes precedence over a - // TemplateRef. - // +optional - Template *PodTemplateSpec -} - -// ReplicationControllerStatus represents the current status of a replication -// controller. -type ReplicationControllerStatus struct { - // Replicas is the number of actual replicas. - Replicas int32 - - // The number of pods that have labels matching the labels of the pod template of the replication controller. - // +optional - FullyLabeledReplicas int32 - - // The number of ready replicas for this replication controller. - // +optional - ReadyReplicas int32 - - // The number of available replicas (ready for at least minReadySeconds) for this replication controller. - // +optional - AvailableReplicas int32 - - // ObservedGeneration is the most recent generation observed by the controller. - // +optional - ObservedGeneration int64 - - // Represents the latest available observations of a replication controller's current state. - // +optional - Conditions []ReplicationControllerCondition -} - -type ReplicationControllerConditionType string - -// These are valid conditions of a replication controller. -const ( - // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods - // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, - // etc. or deleted due to kubelet being down or finalizers are failing. - ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" -) - -// ReplicationControllerCondition describes the state of a replication controller at a certain point. -type ReplicationControllerCondition struct { - // Type of replication controller condition. - Type ReplicationControllerConditionType - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus - // The last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time - // The reason for the condition's last transition. - // +optional - Reason string - // A human readable message indicating details about the transition. - // +optional - Message string -} - -// +genclient=true - -// ReplicationController represents the configuration of a replication controller. -type ReplicationController struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired behavior of this replication controller. - // +optional - Spec ReplicationControllerSpec - - // Status is the current status of this replication controller. This data may be - // out of date by some window of time. - // +optional - Status ReplicationControllerStatus -} - -// ReplicationControllerList is a collection of replication controllers. -type ReplicationControllerList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ReplicationController -} - -const ( - // ClusterIPNone - do not assign a cluster IP - // no proxying required and no environment variables should be created for pods - ClusterIPNone = "None" -) - -// ServiceList holds a list of services. 
-type ServiceList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Service -} - -// Session Affinity Type string -type ServiceAffinity string - -const ( - // ServiceAffinityClientIP is the Client IP based. - ServiceAffinityClientIP ServiceAffinity = "ClientIP" - - // ServiceAffinityNone - no session affinity. - ServiceAffinityNone ServiceAffinity = "None" -) - -// Service Type string describes ingress methods for a service -type ServiceType string - -const ( - // ServiceTypeClusterIP means a service will only be accessible inside the - // cluster, via the ClusterIP. - ServiceTypeClusterIP ServiceType = "ClusterIP" - - // ServiceTypeNodePort means a service will be exposed on one port of - // every node, in addition to 'ClusterIP' type. - ServiceTypeNodePort ServiceType = "NodePort" - - // ServiceTypeLoadBalancer means a service will be exposed via an - // external load balancer (if the cloud provider supports it), in addition - // to 'NodePort' type. - ServiceTypeLoadBalancer ServiceType = "LoadBalancer" - - // ServiceTypeExternalName means a service consists of only a reference to - // an external name that kubedns or equivalent will return as a CNAME - // record, with no exposing or proxying of any pods involved. - ServiceTypeExternalName ServiceType = "ExternalName" -) - -// ServiceStatus represents the current status of a service -type ServiceStatus struct { - // LoadBalancer contains the current status of the load-balancer, - // if one is present. - // +optional - LoadBalancer LoadBalancerStatus -} - -// LoadBalancerStatus represents the status of a load-balancer -type LoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer; - // traffic intended for the service should be sent to these ingress points. - // +optional - Ingress []LoadBalancerIngress -} - -// LoadBalancerIngress represents the status of a load-balancer ingress point: -// traffic intended for the service should be sent to an ingress point. -type LoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based - // (typically GCE or OpenStack load-balancers) - // +optional - IP string - - // Hostname is set for load-balancer ingress points that are DNS based - // (typically AWS load-balancers) - // +optional - Hostname string -} - -// ServiceSpec describes the attributes that a user creates on a service -type ServiceSpec struct { - // Type determines how the Service is exposed. Defaults to ClusterIP. Valid - // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - // "ExternalName" maps to the specified externalName. - // "ClusterIP" allocates a cluster-internal IP address for load-balancing to - // endpoints. Endpoints are determined by the selector or if that is not - // specified, by manual construction of an Endpoints object. If clusterIP is - // "None", no virtual IP is allocated and the endpoints are published as a - // set of endpoints rather than a stable IP. - // "NodePort" builds on ClusterIP and allocates a port on every node which - // routes to the clusterIP. - // "LoadBalancer" builds on NodePort and creates an - // external load-balancer (if supported in the current cloud) which routes - // to the clusterIP. - // More info: http://kubernetes.io/docs/user-guide/services#overview - // +optional - Type ServiceType - - // Required: The list of ports that are exposed by this service. - Ports []ServicePort - - // Route service traffic to pods with label keys and values matching this - // selector. 
If empty or not present, the service is assumed to have an - // external process managing its endpoints, which Kubernetes will not - // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. - // Ignored if type is ExternalName. - // More info: http://kubernetes.io/docs/user-guide/services#overview - Selector map[string]string - - // ClusterIP is the IP address of the service and is usually assigned - // randomly by the master. If an address is specified manually and is not in - // use by others, it will be allocated to the service; otherwise, creation - // of the service will fail. This field can not be changed through updates. - // Valid values are "None", empty string (""), or a valid IP address. "None" - // can be specified for headless services when proxying is not required. - // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if - // type is ExternalName. - // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies - // +optional - ClusterIP string - - // ExternalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. - ExternalName string - - // ExternalIPs are used by external load balancers, or can be set by - // users to handle external traffic that arrives at a node. - // +optional - ExternalIPs []string - - // Only applies to Service Type: LoadBalancer - // LoadBalancer will get created with the IP specified in this field. - // This feature depends on whether the underlying cloud-provider supports specifying - // the loadBalancerIP when a load balancer is created. - // This field will be ignored if the cloud-provider does not support the feature. - // +optional - LoadBalancerIP string - - // Optional: Supports "ClientIP" and "None". Used to maintain session affinity. - // +optional - SessionAffinity ServiceAffinity - - // Optional: If specified and supported by the platform, this will restrict traffic through the cloud-provider - // load-balancer will be restricted to the specified client IPs. This field will be ignored if the - // cloud-provider does not support the feature." - // +optional - LoadBalancerSourceRanges []string -} - -type ServicePort struct { - // Optional if only one ServicePort is defined on this service: The - // name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. - Name string - - // The IP protocol for this port. Supports "TCP" and "UDP". - Protocol Protocol - - // The port that will be exposed on the service. - Port int32 - - // Optional: The target port on pods selected by this service. If this - // is a string, it will be looked up as a named port in the target - // Pod's container ports. If this is not specified, the value - // of the 'port' field is used (an identity map). - // This field is ignored for services with clusterIP=None, and should be - // omitted or set equal to the 'port' field. - TargetPort intstr.IntOrString - - // The port on each node on which this service is exposed. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. 
- NodePort int32 -} - -// +genclient=true - -// Service is a named abstraction of software service (for example, mysql) consisting of local port -// (for example 3306) that the proxy listens on, and the selector that determines which pods -// will answer requests sent through the proxy. -type Service struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a service. - // +optional - Spec ServiceSpec - - // Status represents the current status of a service. - // +optional - Status ServiceStatus -} - -// +genclient=true - -// ServiceAccount binds together: -// * a name, understood by users, and perhaps by peripheral systems, for an identity -// * a principal that can be authenticated and authorized -// * a set of secrets -type ServiceAccount struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount - Secrets []ObjectReference - - // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images - // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets - // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // +optional - ImagePullSecrets []LocalObjectReference - - // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. - // Can be overridden at the pod level. - // +optional - AutomountServiceAccountToken *bool -} - -// ServiceAccountList is a list of ServiceAccount objects -type ServiceAccountList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ServiceAccount -} - -// +genclient=true - -// Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] -type Endpoints struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // The set of all endpoints is the union of all subsets. - Subsets []EndpointSubset -} - -// EndpointSubset is a group of addresses with a common set of ports. The -// expanded set of endpoints is the Cartesian product of Addresses x Ports. -// For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } -// The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] -type EndpointSubset struct { - Addresses []EndpointAddress - NotReadyAddresses []EndpointAddress - Ports []EndpointPort -} - -// EndpointAddress is a tuple that describes single IP address. -type EndpointAddress struct { - // The IP of this endpoint. - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, see #4447. - IP string - // Optional: Hostname of this endpoint - // Meant to be used by DNS servers etc. - // +optional - Hostname string - // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. 
- // +optional - NodeName *string - // Optional: The kubernetes object related to the entry point. - TargetRef *ObjectReference -} - -// EndpointPort is a tuple that describes a single port. -type EndpointPort struct { - // The name of this port (corresponds to ServicePort.Name). Optional - // if only one port is defined. Must be a DNS_LABEL. - Name string - - // The port number. - Port int32 - - // The IP protocol for this port. - Protocol Protocol -} - -// EndpointsList is a list of endpoints. -type EndpointsList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Endpoints -} - -// NodeSpec describes the attributes that a node is created with. -type NodeSpec struct { - // PodCIDR represents the pod IP range assigned to the node - // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs. - // +optional - PodCIDR string - - // External ID of the node assigned by some machine database (e.g. a cloud provider) - // +optional - ExternalID string - - // ID of the node assigned by the cloud provider - // Note: format is "://" - // +optional - ProviderID string - - // Unschedulable controls node schedulability of new pods. By default node is schedulable. - // +optional - Unschedulable bool - - // If specified, the node's taints. - // +optional - Taints []Taint -} - -// DaemonEndpoint contains information about a single Daemon endpoint. -type DaemonEndpoint struct { - /* - The port tag was not properly in quotes in earlier releases, so it must be - uppercased for backwards compat (since it was falling back to var name of - 'Port'). - */ - - // Port number of the given endpoint. - Port int32 -} - -// NodeDaemonEndpoints lists ports opened by daemons running on the Node. -type NodeDaemonEndpoints struct { - // Endpoint on which Kubelet is listening. - // +optional - KubeletEndpoint DaemonEndpoint -} - -// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. -type NodeSystemInfo struct { - // MachineID reported by the node. For unique machine identification - // in the cluster this field is prefered. Learn more from man(5) - // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html - MachineID string - // SystemUUID reported by the node. For unique machine identification - // MachineID is prefered. This field is specific to Red Hat hosts - // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html - SystemUUID string - // Boot ID reported by the node. - BootID string - // Kernel Version reported by the node. - KernelVersion string - // OS Image reported by the node. - OSImage string - // ContainerRuntime Version reported by the node. - ContainerRuntimeVersion string - // Kubelet Version reported by the node. - KubeletVersion string - // KubeProxy Version reported by the node. - KubeProxyVersion string - // The Operating System reported by the node - OperatingSystem string - // The Architecture reported by the node - Architecture string -} - -// NodeStatus is information about the current status of a node. -type NodeStatus struct { - // Capacity represents the total resources of a node. - // +optional - Capacity ResourceList - // Allocatable represents the resources of a node that are available for scheduling. - // +optional - Allocatable ResourceList - // NodePhase is the current lifecycle phase of the node. - // +optional - Phase NodePhase - // Conditions is an array of current node conditions. 
- // +optional - Conditions []NodeCondition - // Queried from cloud provider, if available. - // +optional - Addresses []NodeAddress - // Endpoints of daemons running on the Node. - // +optional - DaemonEndpoints NodeDaemonEndpoints - // Set of ids/uuids to uniquely identify the node. - // +optional - NodeInfo NodeSystemInfo - // List of container images on this node - // +optional - Images []ContainerImage - // List of attachable volumes in use (mounted) by the node. - // +optional - VolumesInUse []UniqueVolumeName - // List of volumes that are attached to the node. - // +optional - VolumesAttached []AttachedVolume -} - -type UniqueVolumeName string - -// AttachedVolume describes a volume attached to a node -type AttachedVolume struct { - // Name of the attached volume - Name UniqueVolumeName - - // DevicePath represents the device path where the volume should be available - DevicePath string -} - -// AvoidPods describes pods that should avoid this node. This is the value for a -// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and -// will eventually become a field of NodeStatus. -type AvoidPods struct { - // Bounded-sized list of signatures of pods that should avoid this node, sorted - // in timestamp order from oldest to newest. Size of the slice is unspecified. - // +optional - PreferAvoidPods []PreferAvoidPodsEntry -} - -// Describes a class of pods that should avoid this node. -type PreferAvoidPodsEntry struct { - // The class of pods. - PodSignature PodSignature - // Time at which this entry was added to the list. - // +optional - EvictionTime metav1.Time - // (brief) reason why this entry was added to the list. - // +optional - Reason string - // Human readable message indicating why this entry was added to the list. - // +optional - Message string -} - -// Describes the class of pods that should avoid this node. -// Exactly one field should be set. -type PodSignature struct { - // Reference to controller whose pods should avoid this node. - // +optional - PodController *metav1.OwnerReference -} - -// Describe a container image -type ContainerImage struct { - // Names by which this image is known. - Names []string - // The size of the image in bytes. - // +optional - SizeBytes int64 -} - -type NodePhase string - -// These are the valid phases of node. -const ( - // NodePending means the node has been created/added by the system, but not configured. - NodePending NodePhase = "Pending" - // NodeRunning means the node has been configured and has Kubernetes components running. - NodeRunning NodePhase = "Running" - // NodeTerminated means the node has been removed from the cluster. - NodeTerminated NodePhase = "Terminated" -) - -type NodeConditionType string - -// These are valid conditions of node. Currently, we don't have enough information to decide -// node condition. In the future, we will add more. The proposed set of conditions are: -// NodeReady, NodeReachable -const ( - // NodeReady means kubelet is healthy and ready to accept pods. - NodeReady NodeConditionType = "Ready" - // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk - // space on the node. - NodeOutOfDisk NodeConditionType = "OutOfDisk" - // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. - NodeMemoryPressure NodeConditionType = "MemoryPressure" - // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. 
- NodeDiskPressure NodeConditionType = "DiskPressure" - // NodeNetworkUnavailable means that network for the node is not correctly configured. - NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" -) - -type NodeCondition struct { - Type NodeConditionType - Status ConditionStatus - // +optional - LastHeartbeatTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -type NodeAddressType string - -// These are valid address types of node. NodeLegacyHostIP is used to transit -// from out-dated HostIP field to NodeAddress. -const ( - // Deprecated: NodeLegacyHostIP will be removed in 1.7. - NodeLegacyHostIP NodeAddressType = "LegacyHostIP" - NodeHostName NodeAddressType = "Hostname" - NodeExternalIP NodeAddressType = "ExternalIP" - NodeInternalIP NodeAddressType = "InternalIP" - NodeExternalDNS NodeAddressType = "ExternalDNS" - NodeInternalDNS NodeAddressType = "InternalDNS" -) - -type NodeAddress struct { - Type NodeAddressType - Address string -} - -// NodeResources is an object for conveying resource information about a node. -// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. -type NodeResources struct { - // Capacity represents the available resources of a node - // +optional - Capacity ResourceList -} - -// ResourceName is the name identifying various resources in a ResourceList. -type ResourceName string - -// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, -// with the -, _, and . characters allowed anywhere, except the first or last character. -// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than -// camel case, separating compound words. -// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. -const ( - // CPU, in cores. (500m = .5 cores) - ResourceCPU ResourceName = "cpu" - // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceMemory ResourceName = "memory" - // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) - ResourceStorage ResourceName = "storage" - // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. - ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" - // Number of Pods that may be running on this Node: see ResourcePods -) - -const ( - // Namespace prefix for opaque counted resources (alpha). - ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" -) - -// ResourceList is a set of (resource name, quantity) pairs. -type ResourceList map[ResourceName]resource.Quantity - -// +genclient=true -// +nonNamespaced=true - -// Node is a worker node in Kubernetes -// The name of the node according to etcd is in ObjectMeta.Name. -type Node struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a node. - // +optional - Spec NodeSpec - - // Status describes the current status of a Node - // +optional - Status NodeStatus -} - -// NodeList is a list of nodes. 
-type NodeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Node -} - -// NamespaceSpec describes the attributes on a Namespace -type NamespaceSpec struct { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage - Finalizers []FinalizerName -} - -// FinalizerName is the name identifying a finalizer during namespace lifecycle. -type FinalizerName string - -// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or -// in metav1. -const ( - FinalizerKubernetes FinalizerName = "kubernetes" -) - -// NamespaceStatus is information about the current status of a Namespace. -type NamespaceStatus struct { - // Phase is the current lifecycle phase of the namespace. - // +optional - Phase NamespacePhase -} - -type NamespacePhase string - -// These are the valid phases of a namespace. -const ( - // NamespaceActive means the namespace is available for use in the system - NamespaceActive NamespacePhase = "Active" - // NamespaceTerminating means the namespace is undergoing graceful termination - NamespaceTerminating NamespacePhase = "Terminating" -) - -// +genclient=true -// +nonNamespaced=true - -// A namespace provides a scope for Names. -// Use of multiple namespaces is optional -type Namespace struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of the Namespace. - // +optional - Spec NamespaceSpec - - // Status describes the current status of a Namespace - // +optional - Status NamespaceStatus -} - -// NamespaceList is a list of Namespaces. -type NamespaceList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Namespace -} - -// Binding ties one object to another - for example, a pod is bound to a node by a scheduler. -type Binding struct { - metav1.TypeMeta - // ObjectMeta describes the object that is being bound. - // +optional - metav1.ObjectMeta - - // Target is the object to bind to. - Target ObjectReference -} - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -type Preconditions struct { - // Specifies the target UID. - // +optional - UID *types.UID -} - -// DeletionPropagation decides whether and how garbage collection will be performed. -type DeletionPropagation string - -const ( - // Orphans the dependents. - DeletePropagationOrphan DeletionPropagation = "Orphan" - // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background. - DeletePropagationBackground DeletionPropagation = "Background" - // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store. - // API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp. - // This policy is cascading, i.e., the dependents will be deleted with Foreground. - DeletePropagationForeground DeletionPropagation = "Foreground" -) - -// DeleteOptions may be provided when deleting an API object -// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. -type DeleteOptions struct { - metav1.TypeMeta - - // Optional duration in seconds before the object should be deleted. Value must be non-negative integer. - // The value zero indicates delete immediately. If this value is nil, the default grace period for the - // specified type will be used. 
- // +optional - GracePeriodSeconds *int64 - - // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be - // returned. - // +optional - Preconditions *Preconditions - - // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. - // Should the dependent objects be orphaned. If true/false, the "orphan" - // finalizer will be added to/removed from the object's finalizers list. - // Either this field or PropagationPolicy may be set, but not both. - // +optional - OrphanDependents *bool - - // Whether and how garbage collection will be performed. - // Either this field or OrphanDependents may be set, but not both. - // The default policy is decided by the existing finalizer set in the - // metadata.finalizers and the resource-specific default policy. - // +optional - PropagationPolicy *DeletionPropagation -} - -// ListOptions is the query options to a standard REST list call, and has future support for -// watch calls. -// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. -type ListOptions struct { - metav1.TypeMeta - - // A selector based on labels - LabelSelector labels.Selector - // A selector based on fields - FieldSelector fields.Selector - // If true, watch for changes to this list - Watch bool - // When specified with a watch call, shows changes that occur after that particular version of a resource. - // Defaults to changes from the beginning of history. - // When specified for list: - // - if unset, then the result is returned from remote storage based on quorum-read flag; - // - if it's 0, then we simply return what we currently have in cache, no guarantee; - // - if set to non zero, then the result is at least as fresh as given rv. - ResourceVersion string - // Timeout for the list/watch call. - TimeoutSeconds *int64 -} - -// PodLogOptions is the query options for a Pod's logs REST call -type PodLogOptions struct { - metav1.TypeMeta - - // Container for which to return logs - Container string - // If true, follow the logs for the pod - Follow bool - // If true, return previous terminated container logs - Previous bool - // A relative time in seconds before the current time from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceSeconds *int64 - // An RFC3339 timestamp from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceTime *metav1.Time - // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line - // of log output. - Timestamps bool - // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime - TailLines *int64 - // If set, the number of bytes to read from the server before terminating the - // log output. This may not display a complete final line of logging, and may return - // slightly more or slightly less than the specified limit. 
- LimitBytes *int64 -} - -// PodAttachOptions is the query options to a Pod's remote attach call -// TODO: merge w/ PodExecOptions below for stdin, stdout, etc -type PodAttachOptions struct { - metav1.TypeMeta - - // Stdin if true indicates that stdin is to be redirected for the attach call - // +optional - Stdin bool - - // Stdout if true indicates that stdout is to be redirected for the attach call - // +optional - Stdout bool - - // Stderr if true indicates that stderr is to be redirected for the attach call - // +optional - Stderr bool - - // TTY if true indicates that a tty will be allocated for the attach call - // +optional - TTY bool - - // Container to attach to. - // +optional - Container string -} - -// PodExecOptions is the query options to a Pod's remote exec call -type PodExecOptions struct { - metav1.TypeMeta - - // Stdin if true indicates that stdin is to be redirected for the exec call - Stdin bool - - // Stdout if true indicates that stdout is to be redirected for the exec call - Stdout bool - - // Stderr if true indicates that stderr is to be redirected for the exec call - Stderr bool - - // TTY if true indicates that a tty will be allocated for the exec call - TTY bool - - // Container in which to execute the command. - Container string - - // Command is the remote command to execute; argv array; not executed within a shell. - Command []string -} - -// PodPortForwardOptions is the query options to a Pod's port forward call -type PodPortForwardOptions struct { - metav1.TypeMeta - - // The list of ports to forward - // +optional - Ports []int32 -} - -// PodProxyOptions is the query options to a Pod's proxy call -type PodProxyOptions struct { - metav1.TypeMeta - - // Path is the URL path to use for the current proxy request - Path string -} - -// NodeProxyOptions is the query options to a Node's proxy call -type NodeProxyOptions struct { - metav1.TypeMeta - - // Path is the URL path to use for the current proxy request - Path string -} - -// ServiceProxyOptions is the query options to a Service's proxy call. -type ServiceProxyOptions struct { - metav1.TypeMeta - - // Path is the part of URLs that include service endpoints, suffixes, - // and parameters to use for the current proxy request to service. - // For example, the whole request URL is - // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. - // Path is _search?q=user:kimchy. - Path string -} - -// ObjectReference contains enough information to let you inspect or modify the referred object. -type ObjectReference struct { - // +optional - Kind string - // +optional - Namespace string - // +optional - Name string - // +optional - UID types.UID - // +optional - APIVersion string - // +optional - ResourceVersion string - - // Optional. If referring to a piece of an object instead of an entire object, this string - // should contain information to identify the sub-object. For example, if the object - // reference is to a container within a pod, this would take on a value like: - // "spec.containers{name}" (where "name" refers to the name of the container that triggered - // the event) or if no container name is specified "spec.containers[2]" (container with - // index 2 in this pod). This syntax is chosen only to have some well-defined way of - // referencing a part of an object. - // TODO: this design is not final and this field is subject to change in the future. 
- // +optional - FieldPath string -} - -// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. -type LocalObjectReference struct { - //TODO: Add other useful fields. apiVersion, kind, uid? - Name string -} - -type SerializedReference struct { - metav1.TypeMeta - // +optional - Reference ObjectReference -} - -type EventSource struct { - // Component from which the event is generated. - // +optional - Component string - // Node name on which the event is generated. - // +optional - Host string -} - -// Valid values for event types (new types could be added in future) -const ( - // Information only and will not cause any problems - EventTypeNormal string = "Normal" - // These events are to warn that something might go wrong - EventTypeWarning string = "Warning" -) - -// +genclient=true - -// Event is a report of an event somewhere in the cluster. -// TODO: Decide whether to store these separately or with the object they apply to. -type Event struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Required. The object that this event is about. - // +optional - InvolvedObject ObjectReference - - // Optional; this should be a short, machine understandable string that gives the reason - // for this event being generated. For example, if the event is reporting that a container - // can't start, the Reason might be "ImageNotFound". - // TODO: provide exact specification for format. - // +optional - Reason string - - // Optional. A human-readable description of the status of this operation. - // TODO: decide on maximum length. - // +optional - Message string - - // Optional. The component reporting this event. Should be a short machine understandable string. - // +optional - Source EventSource - - // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) - // +optional - FirstTimestamp metav1.Time - - // The time at which the most recent occurrence of this event was recorded. - // +optional - LastTimestamp metav1.Time - - // The number of times this event has occurred. - // +optional - Count int32 - - // Type of this event (Normal, Warning), new types could be added in the future. - // +optional - Type string -} - -// EventList is a list of events. -type EventList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Event -} - -// List holds a list of objects, which may not be known by the server. -type List struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []runtime.Object -} - -// A type of object that is limited -type LimitType string - -const ( - // Limit that applies to all pods in a namespace - LimitTypePod LimitType = "Pod" - // Limit that applies to all containers in a namespace - LimitTypeContainer LimitType = "Container" - // Limit that applies to all persistent volume claims in a namespace - LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" -) - -// LimitRangeItem defines a min/max usage limit for any resource that matches on kind -type LimitRangeItem struct { - // Type of resource that this limit applies to - // +optional - Type LimitType - // Max usage constraints on this kind by resource name - // +optional - Max ResourceList - // Min usage constraints on this kind by resource name - // +optional - Min ResourceList - // Default resource requirement limit value by resource name. - // +optional - Default ResourceList - // DefaultRequest resource requirement request value by resource name. 
- // +optional - DefaultRequest ResourceList - // MaxLimitRequestRatio represents the max burst value for the named resource - // +optional - MaxLimitRequestRatio ResourceList -} - -// LimitRangeSpec defines a min/max usage limit for resources that match on kind -type LimitRangeSpec struct { - // Limits is the list of LimitRangeItem objects that are enforced - Limits []LimitRangeItem -} - -// +genclient=true - -// LimitRange sets resource usage limits for each kind of resource in a Namespace -type LimitRange struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the limits enforced - // +optional - Spec LimitRangeSpec -} - -// LimitRangeList is a list of LimitRange items. -type LimitRangeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is a list of LimitRange objects - Items []LimitRange -} - -// The following identify resource constants for Kubernetes object types -const ( - // Pods, number - ResourcePods ResourceName = "pods" - // Services, number - ResourceServices ResourceName = "services" - // ReplicationControllers, number - ResourceReplicationControllers ResourceName = "replicationcontrollers" - // ResourceQuotas, number - ResourceQuotas ResourceName = "resourcequotas" - // ResourceSecrets, number - ResourceSecrets ResourceName = "secrets" - // ResourceConfigMaps, number - ResourceConfigMaps ResourceName = "configmaps" - // ResourcePersistentVolumeClaims, number - ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" - // ResourceServicesNodePorts, number - ResourceServicesNodePorts ResourceName = "services.nodeports" - // ResourceServicesLoadBalancers, number - ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" - // CPU request, in cores. (500m = .5 cores) - ResourceRequestsCPU ResourceName = "requests.cpu" - // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceRequestsMemory ResourceName = "requests.memory" - // Storage request, in bytes - ResourceRequestsStorage ResourceName = "requests.storage" - // CPU limit, in cores. (500m = .5 cores) - ResourceLimitsCPU ResourceName = "limits.cpu" - // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceLimitsMemory ResourceName = "limits.memory" -) - -// A ResourceQuotaScope defines a filter that must match each object tracked by a quota -type ResourceQuotaScope string - -const ( - // Match all pod objects where spec.activeDeadlineSeconds - ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" - // Match all pod objects where !spec.activeDeadlineSeconds - ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" - // Match all pod objects that have best effort quality of service - ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" - // Match all pod objects that do not have best effort quality of service - ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" -) - -// ResourceQuotaSpec defines the desired hard limits to enforce for Quota -type ResourceQuotaSpec struct { - // Hard is the set of desired hard limits for each named resource - // +optional - Hard ResourceList - // A collection of filters that must match each object tracked by a quota. - // If not specified, the quota matches all objects. 
- // +optional - Scopes []ResourceQuotaScope -} - -// ResourceQuotaStatus defines the enforced hard limits and observed use -type ResourceQuotaStatus struct { - // Hard is the set of enforced hard limits for each named resource - // +optional - Hard ResourceList - // Used is the current observed total usage of the resource in the namespace - // +optional - Used ResourceList -} - -// +genclient=true - -// ResourceQuota sets aggregate quota restrictions enforced per namespace -type ResourceQuota struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired quota - // +optional - Spec ResourceQuotaSpec - - // Status defines the actual enforced quota and its current usage - // +optional - Status ResourceQuotaStatus -} - -// ResourceQuotaList is a list of ResourceQuota items -type ResourceQuotaList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is a list of ResourceQuota objects - Items []ResourceQuota -} - -// +genclient=true - -// Secret holds secret data of a certain type. The total bytes of the values in -// the Data field must be less than MaxSecretSize bytes. -type Secret struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN - // or leading dot followed by valid DNS_SUBDOMAIN. - // The serialized form of the secret data is a base64 encoded string, - // representing the arbitrary (possibly non-string) data value here. - // +optional - Data map[string][]byte - - // Used to facilitate programmatic handling of secret data. - // +optional - Type SecretType -} - -const MaxSecretSize = 1 * 1024 * 1024 - -type SecretType string - -const ( - // SecretTypeOpaque is the default; arbitrary user-defined data - SecretTypeOpaque SecretType = "Opaque" - - // SecretTypeServiceAccountToken contains a token that identifies a service account to the API - // - // Required fields: - // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies - // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies - // - Secret.Data["token"] - a token that identifies the service account to the API - SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" - - // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountNameKey = "kubernetes.io/service-account.name" - // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountUIDKey = "kubernetes.io/service-account.uid" - // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets - ServiceAccountTokenKey = "token" - // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets - ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" - // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets - ServiceAccountRootCAKey = "ca.crt" - // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls - ServiceAccountNamespaceKey = "namespace" - - // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg - // - // Required fields: - // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file - SecretTypeDockercfg SecretType = 
"kubernetes.io/dockercfg" - - // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets - DockerConfigKey = ".dockercfg" - - // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json - // - // Required fields: - // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file - SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" - - // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets - DockerConfigJsonKey = ".dockerconfigjson" - - // SecretTypeBasicAuth contains data needed for basic authentication. - // - // Required at least one of fields: - // - Secret.Data["username"] - username used for authentication - // - Secret.Data["password"] - password or token needed for authentication - SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" - - // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets - BasicAuthUsernameKey = "username" - // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets - BasicAuthPasswordKey = "password" - - // SecretTypeSSHAuth contains data needed for SSH authetication. - // - // Required field: - // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication - SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" - - // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets - SSHAuthPrivateKey = "ssh-privatekey" - - // SecretTypeTLS contains information about a TLS client or server secret. It - // is primarily used with TLS termination of the Ingress resource, but may be - // used in other types. - // - // Required fields: - // - Secret.Data["tls.key"] - TLS private key. - // Secret.Data["tls.crt"] - TLS certificate. - // TODO: Consider supporting different formats, specifying CA/destinationCA. - SecretTypeTLS SecretType = "kubernetes.io/tls" - - // TLSCertKey is the key for tls certificates in a TLS secret. - TLSCertKey = "tls.crt" - // TLSPrivateKeyKey is the key for the private key field in a TLS secret. - TLSPrivateKeyKey = "tls.key" -) - -type SecretList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Secret -} - -// +genclient=true - -// ConfigMap holds configuration data for components or applications to consume. -type ConfigMap struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Data contains the configuration data. - // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. - // +optional - Data map[string]string -} - -// ConfigMapList is a resource containing a list of ConfigMap objects. -type ConfigMapList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is the list of ConfigMaps. - Items []ConfigMap -} - -// These constants are for remote command execution and port forwarding and are -// used by both the client side and server side components. -// -// This is probably not the ideal place for them, but it didn't seem worth it -// to create pkg/exec and pkg/portforward just to contain a single file with -// constants in it. Suggestions for more appropriate alternatives are -// definitely welcome! 
-const ( - // Enable stdin for remote command execution - ExecStdinParam = "input" - // Enable stdout for remote command execution - ExecStdoutParam = "output" - // Enable stderr for remote command execution - ExecStderrParam = "error" - // Enable TTY for remote command execution - ExecTTYParam = "tty" - // Command to run for remote command execution - ExecCommandParamm = "command" - - // Name of header that specifies stream type - StreamType = "streamType" - // Value for streamType header for stdin stream - StreamTypeStdin = "stdin" - // Value for streamType header for stdout stream - StreamTypeStdout = "stdout" - // Value for streamType header for stderr stream - StreamTypeStderr = "stderr" - // Value for streamType header for data stream - StreamTypeData = "data" - // Value for streamType header for error stream - StreamTypeError = "error" - // Value for streamType header for terminal resize stream - StreamTypeResize = "resize" - - // Name of header that specifies the port being forwarded - PortHeader = "port" - // Name of header that specifies a request ID used to associate the error - // and data streams for a single forwarded connection - PortForwardRequestIDHeader = "requestID" -) - -// Type and constants for component health validation. -type ComponentConditionType string - -// These are the valid conditions for the component. -const ( - ComponentHealthy ComponentConditionType = "Healthy" -) - -type ComponentCondition struct { - Type ComponentConditionType - Status ConditionStatus - // +optional - Message string - // +optional - Error string -} - -// +genclient=true -// +nonNamespaced=true - -// ComponentStatus (and ComponentStatusList) holds the cluster validation info. -type ComponentStatus struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // +optional - Conditions []ComponentCondition -} - -type ComponentStatusList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ComponentStatus -} - -// SecurityContext holds security configuration that will be applied to a container. -// Some fields are present in both SecurityContext and PodSecurityContext. When both -// are set, the values in SecurityContext take precedence. -type SecurityContext struct { - // The capabilities to add/drop when running containers. - // Defaults to the default set of capabilities granted by the container runtime. - // +optional - Capabilities *Capabilities - // Run container in privileged mode. - // Processes in privileged containers are essentially equivalent to root on the host. - // Defaults to false. - // +optional - Privileged *bool - // The SELinux context to be applied to the container. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - SELinuxOptions *SELinuxOptions - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsUser *int64 - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. 
- // If unset or false, no such validation will be performed. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsNonRoot *bool - // The read-only root filesystem allows you to restrict the locations that an application can write - // files to, ensuring the persistent data can only be written to mounts. - // +optional - ReadOnlyRootFilesystem *bool -} - -// SELinuxOptions are the labels to be applied to the container. -type SELinuxOptions struct { - // SELinux user label - // +optional - User string - // SELinux role label - // +optional - Role string - // SELinux type label - // +optional - Type string - // SELinux level label. - // +optional - Level string -} - -// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record -// the global allocation state of the cluster. The schema of Range and Data generic, in that Range -// should be a string representation of the inputs to a range (for instance, for IP allocation it -// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a -// binary range. Consumers should use annotations to record additional information (schema version, -// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation -// of the cluster, thus the object is less strongly typed than most. -type RangeAllocation struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - // A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or - // port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define - // a start and end unless there is an implicit end. - Range string - // A byte array representing the serialized state of a range allocation. Additional clarifiers on - // the type or format of data should be represented with annotations. For IP allocations, this is - // represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing - // a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4). - Data []byte -} - -const ( - // "default-scheduler" is the name of default scheduler. - DefaultSchedulerName = "default-scheduler" - - // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule - // corresponding to every RequiredDuringScheduling affinity rule. - // When the --hard-pod-affinity-weight scheduler flag is not specified, - // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. - DefaultHardPodAffinitySymmetricWeight int = 1 - - // When the --failure-domains scheduler flag is not specified, - // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity. 
- DefaultFailureDomains string = metav1.LabelHostname + "," + metav1.LabelZoneFailureDomain + "," + metav1.LabelZoneRegion -) diff --git a/vendor/k8s.io/client-go/pkg/api/v1/OWNERS b/vendor/k8s.io/client-go/pkg/api/v1/OWNERS deleted file mode 100755 index fdb84b24a..000000000 --- a/vendor/k8s.io/client-go/pkg/api/v1/OWNERS +++ /dev/null @@ -1,41 +0,0 @@ -reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- yujuhong -- brendandburns -- derekwaynecarr -- caesarxuchao -- vishh -- mikedanese -- liggitt -- nikhiljindal -- bprashanth -- gmarek -- erictune -- davidopp -- pmorie -- sttts -- kargakis -- dchen1107 -- saad-ali -- zmerlynn -- luxas -- janetkuo -- justinsb -- roberthbailey -- ncdc -- timstclair -- eparis -- timothysc -- piosz -- jsafrane -- dims -- errordeveloper -- madhusudancs -- krousey -- jayunit100 -- rootfs -- markturansky diff --git a/vendor/k8s.io/client-go/pkg/api/v1/conversion.go b/vendor/k8s.io/client-go/pkg/api/v1/conversion.go deleted file mode 100644 index 041517819..000000000 --- a/vendor/k8s.io/client-go/pkg/api/v1/conversion.go +++ /dev/null @@ -1,785 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "encoding/json" - "fmt" - "reflect" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/apis/extensions" -) - -// This is a "fast-path" that avoids reflection for common types. It focuses on the objects that are -// converted the most in the cluster. 
-// TODO: generate one of these for every external API group - this is to prove the impact -func addFastPathConversionFuncs(scheme *runtime.Scheme) error { - scheme.AddGenericConversionFunc(func(objA, objB interface{}, s conversion.Scope) (bool, error) { - switch a := objA.(type) { - case *Pod: - switch b := objB.(type) { - case *api.Pod: - return true, Convert_v1_Pod_To_api_Pod(a, b, s) - } - case *api.Pod: - switch b := objB.(type) { - case *Pod: - return true, Convert_api_Pod_To_v1_Pod(a, b, s) - } - - case *Event: - switch b := objB.(type) { - case *api.Event: - return true, Convert_v1_Event_To_api_Event(a, b, s) - } - case *api.Event: - switch b := objB.(type) { - case *Event: - return true, Convert_api_Event_To_v1_Event(a, b, s) - } - - case *ReplicationController: - switch b := objB.(type) { - case *api.ReplicationController: - return true, Convert_v1_ReplicationController_To_api_ReplicationController(a, b, s) - } - case *api.ReplicationController: - switch b := objB.(type) { - case *ReplicationController: - return true, Convert_api_ReplicationController_To_v1_ReplicationController(a, b, s) - } - - case *Node: - switch b := objB.(type) { - case *api.Node: - return true, Convert_v1_Node_To_api_Node(a, b, s) - } - case *api.Node: - switch b := objB.(type) { - case *Node: - return true, Convert_api_Node_To_v1_Node(a, b, s) - } - - case *Namespace: - switch b := objB.(type) { - case *api.Namespace: - return true, Convert_v1_Namespace_To_api_Namespace(a, b, s) - } - case *api.Namespace: - switch b := objB.(type) { - case *Namespace: - return true, Convert_api_Namespace_To_v1_Namespace(a, b, s) - } - - case *Service: - switch b := objB.(type) { - case *api.Service: - return true, Convert_v1_Service_To_api_Service(a, b, s) - } - case *api.Service: - switch b := objB.(type) { - case *Service: - return true, Convert_api_Service_To_v1_Service(a, b, s) - } - - case *Endpoints: - switch b := objB.(type) { - case *api.Endpoints: - return true, Convert_v1_Endpoints_To_api_Endpoints(a, b, s) - } - case *api.Endpoints: - switch b := objB.(type) { - case *Endpoints: - return true, Convert_api_Endpoints_To_v1_Endpoints(a, b, s) - } - - case *metav1.WatchEvent: - switch b := objB.(type) { - case *metav1.InternalEvent: - return true, metav1.Convert_versioned_Event_to_versioned_InternalEvent(a, b, s) - } - case *metav1.InternalEvent: - switch b := objB.(type) { - case *metav1.WatchEvent: - return true, metav1.Convert_versioned_InternalEvent_to_versioned_Event(a, b, s) - } - } - return false, nil - }) - return nil -} - -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_api_Pod_To_v1_Pod, - Convert_api_PodSpec_To_v1_PodSpec, - Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, - Convert_api_ServiceSpec_To_v1_ServiceSpec, - Convert_v1_Pod_To_api_Pod, - Convert_v1_PodSpec_To_api_PodSpec, - Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, - Convert_v1_Secret_To_api_Secret, - Convert_v1_ServiceSpec_To_api_ServiceSpec, - Convert_v1_ResourceList_To_api_ResourceList, - Convert_v1_ReplicationController_to_extensions_ReplicaSet, - Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec, - Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus, - Convert_extensions_ReplicaSet_to_v1_ReplicationController, - Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec, - Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus, - ) - if 
err != nil { - return err - } - - // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. - for _, k := range []string{ - "Endpoints", - "ResourceQuota", - "PersistentVolumeClaim", - "Service", - "ServiceAccount", - "ConfigMap", - } { - kind := k // don't close over range variables - err = scheme.AddFieldLabelConversionFunc("v1", kind, - func(label, value string) (string, string, error) { - switch label { - case "metadata.namespace", - "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label %q not supported for %q", label, kind) - } - }, - ) - if err != nil { - return err - } - } - - // Add field conversion funcs. - err = scheme.AddFieldLabelConversionFunc("v1", "Pod", - func(label, value string) (string, string, error) { - switch label { - case "metadata.annotations", - "metadata.labels", - "metadata.name", - "metadata.namespace", - "spec.nodeName", - "spec.restartPolicy", - "spec.serviceAccountName", - "status.phase", - "status.podIP": - return label, value, nil - // This is for backwards compatibility with old v1 clients which send spec.host - case "spec.host": - return "spec.nodeName", value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }, - ) - if err != nil { - return err - } - err = scheme.AddFieldLabelConversionFunc("v1", "Node", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name": - return label, value, nil - case "spec.unschedulable": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }, - ) - if err != nil { - return err - } - err = scheme.AddFieldLabelConversionFunc("v1", "ReplicationController", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name", - "metadata.namespace", - "status.replicas": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) - if err != nil { - return err - } - err = scheme.AddFieldLabelConversionFunc("v1", "PersistentVolume", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }, - ) - if err != nil { - return err - } - if err := AddFieldLabelConversionsForEvent(scheme); err != nil { - return err - } - if err := AddFieldLabelConversionsForNamespace(scheme); err != nil { - return err - } - if err := AddFieldLabelConversionsForSecret(scheme); err != nil { - return err - } - return nil -} - -func Convert_v1_ReplicationController_to_extensions_ReplicaSet(in *ReplicationController, out *extensions.ReplicaSet, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(in *ReplicationControllerSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { - out.Replicas = *in.Replicas - if in.Selector != nil { - metav1.Convert_map_to_unversioned_LabelSelector(&in.Selector, out.Selector, s) - } - if in.Template != nil { - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, &out.Template, s); err != nil { - 
return err - } - } - return nil -} - -func Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(in *ReplicationControllerStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration - return nil -} - -func Convert_extensions_ReplicaSet_to_v1_ReplicationController(in *extensions.ReplicaSet, out *ReplicationController, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { - fieldErr, ok := err.(*field.Error) - if !ok { - return err - } - if out.Annotations == nil { - out.Annotations = make(map[string]string) - } - out.Annotations[api.NonConvertibleAnnotationPrefix+"/"+fieldErr.Field] = reflect.ValueOf(fieldErr.BadValue).String() - } - if err := Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(in *extensions.ReplicaSetSpec, out *ReplicationControllerSpec, s conversion.Scope) error { - out.Replicas = new(int32) - *out.Replicas = in.Replicas - out.MinReadySeconds = in.MinReadySeconds - var invalidErr error - if in.Selector != nil { - invalidErr = metav1.Convert_unversioned_LabelSelector_to_map(in.Selector, &out.Selector, s) - } - out.Template = new(PodTemplateSpec) - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil { - return err - } - return invalidErr -} - -func Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(in *extensions.ReplicaSetStatus, out *ReplicationControllerStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration - return nil -} - -func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *ReplicationControllerSpec, s conversion.Scope) error { - out.Replicas = &in.Replicas - out.MinReadySeconds = in.MinReadySeconds - out.Selector = in.Selector - if in.Template != nil { - out.Template = new(PodTemplateSpec) - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = in.Selector - if in.Template != nil { - out.Template = new(api.PodTemplateSpec) - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { - if err := autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s); err != nil { - return err - } - - if old := out.Annotations; old != nil { - out.Annotations = 
make(map[string]string, len(old)) - for k, v := range old { - out.Annotations[k] = v - } - } - if len(out.Status.InitContainerStatuses) > 0 { - if out.Annotations == nil { - out.Annotations = make(map[string]string) - } - value, err := json.Marshal(out.Status.InitContainerStatuses) - if err != nil { - return err - } - out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) - out.Annotations[PodInitContainerStatusesBetaAnnotationKey] = string(value) - } else { - delete(out.Annotations, PodInitContainerStatusesAnnotationKey) - delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) - } - return nil -} - -func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { - // TODO: sometime after we move init container to stable, remove these conversions - // If there is a beta annotation, copy to alpha key. - // See commit log for PR #31026 for why we do this. - if valueBeta, okBeta := in.Annotations[PodInitContainerStatusesBetaAnnotationKey]; okBeta { - in.Annotations[PodInitContainerStatusesAnnotationKey] = valueBeta - } - // Move the annotation to the internal repr. field - if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { - var values []ContainerStatus - if err := json.Unmarshal([]byte(value), &values); err != nil { - return err - } - // Conversion from external to internal version exists more to - // satisfy the needs of the decoder than it does to be a general - // purpose tool. And Decode always creates an intermediate object - // to decode to. Thus the caller of UnsafeConvertToVersion is - // taking responsibility to ensure mutation of in is not exposed - // back to the caller. - in.Status.InitContainerStatuses = values - } - - if err := autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s); err != nil { - return err - } - if len(out.Annotations) > 0 { - old := out.Annotations - out.Annotations = make(map[string]string, len(old)) - for k, v := range old { - out.Annotations[k] = v - } - delete(out.Annotations, PodInitContainerStatusesAnnotationKey) - delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) - } - return nil -} - -func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { - if err := autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil { - return err - } - - // TODO: sometime after we move init container to stable, remove these conversions. - if old := out.Annotations; old != nil { - out.Annotations = make(map[string]string, len(old)) - for k, v := range old { - out.Annotations[k] = v - } - } - if len(out.Spec.InitContainers) > 0 { - if out.Annotations == nil { - out.Annotations = make(map[string]string) - } - value, err := json.Marshal(out.Spec.InitContainers) - if err != nil { - return err - } - out.Annotations[PodInitContainersAnnotationKey] = string(value) - out.Annotations[PodInitContainersBetaAnnotationKey] = string(value) - } else { - delete(out.Annotations, PodInitContainersAnnotationKey) - delete(out.Annotations, PodInitContainersBetaAnnotationKey) - } - return nil -} - -func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { - // TODO: sometime after we move init container to stable, remove these conversions - // If there is a beta annotation, copy to alpha key. - // See commit log for PR #31026 for why we do this. 
- if valueBeta, okBeta := in.Annotations[PodInitContainersBetaAnnotationKey]; okBeta { - in.Annotations[PodInitContainersAnnotationKey] = valueBeta - } - // Move the annotation to the internal repr. field - if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok { - var values []Container - if err := json.Unmarshal([]byte(value), &values); err != nil { - return err - } - // Conversion from external to internal version exists more to - // satisfy the needs of the decoder than it does to be a general - // purpose tool. And Decode always creates an intermediate object - // to decode to. Thus the caller of UnsafeConvertToVersion is - // taking responsibility to ensure mutation of in is not exposed - // back to the caller. - in.Spec.InitContainers = values - - // Call defaulters explicitly until annotations are removed - for i := range in.Spec.InitContainers { - c := &in.Spec.InitContainers[i] - SetDefaults_Container(c) - } - } - - if err := autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s); err != nil { - return err - } - if len(out.Annotations) > 0 { - old := out.Annotations - out.Annotations = make(map[string]string, len(old)) - for k, v := range old { - out.Annotations[k] = v - } - delete(out.Annotations, PodInitContainersAnnotationKey) - delete(out.Annotations, PodInitContainersBetaAnnotationKey) - } - return nil -} - -// The following two PodSpec conversions are done here to support ServiceAccount -// as an alias for ServiceAccountName. -func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error { - if err := autoConvert_api_PodSpec_To_v1_PodSpec(in, out, s); err != nil { - return err - } - - // DeprecatedServiceAccount is an alias for ServiceAccountName. - out.DeprecatedServiceAccount = in.ServiceAccountName - - if in.SecurityContext != nil { - // the host namespace fields have to be handled here for backward compatibility - // with v1.0.0 - out.HostPID = in.SecurityContext.HostPID - out.HostNetwork = in.SecurityContext.HostNetwork - out.HostIPC = in.SecurityContext.HostIPC - } - - return nil -} - -func Convert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversion.Scope) error { - if err := autoConvert_v1_PodSpec_To_api_PodSpec(in, out, s); err != nil { - return err - } - - // We support DeprecatedServiceAccount as an alias for ServiceAccountName. - // If both are specified, ServiceAccountName (the new field) wins. 
- if in.ServiceAccountName == "" { - out.ServiceAccountName = in.DeprecatedServiceAccount - } - - // the host namespace fields have to be handled specially for backward compatibility - // with v1.0.0 - if out.SecurityContext == nil { - out.SecurityContext = new(api.PodSecurityContext) - } - out.SecurityContext.HostNetwork = in.HostNetwork - out.SecurityContext.HostPID = in.HostPID - out.SecurityContext.HostIPC = in.HostIPC - - return nil -} - -func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error { - if err := autoConvert_api_Pod_To_v1_Pod(in, out, s); err != nil { - return err - } - - // TODO: sometime after we move init container to stable, remove these conversions - if len(out.Spec.InitContainers) > 0 || len(out.Status.InitContainerStatuses) > 0 { - old := out.Annotations - out.Annotations = make(map[string]string, len(old)) - for k, v := range old { - out.Annotations[k] = v - } - delete(out.Annotations, PodInitContainersAnnotationKey) - delete(out.Annotations, PodInitContainersBetaAnnotationKey) - delete(out.Annotations, PodInitContainerStatusesAnnotationKey) - delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) - } - if len(out.Spec.InitContainers) > 0 { - value, err := json.Marshal(out.Spec.InitContainers) - if err != nil { - return err - } - out.Annotations[PodInitContainersAnnotationKey] = string(value) - out.Annotations[PodInitContainersBetaAnnotationKey] = string(value) - } - if len(out.Status.InitContainerStatuses) > 0 { - value, err := json.Marshal(out.Status.InitContainerStatuses) - if err != nil { - return err - } - out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) - out.Annotations[PodInitContainerStatusesBetaAnnotationKey] = string(value) - } - - return nil -} - -func Convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error { - // If there is a beta annotation, copy to alpha key. - // See commit log for PR #31026 for why we do this. - if valueBeta, okBeta := in.Annotations[PodInitContainersBetaAnnotationKey]; okBeta { - in.Annotations[PodInitContainersAnnotationKey] = valueBeta - } - // TODO: sometime after we move init container to stable, remove these conversions - // Move the annotation to the internal repr. field - if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok { - var values []Container - if err := json.Unmarshal([]byte(value), &values); err != nil { - return err - } - // Conversion from external to internal version exists more to - // satisfy the needs of the decoder than it does to be a general - // purpose tool. And Decode always creates an intermediate object - // to decode to. Thus the caller of UnsafeConvertToVersion is - // taking responsibility to ensure mutation of in is not exposed - // back to the caller. - in.Spec.InitContainers = values - // Call defaulters explicitly until annotations are removed - for i := range in.Spec.InitContainers { - c := &in.Spec.InitContainers[i] - SetDefaults_Container(c) - } - } - // If there is a beta annotation, copy to alpha key. - // See commit log for PR #31026 for why we do this. 
- if valueBeta, okBeta := in.Annotations[PodInitContainerStatusesBetaAnnotationKey]; okBeta { - in.Annotations[PodInitContainerStatusesAnnotationKey] = valueBeta - } - if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { - var values []ContainerStatus - if err := json.Unmarshal([]byte(value), &values); err != nil { - return err - } - // Conversion from external to internal version exists more to - // satisfy the needs of the decoder than it does to be a general - // purpose tool. And Decode always creates an intermediate object - // to decode to. Thus the caller of UnsafeConvertToVersion is - // taking responsibility to ensure mutation of in is not exposed - // back to the caller. - in.Status.InitContainerStatuses = values - } - - if err := autoConvert_v1_Pod_To_api_Pod(in, out, s); err != nil { - return err - } - if len(out.Annotations) > 0 { - old := out.Annotations - out.Annotations = make(map[string]string, len(old)) - for k, v := range old { - out.Annotations[k] = v - } - delete(out.Annotations, PodInitContainersAnnotationKey) - delete(out.Annotations, PodInitContainersBetaAnnotationKey) - delete(out.Annotations, PodInitContainerStatusesAnnotationKey) - delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) - } - return nil -} - -func Convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error { - if err := autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in, out, s); err != nil { - return err - } - // Publish both externalIPs and deprecatedPublicIPs fields in v1. - out.DeprecatedPublicIPs = in.ExternalIPs - return nil -} - -func Convert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error { - if err := autoConvert_v1_Secret_To_api_Secret(in, out, s); err != nil { - return err - } - - // StringData overwrites Data - if len(in.StringData) > 0 { - if out.Data == nil { - out.Data = map[string][]byte{} - } - for k, v := range in.StringData { - out.Data[k] = []byte(v) - } - } - - return nil -} - -func Convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { - if err := autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in, out, s); err != nil { - return err - } - // Prefer the legacy deprecatedPublicIPs field, if provided. 
- if len(in.DeprecatedPublicIPs) > 0 { - out.ExternalIPs = in.DeprecatedPublicIPs - } - return nil -} - -func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *PodSecurityContext, s conversion.Scope) error { - out.SupplementalGroups = in.SupplementalGroups - if in.SELinuxOptions != nil { - out.SELinuxOptions = new(SELinuxOptions) - if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - out.RunAsUser = in.RunAsUser - out.RunAsNonRoot = in.RunAsNonRoot - out.FSGroup = in.FSGroup - return nil -} - -func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { - out.SupplementalGroups = in.SupplementalGroups - if in.SELinuxOptions != nil { - out.SELinuxOptions = new(api.SELinuxOptions) - if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - out.RunAsUser = in.RunAsUser - out.RunAsNonRoot = in.RunAsNonRoot - out.FSGroup = in.FSGroup - return nil -} - -// +k8s:conversion-fn=copy-only -func Convert_v1_ResourceList_To_api_ResourceList(in *ResourceList, out *api.ResourceList, s conversion.Scope) error { - if *in == nil { - return nil - } - if *out == nil { - *out = make(api.ResourceList, len(*in)) - } - for key, val := range *in { - // Moved to defaults - // TODO(#18538): We round up resource values to milli scale to maintain API compatibility. - // In the future, we should instead reject values that need rounding. - // const milliScale = -3 - // val.RoundUp(milliScale) - - (*out)[api.ResourceName(key)] = val - } - return nil -} - -func AddFieldLabelConversionsForEvent(scheme *runtime.Scheme) error { - return scheme.AddFieldLabelConversionFunc("v1", "Event", - func(label, value string) (string, string, error) { - switch label { - case "involvedObject.kind", - "involvedObject.namespace", - "involvedObject.name", - "involvedObject.uid", - "involvedObject.apiVersion", - "involvedObject.resourceVersion", - "involvedObject.fieldPath", - "reason", - "source", - "type", - "metadata.namespace", - "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) -} - -func AddFieldLabelConversionsForNamespace(scheme *runtime.Scheme) error { - return scheme.AddFieldLabelConversionFunc("v1", "Namespace", - func(label, value string) (string, string, error) { - switch label { - case "status.phase", - "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) -} - -func AddFieldLabelConversionsForSecret(scheme *runtime.Scheme) error { - return scheme.AddFieldLabelConversionFunc("v1", "Secret", - func(label, value string) (string, string, error) { - switch label { - case "type", - "metadata.namespace", - "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) -} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/defaults.go b/vendor/k8s.io/client-go/pkg/api/v1/defaults.go deleted file mode 100644 index 17b0deb01..000000000 --- a/vendor/k8s.io/client-go/pkg/api/v1/defaults.go +++ /dev/null @@ -1,389 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/pkg/util" - "k8s.io/client-go/pkg/util/parsers" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - RegisterDefaults(scheme) - return scheme.AddDefaultingFuncs( - SetDefaults_PodExecOptions, - SetDefaults_PodAttachOptions, - SetDefaults_ReplicationController, - SetDefaults_Volume, - SetDefaults_ContainerPort, - SetDefaults_Container, - SetDefaults_ServiceSpec, - SetDefaults_Pod, - SetDefaults_PodSpec, - SetDefaults_Probe, - SetDefaults_SecretVolumeSource, - SetDefaults_ConfigMapVolumeSource, - SetDefaults_DownwardAPIVolumeSource, - SetDefaults_ProjectedVolumeSource, - SetDefaults_Secret, - SetDefaults_PersistentVolume, - SetDefaults_PersistentVolumeClaim, - SetDefaults_ISCSIVolumeSource, - SetDefaults_Endpoints, - SetDefaults_HTTPGetAction, - SetDefaults_NamespaceStatus, - SetDefaults_Node, - SetDefaults_NodeStatus, - SetDefaults_ObjectFieldSelector, - SetDefaults_LimitRangeItem, - SetDefaults_ConfigMap, - SetDefaults_RBDVolumeSource, - SetDefaults_ResourceList, - ) -} - -func SetDefaults_ResourceList(obj *ResourceList) { - for key, val := range *obj { - // TODO(#18538): We round up resource values to milli scale to maintain API compatibility. - // In the future, we should instead reject values that need rounding. 
- const milliScale = -3 - val.RoundUp(milliScale) - - (*obj)[ResourceName(key)] = val - } -} - -func SetDefaults_PodExecOptions(obj *PodExecOptions) { - obj.Stdout = true - obj.Stderr = true -} -func SetDefaults_PodAttachOptions(obj *PodAttachOptions) { - obj.Stdout = true - obj.Stderr = true -} -func SetDefaults_ReplicationController(obj *ReplicationController) { - var labels map[string]string - if obj.Spec.Template != nil { - labels = obj.Spec.Template.Labels - } - // TODO: support templates defined elsewhere when we support them in the API - if labels != nil { - if len(obj.Spec.Selector) == 0 { - obj.Spec.Selector = labels - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - if obj.Spec.Replicas == nil { - obj.Spec.Replicas = new(int32) - *obj.Spec.Replicas = 1 - } -} -func SetDefaults_Volume(obj *Volume) { - if util.AllPtrFieldsNil(&obj.VolumeSource) { - obj.VolumeSource = VolumeSource{ - EmptyDir: &EmptyDirVolumeSource{}, - } - } -} -func SetDefaults_ContainerPort(obj *ContainerPort) { - if obj.Protocol == "" { - obj.Protocol = ProtocolTCP - } -} -func SetDefaults_Container(obj *Container) { - if obj.ImagePullPolicy == "" { - // Ignore error and assume it has been validated elsewhere - _, tag, _, _ := parsers.ParseImageName(obj.Image) - - // Check image tag - if tag == "latest" { - obj.ImagePullPolicy = PullAlways - } else { - obj.ImagePullPolicy = PullIfNotPresent - } - } - if obj.TerminationMessagePath == "" { - obj.TerminationMessagePath = TerminationMessagePathDefault - } - if obj.TerminationMessagePolicy == "" { - obj.TerminationMessagePolicy = TerminationMessageReadFile - } -} -func SetDefaults_ServiceSpec(obj *ServiceSpec) { - if obj.SessionAffinity == "" { - obj.SessionAffinity = ServiceAffinityNone - } - if obj.Type == "" { - obj.Type = ServiceTypeClusterIP - } - for i := range obj.Ports { - sp := &obj.Ports[i] - if sp.Protocol == "" { - sp.Protocol = ProtocolTCP - } - if sp.TargetPort == intstr.FromInt(0) || sp.TargetPort == intstr.FromString("") { - sp.TargetPort = intstr.FromInt(int(sp.Port)) - } - } -} -func SetDefaults_Pod(obj *Pod) { - // If limits are specified, but requests are not, default requests to limits - // This is done here rather than a more specific defaulting pass on ResourceRequirements - // because we only want this defaulting semantic to take place on a Pod and not a PodTemplate - for i := range obj.Spec.Containers { - // set requests to limits if requests are not specified, but limits are - if obj.Spec.Containers[i].Resources.Limits != nil { - if obj.Spec.Containers[i].Resources.Requests == nil { - obj.Spec.Containers[i].Resources.Requests = make(ResourceList) - } - for key, value := range obj.Spec.Containers[i].Resources.Limits { - if _, exists := obj.Spec.Containers[i].Resources.Requests[key]; !exists { - obj.Spec.Containers[i].Resources.Requests[key] = *(value.Copy()) - } - } - } - } - for i := range obj.Spec.InitContainers { - if obj.Spec.InitContainers[i].Resources.Limits != nil { - if obj.Spec.InitContainers[i].Resources.Requests == nil { - obj.Spec.InitContainers[i].Resources.Requests = make(ResourceList) - } - for key, value := range obj.Spec.InitContainers[i].Resources.Limits { - if _, exists := obj.Spec.InitContainers[i].Resources.Requests[key]; !exists { - obj.Spec.InitContainers[i].Resources.Requests[key] = *(value.Copy()) - } - } - } - } -} -func SetDefaults_PodSpec(obj *PodSpec) { - if obj.DNSPolicy == "" { - obj.DNSPolicy = DNSClusterFirst - } - if obj.RestartPolicy == "" { - obj.RestartPolicy = RestartPolicyAlways - } - if 
obj.HostNetwork { - defaultHostNetworkPorts(&obj.Containers) - defaultHostNetworkPorts(&obj.InitContainers) - } - if obj.SecurityContext == nil { - obj.SecurityContext = &PodSecurityContext{} - } - if obj.TerminationGracePeriodSeconds == nil { - period := int64(DefaultTerminationGracePeriodSeconds) - obj.TerminationGracePeriodSeconds = &period - } - if obj.SchedulerName == "" { - obj.SchedulerName = DefaultSchedulerName - } -} -func SetDefaults_Probe(obj *Probe) { - if obj.TimeoutSeconds == 0 { - obj.TimeoutSeconds = 1 - } - if obj.PeriodSeconds == 0 { - obj.PeriodSeconds = 10 - } - if obj.SuccessThreshold == 0 { - obj.SuccessThreshold = 1 - } - if obj.FailureThreshold == 0 { - obj.FailureThreshold = 3 - } -} -func SetDefaults_SecretVolumeSource(obj *SecretVolumeSource) { - if obj.DefaultMode == nil { - perm := int32(SecretVolumeSourceDefaultMode) - obj.DefaultMode = &perm - } -} -func SetDefaults_ConfigMapVolumeSource(obj *ConfigMapVolumeSource) { - if obj.DefaultMode == nil { - perm := int32(ConfigMapVolumeSourceDefaultMode) - obj.DefaultMode = &perm - } -} -func SetDefaults_DownwardAPIVolumeSource(obj *DownwardAPIVolumeSource) { - if obj.DefaultMode == nil { - perm := int32(DownwardAPIVolumeSourceDefaultMode) - obj.DefaultMode = &perm - } -} -func SetDefaults_Secret(obj *Secret) { - if obj.Type == "" { - obj.Type = SecretTypeOpaque - } -} -func SetDefaults_ProjectedVolumeSource(obj *ProjectedVolumeSource) { - if obj.DefaultMode == nil { - perm := int32(ProjectedVolumeSourceDefaultMode) - obj.DefaultMode = &perm - } -} -func SetDefaults_PersistentVolume(obj *PersistentVolume) { - if obj.Status.Phase == "" { - obj.Status.Phase = VolumePending - } - if obj.Spec.PersistentVolumeReclaimPolicy == "" { - obj.Spec.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimRetain - } -} -func SetDefaults_PersistentVolumeClaim(obj *PersistentVolumeClaim) { - if obj.Status.Phase == "" { - obj.Status.Phase = ClaimPending - } -} -func SetDefaults_ISCSIVolumeSource(obj *ISCSIVolumeSource) { - if obj.ISCSIInterface == "" { - obj.ISCSIInterface = "default" - } -} -func SetDefaults_AzureDiskVolumeSource(obj *AzureDiskVolumeSource) { - if obj.CachingMode == nil { - obj.CachingMode = new(AzureDataDiskCachingMode) - *obj.CachingMode = AzureDataDiskCachingNone - } - if obj.FSType == nil { - obj.FSType = new(string) - *obj.FSType = "ext4" - } - if obj.ReadOnly == nil { - obj.ReadOnly = new(bool) - *obj.ReadOnly = false - } -} -func SetDefaults_Endpoints(obj *Endpoints) { - for i := range obj.Subsets { - ss := &obj.Subsets[i] - for i := range ss.Ports { - ep := &ss.Ports[i] - if ep.Protocol == "" { - ep.Protocol = ProtocolTCP - } - } - } -} -func SetDefaults_HTTPGetAction(obj *HTTPGetAction) { - if obj.Path == "" { - obj.Path = "/" - } - if obj.Scheme == "" { - obj.Scheme = URISchemeHTTP - } -} -func SetDefaults_NamespaceStatus(obj *NamespaceStatus) { - if obj.Phase == "" { - obj.Phase = NamespaceActive - } -} -func SetDefaults_Node(obj *Node) { - if obj.Spec.ExternalID == "" { - obj.Spec.ExternalID = obj.Name - } -} -func SetDefaults_NodeStatus(obj *NodeStatus) { - if obj.Allocatable == nil && obj.Capacity != nil { - obj.Allocatable = make(ResourceList, len(obj.Capacity)) - for key, value := range obj.Capacity { - obj.Allocatable[key] = *(value.Copy()) - } - obj.Allocatable = obj.Capacity - } -} -func SetDefaults_ObjectFieldSelector(obj *ObjectFieldSelector) { - if obj.APIVersion == "" { - obj.APIVersion = "v1" - } -} -func SetDefaults_LimitRangeItem(obj *LimitRangeItem) { - // for container limits, we apply 
default values - if obj.Type == LimitTypeContainer { - - if obj.Default == nil { - obj.Default = make(ResourceList) - } - if obj.DefaultRequest == nil { - obj.DefaultRequest = make(ResourceList) - } - - // If a default limit is unspecified, but the max is specified, default the limit to the max - for key, value := range obj.Max { - if _, exists := obj.Default[key]; !exists { - obj.Default[key] = *(value.Copy()) - } - } - // If a default limit is specified, but the default request is not, default request to limit - for key, value := range obj.Default { - if _, exists := obj.DefaultRequest[key]; !exists { - obj.DefaultRequest[key] = *(value.Copy()) - } - } - // If a default request is not specified, but the min is provided, default request to the min - for key, value := range obj.Min { - if _, exists := obj.DefaultRequest[key]; !exists { - obj.DefaultRequest[key] = *(value.Copy()) - } - } - } -} -func SetDefaults_ConfigMap(obj *ConfigMap) { - if obj.Data == nil { - obj.Data = make(map[string]string) - } -} - -// With host networking default all container ports to host ports. -func defaultHostNetworkPorts(containers *[]Container) { - for i := range *containers { - for j := range (*containers)[i].Ports { - if (*containers)[i].Ports[j].HostPort == 0 { - (*containers)[i].Ports[j].HostPort = (*containers)[i].Ports[j].ContainerPort - } - } - } -} - -func SetDefaults_RBDVolumeSource(obj *RBDVolumeSource) { - if obj.RBDPool == "" { - obj.RBDPool = "rbd" - } - if obj.RadosUser == "" { - obj.RadosUser = "admin" - } - if obj.Keyring == "" { - obj.Keyring = "/etc/ceph/keyring" - } -} - -func SetDefaults_ScaleIOVolumeSource(obj *ScaleIOVolumeSource) { - if obj.ProtectionDomain == "" { - obj.ProtectionDomain = "default" - } - if obj.StoragePool == "" { - obj.StoragePool = "default" - } - if obj.StorageMode == "" { - obj.StorageMode = "ThinProvisioned" - } - if obj.FSType == "" { - obj.FSType = "xfs" - } -} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/generate.go b/vendor/k8s.io/client-go/pkg/api/v1/generate.go deleted file mode 100644 index b8c44e4c7..000000000 --- a/vendor/k8s.io/client-go/pkg/api/v1/generate.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - - utilrand "k8s.io/apimachinery/pkg/util/rand" -) - -// NameGenerator generates names for objects. Some backends may have more information -// available to guide selection of new names and this interface hides those details. -type NameGenerator interface { - // GenerateName generates a valid name from the base name, adding a random suffix to the - // the base. If base is valid, the returned name must also be valid. The generator is - // responsible for knowing the maximum valid name length. - GenerateName(base string) string -} - -// GenerateName will resolve the object name of the provided ObjectMeta to a generated version if -// necessary. 
It expects that validation for ObjectMeta has already completed (that Base is a -// valid name) and that the NameGenerator generates a name that is also valid. -func GenerateName(u NameGenerator, meta *ObjectMeta) { - if len(meta.GenerateName) == 0 || len(meta.Name) != 0 { - return - } - meta.Name = u.GenerateName(meta.GenerateName) -} - -// simpleNameGenerator generates random names. -type simpleNameGenerator struct{} - -// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics -// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes -// name (63 characters) -var SimpleNameGenerator NameGenerator = simpleNameGenerator{} - -const ( - // TODO: make this flexible for non-core resources with alternate naming rules. - maxNameLength = 63 - randomLength = 5 - maxGeneratedNameLength = maxNameLength - randomLength -) - -func (simpleNameGenerator) GenerateName(base string) string { - if len(base) > maxGeneratedNameLength { - base = base[:maxGeneratedNameLength] - } - return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)) -} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/helpers.go b/vendor/k8s.io/client-go/pkg/api/v1/helpers.go deleted file mode 100644 index 01f4ef470..000000000 --- a/vendor/k8s.io/client-go/pkg/api/v1/helpers.go +++ /dev/null @@ -1,632 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "encoding/json" - "fmt" - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - - "k8s.io/client-go/pkg/api" -) - -// IsOpaqueIntResourceName returns true if the resource name has the opaque -// integer resource prefix. -func IsOpaqueIntResourceName(name ResourceName) bool { - return strings.HasPrefix(string(name), ResourceOpaqueIntPrefix) -} - -// OpaqueIntResourceName returns a ResourceName with the canonical opaque -// integer prefix prepended. If the argument already has the prefix, it is -// returned unmodified. -func OpaqueIntResourceName(name string) ResourceName { - if IsOpaqueIntResourceName(ResourceName(name)) { - return ResourceName(name) - } - return ResourceName(fmt.Sprintf("%s%s", api.ResourceOpaqueIntPrefix, name)) -} - -// NewDeleteOptions returns a DeleteOptions indicating the resource should -// be deleted within the specified grace period. Use zero to indicate -// immediate deletion. If you would prefer to use the default grace period, -// use &metav1.DeleteOptions{} directly. -func NewDeleteOptions(grace int64) *DeleteOptions { - return &DeleteOptions{GracePeriodSeconds: &grace} -} - -// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set. 
-func NewPreconditionDeleteOptions(uid string) *DeleteOptions { - u := types.UID(uid) - p := Preconditions{UID: &u} - return &DeleteOptions{Preconditions: &p} -} - -// NewUIDPreconditions returns a Preconditions with UID set. -func NewUIDPreconditions(uid string) *Preconditions { - u := types.UID(uid) - return &Preconditions{UID: &u} -} - -// this function aims to check if the service's ClusterIP is set or not -// the objective is not to perform validation here -func IsServiceIPSet(service *Service) bool { - return service.Spec.ClusterIP != ClusterIPNone && service.Spec.ClusterIP != "" -} - -// this function aims to check if the service's cluster IP is requested or not -func IsServiceIPRequested(service *Service) bool { - // ExternalName services are CNAME aliases to external ones. Ignore the IP. - if service.Spec.Type == ServiceTypeExternalName { - return false - } - return service.Spec.ClusterIP == "" -} - -var standardFinalizers = sets.NewString( - string(FinalizerKubernetes), - metav1.FinalizerOrphanDependents, -) - -func IsStandardFinalizerName(str string) bool { - return standardFinalizers.Has(str) -} - -// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, -// only if they do not already exist -func AddToNodeAddresses(addresses *[]NodeAddress, addAddresses ...NodeAddress) { - for _, add := range addAddresses { - exists := false - for _, existing := range *addresses { - if existing.Address == add.Address && existing.Type == add.Type { - exists = true - break - } - } - if !exists { - *addresses = append(*addresses, add) - } - } -} - -// TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusEqual(l, r *LoadBalancerStatus) bool { - return ingressSliceEqual(l.Ingress, r.Ingress) -} - -func ingressSliceEqual(lhs, rhs []LoadBalancerIngress) bool { - if len(lhs) != len(rhs) { - return false - } - for i := range lhs { - if !ingressEqual(&lhs[i], &rhs[i]) { - return false - } - } - return true -} - -func ingressEqual(lhs, rhs *LoadBalancerIngress) bool { - if lhs.IP != rhs.IP { - return false - } - if lhs.Hostname != rhs.Hostname { - return false - } - return true -} - -// TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusDeepCopy(lb *LoadBalancerStatus) *LoadBalancerStatus { - c := &LoadBalancerStatus{} - c.Ingress = make([]LoadBalancerIngress, len(lb.Ingress)) - for i := range lb.Ingress { - c.Ingress[i] = lb.Ingress[i] - } - return c -} - -// GetAccessModesAsString returns a string representation of an array of access modes. -// modes, when present, are always in the same order: RWO,ROX,RWX. 
-func GetAccessModesAsString(modes []PersistentVolumeAccessMode) string { - modes = removeDuplicateAccessModes(modes) - modesStr := []string{} - if containsAccessMode(modes, ReadWriteOnce) { - modesStr = append(modesStr, "RWO") - } - if containsAccessMode(modes, ReadOnlyMany) { - modesStr = append(modesStr, "ROX") - } - if containsAccessMode(modes, ReadWriteMany) { - modesStr = append(modesStr, "RWX") - } - return strings.Join(modesStr, ",") -} - -// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString -func GetAccessModesFromString(modes string) []PersistentVolumeAccessMode { - strmodes := strings.Split(modes, ",") - accessModes := []PersistentVolumeAccessMode{} - for _, s := range strmodes { - s = strings.Trim(s, " ") - switch { - case s == "RWO": - accessModes = append(accessModes, ReadWriteOnce) - case s == "ROX": - accessModes = append(accessModes, ReadOnlyMany) - case s == "RWX": - accessModes = append(accessModes, ReadWriteMany) - } - } - return accessModes -} - -// removeDuplicateAccessModes returns an array of access modes without any duplicates -func removeDuplicateAccessModes(modes []PersistentVolumeAccessMode) []PersistentVolumeAccessMode { - accessModes := []PersistentVolumeAccessMode{} - for _, m := range modes { - if !containsAccessMode(accessModes, m) { - accessModes = append(accessModes, m) - } - } - return accessModes -} - -func containsAccessMode(modes []PersistentVolumeAccessMode, mode PersistentVolumeAccessMode) bool { - for _, m := range modes { - if m == mode { - return true - } - } - return false -} - -// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements -// labels.Selector. -func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.Selector, error) { - if len(nsm) == 0 { - return labels.Nothing(), nil - } - selector := labels.NewSelector() - for _, expr := range nsm { - var op selection.Operator - switch expr.Operator { - case NodeSelectorOpIn: - op = selection.In - case NodeSelectorOpNotIn: - op = selection.NotIn - case NodeSelectorOpExists: - op = selection.Exists - case NodeSelectorOpDoesNotExist: - op = selection.DoesNotExist - case NodeSelectorOpGt: - op = selection.GreaterThan - case NodeSelectorOpLt: - op = selection.LessThan - default: - return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) - } - r, err := labels.NewRequirement(expr.Key, op, expr.Values) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - } - return selector, nil -} - -const ( - // SeccompPodAnnotationKey represents the key of a seccomp profile applied - // to all containers of a pod. - SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" - - // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied - // to one container of a pod. - SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" - - // CreatedByAnnotation represents the key used to store the spec(json) - // used to create the resource. - CreatedByAnnotation = "kubernetes.io/created-by" - - // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) - // in the Annotations of a Node. - PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" - - // SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure - // container of a pod. 
The annotation value is a comma separated list of sysctl_name=value - // key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by - // the kubelet. Pods with other sysctls will fail to launch. - SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls" - - // UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure - // container of a pod. The annotation value is a comma separated list of sysctl_name=value - // key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly - // namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use - // is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet - // will fail to launch. - UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls" - - // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache - // an object (e.g. secret, config map) before fetching it again from apiserver. - // This annotation can be attached to node. - ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" - - // AffinityAnnotationKey represents the key of affinity data (json serialized) - // in the Annotations of a Pod. - // TODO: remove when alpha support for affinity is removed - AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" -) - -// Tries to add a toleration to annotations list. Returns true if something was updated -// false otherwise. -func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) { - podTolerations := pod.Spec.Tolerations - - var newTolerations []Toleration - updated := false - for i := range podTolerations { - if toleration.MatchToleration(&podTolerations[i]) { - if api.Semantic.DeepEqual(toleration, podTolerations[i]) { - return false, nil - } - newTolerations = append(newTolerations, *toleration) - updated = true - continue - } - - newTolerations = append(newTolerations, podTolerations[i]) - } - - if !updated { - newTolerations = append(newTolerations, *toleration) - } - - pod.Spec.Tolerations = newTolerations - return true, nil -} - -// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by , -// if the two tolerations have same combination, regard as they match. -// TODO: uniqueness check for tolerations in api validations. -func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { - return t.Key == tolerationToMatch.Key && - t.Effect == tolerationToMatch.Effect && - t.Operator == tolerationToMatch.Operator && - t.Value == tolerationToMatch.Value -} - -// ToleratesTaint checks if the toleration tolerates the taint. -// The matching follows the rules below: -// (1) Empty toleration.effect means to match all taint effects, -// otherwise taint effect must equal to toleration.effect. -// (2) If toleration.operator is 'Exists', it means to match all taint values. -// (3) Empty toleration.key means to match all taint keys. -// If toleration.key is empty, toleration.operator must be 'Exists'; -// this combination means to match all taint values and all taint keys. 
-func (t *Toleration) ToleratesTaint(taint *Taint) bool { - if len(t.Effect) > 0 && t.Effect != taint.Effect { - return false - } - - if len(t.Key) > 0 && t.Key != taint.Key { - return false - } - - // TODO: Use proper defaulting when Toleration becomes a field of PodSpec - switch t.Operator { - // empty operator means Equal - case "", TolerationOpEqual: - return t.Value == taint.Value - case TolerationOpExists: - return true - default: - return false - } -} - -// TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations. -func TolerationsTolerateTaint(tolerations []Toleration, taint *Taint) bool { - for i := range tolerations { - if tolerations[i].ToleratesTaint(taint) { - return true - } - } - return false -} - -type taintsFilterFunc func(*Taint) bool - -// TolerationsTolerateTaintsWithFilter checks if given tolerations tolerates -// all the taints that apply to the filter in given taint list. -func TolerationsTolerateTaintsWithFilter(tolerations []Toleration, taints []Taint, applyFilter taintsFilterFunc) bool { - if len(taints) == 0 { - return true - } - - for i := range taints { - if applyFilter != nil && !applyFilter(&taints[i]) { - continue - } - - if !TolerationsTolerateTaint(tolerations, &taints[i]) { - return false - } - } - - return true -} - -// DeleteTaintsByKey removes all the taints that have the same key to given taintKey -func DeleteTaintsByKey(taints []Taint, taintKey string) ([]Taint, bool) { - newTaints := []Taint{} - deleted := false - for i := range taints { - if taintKey == taints[i].Key { - deleted = true - continue - } - newTaints = append(newTaints, taints[i]) - } - return newTaints, deleted -} - -// DeleteTaint removes all the the taints that have the same key and effect to given taintToDelete. -func DeleteTaint(taints []Taint, taintToDelete *Taint) ([]Taint, bool) { - newTaints := []Taint{} - deleted := false - for i := range taints { - if taintToDelete.MatchTaint(&taints[i]) { - deleted = true - continue - } - newTaints = append(newTaints, taints[i]) - } - return newTaints, deleted -} - -// Returns true and list of Tolerations matching all Taints if all are tolerated, or false otherwise. -func GetMatchingTolerations(taints []Taint, tolerations []Toleration) (bool, []Toleration) { - if len(taints) == 0 { - return true, []Toleration{} - } - if len(tolerations) == 0 && len(taints) > 0 { - return false, []Toleration{} - } - result := []Toleration{} - for i := range taints { - tolerated := false - for j := range tolerations { - if tolerations[j].ToleratesTaint(&taints[i]) { - result = append(result, tolerations[j]) - tolerated = true - break - } - } - if !tolerated { - return false, []Toleration{} - } - } - return true, result -} - -// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, -// if the two taints have same key:effect, regard as they match. -func (t *Taint) MatchTaint(taintToMatch *Taint) bool { - return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect -} - -// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. 
-func (t *Taint) ToString() string { - if len(t.Value) == 0 { - return fmt.Sprintf("%v:%v", t.Key, t.Effect) - } - return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) -} - -func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (AvoidPods, error) { - var avoidPods AvoidPods - if len(annotations) > 0 && annotations[PreferAvoidPodsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[PreferAvoidPodsAnnotationKey]), &avoidPods) - if err != nil { - return avoidPods, err - } - } - return avoidPods, nil -} - -// SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls -// and a slice of unsafe Sysctls. This is only a convenience wrapper around -// SysctlsFromPodAnnotation. -func SysctlsFromPodAnnotations(a map[string]string) ([]Sysctl, []Sysctl, error) { - safe, err := SysctlsFromPodAnnotation(a[SysctlsPodAnnotationKey]) - if err != nil { - return nil, nil, err - } - unsafe, err := SysctlsFromPodAnnotation(a[UnsafeSysctlsPodAnnotationKey]) - if err != nil { - return nil, nil, err - } - - return safe, unsafe, nil -} - -// SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls. -func SysctlsFromPodAnnotation(annotation string) ([]Sysctl, error) { - if len(annotation) == 0 { - return nil, nil - } - - kvs := strings.Split(annotation, ",") - sysctls := make([]Sysctl, len(kvs)) - for i, kv := range kvs { - cs := strings.Split(kv, "=") - if len(cs) != 2 || len(cs[0]) == 0 { - return nil, fmt.Errorf("sysctl %q not of the format sysctl_name=value", kv) - } - sysctls[i].Name = cs[0] - sysctls[i].Value = cs[1] - } - return sysctls, nil -} - -// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. -func PodAnnotationsFromSysctls(sysctls []Sysctl) string { - if len(sysctls) == 0 { - return "" - } - - kvs := make([]string, len(sysctls)) - for i := range sysctls { - kvs[i] = fmt.Sprintf("%s=%s", sysctls[i].Name, sysctls[i].Value) - } - return strings.Join(kvs, ",") -} - -type Sysctl struct { - Name string `protobuf:"bytes,1,opt,name=name"` - Value string `protobuf:"bytes,2,opt,name=value"` -} - -// NodeResources is an object for conveying resource information about a node. -// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. -type NodeResources struct { - // Capacity represents the available resources of a node - Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` -} - -// Tries to add a taint to annotations list. Returns a new copy of updated Node and true if something was updated -// false otherwise. -func AddOrUpdateTaint(node *Node, taint *Taint) (*Node, bool, error) { - objCopy, err := api.Scheme.DeepCopy(node) - if err != nil { - return nil, false, err - } - newNode := objCopy.(*Node) - nodeTaints := newNode.Spec.Taints - - var newTaints []Taint - updated := false - for i := range nodeTaints { - if taint.MatchTaint(&nodeTaints[i]) { - if api.Semantic.DeepEqual(taint, nodeTaints[i]) { - return newNode, false, nil - } - newTaints = append(newTaints, *taint) - updated = true - continue - } - - newTaints = append(newTaints, nodeTaints[i]) - } - - if !updated { - newTaints = append(newTaints, *taint) - } - - newNode.Spec.Taints = newTaints - return newNode, true, nil -} - -func TaintExists(taints []Taint, taintToFind *Taint) bool { - for _, taint := range taints { - if taint.MatchTaint(taintToFind) { - return true - } - } - return false -} - -// Tries to remove a taint from annotations list. 
Returns a new copy of updated Node and true if something was updated -// false otherwise. -func RemoveTaint(node *Node, taint *Taint) (*Node, bool, error) { - objCopy, err := api.Scheme.DeepCopy(node) - if err != nil { - return nil, false, err - } - newNode := objCopy.(*Node) - nodeTaints := newNode.Spec.Taints - if len(nodeTaints) == 0 { - return newNode, false, nil - } - - if !TaintExists(nodeTaints, taint) { - return newNode, false, nil - } - - newTaints, _ := DeleteTaint(nodeTaints, taint) - newNode.Spec.Taints = newTaints - return newNode, true, nil -} - -// GetAffinityFromPodAnnotations gets the json serialized affinity data from Pod.Annotations -// and converts it to the Affinity type in api. -// TODO: remove when alpha support for affinity is removed -func GetAffinityFromPodAnnotations(annotations map[string]string) (*Affinity, error) { - if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" { - var affinity Affinity - err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity) - if err != nil { - return nil, err - } - return &affinity, nil - } - return nil, nil -} - -// GetPersistentVolumeClass returns StorageClassName. -func GetPersistentVolumeClass(volume *PersistentVolume) string { - // Use beta annotation first - if class, found := volume.Annotations[BetaStorageClassAnnotation]; found { - return class - } - - return volume.Spec.StorageClassName -} - -// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was -// requested, it returns "". -func GetPersistentVolumeClaimClass(claim *PersistentVolumeClaim) string { - // Use beta annotation first - if class, found := claim.Annotations[BetaStorageClassAnnotation]; found { - return class - } - - if claim.Spec.StorageClassName != nil { - return *claim.Spec.StorageClassName - } - - return "" -} - -// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. -func PersistentVolumeClaimHasClass(claim *PersistentVolumeClaim) bool { - // Use beta annotation first - if _, found := claim.Annotations[BetaStorageClassAnnotation]; found { - return true - } - - if claim.Spec.StorageClassName != nil { - return true - } - - return false -} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/meta.go b/vendor/k8s.io/client-go/pkg/api/v1/meta.go deleted file mode 100644 index bb1ae2ff7..000000000 --- a/vendor/k8s.io/client-go/pkg/api/v1/meta.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -func (obj *ObjectMeta) GetObjectMeta() metav1.Object { return obj } - -// Namespace implements metav1.Object for any object with an ObjectMeta typed field. Allows -// fast, direct access to metadata fields for API objects. 
-func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } -func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } -func (meta *ObjectMeta) GetName() string { return meta.Name } -func (meta *ObjectMeta) SetName(name string) { meta.Name = name } -func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } -func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } -func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } -func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } -func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } -func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } -func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } -func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } -func (meta *ObjectMeta) GetCreationTimestamp() metav1.Time { return meta.CreationTimestamp } -func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp metav1.Time) { - meta.CreationTimestamp = creationTimestamp -} -func (meta *ObjectMeta) GetDeletionTimestamp() *metav1.Time { return meta.DeletionTimestamp } -func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *metav1.Time) { - meta.DeletionTimestamp = deletionTimestamp -} -func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } -func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } -func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } -func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } -func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } -func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } - -func (meta *ObjectMeta) GetOwnerReferences() []metav1.OwnerReference { - ret := make([]metav1.OwnerReference, len(meta.OwnerReferences)) - for i := 0; i < len(meta.OwnerReferences); i++ { - ret[i].Kind = meta.OwnerReferences[i].Kind - ret[i].Name = meta.OwnerReferences[i].Name - ret[i].UID = meta.OwnerReferences[i].UID - ret[i].APIVersion = meta.OwnerReferences[i].APIVersion - if meta.OwnerReferences[i].Controller != nil { - value := *meta.OwnerReferences[i].Controller - ret[i].Controller = &value - } - if meta.OwnerReferences[i].BlockOwnerDeletion != nil { - value := *meta.OwnerReferences[i].BlockOwnerDeletion - ret[i].BlockOwnerDeletion = &value - } - } - return ret -} - -func (meta *ObjectMeta) SetOwnerReferences(references []metav1.OwnerReference) { - newReferences := make([]metav1.OwnerReference, len(references)) - for i := 0; i < len(references); i++ { - newReferences[i].Kind = references[i].Kind - newReferences[i].Name = references[i].Name - newReferences[i].UID = references[i].UID - newReferences[i].APIVersion = references[i].APIVersion - if references[i].Controller != nil { - value := *references[i].Controller - newReferences[i].Controller = &value - } - if references[i].BlockOwnerDeletion != nil { - value := *references[i].BlockOwnerDeletion - newReferences[i].BlockOwnerDeletion = &value - } - } - meta.OwnerReferences = newReferences -} - -func (meta *ObjectMeta) GetClusterName() string { - return meta.ClusterName -} -func (meta *ObjectMeta) SetClusterName(clusterName string) { - meta.ClusterName = clusterName -} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/resource_helpers.go 
b/vendor/k8s.io/client-go/pkg/api/v1/resource_helpers.go deleted file mode 100644 index ec8423276..000000000 --- a/vendor/k8s.io/client-go/pkg/api/v1/resource_helpers.go +++ /dev/null @@ -1,257 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "time" - - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Returns string version of ResourceName. -func (self ResourceName) String() string { - return string(self) -} - -// Returns the CPU limit if specified. -func (self *ResourceList) Cpu() *resource.Quantity { - if val, ok := (*self)[ResourceCPU]; ok { - return &val - } - return &resource.Quantity{Format: resource.DecimalSI} -} - -// Returns the Memory limit if specified. -func (self *ResourceList) Memory() *resource.Quantity { - if val, ok := (*self)[ResourceMemory]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} -} - -func (self *ResourceList) Pods() *resource.Quantity { - if val, ok := (*self)[ResourcePods]; ok { - return &val - } - return &resource.Quantity{} -} - -func (self *ResourceList) NvidiaGPU() *resource.Quantity { - if val, ok := (*self)[ResourceNvidiaGPU]; ok { - return &val - } - return &resource.Quantity{} -} - -func GetContainerStatus(statuses []ContainerStatus, name string) (ContainerStatus, bool) { - for i := range statuses { - if statuses[i].Name == name { - return statuses[i], true - } - } - return ContainerStatus{}, false -} - -func GetExistingContainerStatus(statuses []ContainerStatus, name string) ContainerStatus { - for i := range statuses { - if statuses[i].Name == name { - return statuses[i] - } - } - return ContainerStatus{} -} - -// IsPodAvailable returns true if a pod is available; false otherwise. -// Precondition for an available pod is that it must be ready. On top -// of that, there are two cases when a pod can be considered available: -// 1. minReadySeconds == 0, or -// 2. LastTransitionTime (is set) + minReadySeconds < current time -func IsPodAvailable(pod *Pod, minReadySeconds int32, now metav1.Time) bool { - if !IsPodReady(pod) { - return false - } - - c := GetPodReadyCondition(pod.Status) - minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second - if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { - return true - } - return false -} - -// IsPodReady returns true if a pod is ready; false otherwise. -func IsPodReady(pod *Pod) bool { - return IsPodReadyConditionTrue(pod.Status) -} - -// IsPodReady retruns true if a pod is ready; false otherwise. -func IsPodReadyConditionTrue(status PodStatus) bool { - condition := GetPodReadyCondition(status) - return condition != nil && condition.Status == ConditionTrue -} - -// Extracts the pod ready condition from the given status and returns that. -// Returns nil if the condition is not present. 
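The deleted IsPodAvailable treats a pod as available only if it is ready and either minReadySeconds is zero or the Ready condition's LastTransitionTime plus minReadySeconds already lies in the past. A compact sketch of just that time check, with a local readyCondition struct and plain time.Time standing in for the metav1 types (isAvailable is an illustrative name, not a client-go API):

    package main

    import (
        "fmt"
        "time"
    )

    // readyCondition is a stand-in for the pod's Ready condition.
    type readyCondition struct {
        Ready              bool
        LastTransitionTime time.Time
    }

    // isAvailable mirrors the deleted IsPodAvailable check: the pod must be
    // ready, and it must have been ready for at least minReadySeconds
    // (unless minReadySeconds is zero).
    func isAvailable(c readyCondition, minReadySeconds int32, now time.Time) bool {
        if !c.Ready {
            return false
        }
        if minReadySeconds == 0 {
            return true
        }
        minReady := time.Duration(minReadySeconds) * time.Second
        return !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReady).Before(now)
    }

    func main() {
        now := time.Now()
        justReady := readyCondition{Ready: true, LastTransitionTime: now.Add(-2 * time.Second)}
        longReady := readyCondition{Ready: true, LastTransitionTime: now.Add(-2 * time.Minute)}

        fmt.Println(isAvailable(justReady, 30, now)) // false: ready for only 2s
        fmt.Println(isAvailable(longReady, 30, now)) // true: ready for 2m > 30s
        fmt.Println(isAvailable(justReady, 0, now))  // true: no minReadySeconds gate
    }
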
-func GetPodReadyCondition(status PodStatus) *PodCondition { - _, condition := GetPodCondition(&status, PodReady) - return condition -} - -// GetPodCondition extracts the provided condition from the given status and returns that. -// Returns nil and -1 if the condition is not present, and the index of the located condition. -func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) { - if status == nil { - return -1, nil - } - for i := range status.Conditions { - if status.Conditions[i].Type == conditionType { - return i, &status.Conditions[i] - } - } - return -1, nil -} - -// GetNodeCondition extracts the provided condition from the given status and returns that. -// Returns nil and -1 if the condition is not present, and the index of the located condition. -func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, *NodeCondition) { - if status == nil { - return -1, nil - } - for i := range status.Conditions { - if status.Conditions[i].Type == conditionType { - return i, &status.Conditions[i] - } - } - return -1, nil -} - -// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the -// status has changed. -// Returns true if pod condition has changed or has been added. -func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool { - condition.LastTransitionTime = metav1.Now() - // Try to find this pod condition. - conditionIndex, oldCondition := GetPodCondition(status, condition.Type) - - if oldCondition == nil { - // We are adding new pod condition. - status.Conditions = append(status.Conditions, *condition) - return true - } else { - // We are updating an existing condition, so we need to check if it has changed. - if condition.Status == oldCondition.Status { - condition.LastTransitionTime = oldCondition.LastTransitionTime - } - - isEqual := condition.Status == oldCondition.Status && - condition.Reason == oldCondition.Reason && - condition.Message == oldCondition.Message && - condition.LastProbeTime.Equal(oldCondition.LastProbeTime) && - condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime) - - status.Conditions[conditionIndex] = *condition - // Return true if one of the fields have changed. - return !isEqual - } -} - -// IsNodeReady returns true if a node is ready; false otherwise. -func IsNodeReady(node *Node) bool { - for _, c := range node.Status.Conditions { - if c.Type == NodeReady { - return c.Status == ConditionTrue - } - } - return false -} - -// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all -// containers of the pod. 
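The deleted UpdatePodCondition only moves LastTransitionTime forward when the condition's Status actually flips; otherwise it keeps the old transition time and reports whether any observed field differs. A simplified sketch of that bookkeeping with a local condition type (updateCondition is an illustrative name, and LastProbeTime is omitted for brevity):

    package main

    import (
        "fmt"
        "time"
    )

    // condition is a trimmed-down PodCondition.
    type condition struct {
        Type               string
        Status             string
        Reason             string
        Message            string
        LastTransitionTime time.Time
    }

    // updateCondition mirrors the deleted UpdatePodCondition: append if new,
    // keep the old transition time when the status is unchanged, and report
    // whether anything observable changed.
    func updateCondition(conditions *[]condition, c condition) bool {
        c.LastTransitionTime = time.Now()
        for i, old := range *conditions {
            if old.Type != c.Type {
                continue
            }
            if c.Status == old.Status {
                // No status flip: the transition time must not move.
                c.LastTransitionTime = old.LastTransitionTime
            }
            unchanged := c.Status == old.Status &&
                c.Reason == old.Reason &&
                c.Message == old.Message &&
                c.LastTransitionTime.Equal(old.LastTransitionTime)
            (*conditions)[i] = c
            return !unchanged
        }
        // Condition not present yet: adding it is always a change.
        *conditions = append(*conditions, c)
        return true
    }

    func main() {
        var conds []condition
        ready := condition{Type: "Ready", Status: "True"}

        fmt.Println(updateCondition(&conds, ready)) // true: newly added
        fmt.Println(updateCondition(&conds, ready)) // false: nothing changed
        ready.Status = "False"
        fmt.Println(updateCondition(&conds, ready)) // true: status flipped
    }
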
-func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, limits map[ResourceName]resource.Quantity, err error) { - reqs, limits = map[ResourceName]resource.Quantity{}, map[ResourceName]resource.Quantity{} - for _, container := range pod.Spec.Containers { - for name, quantity := range container.Resources.Requests { - if value, ok := reqs[name]; !ok { - reqs[name] = *quantity.Copy() - } else { - value.Add(quantity) - reqs[name] = value - } - } - for name, quantity := range container.Resources.Limits { - if value, ok := limits[name]; !ok { - limits[name] = *quantity.Copy() - } else { - value.Add(quantity) - limits[name] = value - } - } - } - // init containers define the minimum of any resource - for _, container := range pod.Spec.InitContainers { - for name, quantity := range container.Resources.Requests { - value, ok := reqs[name] - if !ok { - reqs[name] = *quantity.Copy() - continue - } - if quantity.Cmp(value) > 0 { - reqs[name] = *quantity.Copy() - } - } - for name, quantity := range container.Resources.Limits { - value, ok := limits[name] - if !ok { - limits[name] = *quantity.Copy() - continue - } - if quantity.Cmp(value) > 0 { - limits[name] = *quantity.Copy() - } - } - } - return -} - -// finds and returns the request for a specific resource. -func GetResourceRequest(pod *Pod, resource ResourceName) int64 { - if resource == ResourcePods { - return 1 - } - totalResources := int64(0) - for _, container := range pod.Spec.Containers { - if rQuantity, ok := container.Resources.Requests[resource]; ok { - if resource == ResourceCPU { - totalResources += rQuantity.MilliValue() - } else { - totalResources += rQuantity.Value() - } - } - } - // take max_resource(sum_pod, any_init_container) - for _, container := range pod.Spec.InitContainers { - if rQuantity, ok := container.Resources.Requests[resource]; ok { - if resource == ResourceCPU && rQuantity.MilliValue() > totalResources { - totalResources = rQuantity.MilliValue() - } else if rQuantity.Value() > totalResources { - totalResources = rQuantity.Value() - } - } - } - return totalResources -} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/types.generated.go b/vendor/k8s.io/client-go/pkg/api/v1/types.generated.go deleted file mode 100644 index c6fd805aa..000000000 --- a/vendor/k8s.io/client-go/pkg/api/v1/types.generated.go +++ /dev/null @@ -1,73800 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
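The deleted PodRequestsAndLimits and GetResourceRequest compute a pod's effective request as the larger of the sum over regular containers and the largest single init container (init containers run one at a time before the pod's containers), with CPU counted in milli-units. A small numeric sketch of that rule, using plain int64 millicores instead of resource.Quantity and a hypothetical effectiveCPURequest helper:

    package main

    import "fmt"

    // effectiveCPURequest mirrors the aggregation in the deleted helpers for
    // CPU: sum the regular containers, then take the max of that sum and any
    // single init container.
    func effectiveCPURequest(containers, initContainers []int64) int64 {
        var total int64
        for _, c := range containers {
            total += c
        }
        for _, ic := range initContainers {
            if ic > total {
                total = ic
            }
        }
        return total
    }

    func main() {
        // Two app containers requesting 200m and 300m, one init container at 600m.
        fmt.Println(effectiveCPURequest([]int64{200, 300}, []int64{600})) // 600
        // Init container smaller than the running set: the sum wins.
        fmt.Println(effectiveCPURequest([]int64{200, 300}, []int64{100})) // 500
    }
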
-// ************************************************************ - -package v1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg3_resource "k8s.io/apimachinery/pkg/api/resource" - pkg2_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkg5_runtime "k8s.io/apimachinery/pkg/runtime" - pkg1_types "k8s.io/apimachinery/pkg/types" - pkg4_intstr "k8s.io/apimachinery/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg3_resource.Quantity - var v1 pkg2_v1.Time - var v2 pkg5_runtime.RawExtension - var v3 pkg1_types.UID - var v4 pkg4_intstr.IntOrString - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 - } -} - -func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [15]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[1] = x.GenerateName != "" - yyq2[2] = x.Namespace != "" - yyq2[3] = x.SelfLink != "" - yyq2[4] = x.UID != "" - yyq2[5] = x.ResourceVersion != "" - yyq2[6] = x.Generation != 0 - yyq2[7] = true - yyq2[8] = x.DeletionTimestamp != nil - yyq2[9] = x.DeletionGracePeriodSeconds != nil - yyq2[10] = len(x.Labels) != 0 - yyq2[11] = len(x.Annotations) != 0 - yyq2[12] = len(x.OwnerReferences) != 0 - yyq2[13] = len(x.Finalizers) != 0 - yyq2[14] = x.ClusterName != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(15) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generateName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selfLink")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generation")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if 
false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yy25 := &x.CreationTimestamp - yym26 := z.EncBinary() - _ = yym26 - if false { - } else if z.HasExtensions() && z.EncExt(yy25) { - } else if yym26 { - z.EncBinaryMarshal(yy25) - } else if !yym26 && z.IsJSONHandle() { - z.EncJSONMarshal(yy25) - } else { - z.EncFallback(yy25) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("creationTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy27 := &x.CreationTimestamp - yym28 := z.EncBinary() - _ = yym28 - if false { - } else if z.HasExtensions() && z.EncExt(yy27) { - } else if yym28 { - z.EncBinaryMarshal(yy27) - } else if !yym28 && z.IsJSONHandle() { - z.EncJSONMarshal(yy27) - } else { - z.EncFallback(yy27) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym30 := z.EncBinary() - _ = yym30 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym30 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym30 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym31 := z.EncBinary() - _ = yym31 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym31 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym31 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy33 := *x.DeletionGracePeriodSeconds - yym34 := z.EncBinary() - _ = yym34 - if false { - } else { - r.EncodeInt(int64(yy33)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionGracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy35 := *x.DeletionGracePeriodSeconds - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeInt(int64(yy35)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.Labels == nil { - r.EncodeNil() - } else { - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labels")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Labels == nil { - r.EncodeNil() - } else { - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.Annotations == nil { - r.EncodeNil() - } else { - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("annotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Annotations == nil { - r.EncodeNil() - } else { - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - h.encSlicev1_OwnerReference(([]pkg2_v1.OwnerReference)(x.OwnerReferences), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ownerReferences")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - h.encSlicev1_OwnerReference(([]pkg2_v1.OwnerReference)(x.OwnerReferences), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("finalizers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym48 := z.EncBinary() - _ = yym48 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - yym50 := z.EncBinary() - _ = yym50 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym51 := z.EncBinary() - _ = yym51 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectMeta) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "generateName": - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - yyv6 := &x.GenerateName - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - yyv8 := &x.Namespace - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "selfLink": - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - yyv10 := &x.SelfLink - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - yyv12 := &x.UID - yym13 := z.DecBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.DecExt(yyv12) { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - yyv14 := &x.ResourceVersion - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - case "generation": - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - yyv16 := &x.Generation - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*int64)(yyv16)) = int64(r.DecodeInt(64)) - } - } - case "creationTimestamp": - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg2_v1.Time{} - } else { - yyv18 := &x.CreationTimestamp - yym19 := z.DecBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.DecExt(yyv18) { - } else if yym19 { - z.DecBinaryUnmarshal(yyv18) - } else if !yym19 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv18) - } else { - z.DecFallback(yyv18, false) - } - } - case "deletionTimestamp": - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg2_v1.Time) - } - yym21 := z.DecBinary() - _ = yym21 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym21 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym21 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - case "deletionGracePeriodSeconds": - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym23 := z.DecBinary() - _ = yym23 - if false { - } else { - 
*((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "labels": - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv24 := &x.Labels - yym25 := z.DecBinary() - _ = yym25 - if false { - } else { - z.F.DecMapStringStringX(yyv24, false, d) - } - } - case "annotations": - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv26 := &x.Annotations - yym27 := z.DecBinary() - _ = yym27 - if false { - } else { - z.F.DecMapStringStringX(yyv26, false, d) - } - } - case "ownerReferences": - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv28 := &x.OwnerReferences - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - h.decSlicev1_OwnerReference((*[]pkg2_v1.OwnerReference)(yyv28), d) - } - } - case "finalizers": - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv30 := &x.Finalizers - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - z.F.DecSliceStringX(yyv30, false, d) - } - } - case "clusterName": - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - yyv32 := &x.ClusterName - yym33 := z.DecBinary() - _ = yym33 - if false { - } else { - *((*string)(yyv32)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj34 int - var yyb34 bool - var yyhl34 bool = l >= 0 - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv35 := &x.Name - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - *((*string)(yyv35)) = r.DecodeString() - } - } - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - yyv37 := &x.GenerateName - yym38 := z.DecBinary() - _ = yym38 - if false { - } else { - *((*string)(yyv37)) = r.DecodeString() - } - } - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - yyv39 := &x.Namespace - yym40 := z.DecBinary() - _ = yym40 - if false { - } else { - *((*string)(yyv39)) = r.DecodeString() - } - } - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - yyv41 := &x.SelfLink - yym42 := z.DecBinary() - _ = yym42 - if false { - } else { - *((*string)(yyv41)) = r.DecodeString() - } - } - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - yyv43 := &x.UID - yym44 
:= z.DecBinary() - _ = yym44 - if false { - } else if z.HasExtensions() && z.DecExt(yyv43) { - } else { - *((*string)(yyv43)) = r.DecodeString() - } - } - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - yyv45 := &x.ResourceVersion - yym46 := z.DecBinary() - _ = yym46 - if false { - } else { - *((*string)(yyv45)) = r.DecodeString() - } - } - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - yyv47 := &x.Generation - yym48 := z.DecBinary() - _ = yym48 - if false { - } else { - *((*int64)(yyv47)) = int64(r.DecodeInt(64)) - } - } - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg2_v1.Time{} - } else { - yyv49 := &x.CreationTimestamp - yym50 := z.DecBinary() - _ = yym50 - if false { - } else if z.HasExtensions() && z.DecExt(yyv49) { - } else if yym50 { - z.DecBinaryUnmarshal(yyv49) - } else if !yym50 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv49) - } else { - z.DecFallback(yyv49, false) - } - } - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg2_v1.Time) - } - yym52 := z.DecBinary() - _ = yym52 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym52 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym52 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym54 := z.DecBinary() - _ = yym54 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv55 := &x.Labels - yym56 := z.DecBinary() - _ = yym56 - if false { - } else { - z.F.DecMapStringStringX(yyv55, false, d) - } - } - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv57 := &x.Annotations - yym58 := z.DecBinary() - _ = yym58 - if false { - } else { - z.F.DecMapStringStringX(yyv57, false, d) - } - } - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv59 := &x.OwnerReferences - yym60 := z.DecBinary() - _ = yym60 - if false { - } else { - h.decSlicev1_OwnerReference((*[]pkg2_v1.OwnerReference)(yyv59), d) - } - } - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv61 := &x.Finalizers - yym62 := z.DecBinary() - _ = yym62 - if false { - } else { - z.F.DecSliceStringX(yyv61, false, d) - } - } - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - yyv63 := &x.ClusterName - yym64 := z.DecBinary() - _ = yym64 - if false { - } else { - *((*string)(yyv63)) = r.DecodeString() - } - } - for { - yyj34++ - if yyhl34 { - yyb34 = yyj34 > l - } else { - yyb34 = r.CheckBreak() - } - if yyb34 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj34-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [27]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.VolumeSource.HostPath != nil && x.HostPath != nil - yyq2[2] = x.VolumeSource.EmptyDir != nil && x.EmptyDir != nil - yyq2[3] = x.VolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil - yyq2[4] = x.VolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil - yyq2[5] = x.VolumeSource.GitRepo != nil && x.GitRepo != nil - yyq2[6] = x.VolumeSource.Secret != nil && x.Secret != nil - yyq2[7] = x.VolumeSource.NFS != nil && x.NFS != nil - yyq2[8] = x.VolumeSource.ISCSI != nil && x.ISCSI != nil - yyq2[9] = x.VolumeSource.Glusterfs != nil && x.Glusterfs != nil - yyq2[10] = x.VolumeSource.PersistentVolumeClaim != nil && x.PersistentVolumeClaim != nil - yyq2[11] = x.VolumeSource.RBD != nil && x.RBD != nil - yyq2[12] = x.VolumeSource.FlexVolume != nil && x.FlexVolume != nil - yyq2[13] = x.VolumeSource.Cinder != nil && x.Cinder != nil - yyq2[14] = x.VolumeSource.CephFS != nil && x.CephFS != nil - yyq2[15] = x.VolumeSource.Flocker != nil && x.Flocker != nil - yyq2[16] = x.VolumeSource.DownwardAPI != nil && x.DownwardAPI != nil - yyq2[17] = x.VolumeSource.FC != nil && x.FC != nil - yyq2[18] = x.VolumeSource.AzureFile != nil && x.AzureFile != nil - yyq2[19] = x.VolumeSource.ConfigMap != nil && x.ConfigMap 
!= nil - yyq2[20] = x.VolumeSource.VsphereVolume != nil && x.VsphereVolume != nil - yyq2[21] = x.VolumeSource.Quobyte != nil && x.Quobyte != nil - yyq2[22] = x.VolumeSource.AzureDisk != nil && x.AzureDisk != nil - yyq2[23] = x.VolumeSource.PhotonPersistentDisk != nil && x.PhotonPersistentDisk != nil - yyq2[24] = x.VolumeSource.Projected != nil && x.Projected != nil - yyq2[25] = x.VolumeSource.PortworxVolume != nil && x.PortworxVolume != nil - yyq2[26] = x.VolumeSource.ScaleIO != nil && x.ScaleIO != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(27) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - var yyn6 bool - if x.VolumeSource.HostPath == nil { - yyn6 = true - goto LABEL6 - } - LABEL6: - if yyr2 || yy2arr2 { - if yyn6 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn6 { - r.EncodeNil() - } else { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - } - var yyn9 bool - if x.VolumeSource.EmptyDir == nil { - yyn9 = true - goto LABEL9 - } - LABEL9: - if yyr2 || yy2arr2 { - if yyn9 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn9 { - r.EncodeNil() - } else { - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } - } - } - var yyn12 bool - if x.VolumeSource.GCEPersistentDisk == nil { - yyn12 = true - goto LABEL12 - } - LABEL12: - if yyr2 || yy2arr2 { - if yyn12 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn12 { - r.EncodeNil() - } else { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - } - var yyn15 bool - if x.VolumeSource.AWSElasticBlockStore == nil { - yyn15 = true - goto LABEL15 - } - LABEL15: - if yyr2 || yy2arr2 { - if yyn15 { - r.EncodeNil() - } else { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn15 { - r.EncodeNil() - } else { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } - } - } - var yyn18 bool - if x.VolumeSource.GitRepo == nil { - yyn18 = true - goto LABEL18 - } - LABEL18: - if yyr2 || yy2arr2 { - if yyn18 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn18 { - r.EncodeNil() - } else { - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } - } - } - var yyn21 bool - if x.VolumeSource.Secret == nil { - yyn21 = true - goto LABEL21 - } - LABEL21: - if yyr2 || yy2arr2 { - if yyn21 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secret")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn21 { - r.EncodeNil() - } else { - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } - } - } - var yyn24 bool - if x.VolumeSource.NFS == nil { - yyn24 = true - goto LABEL24 - } - LABEL24: - if yyr2 || yy2arr2 { - if yyn24 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn24 { - r.EncodeNil() - } else { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - } - var yyn27 bool - if x.VolumeSource.ISCSI == nil { - yyn27 = true - goto LABEL27 - } - LABEL27: - if yyr2 || yy2arr2 { - if yyn27 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn27 { - r.EncodeNil() - } else { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - } - var yyn30 bool - if x.VolumeSource.Glusterfs == nil { - yyn30 = true - goto LABEL30 - } - LABEL30: - if yyr2 || yy2arr2 { - if yyn30 { - r.EncodeNil() - } else { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn30 { - r.EncodeNil() - } else { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - } - var yyn33 bool - if x.VolumeSource.PersistentVolumeClaim == nil { - yyn33 = true - goto LABEL33 - } - LABEL33: - if yyr2 || yy2arr2 { - if yyn33 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn33 { - r.EncodeNil() - } else { - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } - } - } - var yyn36 bool - if x.VolumeSource.RBD == nil { - yyn36 = true - goto LABEL36 - } - LABEL36: - if yyr2 || yy2arr2 { - if yyn36 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn36 { - r.EncodeNil() - } else { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - } - var yyn39 bool - if x.VolumeSource.FlexVolume == nil { - yyn39 = true - goto LABEL39 - } - LABEL39: - if yyr2 || yy2arr2 { - if yyn39 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn39 { - r.EncodeNil() - } else { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - } - var yyn42 bool - if x.VolumeSource.Cinder == nil { - yyn42 = true - goto LABEL42 - } - LABEL42: - if yyr2 || yy2arr2 { - if yyn42 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn42 { - r.EncodeNil() - } else { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - } - var yyn45 bool - if x.VolumeSource.CephFS == nil { - yyn45 = true - goto LABEL45 - } - LABEL45: - if yyr2 || 
yy2arr2 { - if yyn45 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn45 { - r.EncodeNil() - } else { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - } - var yyn48 bool - if x.VolumeSource.Flocker == nil { - yyn48 = true - goto LABEL48 - } - LABEL48: - if yyr2 || yy2arr2 { - if yyn48 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn48 { - r.EncodeNil() - } else { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - } - var yyn51 bool - if x.VolumeSource.DownwardAPI == nil { - yyn51 = true - goto LABEL51 - } - LABEL51: - if yyr2 || yy2arr2 { - if yyn51 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn51 { - r.EncodeNil() - } else { - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } - } - } - var yyn54 bool - if x.VolumeSource.FC == nil { - yyn54 = true - goto LABEL54 - } - LABEL54: - if yyr2 || yy2arr2 { - if yyn54 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[17] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn54 { - r.EncodeNil() - } else { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - } - var yyn57 bool - if x.VolumeSource.AzureFile == nil { - yyn57 = true - goto LABEL57 - } - LABEL57: - if yyr2 || yy2arr2 { - if yyn57 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[18] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[18] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn57 { - r.EncodeNil() - } else { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - } - var yyn60 bool - if x.VolumeSource.ConfigMap == nil { - yyn60 = true - goto LABEL60 - } - LABEL60: - if yyr2 || yy2arr2 { - if yyn60 { - 
r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[19] { - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[19] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configMap")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn60 { - r.EncodeNil() - } else { - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } - } - } - var yyn63 bool - if x.VolumeSource.VsphereVolume == nil { - yyn63 = true - goto LABEL63 - } - LABEL63: - if yyr2 || yy2arr2 { - if yyn63 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[20] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[20] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn63 { - r.EncodeNil() - } else { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - } - var yyn66 bool - if x.VolumeSource.Quobyte == nil { - yyn66 = true - goto LABEL66 - } - LABEL66: - if yyr2 || yy2arr2 { - if yyn66 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[21] { - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[21] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("quobyte")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn66 { - r.EncodeNil() - } else { - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } - } - } - var yyn69 bool - if x.VolumeSource.AzureDisk == nil { - yyn69 = true - goto LABEL69 - } - LABEL69: - if yyr2 || yy2arr2 { - if yyn69 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[22] { - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[22] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn69 { - r.EncodeNil() - } else { - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } - } - } - var yyn72 bool - if x.VolumeSource.PhotonPersistentDisk == nil { - yyn72 = true - goto LABEL72 - } - LABEL72: - if yyr2 || yy2arr2 { - if yyn72 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[23] { - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[23] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn72 { - r.EncodeNil() - } else { - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } - } - } - var yyn75 bool - if 
x.VolumeSource.Projected == nil { - yyn75 = true - goto LABEL75 - } - LABEL75: - if yyr2 || yy2arr2 { - if yyn75 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[24] { - if x.Projected == nil { - r.EncodeNil() - } else { - x.Projected.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[24] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("projected")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn75 { - r.EncodeNil() - } else { - if x.Projected == nil { - r.EncodeNil() - } else { - x.Projected.CodecEncodeSelf(e) - } - } - } - } - var yyn78 bool - if x.VolumeSource.PortworxVolume == nil { - yyn78 = true - goto LABEL78 - } - LABEL78: - if yyr2 || yy2arr2 { - if yyn78 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[25] { - if x.PortworxVolume == nil { - r.EncodeNil() - } else { - x.PortworxVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[25] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn78 { - r.EncodeNil() - } else { - if x.PortworxVolume == nil { - r.EncodeNil() - } else { - x.PortworxVolume.CodecEncodeSelf(e) - } - } - } - } - var yyn81 bool - if x.VolumeSource.ScaleIO == nil { - yyn81 = true - goto LABEL81 - } - LABEL81: - if yyr2 || yy2arr2 { - if yyn81 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[26] { - if x.ScaleIO == nil { - r.EncodeNil() - } else { - x.ScaleIO.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[26] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn81 { - r.EncodeNil() - } else { - if x.ScaleIO == nil { - r.EncodeNil() - } else { - x.ScaleIO.CodecEncodeSelf(e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Volume) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Volume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = 
r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "hostPath": - if x.VolumeSource.HostPath == nil { - x.VolumeSource.HostPath = new(HostPathVolumeSource) - } - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "emptyDir": - if x.VolumeSource.EmptyDir == nil { - x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) - } - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - case "gcePersistentDisk": - if x.VolumeSource.GCEPersistentDisk == nil { - x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if x.VolumeSource.AWSElasticBlockStore == nil { - x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "gitRepo": - if x.VolumeSource.GitRepo == nil { - x.VolumeSource.GitRepo = new(GitRepoVolumeSource) - } - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - case "secret": - if x.VolumeSource.Secret == nil { - x.VolumeSource.Secret = new(SecretVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - case "nfs": - if x.VolumeSource.NFS == nil { - x.VolumeSource.NFS = new(NFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "iscsi": - if x.VolumeSource.ISCSI == nil { - x.VolumeSource.ISCSI = new(ISCSIVolumeSource) - } - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "glusterfs": - if x.VolumeSource.Glusterfs == nil { - x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "persistentVolumeClaim": - if x.VolumeSource.PersistentVolumeClaim == nil { - x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - if r.TryDecodeAsNil() { - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = 
new(PersistentVolumeClaimVolumeSource) - } - x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - case "rbd": - if x.VolumeSource.RBD == nil { - x.VolumeSource.RBD = new(RBDVolumeSource) - } - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - case "flexVolume": - if x.VolumeSource.FlexVolume == nil { - x.VolumeSource.FlexVolume = new(FlexVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "cinder": - if x.VolumeSource.Cinder == nil { - x.VolumeSource.Cinder = new(CinderVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if x.VolumeSource.CephFS == nil { - x.VolumeSource.CephFS = new(CephFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - case "flocker": - if x.VolumeSource.Flocker == nil { - x.VolumeSource.Flocker = new(FlockerVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "downwardAPI": - if x.VolumeSource.DownwardAPI == nil { - x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) - } - if r.TryDecodeAsNil() { - if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { - if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - x.DownwardAPI.CodecDecodeSelf(d) - } - case "fc": - if x.VolumeSource.FC == nil { - x.VolumeSource.FC = new(FCVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "azureFile": - if x.VolumeSource.AzureFile == nil { - x.VolumeSource.AzureFile = new(AzureFileVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "configMap": - if x.VolumeSource.ConfigMap == nil { - x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) - } - if r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - case "vsphereVolume": - if x.VolumeSource.VsphereVolume == nil { - x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - case "quobyte": - if x.VolumeSource.Quobyte == nil { - x.VolumeSource.Quobyte = new(QuobyteVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - case "azureDisk": - if x.VolumeSource.AzureDisk == nil { - x.VolumeSource.AzureDisk = new(AzureDiskVolumeSource) - } - if r.TryDecodeAsNil() { - 
if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - if x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - case "photonPersistentDisk": - if x.VolumeSource.PhotonPersistentDisk == nil { - x.VolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - case "projected": - if x.VolumeSource.Projected == nil { - x.VolumeSource.Projected = new(ProjectedVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Projected != nil { - x.Projected = nil - } - } else { - if x.Projected == nil { - x.Projected = new(ProjectedVolumeSource) - } - x.Projected.CodecDecodeSelf(d) - } - case "portworxVolume": - if x.VolumeSource.PortworxVolume == nil { - x.VolumeSource.PortworxVolume = new(PortworxVolumeSource) - } - if r.TryDecodeAsNil() { - if x.PortworxVolume != nil { - x.PortworxVolume = nil - } - } else { - if x.PortworxVolume == nil { - x.PortworxVolume = new(PortworxVolumeSource) - } - x.PortworxVolume.CodecDecodeSelf(d) - } - case "scaleIO": - if x.VolumeSource.ScaleIO == nil { - x.VolumeSource.ScaleIO = new(ScaleIOVolumeSource) - } - if r.TryDecodeAsNil() { - if x.ScaleIO != nil { - x.ScaleIO = nil - } - } else { - if x.ScaleIO == nil { - x.ScaleIO = new(ScaleIOVolumeSource) - } - x.ScaleIO.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj32 int - var yyb32 bool - var yyhl32 bool = l >= 0 - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv33 := &x.Name - yym34 := z.DecBinary() - _ = yym34 - if false { - } else { - *((*string)(yyv33)) = r.DecodeString() - } - } - if x.VolumeSource.HostPath == nil { - x.VolumeSource.HostPath = new(HostPathVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - if x.VolumeSource.EmptyDir == nil { - x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - if x.VolumeSource.GCEPersistentDisk == nil { - x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l 
- } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - if x.VolumeSource.AWSElasticBlockStore == nil { - x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - if x.VolumeSource.GitRepo == nil { - x.VolumeSource.GitRepo = new(GitRepoVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - if x.VolumeSource.Secret == nil { - x.VolumeSource.Secret = new(SecretVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - if x.VolumeSource.NFS == nil { - x.VolumeSource.NFS = new(NFSVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - if x.VolumeSource.ISCSI == nil { - x.VolumeSource.ISCSI = new(ISCSIVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - if x.VolumeSource.Glusterfs == nil { - x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - 
x.Glusterfs.CodecDecodeSelf(d) - } - if x.VolumeSource.PersistentVolumeClaim == nil { - x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - if x.VolumeSource.RBD == nil { - x.VolumeSource.RBD = new(RBDVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - if x.VolumeSource.FlexVolume == nil { - x.VolumeSource.FlexVolume = new(FlexVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - if x.VolumeSource.Cinder == nil { - x.VolumeSource.Cinder = new(CinderVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - if x.VolumeSource.CephFS == nil { - x.VolumeSource.CephFS = new(CephFSVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - if x.VolumeSource.Flocker == nil { - x.VolumeSource.Flocker = new(FlockerVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - if x.VolumeSource.DownwardAPI == nil { - x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { - if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - x.DownwardAPI.CodecDecodeSelf(d) - } - if x.VolumeSource.FC == nil { - x.VolumeSource.FC = new(FCVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - if x.VolumeSource.AzureFile == nil { - x.VolumeSource.AzureFile = new(AzureFileVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - if x.VolumeSource.ConfigMap == nil { - x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - if x.VolumeSource.VsphereVolume == nil { - x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - if x.VolumeSource.Quobyte == nil { - x.VolumeSource.Quobyte = new(QuobyteVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - if x.VolumeSource.AzureDisk == nil { - x.VolumeSource.AzureDisk = new(AzureDiskVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - if x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - if x.VolumeSource.PhotonPersistentDisk == nil { - x.VolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - if x.VolumeSource.Projected == nil { - x.VolumeSource.Projected = new(ProjectedVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Projected != nil { - x.Projected = nil - } - } else { - if x.Projected == nil { - x.Projected = new(ProjectedVolumeSource) - } - x.Projected.CodecDecodeSelf(d) - } - if x.VolumeSource.PortworxVolume == nil { - x.VolumeSource.PortworxVolume = new(PortworxVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PortworxVolume != nil { - x.PortworxVolume = nil - } - } else { - if x.PortworxVolume == nil { - x.PortworxVolume = new(PortworxVolumeSource) - } - x.PortworxVolume.CodecDecodeSelf(d) - } - if x.VolumeSource.ScaleIO == nil { - x.VolumeSource.ScaleIO = new(ScaleIOVolumeSource) - } - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ScaleIO != nil { - x.ScaleIO = nil - } - } else { - if x.ScaleIO == nil { - x.ScaleIO = new(ScaleIOVolumeSource) - } - x.ScaleIO.CodecDecodeSelf(d) - } - for { - yyj32++ - if yyhl32 { - yyb32 = yyj32 > l - } else { - yyb32 = r.CheckBreak() - } - if yyb32 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj32-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [26]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.HostPath != nil - yyq2[1] = x.EmptyDir != nil - yyq2[2] = x.GCEPersistentDisk != nil - yyq2[3] = x.AWSElasticBlockStore != nil - yyq2[4] = x.GitRepo != nil - yyq2[5] = x.Secret != nil - yyq2[6] = x.NFS != nil - yyq2[7] = x.ISCSI != nil - yyq2[8] = x.Glusterfs != nil - yyq2[9] = x.PersistentVolumeClaim != nil - yyq2[10] = x.RBD != nil - yyq2[11] = x.FlexVolume != nil - yyq2[12] = x.Cinder != nil - yyq2[13] = x.CephFS != nil - yyq2[14] = x.Flocker != nil - yyq2[15] = x.DownwardAPI != nil - yyq2[16] = x.FC != nil - yyq2[17] = x.AzureFile != nil - yyq2[18] = x.ConfigMap != nil - yyq2[19] = x.VsphereVolume != nil - yyq2[20] = x.Quobyte != nil - yyq2[21] = x.AzureDisk != nil - yyq2[22] = x.PhotonPersistentDisk != nil - yyq2[23] = x.Projected != nil - yyq2[24] = x.PortworxVolume != nil - yyq2[25] = 
x.ScaleIO != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(26) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secret")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.NFS == nil { - r.EncodeNil() - 
} else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[17] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[18] { - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[18] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configMap")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[19] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[19] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if 
x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[20] { - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[20] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("quobyte")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[21] { - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[21] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[22] { - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[22] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[23] { - if x.Projected == nil { - r.EncodeNil() - } else { - x.Projected.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[23] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("projected")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Projected == nil { - r.EncodeNil() - } else { - x.Projected.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[24] { - if x.PortworxVolume == nil { - r.EncodeNil() - } else { - x.PortworxVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[24] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PortworxVolume == nil { - r.EncodeNil() - } else { - x.PortworxVolume.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[25] { - if x.ScaleIO == nil { - r.EncodeNil() - } else { - x.ScaleIO.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[25] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ScaleIO == nil { - r.EncodeNil() - } else { - x.ScaleIO.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *VolumeSource) CodecDecodeSelf(d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *VolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "hostPath": - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "emptyDir": - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - case "gcePersistentDisk": - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "gitRepo": - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - case "secret": - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - case "nfs": - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "iscsi": - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "glusterfs": - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "persistentVolumeClaim": - if r.TryDecodeAsNil() { - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - 
x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - case "rbd": - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - case "flexVolume": - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "cinder": - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - case "flocker": - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "downwardAPI": - if r.TryDecodeAsNil() { - if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { - if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - x.DownwardAPI.CodecDecodeSelf(d) - } - case "fc": - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "azureFile": - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "configMap": - if r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - case "vsphereVolume": - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - case "quobyte": - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - case "azureDisk": - if r.TryDecodeAsNil() { - if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - if x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - case "photonPersistentDisk": - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - case "projected": - if r.TryDecodeAsNil() { - if x.Projected != nil { - x.Projected = nil - } - } else { - if x.Projected == nil { - x.Projected = new(ProjectedVolumeSource) - } - x.Projected.CodecDecodeSelf(d) - } - case "portworxVolume": - if r.TryDecodeAsNil() { - if x.PortworxVolume != nil { - x.PortworxVolume = nil - } - } else { - if x.PortworxVolume == nil { - x.PortworxVolume = new(PortworxVolumeSource) - } - x.PortworxVolume.CodecDecodeSelf(d) - } - case "scaleIO": - if r.TryDecodeAsNil() { - if x.ScaleIO != nil { - x.ScaleIO = nil - } - } else { - if x.ScaleIO == nil { - x.ScaleIO = new(ScaleIOVolumeSource) - } - x.ScaleIO.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // 
end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj30 int - var yyb30 bool - var yyhl30 bool = l >= 0 - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { 
- if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - x.DownwardAPI.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - if x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Projected != nil { - x.Projected = nil - } - } else { - if x.Projected == nil { - x.Projected = 
new(ProjectedVolumeSource) - } - x.Projected.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PortworxVolume != nil { - x.PortworxVolume = nil - } - } else { - if x.PortworxVolume == nil { - x.PortworxVolume = new(PortworxVolumeSource) - } - x.PortworxVolume.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ScaleIO != nil { - x.ScaleIO = nil - } - } else { - if x.ScaleIO == nil { - x.ScaleIO = new(ScaleIOVolumeSource) - } - x.ScaleIO.CodecDecodeSelf(d) - } - for { - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj30-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaimVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("claimName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "claimName": - if r.TryDecodeAsNil() { - x.ClaimName = "" - } else { - yyv4 := &x.ClaimName - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv6 := &x.ReadOnly - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*bool)(yyv6)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClaimName = "" - } else { - yyv9 := &x.ClaimName - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv11 := &x.ReadOnly - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(yyv11)) = r.DecodeBool() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [19]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.GCEPersistentDisk != nil - yyq2[1] = x.AWSElasticBlockStore != nil - yyq2[2] = x.HostPath != nil - yyq2[3] = x.Glusterfs != nil - yyq2[4] = 
x.NFS != nil - yyq2[5] = x.RBD != nil - yyq2[6] = x.ISCSI != nil - yyq2[7] = x.Cinder != nil - yyq2[8] = x.CephFS != nil - yyq2[9] = x.FC != nil - yyq2[10] = x.Flocker != nil - yyq2[11] = x.FlexVolume != nil - yyq2[12] = x.AzureFile != nil - yyq2[13] = x.VsphereVolume != nil - yyq2[14] = x.Quobyte != nil - yyq2[15] = x.AzureDisk != nil - yyq2[16] = x.PhotonPersistentDisk != nil - yyq2[17] = x.PortworxVolume != nil - yyq2[18] = x.ScaleIO != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(19) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] 
{ - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } 
- } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("quobyte")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[17] { - if x.PortworxVolume == nil { - r.EncodeNil() - } else { - x.PortworxVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PortworxVolume == nil { - r.EncodeNil() - } else { - x.PortworxVolume.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[18] { - if x.ScaleIO == nil { - r.EncodeNil() - } else { - x.ScaleIO.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[18] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ScaleIO == nil { - r.EncodeNil() - } else { - x.ScaleIO.CodecEncodeSelf(e) - } - } - } 
- if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "gcePersistentDisk": - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "hostPath": - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "glusterfs": - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "nfs": - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "rbd": - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - case "iscsi": - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "cinder": - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - case "fc": - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC 
= nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "flocker": - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "flexVolume": - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "azureFile": - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "vsphereVolume": - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - case "quobyte": - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - case "azureDisk": - if r.TryDecodeAsNil() { - if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - if x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - case "photonPersistentDisk": - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - case "portworxVolume": - if r.TryDecodeAsNil() { - if x.PortworxVolume != nil { - x.PortworxVolume = nil - } - } else { - if x.PortworxVolume == nil { - x.PortworxVolume = new(PortworxVolumeSource) - } - x.PortworxVolume.CodecDecodeSelf(d) - } - case "scaleIO": - if r.TryDecodeAsNil() { - if x.ScaleIO != nil { - x.ScaleIO = nil - } - } else { - if x.ScaleIO == nil { - x.ScaleIO = new(ScaleIOVolumeSource) - } - x.ScaleIO.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj23 int - var yyb23 bool - var yyhl23 bool = l >= 0 - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = 
yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if 
x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - if x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PortworxVolume != nil { - x.PortworxVolume = nil - } - } else { - if x.PortworxVolume == nil { - x.PortworxVolume = new(PortworxVolumeSource) - } - x.PortworxVolume.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ScaleIO != nil { - x.ScaleIO = nil - } - } else { - if x.ScaleIO == nil { 
- x.ScaleIO = new(ScaleIOVolumeSource) - } - x.ScaleIO.CodecDecodeSelf(d) - } - for { - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj23-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolume) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := 
&x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolume) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PersistentVolumeSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PersistentVolumeStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false 
{ - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PersistentVolumeSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PersistentVolumeStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [24]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Capacity) != 0 - yyq2[1] = x.PersistentVolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil - yyq2[2] = x.PersistentVolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil - yyq2[3] = x.PersistentVolumeSource.HostPath != nil && x.HostPath != nil - yyq2[4] = x.PersistentVolumeSource.Glusterfs != nil && x.Glusterfs != nil - yyq2[5] = x.PersistentVolumeSource.NFS != nil && x.NFS != nil - yyq2[6] = x.PersistentVolumeSource.RBD != nil && x.RBD != nil - yyq2[7] = x.PersistentVolumeSource.ISCSI != nil && x.ISCSI != nil - yyq2[8] = x.PersistentVolumeSource.Cinder != nil && x.Cinder != nil - yyq2[9] = x.PersistentVolumeSource.CephFS != nil && x.CephFS != nil - yyq2[10] = x.PersistentVolumeSource.FC != nil && x.FC != nil - yyq2[11] = x.PersistentVolumeSource.Flocker != nil && x.Flocker != nil - yyq2[12] = x.PersistentVolumeSource.FlexVolume != nil && x.FlexVolume != nil - yyq2[13] = x.PersistentVolumeSource.AzureFile != nil && x.AzureFile != nil - yyq2[14] = x.PersistentVolumeSource.VsphereVolume != nil && x.VsphereVolume != nil - yyq2[15] = 
x.PersistentVolumeSource.Quobyte != nil && x.Quobyte != nil - yyq2[16] = x.PersistentVolumeSource.AzureDisk != nil && x.AzureDisk != nil - yyq2[17] = x.PersistentVolumeSource.PhotonPersistentDisk != nil && x.PhotonPersistentDisk != nil - yyq2[18] = x.PersistentVolumeSource.PortworxVolume != nil && x.PortworxVolume != nil - yyq2[19] = x.PersistentVolumeSource.ScaleIO != nil && x.ScaleIO != nil - yyq2[20] = len(x.AccessModes) != 0 - yyq2[21] = x.ClaimRef != nil - yyq2[22] = x.PersistentVolumeReclaimPolicy != "" - yyq2[23] = x.StorageClassName != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(24) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capacity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } - } - var yyn6 bool - if x.PersistentVolumeSource.GCEPersistentDisk == nil { - yyn6 = true - goto LABEL6 - } - LABEL6: - if yyr2 || yy2arr2 { - if yyn6 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn6 { - r.EncodeNil() - } else { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - } - var yyn9 bool - if x.PersistentVolumeSource.AWSElasticBlockStore == nil { - yyn9 = true - goto LABEL9 - } - LABEL9: - if yyr2 || yy2arr2 { - if yyn9 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn9 { - r.EncodeNil() - } else { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } - } - } - var yyn12 bool - if x.PersistentVolumeSource.HostPath == nil { - yyn12 = true - goto LABEL12 - } - LABEL12: - if yyr2 || yy2arr2 { - if yyn12 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn12 { - r.EncodeNil() - } else { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - } - var yyn15 bool - if 
x.PersistentVolumeSource.Glusterfs == nil { - yyn15 = true - goto LABEL15 - } - LABEL15: - if yyr2 || yy2arr2 { - if yyn15 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn15 { - r.EncodeNil() - } else { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - } - var yyn18 bool - if x.PersistentVolumeSource.NFS == nil { - yyn18 = true - goto LABEL18 - } - LABEL18: - if yyr2 || yy2arr2 { - if yyn18 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn18 { - r.EncodeNil() - } else { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - } - var yyn21 bool - if x.PersistentVolumeSource.RBD == nil { - yyn21 = true - goto LABEL21 - } - LABEL21: - if yyr2 || yy2arr2 { - if yyn21 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn21 { - r.EncodeNil() - } else { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - } - var yyn24 bool - if x.PersistentVolumeSource.ISCSI == nil { - yyn24 = true - goto LABEL24 - } - LABEL24: - if yyr2 || yy2arr2 { - if yyn24 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn24 { - r.EncodeNil() - } else { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - } - var yyn27 bool - if x.PersistentVolumeSource.Cinder == nil { - yyn27 = true - goto LABEL27 - } - LABEL27: - if yyr2 || yy2arr2 { - if yyn27 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn27 { - r.EncodeNil() - } else { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - } - var yyn30 bool - if x.PersistentVolumeSource.CephFS == nil { - 
yyn30 = true - goto LABEL30 - } - LABEL30: - if yyr2 || yy2arr2 { - if yyn30 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn30 { - r.EncodeNil() - } else { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - } - var yyn33 bool - if x.PersistentVolumeSource.FC == nil { - yyn33 = true - goto LABEL33 - } - LABEL33: - if yyr2 || yy2arr2 { - if yyn33 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn33 { - r.EncodeNil() - } else { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - } - var yyn36 bool - if x.PersistentVolumeSource.Flocker == nil { - yyn36 = true - goto LABEL36 - } - LABEL36: - if yyr2 || yy2arr2 { - if yyn36 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn36 { - r.EncodeNil() - } else { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - } - var yyn39 bool - if x.PersistentVolumeSource.FlexVolume == nil { - yyn39 = true - goto LABEL39 - } - LABEL39: - if yyr2 || yy2arr2 { - if yyn39 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn39 { - r.EncodeNil() - } else { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - } - var yyn42 bool - if x.PersistentVolumeSource.AzureFile == nil { - yyn42 = true - goto LABEL42 - } - LABEL42: - if yyr2 || yy2arr2 { - if yyn42 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn42 { - r.EncodeNil() - } else { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - } - var yyn45 bool - if 
x.PersistentVolumeSource.VsphereVolume == nil { - yyn45 = true - goto LABEL45 - } - LABEL45: - if yyr2 || yy2arr2 { - if yyn45 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn45 { - r.EncodeNil() - } else { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - } - var yyn48 bool - if x.PersistentVolumeSource.Quobyte == nil { - yyn48 = true - goto LABEL48 - } - LABEL48: - if yyr2 || yy2arr2 { - if yyn48 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("quobyte")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn48 { - r.EncodeNil() - } else { - if x.Quobyte == nil { - r.EncodeNil() - } else { - x.Quobyte.CodecEncodeSelf(e) - } - } - } - } - var yyn51 bool - if x.PersistentVolumeSource.AzureDisk == nil { - yyn51 = true - goto LABEL51 - } - LABEL51: - if yyr2 || yy2arr2 { - if yyn51 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn51 { - r.EncodeNil() - } else { - if x.AzureDisk == nil { - r.EncodeNil() - } else { - x.AzureDisk.CodecEncodeSelf(e) - } - } - } - } - var yyn54 bool - if x.PersistentVolumeSource.PhotonPersistentDisk == nil { - yyn54 = true - goto LABEL54 - } - LABEL54: - if yyr2 || yy2arr2 { - if yyn54 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[17] { - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn54 { - r.EncodeNil() - } else { - if x.PhotonPersistentDisk == nil { - r.EncodeNil() - } else { - x.PhotonPersistentDisk.CodecEncodeSelf(e) - } - } - } - } - var yyn57 bool - if x.PersistentVolumeSource.PortworxVolume == nil { - yyn57 = true - goto LABEL57 - } - LABEL57: - if yyr2 || yy2arr2 { - if yyn57 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[18] { - if x.PortworxVolume == nil { - r.EncodeNil() - } else { - x.PortworxVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[18] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn57 { - r.EncodeNil() - } else { - if x.PortworxVolume == nil { - r.EncodeNil() - } else { - x.PortworxVolume.CodecEncodeSelf(e) - } - } - } - } - var yyn60 bool - if x.PersistentVolumeSource.ScaleIO == nil { - yyn60 = true - goto LABEL60 - } - LABEL60: - if yyr2 || yy2arr2 { - if yyn60 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[19] { - if x.ScaleIO == nil { - r.EncodeNil() - } else { - x.ScaleIO.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[19] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn60 { - r.EncodeNil() - } else { - if x.ScaleIO == nil { - r.EncodeNil() - } else { - x.ScaleIO.CodecEncodeSelf(e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[20] { - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym64 := z.EncBinary() - _ = yym64 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[20] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("accessModes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym65 := z.EncBinary() - _ = yym65 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[21] { - if x.ClaimRef == nil { - r.EncodeNil() - } else { - x.ClaimRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[21] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("claimRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ClaimRef == nil { - r.EncodeNil() - } else { - x.ClaimRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[22] { - x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[22] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeReclaimPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[23] { - yym73 := z.EncBinary() - _ = yym73 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.StorageClassName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[23] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("storageClassName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym74 := z.EncBinary() - _ = yym74 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.StorageClassName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func 
(x *PersistentVolumeSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "capacity": - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv4 := &x.Capacity - yyv4.CodecDecodeSelf(d) - } - case "gcePersistentDisk": - if x.PersistentVolumeSource.GCEPersistentDisk == nil { - x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if x.PersistentVolumeSource.AWSElasticBlockStore == nil { - x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "hostPath": - if x.PersistentVolumeSource.HostPath == nil { - x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) - } - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "glusterfs": - if x.PersistentVolumeSource.Glusterfs == nil { - x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "nfs": - if x.PersistentVolumeSource.NFS == nil { - x.PersistentVolumeSource.NFS = new(NFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "rbd": - if x.PersistentVolumeSource.RBD == nil { - x.PersistentVolumeSource.RBD = new(RBDVolumeSource) - } - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } 
- x.RBD.CodecDecodeSelf(d) - } - case "iscsi": - if x.PersistentVolumeSource.ISCSI == nil { - x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) - } - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "cinder": - if x.PersistentVolumeSource.Cinder == nil { - x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if x.PersistentVolumeSource.CephFS == nil { - x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - case "fc": - if x.PersistentVolumeSource.FC == nil { - x.PersistentVolumeSource.FC = new(FCVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "flocker": - if x.PersistentVolumeSource.Flocker == nil { - x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "flexVolume": - if x.PersistentVolumeSource.FlexVolume == nil { - x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "azureFile": - if x.PersistentVolumeSource.AzureFile == nil { - x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "vsphereVolume": - if x.PersistentVolumeSource.VsphereVolume == nil { - x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - case "quobyte": - if x.PersistentVolumeSource.Quobyte == nil { - x.PersistentVolumeSource.Quobyte = new(QuobyteVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - case "azureDisk": - if x.PersistentVolumeSource.AzureDisk == nil { - x.PersistentVolumeSource.AzureDisk = new(AzureDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - if x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - case "photonPersistentDisk": - if x.PersistentVolumeSource.PhotonPersistentDisk == nil { - x.PersistentVolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == 
nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - case "portworxVolume": - if x.PersistentVolumeSource.PortworxVolume == nil { - x.PersistentVolumeSource.PortworxVolume = new(PortworxVolumeSource) - } - if r.TryDecodeAsNil() { - if x.PortworxVolume != nil { - x.PortworxVolume = nil - } - } else { - if x.PortworxVolume == nil { - x.PortworxVolume = new(PortworxVolumeSource) - } - x.PortworxVolume.CodecDecodeSelf(d) - } - case "scaleIO": - if x.PersistentVolumeSource.ScaleIO == nil { - x.PersistentVolumeSource.ScaleIO = new(ScaleIOVolumeSource) - } - if r.TryDecodeAsNil() { - if x.ScaleIO != nil { - x.ScaleIO = nil - } - } else { - if x.ScaleIO == nil { - x.ScaleIO = new(ScaleIOVolumeSource) - } - x.ScaleIO.CodecDecodeSelf(d) - } - case "accessModes": - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv24 := &x.AccessModes - yym25 := z.DecBinary() - _ = yym25 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv24), d) - } - } - case "claimRef": - if r.TryDecodeAsNil() { - if x.ClaimRef != nil { - x.ClaimRef = nil - } - } else { - if x.ClaimRef == nil { - x.ClaimRef = new(ObjectReference) - } - x.ClaimRef.CodecDecodeSelf(d) - } - case "persistentVolumeReclaimPolicy": - if r.TryDecodeAsNil() { - x.PersistentVolumeReclaimPolicy = "" - } else { - yyv27 := &x.PersistentVolumeReclaimPolicy - yyv27.CodecDecodeSelf(d) - } - case "storageClassName": - if r.TryDecodeAsNil() { - x.StorageClassName = "" - } else { - yyv28 := &x.StorageClassName - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - *((*string)(yyv28)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj30 int - var yyb30 bool - var yyhl30 bool = l >= 0 - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv31 := &x.Capacity - yyv31.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.GCEPersistentDisk == nil { - x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.AWSElasticBlockStore == nil { - x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if 
x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.HostPath == nil { - x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.Glusterfs == nil { - x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.NFS == nil { - x.PersistentVolumeSource.NFS = new(NFSVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.RBD == nil { - x.PersistentVolumeSource.RBD = new(RBDVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.ISCSI == nil { - x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.Cinder == nil { - x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.CephFS == nil { - x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.FC == nil { - x.PersistentVolumeSource.FC = new(FCVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.Flocker == nil { - x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.FlexVolume == nil { - x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.AzureFile == nil { - x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.VsphereVolume == nil { - x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.Quobyte == nil { - x.PersistentVolumeSource.Quobyte = new(QuobyteVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Quobyte != nil { - x.Quobyte = nil - } - } else { - if x.Quobyte == nil { - x.Quobyte = new(QuobyteVolumeSource) - } - x.Quobyte.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.AzureDisk == nil { - 
x.PersistentVolumeSource.AzureDisk = new(AzureDiskVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureDisk != nil { - x.AzureDisk = nil - } - } else { - if x.AzureDisk == nil { - x.AzureDisk = new(AzureDiskVolumeSource) - } - x.AzureDisk.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.PhotonPersistentDisk == nil { - x.PersistentVolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PhotonPersistentDisk != nil { - x.PhotonPersistentDisk = nil - } - } else { - if x.PhotonPersistentDisk == nil { - x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) - } - x.PhotonPersistentDisk.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.PortworxVolume == nil { - x.PersistentVolumeSource.PortworxVolume = new(PortworxVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PortworxVolume != nil { - x.PortworxVolume = nil - } - } else { - if x.PortworxVolume == nil { - x.PortworxVolume = new(PortworxVolumeSource) - } - x.PortworxVolume.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.ScaleIO == nil { - x.PersistentVolumeSource.ScaleIO = new(ScaleIOVolumeSource) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ScaleIO != nil { - x.ScaleIO = nil - } - } else { - if x.ScaleIO == nil { - x.ScaleIO = new(ScaleIOVolumeSource) - } - x.ScaleIO.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv51 := &x.AccessModes - yym52 := z.DecBinary() - _ = yym52 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv51), d) - } - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ClaimRef != nil { - x.ClaimRef = nil - } - } else { - if x.ClaimRef == nil { - x.ClaimRef = new(ObjectReference) - } - x.ClaimRef.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PersistentVolumeReclaimPolicy = "" - } else { - yyv54 := &x.PersistentVolumeReclaimPolicy - yyv54.CodecDecodeSelf(d) - } - yyj30++ - if yyhl30 { 
- yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StorageClassName = "" - } else { - yyv55 := &x.StorageClassName - yym56 := z.DecBinary() - _ = yym56 - if false { - } else { - *((*string)(yyv55)) = r.DecodeString() - } - } - for { - yyj30++ - if yyhl30 { - yyb30 = yyj30 > l - } else { - yyb30 = r.CheckBreak() - } - if yyb30 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj30-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PersistentVolumeReclaimPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumeReclaimPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PersistentVolumeStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Phase != "" - yyq2[1] = x.Message != "" - yyq2[2] = x.Reason != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - yyv4 := &x.Phase - yyv4.CodecDecodeSelf(d) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv5 := &x.Message - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*string)(yyv5)) = r.DecodeString() - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv7 := &x.Reason - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*string)(yyv7)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - yyv10 := &x.Phase - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv11 := &x.Message - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - 
return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv13 := &x.Reason - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSlicePersistentVolume((*[]PersistentVolume)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = 
r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSlicePersistentVolume((*[]PersistentVolume)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaim) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaim) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaim) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = 
pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PersistentVolumeClaimSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PersistentVolumeClaimStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaim) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PersistentVolumeClaimSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PersistentVolumeClaimStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaimList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - 
yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else 
{ - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - 
if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaimSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.AccessModes) != 0 - yyq2[1] = x.Selector != nil - yyq2[2] = true - yyq2[3] = x.VolumeName != "" - yyq2[4] = x.StorageClassName != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("accessModes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.Resources - yy10.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.Resources - yy12.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.StorageClassName == nil { - r.EncodeNil() - } else { - yy18 := *x.StorageClassName - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy18)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("storageClassName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StorageClassName == nil { - r.EncodeNil() - } else { - yy20 := *x.StorageClassName - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy20)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "accessModes": - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv4 := &x.AccessModes - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv4), d) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_v1.LabelSelector) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case 
"resources": - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv8 := &x.Resources - yyv8.CodecDecodeSelf(d) - } - case "volumeName": - if r.TryDecodeAsNil() { - x.VolumeName = "" - } else { - yyv9 := &x.VolumeName - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - case "storageClassName": - if r.TryDecodeAsNil() { - if x.StorageClassName != nil { - x.StorageClassName = nil - } - } else { - if x.StorageClassName == nil { - x.StorageClassName = new(string) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(x.StorageClassName)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv14 := &x.AccessModes - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv14), d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_v1.LabelSelector) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv18 := &x.Resources - yyv18.CodecDecodeSelf(d) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeName = "" - } else { - yyv19 := &x.VolumeName - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StorageClassName != nil { - x.StorageClassName = nil - } - } else { - if x.StorageClassName == nil { - x.StorageClassName = new(string) - } - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(x.StorageClassName)) = r.DecodeString() - } - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaimStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Phase != "" - yyq2[1] = len(x.AccessModes) != 0 - yyq2[2] = len(x.Capacity) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("accessModes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capacity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - yyv4 := &x.Phase - yyv4.CodecDecodeSelf(d) - } - case "accessModes": - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv5 := &x.AccessModes - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv5), d) - } - } - case "capacity": - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv7 := &x.Capacity - yyv7.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - yyv9 := &x.Phase - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv10 := &x.AccessModes - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv10), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv12 := &x.Capacity - yyv12.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PersistentVolumeAccessMode) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumeAccessMode) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && 
z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x PersistentVolumePhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumePhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x PersistentVolumeClaimPhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumeClaimPhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *HostPathVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HostPathVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HostPathVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv4 := &x.Path - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HostPathVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv7 := &x.Path - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*string)(yyv7)) = r.DecodeString() - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EmptyDirVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Medium != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Medium.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("medium")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Medium.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EmptyDirVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == 
codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EmptyDirVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "medium": - if r.TryDecodeAsNil() { - x.Medium = "" - } else { - yyv4 := &x.Medium - yyv4.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EmptyDirVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Medium = "" - } else { - yyv6 := &x.Medium - yyv6.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *GlusterfsVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("endpoints")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *GlusterfsVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *GlusterfsVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "endpoints": - if r.TryDecodeAsNil() { - x.EndpointsName = "" - } else { - yyv4 := &x.EndpointsName - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv6 := &x.Path - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv8 := &x.ReadOnly - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(yyv8)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *GlusterfsVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
x.EndpointsName = "" - } else { - yyv11 := &x.EndpointsName - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv13 := &x.Path - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv15 := &x.ReadOnly - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(yyv15)) = r.DecodeBool() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.FSType != "" - yyq2[3] = x.RBDPool != "" - yyq2[4] = x.RadosUser != "" - yyq2[5] = x.Keyring != "" - yyq2[6] = x.SecretRef != nil - yyq2[7] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.CephMonitors == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.CephMonitors, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("monitors")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CephMonitors == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.CephMonitors, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { 
- z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pool")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("keyring")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RBDVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = 
h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RBDVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "monitors": - if r.TryDecodeAsNil() { - x.CephMonitors = nil - } else { - yyv4 := &x.CephMonitors - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "image": - if r.TryDecodeAsNil() { - x.RBDImage = "" - } else { - yyv6 := &x.RBDImage - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv8 := &x.FSType - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "pool": - if r.TryDecodeAsNil() { - x.RBDPool = "" - } else { - yyv10 := &x.RBDPool - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "user": - if r.TryDecodeAsNil() { - x.RadosUser = "" - } else { - yyv12 := &x.RadosUser - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - case "keyring": - if r.TryDecodeAsNil() { - x.Keyring = "" - } else { - yyv14 := &x.Keyring - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv17 := &x.ReadOnly - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*bool)(yyv17)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj19 int - var yyb19 bool - var yyhl19 bool = l >= 0 - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CephMonitors = nil - } else { - 
yyv20 := &x.CephMonitors - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - z.F.DecSliceStringX(yyv20, false, d) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RBDImage = "" - } else { - yyv22 := &x.RBDImage - yym23 := z.DecBinary() - _ = yym23 - if false { - } else { - *((*string)(yyv22)) = r.DecodeString() - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv24 := &x.FSType - yym25 := z.DecBinary() - _ = yym25 - if false { - } else { - *((*string)(yyv24)) = r.DecodeString() - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RBDPool = "" - } else { - yyv26 := &x.RBDPool - yym27 := z.DecBinary() - _ = yym27 - if false { - } else { - *((*string)(yyv26)) = r.DecodeString() - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RadosUser = "" - } else { - yyv28 := &x.RadosUser - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - *((*string)(yyv28)) = r.DecodeString() - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Keyring = "" - } else { - yyv30 := &x.Keyring - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - *((*string)(yyv30)) = r.DecodeString() - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv33 := &x.ReadOnly - yym34 := z.DecBinary() - _ = yym34 - if false { - } else { - *((*bool)(yyv33)) = r.DecodeBool() - } - } - for { - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj19-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CinderVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - 
r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - yyq2[2] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CinderVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CinderVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - 
} - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "volumeID": - if r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - yyv4 := &x.VolumeID - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv6 := &x.FSType - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv8 := &x.ReadOnly - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(yyv8)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CinderVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - yyv11 := &x.VolumeID - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv13 := &x.FSType - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv15 := &x.ReadOnly - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(yyv15)) = r.DecodeBool() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Path != "" - yyq2[2] = x.User != "" - yyq2[3] = x.SecretFile != "" - yyq2[4] = x.SecretRef != nil - yyq2[5] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Monitors == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Monitors, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("monitors")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Monitors == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Monitors, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CephFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CephFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "monitors": - if r.TryDecodeAsNil() { - x.Monitors = nil - } else { - yyv4 := &x.Monitors - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv6 := &x.Path - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "user": - if r.TryDecodeAsNil() { - x.User = "" - } else { - yyv8 := &x.User - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "secretFile": - if r.TryDecodeAsNil() { - x.SecretFile = "" - } else { - yyv10 := &x.SecretFile - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv13 := &x.ReadOnly - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*bool)(yyv13)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CephFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj15 int - var yyb15 bool - var yyhl15 bool = l >= 0 - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Monitors = nil - } else { - yyv16 := &x.Monitors - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - z.F.DecSliceStringX(yyv16, false, d) - } - } 
- yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv18 := &x.Path - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*string)(yyv18)) = r.DecodeString() - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = "" - } else { - yyv20 := &x.User - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - *((*string)(yyv20)) = r.DecodeString() - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SecretFile = "" - } else { - yyv22 := &x.SecretFile - yym23 := z.DecBinary() - _ = yym23 - if false { - } else { - *((*string)(yyv22)) = r.DecodeString() - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv25 := &x.ReadOnly - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*bool)(yyv25)) = r.DecodeBool() - } - } - for { - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj15-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *FlockerVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.DatasetName != "" - yyq2[1] = x.DatasetUUID != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("datasetName")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DatasetUUID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("datasetUUID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DatasetUUID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FlockerVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FlockerVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "datasetName": - if r.TryDecodeAsNil() { - x.DatasetName = "" - } else { - yyv4 := &x.DatasetName - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "datasetUUID": - if r.TryDecodeAsNil() { - x.DatasetUUID = "" - } else { - yyv6 := &x.DatasetUUID - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FlockerVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DatasetName = "" - } else { - yyv9 := &x.DatasetName - yym10 := z.DecBinary() - _ = yym10 - if false { - } else 
{ - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DatasetUUID = "" - } else { - yyv11 := &x.DatasetUUID - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x StorageMedium) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *StorageMedium) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x Protocol) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *Protocol) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *GCEPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - yyq2[2] = x.Partition != 0 - yyq2[3] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pdName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("partition")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *GCEPersistentDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "pdName": - if r.TryDecodeAsNil() { - x.PDName = "" - } else { - yyv4 := &x.PDName - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv6 := &x.FSType - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "partition": - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - yyv8 := &x.Partition - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(yyv8)) = 
int32(r.DecodeInt(32)) - } - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv10 := &x.ReadOnly - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(yyv10)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PDName = "" - } else { - yyv13 := &x.PDName - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv15 := &x.FSType - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - yyv17 := &x.Partition - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*int32)(yyv17)) = int32(r.DecodeInt(32)) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv19 := &x.ReadOnly - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*bool)(yyv19)) = r.DecodeBool() - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *QuobyteVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.ReadOnly != false - yyq2[3] = x.User != "" - yyq2[4] = x.Group != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Registry)) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("registry")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Registry)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Volume)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Volume)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Group)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("group")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Group)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *QuobyteVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func 
(x *QuobyteVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "registry": - if r.TryDecodeAsNil() { - x.Registry = "" - } else { - yyv4 := &x.Registry - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "volume": - if r.TryDecodeAsNil() { - x.Volume = "" - } else { - yyv6 := &x.Volume - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv8 := &x.ReadOnly - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(yyv8)) = r.DecodeBool() - } - } - case "user": - if r.TryDecodeAsNil() { - x.User = "" - } else { - yyv10 := &x.User - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "group": - if r.TryDecodeAsNil() { - x.Group = "" - } else { - yyv12 := &x.Group - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *QuobyteVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Registry = "" - } else { - yyv15 := &x.Registry - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Volume = "" - } else { - yyv17 := &x.Volume - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv19 := &x.ReadOnly - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*bool)(yyv19)) = r.DecodeBool() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = "" - } else { - yyv21 := &x.User - yym22 := 
z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Group = "" - } else { - yyv23 := &x.Group - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*string)(yyv23)) = r.DecodeString() - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - yyq2[2] = x.SecretRef != nil - yyq2[3] = x.ReadOnly != false - yyq2[4] = len(x.Options) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("driver")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Options == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - z.F.EncMapStringStringV(x.Options, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("options")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Options == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - z.F.EncMapStringStringV(x.Options, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FlexVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FlexVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "driver": - if r.TryDecodeAsNil() { - x.Driver = "" - } else { - yyv4 := &x.Driver - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv6 := &x.FSType - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv9 := &x.ReadOnly - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*bool)(yyv9)) = r.DecodeBool() - } - } - case "options": - if r.TryDecodeAsNil() { - x.Options = nil - } else { - yyv11 := &x.Options - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - z.F.DecMapStringStringX(yyv11, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // 
end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FlexVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Driver = "" - } else { - yyv14 := &x.Driver - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv16 := &x.FSType - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv19 := &x.ReadOnly - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*bool)(yyv19)) = r.DecodeBool() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Options = nil - } else { - yyv21 := &x.Options - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - z.F.DecMapStringStringX(yyv21, false, d) - } - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *AWSElasticBlockStoreVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - yyq2[2] = x.Partition != 0 - yyq2[3] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = 
yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("partition")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *AWSElasticBlockStoreVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - 
yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "volumeID": - if r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - yyv4 := &x.VolumeID - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv6 := &x.FSType - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "partition": - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - yyv8 := &x.Partition - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(yyv8)) = int32(r.DecodeInt(32)) - } - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv10 := &x.ReadOnly - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(yyv10)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - yyv13 := &x.VolumeID - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv15 := &x.FSType - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - yyv17 := &x.Partition - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*int32)(yyv17)) = int32(r.DecodeInt(32)) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv19 := &x.ReadOnly - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*bool)(yyv19)) = r.DecodeBool() - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *GitRepoVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := 
z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Revision != "" - yyq2[2] = x.Directory != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("repository")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revision")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("directory")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *GitRepoVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *GitRepoVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { 
- break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "repository": - if r.TryDecodeAsNil() { - x.Repository = "" - } else { - yyv4 := &x.Repository - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "revision": - if r.TryDecodeAsNil() { - x.Revision = "" - } else { - yyv6 := &x.Revision - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "directory": - if r.TryDecodeAsNil() { - x.Directory = "" - } else { - yyv8 := &x.Directory - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *GitRepoVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Repository = "" - } else { - yyv11 := &x.Repository - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Revision = "" - } else { - yyv13 := &x.Revision - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Directory = "" - } else { - yyv15 := &x.Directory - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.SecretName != "" - yyq2[1] = len(x.Items) != 0 - yyq2[2] = x.DefaultMode != nil - yyq2[3] = x.Optional != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - 
r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Items == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.DefaultMode == nil { - r.EncodeNil() - } else { - yy10 := *x.DefaultMode - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(yy10)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DefaultMode == nil { - r.EncodeNil() - } else { - yy12 := *x.DefaultMode - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(yy12)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Optional == nil { - r.EncodeNil() - } else { - yy15 := *x.Optional - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeBool(bool(yy15)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("optional")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Optional == nil { - r.EncodeNil() - } else { - yy17 := *x.Optional - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeBool(bool(yy17)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecretVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecretVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "secretName": - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - yyv4 := &x.SecretName - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) - } - } - case "defaultMode": - if r.TryDecodeAsNil() { - if x.DefaultMode != nil { - x.DefaultMode = nil - } - } else { - if x.DefaultMode == nil { - x.DefaultMode = new(int32) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) - } - } - case "optional": - if r.TryDecodeAsNil() { - if x.Optional != nil { - x.Optional = nil - } - } else { - if x.Optional == nil { - x.Optional = new(bool) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(x.Optional)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - yyv13 := &x.SecretName - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv15 := &x.Items - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv15), d) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DefaultMode != nil { - x.DefaultMode = nil - } - } else { - if x.DefaultMode == nil { - x.DefaultMode = new(int32) - } - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Optional != nil { - x.Optional = nil - } - } else { - if x.Optional == nil { - x.Optional = new(bool) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*bool)(x.Optional)) = r.DecodeBool() - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SecretProjection) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[1] = len(x.Items) != 0 - yyq2[2] = x.Optional != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Items == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Optional == nil { - r.EncodeNil() - } else { - yy10 := *x.Optional - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(yy10)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("optional")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Optional == nil { - r.EncodeNil() - } else { - yy12 := *x.Optional - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(yy12)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x 
*SecretProjection) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecretProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) - } - } - case "optional": - if r.TryDecodeAsNil() { - if x.Optional != nil { - x.Optional = nil - } - } else { - if x.Optional == nil { - x.Optional = new(bool) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(x.Optional)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecretProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv11 := &x.Name - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Optional != nil { - x.Optional = nil 
- } - } else { - if x.Optional == nil { - x.Optional = new(bool) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(x.Optional)) = r.DecodeBool() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Server)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("server")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Server)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "server": - if r.TryDecodeAsNil() { - x.Server = "" - } else { - yyv4 := &x.Server - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv6 := &x.Path - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv8 := &x.ReadOnly - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(yyv8)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Server = "" - } else { - yyv11 := &x.Server - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv13 := &x.Path - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv15 := &x.ReadOnly - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(yyv15)) = r.DecodeBool() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := 
!z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[3] = x.ISCSIInterface != "" - yyq2[4] = x.FSType != "" - yyq2[5] = x.ReadOnly != false - yyq2[6] = len(x.Portals) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetPortal")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iqn")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.Lun)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lun")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.Lun)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsiInterface")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.Portals == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - z.F.EncSliceStringV(x.Portals, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("portals")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Portals == nil { - r.EncodeNil() - } else { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - z.F.EncSliceStringV(x.Portals, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ISCSIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ISCSIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "targetPortal": - if r.TryDecodeAsNil() { - x.TargetPortal = "" - } else { - yyv4 := &x.TargetPortal - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "iqn": - if r.TryDecodeAsNil() { - x.IQN = "" - } else { - yyv6 := &x.IQN - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "lun": - if r.TryDecodeAsNil() { - x.Lun = 0 - } else { - yyv8 := &x.Lun - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(yyv8)) = int32(r.DecodeInt(32)) - } - } - case "iscsiInterface": - if r.TryDecodeAsNil() { - x.ISCSIInterface = "" - } else { - yyv10 := &x.ISCSIInterface - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv12 := &x.FSType - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv14 
:= &x.ReadOnly - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*bool)(yyv14)) = r.DecodeBool() - } - } - case "portals": - if r.TryDecodeAsNil() { - x.Portals = nil - } else { - yyv16 := &x.Portals - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - z.F.DecSliceStringX(yyv16, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetPortal = "" - } else { - yyv19 := &x.TargetPortal - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IQN = "" - } else { - yyv21 := &x.IQN - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Lun = 0 - } else { - yyv23 := &x.Lun - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*int32)(yyv23)) = int32(r.DecodeInt(32)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ISCSIInterface = "" - } else { - yyv25 := &x.ISCSIInterface - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*string)(yyv25)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv27 := &x.FSType - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*string)(yyv27)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv29 := &x.ReadOnly - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*bool)(yyv29)) = r.DecodeBool() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Portals = nil - } else { - yyv31 := &x.Portals - yym32 := 
z.DecBinary() - _ = yym32 - if false { - } else { - z.F.DecSliceStringX(yyv31, false, d) - } - } - for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *FCVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.FSType != "" - yyq2[3] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.TargetWWNs == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.TargetWWNs, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetWWNs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TargetWWNs == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.TargetWWNs, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Lun == nil { - r.EncodeNil() - } else { - yy7 := *x.Lun - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(yy7)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lun")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Lun == nil { - r.EncodeNil() - } else { - yy9 := *x.Lun - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FCVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FCVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "targetWWNs": - if r.TryDecodeAsNil() { - x.TargetWWNs = nil - } else { - yyv4 := &x.TargetWWNs - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "lun": - if r.TryDecodeAsNil() { - if x.Lun != nil { - x.Lun = nil - } - } else { - if x.Lun == nil { - x.Lun = new(int32) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) - } - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv8 := &x.FSType - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv10 := &x.ReadOnly - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(yyv10)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FCVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetWWNs = nil - } else { - yyv13 := &x.TargetWWNs - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - z.F.DecSliceStringX(yyv13, false, d) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Lun != nil { - x.Lun = nil - } - } else { - if x.Lun == nil { - x.Lun = new(int32) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int32)(x.Lun)) = 
int32(r.DecodeInt(32)) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv17 := &x.FSType - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv19 := &x.ReadOnly - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*bool)(yyv19)) = r.DecodeBool() - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *AzureFileVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ShareName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("shareName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ShareName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *AzureFileVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *AzureFileVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "secretName": - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - yyv4 := &x.SecretName - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "shareName": - if r.TryDecodeAsNil() { - x.ShareName = "" - } else { - yyv6 := &x.ShareName - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv8 := &x.ReadOnly - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(yyv8)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *AzureFileVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - yyv11 := &x.SecretName - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ShareName = "" - } else { - yyv13 := &x.ShareName - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - 
if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv15 := &x.ReadOnly - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(yyv15)) = r.DecodeBool() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *VsphereVirtualDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumePath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *VsphereVirtualDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for 
yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "volumePath": - if r.TryDecodeAsNil() { - x.VolumePath = "" - } else { - yyv4 := &x.VolumePath - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv6 := &x.FSType - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumePath = "" - } else { - yyv9 := &x.VolumePath - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv11 := &x.FSType - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PhotonPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PdID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pdID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PdID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = 
yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PhotonPersistentDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PhotonPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "pdID": - if r.TryDecodeAsNil() { - x.PdID = "" - } else { - yyv4 := &x.PdID - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv6 := &x.FSType - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PhotonPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PdID = "" - } else { - yyv9 := &x.PdID - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv11 := 
&x.FSType - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x AzureDataDiskCachingMode) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *AzureDataDiskCachingMode) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *AzureDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.CachingMode != nil - yyq2[3] = x.FSType != nil - yyq2[4] = x.ReadOnly != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DiskName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("diskName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DiskName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DataDiskURI)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("diskURI")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DataDiskURI)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.CachingMode == nil { - r.EncodeNil() - } else { - yy10 := *x.CachingMode - yy10.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cachingMode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CachingMode == nil { - r.EncodeNil() - } else { - yy12 := *x.CachingMode - yy12.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.FSType == nil { - r.EncodeNil() - } else { - yy15 := 
*x.FSType - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy15)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FSType == nil { - r.EncodeNil() - } else { - yy17 := *x.FSType - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy17)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.ReadOnly == nil { - r.EncodeNil() - } else { - yy20 := *x.ReadOnly - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeBool(bool(yy20)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ReadOnly == nil { - r.EncodeNil() - } else { - yy22 := *x.ReadOnly - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeBool(bool(yy22)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *AzureDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *AzureDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "diskName": - if r.TryDecodeAsNil() { - x.DiskName = "" - } else { - yyv4 := &x.DiskName - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "diskURI": - if r.TryDecodeAsNil() { - x.DataDiskURI = "" - } else { - yyv6 := &x.DataDiskURI - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "cachingMode": - if r.TryDecodeAsNil() { - if x.CachingMode != nil { - x.CachingMode = nil - } - } else { - if x.CachingMode == nil { - x.CachingMode = new(AzureDataDiskCachingMode) - } - x.CachingMode.CodecDecodeSelf(d) - } - case "fsType": - if r.TryDecodeAsNil() { - if x.FSType != nil { - x.FSType = nil - } - } else 
{ - if x.FSType == nil { - x.FSType = new(string) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(x.FSType)) = r.DecodeString() - } - } - case "readOnly": - if r.TryDecodeAsNil() { - if x.ReadOnly != nil { - x.ReadOnly = nil - } - } else { - if x.ReadOnly == nil { - x.ReadOnly = new(bool) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(x.ReadOnly)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DiskName = "" - } else { - yyv14 := &x.DiskName - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DataDiskURI = "" - } else { - yyv16 := &x.DataDiskURI - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CachingMode != nil { - x.CachingMode = nil - } - } else { - if x.CachingMode == nil { - x.CachingMode = new(AzureDataDiskCachingMode) - } - x.CachingMode.CodecDecodeSelf(d) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FSType != nil { - x.FSType = nil - } - } else { - if x.FSType == nil { - x.FSType = new(string) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(x.FSType)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ReadOnly != nil { - x.ReadOnly = nil - } - } else { - if x.ReadOnly == nil { - x.ReadOnly = new(bool) - } - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*bool)(x.ReadOnly)) = r.DecodeBool() - } - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PortworxVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() 
- } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - yyq2[2] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PortworxVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PortworxVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "volumeID": - if r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - yyv4 := &x.VolumeID - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv6 := &x.FSType - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv8 := &x.ReadOnly - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(yyv8)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PortworxVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - yyv11 := &x.VolumeID - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - yyv13 := &x.FSType - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - yyv15 := &x.ReadOnly - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(yyv15)) = r.DecodeBool() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScaleIOVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [10]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[3] = x.SSLEnabled != false - yyq2[4] = x.ProtectionDomain != "" - yyq2[5] = x.StoragePool != "" - yyq2[6] = x.StorageMode != "" - yyq2[7] = x.VolumeName != "" - yyq2[8] = x.FSType != "" - yyq2[9] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(10) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - 
[Deleted hunk, flattened by extraction and elided here for readability: the remainder of the ugorji codec-generated (de)serialization code removed from the vendored Kubernetes client as part of the kubernetes-1.11.0 upgrade. The removed lines are the machine-generated CodecEncodeSelf / CodecDecodeSelf methods and their codecDecodeSelfFromMap / codecDecodeSelfFromArray helpers for ScaleIOVolumeSource, ConfigMapVolumeSource, ConfigMapProjection, ProjectedVolumeSource, VolumeProjection, KeyToPath, ContainerPort, VolumeMount, and EnvVar, followed by the truncated start of the same generated code for EnvVarSource. Each method hand-rolls the per-field map/array encoding and decoding for keys such as "gateway", "system", "secretRef", "sslEnabled", "name", "items", "defaultMode", "optional", "key", "path", "mode", "hostPort", "containerPort", "protocol", "hostIP", "mountPath", "subPath", "value", and "valueFrom"; the upgraded client no longer ships these generated codecs.]
codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.FieldRef != nil - yyq2[1] = x.ResourceFieldRef != nil - yyq2[2] = x.ConfigMapKeyRef != nil - yyq2[3] = x.SecretKeyRef != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.FieldRef == nil { - r.EncodeNil() - } else { - x.FieldRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FieldRef == nil { - r.EncodeNil() - } else { - x.FieldRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ConfigMapKeyRef == nil { - r.EncodeNil() - } else { - x.ConfigMapKeyRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configMapKeyRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ConfigMapKeyRef == nil { - r.EncodeNil() - } else { - x.ConfigMapKeyRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.SecretKeyRef == nil { - r.EncodeNil() - } else { - x.SecretKeyRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretKeyRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretKeyRef == nil { - r.EncodeNil() - } else { - x.SecretKeyRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EnvVarSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == 
codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EnvVarSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "fieldRef": - if r.TryDecodeAsNil() { - if x.FieldRef != nil { - x.FieldRef = nil - } - } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) - } - x.FieldRef.CodecDecodeSelf(d) - } - case "resourceFieldRef": - if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } - } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) - } - x.ResourceFieldRef.CodecDecodeSelf(d) - } - case "configMapKeyRef": - if r.TryDecodeAsNil() { - if x.ConfigMapKeyRef != nil { - x.ConfigMapKeyRef = nil - } - } else { - if x.ConfigMapKeyRef == nil { - x.ConfigMapKeyRef = new(ConfigMapKeySelector) - } - x.ConfigMapKeyRef.CodecDecodeSelf(d) - } - case "secretKeyRef": - if r.TryDecodeAsNil() { - if x.SecretKeyRef != nil { - x.SecretKeyRef = nil - } - } else { - if x.SecretKeyRef == nil { - x.SecretKeyRef = new(SecretKeySelector) - } - x.SecretKeyRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FieldRef != nil { - x.FieldRef = nil - } - } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) - } - x.FieldRef.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } - } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) - } - x.ResourceFieldRef.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ConfigMapKeyRef != nil { - x.ConfigMapKeyRef = nil - } - } else { - if x.ConfigMapKeyRef == nil { - x.ConfigMapKeyRef = new(ConfigMapKeySelector) - } - x.ConfigMapKeyRef.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - 
yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretKeyRef != nil { - x.SecretKeyRef = nil - } - } else { - if x.SecretKeyRef == nil { - x.SecretKeyRef = new(SecretKeySelector) - } - x.SecretKeyRef.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ObjectFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - 
var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv4 := &x.APIVersion - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "fieldPath": - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - yyv6 := &x.FieldPath - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv9 := &x.APIVersion - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - yyv11 := &x.FieldPath - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ContainerName != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("containerName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.Divisor - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("divisor")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.Divisor - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "containerName": - if r.TryDecodeAsNil() { - x.ContainerName = "" - } else { - yyv4 := &x.ContainerName - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "resource": - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - yyv6 := &x.Resource - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "divisor": - if 
r.TryDecodeAsNil() { - x.Divisor = pkg3_resource.Quantity{} - } else { - yyv8 := &x.Divisor - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerName = "" - } else { - yyv11 := &x.ContainerName - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - yyv13 := &x.Resource - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Divisor = pkg3_resource.Quantity{} - } else { - yyv15 := &x.Divisor - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv15) - } else { - z.DecFallback(yyv15, false) - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[2] = x.Optional != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Optional == nil { - r.EncodeNil() - } else { - yy10 := *x.Optional - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(yy10)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("optional")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Optional == nil { - r.EncodeNil() - } else { - yy12 := *x.Optional - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(yy12)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv6 := &x.Key - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "optional": - if r.TryDecodeAsNil() { - if x.Optional != nil { - x.Optional = nil - } - } else { - if x.Optional == nil { - x.Optional = new(bool) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - 
*((*bool)(x.Optional)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv11 := &x.Name - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv13 := &x.Key - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Optional != nil { - x.Optional = nil - } - } else { - if x.Optional == nil { - x.Optional = new(bool) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(x.Optional)) = r.DecodeBool() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[2] = x.Optional != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Optional == nil { - r.EncodeNil() - } else { - yy10 := *x.Optional - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(yy10)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("optional")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Optional == nil { - r.EncodeNil() - } else { - yy12 := *x.Optional - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(yy12)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv6 := &x.Key - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "optional": - if r.TryDecodeAsNil() { - if x.Optional != nil { - x.Optional = nil - } - } else { - if x.Optional == nil { - x.Optional = new(bool) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(x.Optional)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool 
= l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv11 := &x.Name - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv13 := &x.Key - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Optional != nil { - x.Optional = nil - } - } else { - if x.Optional == nil { - x.Optional = new(bool) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(x.Optional)) = r.DecodeBool() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EnvFromSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Prefix != "" - yyq2[1] = x.ConfigMapRef != nil - yyq2[2] = x.SecretRef != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Prefix)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("prefix")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Prefix)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.ConfigMapRef == nil { - r.EncodeNil() - } else { - x.ConfigMapRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configMapRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ConfigMapRef == nil { - r.EncodeNil() - } else { - x.ConfigMapRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EnvFromSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EnvFromSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "prefix": - if r.TryDecodeAsNil() { - x.Prefix = "" - } else { - yyv4 := &x.Prefix - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "configMapRef": - if r.TryDecodeAsNil() { - if x.ConfigMapRef != nil { - x.ConfigMapRef = nil - } - } else { - if x.ConfigMapRef == nil { - x.ConfigMapRef = new(ConfigMapEnvSource) - } - x.ConfigMapRef.CodecDecodeSelf(d) - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(SecretEnvSource) - } - x.SecretRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EnvFromSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Prefix = "" - } else { - yyv9 := &x.Prefix - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - 
if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ConfigMapRef != nil { - x.ConfigMapRef = nil - } - } else { - if x.ConfigMapRef == nil { - x.ConfigMapRef = new(ConfigMapEnvSource) - } - x.ConfigMapRef.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(SecretEnvSource) - } - x.SecretRef.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ConfigMapEnvSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[1] = x.Optional != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Optional == nil { - r.EncodeNil() - } else { - yy7 := *x.Optional - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(yy7)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("optional")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Optional == nil { - r.EncodeNil() - } else { - yy9 := *x.Optional - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(yy9)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ConfigMapEnvSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := 
r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ConfigMapEnvSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "optional": - if r.TryDecodeAsNil() { - if x.Optional != nil { - x.Optional = nil - } - } else { - if x.Optional == nil { - x.Optional = new(bool) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*bool)(x.Optional)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ConfigMapEnvSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv9 := &x.Name - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Optional != nil { - x.Optional = nil - } - } else { - if x.Optional == nil { - x.Optional = new(bool) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(x.Optional)) = r.DecodeBool() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SecretEnvSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[1] = x.Optional != nil - 
var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Optional == nil { - r.EncodeNil() - } else { - yy7 := *x.Optional - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(yy7)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("optional")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Optional == nil { - r.EncodeNil() - } else { - yy9 := *x.Optional - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(yy9)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecretEnvSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecretEnvSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "optional": - if r.TryDecodeAsNil() { - if x.Optional != nil { - x.Optional = nil - } - } else { - if x.Optional == nil { - x.Optional = new(bool) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*bool)(x.Optional)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // 
end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecretEnvSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv9 := &x.Name - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Optional != nil { - x.Optional = nil - } - } else { - if x.Optional == nil { - x.Optional = new(bool) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(x.Optional)) = r.DecodeBool() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPHeader) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - 
yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPHeader) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv6 := &x.Value - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPHeader) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv9 := &x.Name - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv11 := &x.Value - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPGetAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Path != "" - yyq2[2] = x.Host != "" - yyq2[3] = x.Scheme != "" - yyq2[4] = len(x.HTTPHeaders) != 0 - var yynn2 int - if 
yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.Port - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.Port - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("host")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - x.Scheme.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scheme")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Scheme.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.HTTPHeaders == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("httpHeaders")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HTTPHeaders == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPGetAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 
:= z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPGetAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv4 := &x.Path - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "port": - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} - } else { - yyv6 := &x.Port - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "host": - if r.TryDecodeAsNil() { - x.Host = "" - } else { - yyv8 := &x.Host - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "scheme": - if r.TryDecodeAsNil() { - x.Scheme = "" - } else { - yyv10 := &x.Scheme - yyv10.CodecDecodeSelf(d) - } - case "httpHeaders": - if r.TryDecodeAsNil() { - x.HTTPHeaders = nil - } else { - yyv11 := &x.HTTPHeaders - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceHTTPHeader((*[]HTTPHeader)(yyv11), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv14 := &x.Path - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} - } else { - yyv16 := &x.Port - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(yyv16) { - } else if !yym17 && z.IsJSONHandle() { - 
z.DecJSONUnmarshal(yyv16) - } else { - z.DecFallback(yyv16, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Host = "" - } else { - yyv18 := &x.Host - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*string)(yyv18)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Scheme = "" - } else { - yyv20 := &x.Scheme - yyv20.CodecDecodeSelf(d) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HTTPHeaders = nil - } else { - yyv21 := &x.HTTPHeaders - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - h.decSliceHTTPHeader((*[]HTTPHeader)(yyv21), d) - } - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x URIScheme) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *URIScheme) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *TCPSocketAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.Port - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else if !yym5 && z.IsJSONHandle() { - z.EncJSONMarshal(yy4) - } else { - z.EncFallback(yy4) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Port - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(yy6) - } else { - z.EncFallback(yy6) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TCPSocketAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *TCPSocketAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "port": - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} - } else { - yyv4 := &x.Port - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4) - } else { - z.DecFallback(yyv4, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *TCPSocketAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} - } else { - yyv7 := &x.Port - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ExecAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Command) != 0 - var yynn2 int - if yyr2 || yy2arr2 
{ - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Command == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("command")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Command == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ExecAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ExecAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "command": - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv4 := &x.Command - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ExecAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv7 := &x.Command - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - z.F.DecSliceStringX(yyv7, false, d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Handler.Exec != nil && x.Exec != nil - yyq2[1] = x.Handler.HTTPGet != nil && x.HTTPGet != nil - yyq2[2] = x.Handler.TCPSocket != nil && x.TCPSocket != nil - yyq2[3] = x.InitialDelaySeconds != 0 - yyq2[4] = x.TimeoutSeconds != 0 - yyq2[5] = x.PeriodSeconds != 0 - yyq2[6] = x.SuccessThreshold != 0 - yyq2[7] = x.FailureThreshold != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - var yyn3 bool - if x.Handler.Exec == nil { - yyn3 = true - goto LABEL3 - } - LABEL3: - if yyr2 || yy2arr2 { - if yyn3 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn3 { - r.EncodeNil() - } else { - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } - } - } - var yyn6 bool - if x.Handler.HTTPGet == nil { - yyn6 = true - goto LABEL6 - } - LABEL6: - if yyr2 || yy2arr2 { - if yyn6 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.HTTPGet == nil { - r.EncodeNil() - } else { - x.HTTPGet.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("httpGet")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn6 { - r.EncodeNil() - } else { - if x.HTTPGet == nil { - r.EncodeNil() - } else { - x.HTTPGet.CodecEncodeSelf(e) - } - } - } - } - var yyn9 bool - if x.Handler.TCPSocket == nil { - yyn9 = true - goto LABEL9 - } - LABEL9: - if yyr2 || yy2arr2 { - if yyn9 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn9 { - r.EncodeNil() - } else { - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.InitialDelaySeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("initialDelaySeconds")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.InitialDelaySeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.TimeoutSeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.TimeoutSeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.PeriodSeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("periodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.PeriodSeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeInt(int64(x.SuccessThreshold)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("successThreshold")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeInt(int64(x.SuccessThreshold)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeInt(int64(x.FailureThreshold)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failureThreshold")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeInt(int64(x.FailureThreshold)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Probe) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Probe) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() 
// default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "exec": - if x.Handler.Exec == nil { - x.Handler.Exec = new(ExecAction) - } - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - case "httpGet": - if x.Handler.HTTPGet == nil { - x.Handler.HTTPGet = new(HTTPGetAction) - } - if r.TryDecodeAsNil() { - if x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - case "tcpSocket": - if x.Handler.TCPSocket == nil { - x.Handler.TCPSocket = new(TCPSocketAction) - } - if r.TryDecodeAsNil() { - if x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - case "initialDelaySeconds": - if r.TryDecodeAsNil() { - x.InitialDelaySeconds = 0 - } else { - yyv7 := &x.InitialDelaySeconds - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*int32)(yyv7)) = int32(r.DecodeInt(32)) - } - } - case "timeoutSeconds": - if r.TryDecodeAsNil() { - x.TimeoutSeconds = 0 - } else { - yyv9 := &x.TimeoutSeconds - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*int32)(yyv9)) = int32(r.DecodeInt(32)) - } - } - case "periodSeconds": - if r.TryDecodeAsNil() { - x.PeriodSeconds = 0 - } else { - yyv11 := &x.PeriodSeconds - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int32)(yyv11)) = int32(r.DecodeInt(32)) - } - } - case "successThreshold": - if r.TryDecodeAsNil() { - x.SuccessThreshold = 0 - } else { - yyv13 := &x.SuccessThreshold - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*int32)(yyv13)) = int32(r.DecodeInt(32)) - } - } - case "failureThreshold": - if r.TryDecodeAsNil() { - x.FailureThreshold = 0 - } else { - yyv15 := &x.FailureThreshold - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int32)(yyv15)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj17 int - var yyb17 bool - var yyhl17 bool = l >= 0 - if x.Handler.Exec == nil { - x.Handler.Exec = new(ExecAction) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - if x.Handler.HTTPGet == nil { - x.Handler.HTTPGet = new(HTTPGetAction) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if 
x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - if x.Handler.TCPSocket == nil { - x.Handler.TCPSocket = new(TCPSocketAction) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.InitialDelaySeconds = 0 - } else { - yyv21 := &x.InitialDelaySeconds - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int32)(yyv21)) = int32(r.DecodeInt(32)) - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TimeoutSeconds = 0 - } else { - yyv23 := &x.TimeoutSeconds - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*int32)(yyv23)) = int32(r.DecodeInt(32)) - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PeriodSeconds = 0 - } else { - yyv25 := &x.PeriodSeconds - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*int32)(yyv25)) = int32(r.DecodeInt(32)) - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SuccessThreshold = 0 - } else { - yyv27 := &x.SuccessThreshold - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*int32)(yyv27)) = int32(r.DecodeInt(32)) - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FailureThreshold = 0 - } else { - yyv29 := &x.FailureThreshold - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*int32)(yyv29)) = int32(r.DecodeInt(32)) - } - } - for { - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj17-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PullPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PullPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x TerminationMessagePolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *TerminationMessagePolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x Capability) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *Capability) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Capabilities) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Add) != 0 - yyq2[1] = len(x.Drop) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Add == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Add), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("add")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Add == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Add), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Drop == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Drop), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("drop")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Drop == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Drop), e) - } - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Capabilities) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Capabilities) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "add": - if r.TryDecodeAsNil() { - x.Add = nil - } else { - yyv4 := &x.Add - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv4), d) - } - } - case "drop": - if r.TryDecodeAsNil() { - x.Drop = nil - } else { - yyv6 := &x.Drop - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Capabilities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Add = nil - } else { - yyv9 := &x.Add - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Drop = nil - } else { - yyv11 := &x.Drop - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceRequirements) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = 
h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Limits) != 0 - yyq2[1] = len(x.Requests) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Limits == nil { - r.EncodeNil() - } else { - x.Limits.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("limits")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Limits == nil { - r.EncodeNil() - } else { - x.Limits.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Requests == nil { - r.EncodeNil() - } else { - x.Requests.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requests")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Requests == nil { - r.EncodeNil() - } else { - x.Requests.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceRequirements) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceRequirements) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "limits": - if r.TryDecodeAsNil() { - x.Limits = nil - } else { - yyv4 := &x.Limits - yyv4.CodecDecodeSelf(d) - } - case "requests": - if r.TryDecodeAsNil() { - x.Requests = nil - } else { - yyv5 := &x.Requests - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - 
-func (x *ResourceRequirements) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Limits = nil - } else { - yyv7 := &x.Limits - yyv7.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Requests = nil - } else { - yyv8 := &x.Requests - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [20]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Image != "" - yyq2[2] = len(x.Command) != 0 - yyq2[3] = len(x.Args) != 0 - yyq2[4] = x.WorkingDir != "" - yyq2[5] = len(x.Ports) != 0 - yyq2[6] = len(x.EnvFrom) != 0 - yyq2[7] = len(x.Env) != 0 - yyq2[8] = true - yyq2[9] = len(x.VolumeMounts) != 0 - yyq2[10] = x.LivenessProbe != nil - yyq2[11] = x.ReadinessProbe != nil - yyq2[12] = x.Lifecycle != nil - yyq2[13] = x.TerminationMessagePath != "" - yyq2[14] = x.TerminationMessagePolicy != "" - yyq2[15] = x.ImagePullPolicy != "" - yyq2[16] = x.SecurityContext != nil - yyq2[17] = x.Stdin != false - yyq2[18] = x.StdinOnce != false - yyq2[19] = x.TTY != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(20) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } - } 
- if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Command == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("command")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Command == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Args == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - z.F.EncSliceStringV(x.Args, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("args")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Args == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - z.F.EncSliceStringV(x.Args, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("workingDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.Ports == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.EnvFrom == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - h.encSliceEnvFromSource(([]EnvFromSource)(x.EnvFrom), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("envFrom")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.EnvFrom == nil { - r.EncodeNil() - } else { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - h.encSliceEnvFromSource(([]EnvFromSource)(x.EnvFrom), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.Env == nil { - r.EncodeNil() - } else { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else 
{ - h.encSliceEnvVar(([]EnvVar)(x.Env), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("env")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Env == nil { - r.EncodeNil() - } else { - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - h.encSliceEnvVar(([]EnvVar)(x.Env), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yy28 := &x.Resources - yy28.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy30 := &x.Resources - yy30.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.VolumeMounts == nil { - r.EncodeNil() - } else { - yym33 := z.EncBinary() - _ = yym33 - if false { - } else { - h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeMounts")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumeMounts == nil { - r.EncodeNil() - } else { - yym34 := z.EncBinary() - _ = yym34 - if false { - } else { - h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.LivenessProbe == nil { - r.EncodeNil() - } else { - x.LivenessProbe.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("livenessProbe")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LivenessProbe == nil { - r.EncodeNil() - } else { - x.LivenessProbe.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.ReadinessProbe == nil { - r.EncodeNil() - } else { - x.ReadinessProbe.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readinessProbe")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ReadinessProbe == nil { - r.EncodeNil() - } else { - x.ReadinessProbe.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.Lifecycle == nil { - r.EncodeNil() - } else { - x.Lifecycle.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lifecycle")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Lifecycle == nil { - r.EncodeNil() - } else { - x.Lifecycle.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if 
yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminationMessagePath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym46 := z.EncBinary() - _ = yym46 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - x.TerminationMessagePolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminationMessagePolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.TerminationMessagePolicy.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - x.ImagePullPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imagePullPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.ImagePullPolicy.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - if x.SecurityContext == nil { - r.EncodeNil() - } else { - x.SecurityContext.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("securityContext")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecurityContext == nil { - r.EncodeNil() - } else { - x.SecurityContext.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[17] { - yym57 := z.EncBinary() - _ = yym57 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdin")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym58 := z.EncBinary() - _ = yym58 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[18] { - yym60 := z.EncBinary() - _ = yym60 - if false { - } else { - r.EncodeBool(bool(x.StdinOnce)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[18] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdinOnce")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym61 := z.EncBinary() - _ = yym61 - if false { - } else { - r.EncodeBool(bool(x.StdinOnce)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[19] { - yym63 := z.EncBinary() - _ = yym63 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[19] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tty")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym64 := z.EncBinary() - _ = yym64 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Container) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Container) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "image": - if r.TryDecodeAsNil() { - x.Image = "" - } else { - yyv6 := &x.Image - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "command": - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv8 := &x.Command - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecSliceStringX(yyv8, false, d) - } - } - case "args": - if r.TryDecodeAsNil() { - x.Args = nil - } else { - yyv10 := &x.Args - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - z.F.DecSliceStringX(yyv10, false, d) - } - } - case "workingDir": - if r.TryDecodeAsNil() { - x.WorkingDir = "" - } else { - yyv12 := &x.WorkingDir - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - case "ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv14 := &x.Ports - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - h.decSliceContainerPort((*[]ContainerPort)(yyv14), d) - } - } - case "envFrom": - if r.TryDecodeAsNil() { - x.EnvFrom = nil - } else { - yyv16 := &x.EnvFrom - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - h.decSliceEnvFromSource((*[]EnvFromSource)(yyv16), d) - } - } - case "env": - if r.TryDecodeAsNil() { - x.Env = nil - } else { - yyv18 := &x.Env - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - h.decSliceEnvVar((*[]EnvVar)(yyv18), d) - } - } - case "resources": - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv20 := &x.Resources - yyv20.CodecDecodeSelf(d) - } - case "volumeMounts": - if r.TryDecodeAsNil() { - x.VolumeMounts = nil - } else { - yyv21 := &x.VolumeMounts - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - h.decSliceVolumeMount((*[]VolumeMount)(yyv21), d) - } - } - case "livenessProbe": - if 
r.TryDecodeAsNil() { - if x.LivenessProbe != nil { - x.LivenessProbe = nil - } - } else { - if x.LivenessProbe == nil { - x.LivenessProbe = new(Probe) - } - x.LivenessProbe.CodecDecodeSelf(d) - } - case "readinessProbe": - if r.TryDecodeAsNil() { - if x.ReadinessProbe != nil { - x.ReadinessProbe = nil - } - } else { - if x.ReadinessProbe == nil { - x.ReadinessProbe = new(Probe) - } - x.ReadinessProbe.CodecDecodeSelf(d) - } - case "lifecycle": - if r.TryDecodeAsNil() { - if x.Lifecycle != nil { - x.Lifecycle = nil - } - } else { - if x.Lifecycle == nil { - x.Lifecycle = new(Lifecycle) - } - x.Lifecycle.CodecDecodeSelf(d) - } - case "terminationMessagePath": - if r.TryDecodeAsNil() { - x.TerminationMessagePath = "" - } else { - yyv26 := &x.TerminationMessagePath - yym27 := z.DecBinary() - _ = yym27 - if false { - } else { - *((*string)(yyv26)) = r.DecodeString() - } - } - case "terminationMessagePolicy": - if r.TryDecodeAsNil() { - x.TerminationMessagePolicy = "" - } else { - yyv28 := &x.TerminationMessagePolicy - yyv28.CodecDecodeSelf(d) - } - case "imagePullPolicy": - if r.TryDecodeAsNil() { - x.ImagePullPolicy = "" - } else { - yyv29 := &x.ImagePullPolicy - yyv29.CodecDecodeSelf(d) - } - case "securityContext": - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(SecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - case "stdin": - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - yyv31 := &x.Stdin - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - *((*bool)(yyv31)) = r.DecodeBool() - } - } - case "stdinOnce": - if r.TryDecodeAsNil() { - x.StdinOnce = false - } else { - yyv33 := &x.StdinOnce - yym34 := z.DecBinary() - _ = yym34 - if false { - } else { - *((*bool)(yyv33)) = r.DecodeBool() - } - } - case "tty": - if r.TryDecodeAsNil() { - x.TTY = false - } else { - yyv35 := &x.TTY - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - *((*bool)(yyv35)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj37 int - var yyb37 bool - var yyhl37 bool = l >= 0 - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv38 := &x.Name - yym39 := z.DecBinary() - _ = yym39 - if false { - } else { - *((*string)(yyv38)) = r.DecodeString() - } - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Image = "" - } else { - yyv40 := &x.Image - yym41 := z.DecBinary() - _ = yym41 - if false { - } else { - *((*string)(yyv40)) = r.DecodeString() - } - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv42 := 
&x.Command - yym43 := z.DecBinary() - _ = yym43 - if false { - } else { - z.F.DecSliceStringX(yyv42, false, d) - } - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Args = nil - } else { - yyv44 := &x.Args - yym45 := z.DecBinary() - _ = yym45 - if false { - } else { - z.F.DecSliceStringX(yyv44, false, d) - } - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.WorkingDir = "" - } else { - yyv46 := &x.WorkingDir - yym47 := z.DecBinary() - _ = yym47 - if false { - } else { - *((*string)(yyv46)) = r.DecodeString() - } - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv48 := &x.Ports - yym49 := z.DecBinary() - _ = yym49 - if false { - } else { - h.decSliceContainerPort((*[]ContainerPort)(yyv48), d) - } - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnvFrom = nil - } else { - yyv50 := &x.EnvFrom - yym51 := z.DecBinary() - _ = yym51 - if false { - } else { - h.decSliceEnvFromSource((*[]EnvFromSource)(yyv50), d) - } - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Env = nil - } else { - yyv52 := &x.Env - yym53 := z.DecBinary() - _ = yym53 - if false { - } else { - h.decSliceEnvVar((*[]EnvVar)(yyv52), d) - } - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv54 := &x.Resources - yyv54.CodecDecodeSelf(d) - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeMounts = nil - } else { - yyv55 := &x.VolumeMounts - yym56 := z.DecBinary() - _ = yym56 - if false { - } else { - h.decSliceVolumeMount((*[]VolumeMount)(yyv55), d) - } - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LivenessProbe != nil { - x.LivenessProbe = nil - } - } else { - if x.LivenessProbe == nil { - x.LivenessProbe = new(Probe) - } - x.LivenessProbe.CodecDecodeSelf(d) - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() 
- } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ReadinessProbe != nil { - x.ReadinessProbe = nil - } - } else { - if x.ReadinessProbe == nil { - x.ReadinessProbe = new(Probe) - } - x.ReadinessProbe.CodecDecodeSelf(d) - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Lifecycle != nil { - x.Lifecycle = nil - } - } else { - if x.Lifecycle == nil { - x.Lifecycle = new(Lifecycle) - } - x.Lifecycle.CodecDecodeSelf(d) - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TerminationMessagePath = "" - } else { - yyv60 := &x.TerminationMessagePath - yym61 := z.DecBinary() - _ = yym61 - if false { - } else { - *((*string)(yyv60)) = r.DecodeString() - } - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TerminationMessagePolicy = "" - } else { - yyv62 := &x.TerminationMessagePolicy - yyv62.CodecDecodeSelf(d) - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImagePullPolicy = "" - } else { - yyv63 := &x.ImagePullPolicy - yyv63.CodecDecodeSelf(d) - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(SecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - yyv65 := &x.Stdin - yym66 := z.DecBinary() - _ = yym66 - if false { - } else { - *((*bool)(yyv65)) = r.DecodeBool() - } - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StdinOnce = false - } else { - yyv67 := &x.StdinOnce - yym68 := z.DecBinary() - _ = yym68 - if false { - } else { - *((*bool)(yyv67)) = r.DecodeBool() - } - } - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TTY = false - } else { - yyv69 := &x.TTY - 
yym70 := z.DecBinary() - _ = yym70 - if false { - } else { - *((*bool)(yyv69)) = r.DecodeBool() - } - } - for { - yyj37++ - if yyhl37 { - yyb37 = yyj37 > l - } else { - yyb37 = r.CheckBreak() - } - if yyb37 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj37-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Exec != nil - yyq2[1] = x.HTTPGet != nil - yyq2[2] = x.TCPSocket != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.HTTPGet == nil { - r.EncodeNil() - } else { - x.HTTPGet.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("httpGet")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HTTPGet == nil { - r.EncodeNil() - } else { - x.HTTPGet.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Handler) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Handler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "exec": - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - case "httpGet": - if r.TryDecodeAsNil() { - if x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - case "tcpSocket": - if r.TryDecodeAsNil() { - if x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Handler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Lifecycle) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = 
false - yyq2[0] = x.PostStart != nil - yyq2[1] = x.PreStop != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.PostStart == nil { - r.EncodeNil() - } else { - x.PostStart.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("postStart")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PostStart == nil { - r.EncodeNil() - } else { - x.PostStart.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.PreStop == nil { - r.EncodeNil() - } else { - x.PreStop.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preStop")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreStop == nil { - r.EncodeNil() - } else { - x.PreStop.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Lifecycle) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Lifecycle) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "postStart": - if r.TryDecodeAsNil() { - if x.PostStart != nil { - x.PostStart = nil - } - } else { - if x.PostStart == nil { - x.PostStart = new(Handler) - } - x.PostStart.CodecDecodeSelf(d) - } - case "preStop": - if r.TryDecodeAsNil() { - if x.PreStop != nil { - x.PreStop = nil - } - } else { - if x.PreStop == nil { - x.PreStop = new(Handler) - } - x.PreStop.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Lifecycle) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - 
var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PostStart != nil { - x.PostStart = nil - } - } else { - if x.PostStart == nil { - x.PostStart = new(Handler) - } - x.PostStart.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PreStop != nil { - x.PreStop = nil - } - } else { - if x.PreStop == nil { - x.PreStop = new(Handler) - } - x.PreStop.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ConditionStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ConditionStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ContainerStateWaiting) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Reason != "" - yyq2[1] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := 
z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStateWaiting) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerStateWaiting) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv4 := &x.Reason - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv6 := &x.Message - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStateWaiting) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv9 := &x.Reason - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv11 := &x.Message - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerStateRunning) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.StartedAt - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else if yym5 { - z.EncBinaryMarshal(yy4) - } else if !yym5 && z.IsJSONHandle() { - z.EncJSONMarshal(yy4) - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startedAt")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.StartedAt - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else if yym7 { - z.EncBinaryMarshal(yy6) - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(yy6) - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStateRunning) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerStateRunning) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "startedAt": - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_v1.Time{} - } else { - yyv4 := &x.StartedAt - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else if yym5 { - z.DecBinaryUnmarshal(yyv4) - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4) - } else { - z.DecFallback(yyv4, false) - } - } - default: - 
z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStateRunning) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_v1.Time{} - } else { - yyv7 := &x.StartedAt - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else if yym8 { - z.DecBinaryUnmarshal(yyv7) - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerStateTerminated) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Signal != 0 - yyq2[2] = x.Reason != "" - yyq2[3] = x.Message != "" - yyq2[4] = true - yyq2[5] = true - yyq2[6] = x.ContainerID != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.ExitCode)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exitCode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.ExitCode)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Signal)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("signal")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Signal)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if 
false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy16 := &x.StartedAt - yym17 := z.EncBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.EncExt(yy16) { - } else if yym17 { - z.EncBinaryMarshal(yy16) - } else if !yym17 && z.IsJSONHandle() { - z.EncJSONMarshal(yy16) - } else { - z.EncFallback(yy16) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startedAt")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy18 := &x.StartedAt - yym19 := z.EncBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.EncExt(yy18) { - } else if yym19 { - z.EncBinaryMarshal(yy18) - } else if !yym19 && z.IsJSONHandle() { - z.EncJSONMarshal(yy18) - } else { - z.EncFallback(yy18) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yy21 := &x.FinishedAt - yym22 := z.EncBinary() - _ = yym22 - if false { - } else if z.HasExtensions() && z.EncExt(yy21) { - } else if yym22 { - z.EncBinaryMarshal(yy21) - } else if !yym22 && z.IsJSONHandle() { - z.EncJSONMarshal(yy21) - } else { - z.EncFallback(yy21) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("finishedAt")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy23 := &x.FinishedAt - yym24 := z.EncBinary() - _ = yym24 - if false { - } else if z.HasExtensions() && z.EncExt(yy23) { - } else if yym24 { - z.EncBinaryMarshal(yy23) - } else if !yym24 && z.IsJSONHandle() { - z.EncJSONMarshal(yy23) - } else { - z.EncFallback(yy23) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStateTerminated) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { 
- } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerStateTerminated) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "exitCode": - if r.TryDecodeAsNil() { - x.ExitCode = 0 - } else { - yyv4 := &x.ExitCode - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(yyv4)) = int32(r.DecodeInt(32)) - } - } - case "signal": - if r.TryDecodeAsNil() { - x.Signal = 0 - } else { - yyv6 := &x.Signal - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(yyv6)) = int32(r.DecodeInt(32)) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv8 := &x.Reason - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv10 := &x.Message - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "startedAt": - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_v1.Time{} - } else { - yyv12 := &x.StartedAt - yym13 := z.DecBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.DecExt(yyv12) { - } else if yym13 { - z.DecBinaryUnmarshal(yyv12) - } else if !yym13 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv12) - } else { - z.DecFallback(yyv12, false) - } - } - case "finishedAt": - if r.TryDecodeAsNil() { - x.FinishedAt = pkg2_v1.Time{} - } else { - yyv14 := &x.FinishedAt - yym15 := z.DecBinary() - _ = yym15 - if false { - } else if z.HasExtensions() && z.DecExt(yyv14) { - } else if yym15 { - z.DecBinaryUnmarshal(yyv14) - } else if !yym15 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv14) - } else { - z.DecFallback(yyv14, false) - } - } - case "containerID": - if r.TryDecodeAsNil() { - x.ContainerID = "" - } else { - yyv16 := &x.ContainerID - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExitCode = 0 - } else { - yyv19 := &x.ExitCode - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int32)(yyv19)) = int32(r.DecodeInt(32)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Signal = 0 - } else { - yyv21 := &x.Signal - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int32)(yyv21)) = int32(r.DecodeInt(32)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv23 := &x.Reason - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*string)(yyv23)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv25 := &x.Message - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*string)(yyv25)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_v1.Time{} - } else { - yyv27 := &x.StartedAt - yym28 := z.DecBinary() - _ = yym28 - if false { - } else if z.HasExtensions() && z.DecExt(yyv27) { - } else if yym28 { - z.DecBinaryUnmarshal(yyv27) - } else if !yym28 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv27) - } else { - z.DecFallback(yyv27, false) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FinishedAt = pkg2_v1.Time{} - } else { - yyv29 := &x.FinishedAt - yym30 := z.DecBinary() - _ = yym30 - if false { - } else if z.HasExtensions() && z.DecExt(yyv29) { - } else if yym30 { - z.DecBinaryUnmarshal(yyv29) - } else if !yym30 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv29) - } else { - z.DecFallback(yyv29, false) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerID = "" - } else { - yyv31 := &x.ContainerID - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - *((*string)(yyv31)) = r.DecodeString() - } - } - for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ 
= h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Waiting != nil - yyq2[1] = x.Running != nil - yyq2[2] = x.Terminated != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Waiting == nil { - r.EncodeNil() - } else { - x.Waiting.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("waiting")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Waiting == nil { - r.EncodeNil() - } else { - x.Waiting.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Running == nil { - r.EncodeNil() - } else { - x.Running.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("running")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Running == nil { - r.EncodeNil() - } else { - x.Running.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Terminated == nil { - r.EncodeNil() - } else { - x.Terminated.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminated")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Terminated == nil { - r.EncodeNil() - } else { - x.Terminated.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerState) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerState) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc 
= r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "waiting": - if r.TryDecodeAsNil() { - if x.Waiting != nil { - x.Waiting = nil - } - } else { - if x.Waiting == nil { - x.Waiting = new(ContainerStateWaiting) - } - x.Waiting.CodecDecodeSelf(d) - } - case "running": - if r.TryDecodeAsNil() { - if x.Running != nil { - x.Running = nil - } - } else { - if x.Running == nil { - x.Running = new(ContainerStateRunning) - } - x.Running.CodecDecodeSelf(d) - } - case "terminated": - if r.TryDecodeAsNil() { - if x.Terminated != nil { - x.Terminated = nil - } - } else { - if x.Terminated == nil { - x.Terminated = new(ContainerStateTerminated) - } - x.Terminated.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerState) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Waiting != nil { - x.Waiting = nil - } - } else { - if x.Waiting == nil { - x.Waiting = new(ContainerStateWaiting) - } - x.Waiting.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Running != nil { - x.Running = nil - } - } else { - if x.Running == nil { - x.Running = new(ContainerStateRunning) - } - x.Running.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Terminated != nil { - x.Terminated = nil - } - } else { - if x.Terminated == nil { - x.Terminated = new(ContainerStateTerminated) - } - x.Terminated.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = true - yyq2[2] = true - yyq2[7] = x.ContainerID != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 5 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy7 := &x.State - yy7.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("state")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.State - yy9.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy12 := &x.LastTerminationState - yy12.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastState")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.LastTerminationState - yy14.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeBool(bool(x.Ready)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ready")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeBool(bool(x.Ready)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.RestartCount)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("restartCount")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeInt(int64(x.RestartCount)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ImageID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imageID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ImageID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - 
} else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "state": - if r.TryDecodeAsNil() { - x.State = ContainerState{} - } else { - yyv6 := &x.State - yyv6.CodecDecodeSelf(d) - } - case "lastState": - if r.TryDecodeAsNil() { - x.LastTerminationState = ContainerState{} - } else { - yyv7 := &x.LastTerminationState - yyv7.CodecDecodeSelf(d) - } - case "ready": - if r.TryDecodeAsNil() { - x.Ready = false - } else { - yyv8 := &x.Ready - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(yyv8)) = r.DecodeBool() - } - } - case "restartCount": - if r.TryDecodeAsNil() { - x.RestartCount = 0 - } else { - yyv10 := &x.RestartCount - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(yyv10)) = int32(r.DecodeInt(32)) - } - } - case "image": - if r.TryDecodeAsNil() { - x.Image = "" - } else { - yyv12 := &x.Image - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - case "imageID": - if r.TryDecodeAsNil() { - x.ImageID = "" - } else { - yyv14 := &x.ImageID - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - case "containerID": - if r.TryDecodeAsNil() { - x.ContainerID = "" - } else { - yyv16 := &x.ContainerID - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv19 := &x.Name - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.State = ContainerState{} - } else { - yyv21 := &x.State - yyv21.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTerminationState = ContainerState{} - } else { - yyv22 := &x.LastTerminationState - yyv22.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ready = false - } else { - yyv23 := &x.Ready - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*bool)(yyv23)) = r.DecodeBool() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RestartCount = 0 - } else { - yyv25 := &x.RestartCount - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*int32)(yyv25)) = int32(r.DecodeInt(32)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Image = "" - } else { - yyv27 := &x.Image - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*string)(yyv27)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImageID = "" - } else { - yyv29 := &x.ImageID - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*string)(yyv29)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerID = "" - } else { - yyv31 := &x.ContainerID - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - *((*string)(yyv31)) = r.DecodeString() - } - } - for { - 
yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PodPhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PodPhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x PodConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PodConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PodCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Status.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Status.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastProbeTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastProbeTime - yym13 := 
z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // 
default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_v1.Time{} - } else { - yyv6 := &x.LastProbeTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_v1.Time{} - } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv10 := &x.Reason - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv12 := &x.Message - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv15 := &x.Type - yyv15.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv16 := &x.Status - yyv16.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_v1.Time{} - } else { - yyv17 := &x.LastProbeTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - 
} - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_v1.Time{} - } else { - yyv19 := &x.LastTransitionTime - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(yyv19) { - } else if yym20 { - z.DecBinaryUnmarshal(yyv19) - } else if !yym20 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv19) - } else { - z.DecFallback(yyv19, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv21 := &x.Reason - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv23 := &x.Message - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*string)(yyv23)) = r.DecodeString() - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x RestartPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *RestartPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x DNSPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DNSPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *NodeSelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.NodeSelectorTerms == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeSelectorTerms")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeSelectorTerms == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "nodeSelectorTerms": - if r.TryDecodeAsNil() { - x.NodeSelectorTerms = nil - } else { - yyv4 := &x.NodeSelectorTerms - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeSelectorTerms = nil - } else { - yyv7 := &x.NodeSelectorTerms - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSelectorTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.MatchExpressions == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("matchExpressions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MatchExpressions == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSelectorTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSelectorTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "matchExpressions": - if r.TryDecodeAsNil() { - x.MatchExpressions = nil - } else { - yyv4 := &x.MatchExpressions - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSelectorTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r 
- var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MatchExpressions = nil - } else { - yyv7 := &x.MatchExpressions - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = len(x.Values) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Operator.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("operator")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Operator.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Values == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncSliceStringV(x.Values, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("values")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Values == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncSliceStringV(x.Values, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv4 := &x.Key - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "operator": - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - yyv6 := &x.Operator - yyv6.CodecDecodeSelf(d) - } - case "values": - if r.TryDecodeAsNil() { - x.Values = nil - } else { - yyv7 := &x.Values - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - z.F.DecSliceStringX(yyv7, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv10 := &x.Key - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - yyv12 := &x.Operator - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Values = nil - } else { - yyv13 := &x.Values - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - z.F.DecSliceStringX(yyv13, false, d) - } - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NodeSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else 
if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodeSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.NodeAffinity != nil - yyq2[1] = x.PodAffinity != nil - yyq2[2] = x.PodAntiAffinity != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.NodeAffinity == nil { - r.EncodeNil() - } else { - x.NodeAffinity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeAffinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeAffinity == nil { - r.EncodeNil() - } else { - x.NodeAffinity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.PodAffinity == nil { - r.EncodeNil() - } else { - x.PodAffinity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podAffinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PodAffinity == nil { - r.EncodeNil() - } else { - x.PodAffinity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.PodAntiAffinity == nil { - r.EncodeNil() - } else { - x.PodAntiAffinity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podAntiAffinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PodAntiAffinity == nil { - r.EncodeNil() - } else { - x.PodAntiAffinity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Affinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "nodeAffinity": - if r.TryDecodeAsNil() { - if x.NodeAffinity != nil { - x.NodeAffinity = nil - } - } else { - if x.NodeAffinity == nil { - x.NodeAffinity = new(NodeAffinity) - } - x.NodeAffinity.CodecDecodeSelf(d) - } - case "podAffinity": - if r.TryDecodeAsNil() { - if x.PodAffinity != nil { - x.PodAffinity = nil - } - } else { - if x.PodAffinity == nil { - x.PodAffinity = new(PodAffinity) - } - x.PodAffinity.CodecDecodeSelf(d) - } - case "podAntiAffinity": - if r.TryDecodeAsNil() { - if x.PodAntiAffinity != nil { - x.PodAntiAffinity = nil - } - } else { - if x.PodAntiAffinity == nil { - x.PodAntiAffinity = new(PodAntiAffinity) - } - x.PodAntiAffinity.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NodeAffinity != nil { - x.NodeAffinity = nil - } - } else { - if x.NodeAffinity == nil { - x.NodeAffinity = new(NodeAffinity) - } - x.NodeAffinity.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PodAffinity != nil { - x.PodAffinity = nil - } - } else { - if x.PodAffinity == nil { - x.PodAffinity = new(PodAffinity) - } - x.PodAffinity.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PodAntiAffinity != nil { - x.PodAntiAffinity = nil - } - } else { - if x.PodAntiAffinity == nil { - x.PodAntiAffinity = new(PodAntiAffinity) - } - x.PodAntiAffinity.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 - yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAffinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "requiredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) - } - } - case "preferredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 - yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - 
r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAntiAffinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAntiAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case 
"requiredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) - } - } - case "preferredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *WeightedPodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("weight")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.PodAffinityTerm - yy7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podAffinityTerm")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.PodAffinityTerm - yy9.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *WeightedPodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *WeightedPodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "weight": - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - yyv4 := &x.Weight - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(yyv4)) = int32(r.DecodeInt(32)) - } - } - case "podAffinityTerm": - if r.TryDecodeAsNil() { - x.PodAffinityTerm = PodAffinityTerm{} - } else { - yyv6 := &x.PodAffinityTerm - yyv6.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *WeightedPodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - yyv8 := &x.Weight - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(yyv8)) = int32(r.DecodeInt(32)) - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodAffinityTerm = PodAffinityTerm{} - } else { - yyv10 := &x.PodAffinityTerm - yyv10.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if 
yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.LabelSelector != nil - yyq2[1] = len(x.Namespaces) != 0 - yyq2[2] = x.TopologyKey != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.LabelSelector == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { - } else { - z.EncFallback(x.LabelSelector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LabelSelector == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { - } else { - z.EncFallback(x.LabelSelector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Namespaces == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - z.F.EncSliceStringV(x.Namespaces, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespaces")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Namespaces == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - z.F.EncSliceStringV(x.Namespaces, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("topologyKey")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - 
if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "labelSelector": - if r.TryDecodeAsNil() { - if x.LabelSelector != nil { - x.LabelSelector = nil - } - } else { - if x.LabelSelector == nil { - x.LabelSelector = new(pkg2_v1.LabelSelector) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { - } else { - z.DecFallback(x.LabelSelector, false) - } - } - case "namespaces": - if r.TryDecodeAsNil() { - x.Namespaces = nil - } else { - yyv6 := &x.Namespaces - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - case "topologyKey": - if r.TryDecodeAsNil() { - x.TopologyKey = "" - } else { - yyv8 := &x.TopologyKey - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LabelSelector != nil { - x.LabelSelector = nil - } - } else { - if x.LabelSelector == nil { - x.LabelSelector = new(pkg2_v1.LabelSelector) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { - } else { - z.DecFallback(x.LabelSelector, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespaces = nil - } else { - yyv13 := &x.Namespaces - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - z.F.DecSliceStringX(yyv13, false, d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TopologyKey = "" - } else { - yyv15 := &x.TopologyKey - yym16 := z.DecBinary() - _ = yym16 - if 
false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.RequiredDuringSchedulingIgnoredDuringExecution != nil - yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeAffinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "requiredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - } else { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) - } - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) - } - case "preferredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv5 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - } else { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) - } - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv9 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - 
} else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("weight")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.Preference - yy7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preference")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.Preference - yy9.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "weight": - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - yyv4 := &x.Weight - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(yyv4)) = int32(r.DecodeInt(32)) - } - } - case "preference": - if r.TryDecodeAsNil() { - x.Preference = NodeSelectorTerm{} - } else { - yyv6 := &x.Preference - yyv6.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - 
var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - yyv8 := &x.Weight - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(yyv8)) = int32(r.DecodeInt(32)) - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Preference = NodeSelectorTerm{} - } else { - yyv10 := &x.Preference - yyv10.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Taint) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Value != "" - yyq2[3] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Effect.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("effect")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Effect.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy13 := &x.TimeAdded - yym14 := z.EncBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.EncExt(yy13) { - } else if yym14 { - z.EncBinaryMarshal(yy13) - } else if !yym14 && z.IsJSONHandle() { - z.EncJSONMarshal(yy13) - } 
else { - z.EncFallback(yy13) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("timeAdded")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy15 := &x.TimeAdded - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Taint) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Taint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv4 := &x.Key - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv6 := &x.Value - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "effect": - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - yyv8 := &x.Effect - yyv8.CodecDecodeSelf(d) - } - case "timeAdded": - if r.TryDecodeAsNil() { - x.TimeAdded = pkg2_v1.Time{} - } else { - yyv9 := &x.TimeAdded - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if yym10 { - z.DecBinaryUnmarshal(yyv9) - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Taint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv12 := &x.Key - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv14 := &x.Value - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - yyv16 := &x.Effect - yyv16.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TimeAdded = pkg2_v1.Time{} - } else { - yyv17 := &x.TimeAdded - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x TaintEffect) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *TaintEffect) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Toleration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Key != "" - yyq2[1] = x.Operator != "" - yyq2[2] = x.Value != "" - yyq2[3] = x.Effect != "" - yyq2[4] = x.TolerationSeconds != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if 
yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - x.Operator.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("operator")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Operator.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - x.Effect.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("effect")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Effect.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.TolerationSeconds == nil { - r.EncodeNil() - } else { - yy16 := *x.TolerationSeconds - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(yy16)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tolerationSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TolerationSeconds == nil { - r.EncodeNil() - } else { - yy18 := *x.TolerationSeconds - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(yy18)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Toleration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Toleration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv4 := &x.Key - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "operator": - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - yyv6 := &x.Operator - yyv6.CodecDecodeSelf(d) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv7 := &x.Value - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*string)(yyv7)) = r.DecodeString() - } - } - case "effect": - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - yyv9 := &x.Effect - yyv9.CodecDecodeSelf(d) - } - case "tolerationSeconds": - if r.TryDecodeAsNil() { - if x.TolerationSeconds != nil { - x.TolerationSeconds = nil - } - } else { - if x.TolerationSeconds == nil { - x.TolerationSeconds = new(int64) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int64)(x.TolerationSeconds)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv13 := &x.Key - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - yyv15 := &x.Operator - yyv15.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv16 := &x.Value - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - yyv18 := &x.Effect - yyv18.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TolerationSeconds != nil { - x.TolerationSeconds = nil - } - } else { - if x.TolerationSeconds == nil { - x.TolerationSeconds = new(int64) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int64)(x.TolerationSeconds)) = int64(r.DecodeInt(64)) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x TolerationOperator) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *TolerationOperator) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [22]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Volumes) != 0 - yyq2[1] = len(x.InitContainers) != 0 - yyq2[3] = x.RestartPolicy != "" - yyq2[4] = x.TerminationGracePeriodSeconds != nil - yyq2[5] = x.ActiveDeadlineSeconds != nil - yyq2[6] = x.DNSPolicy != "" - yyq2[7] = len(x.NodeSelector) != 0 - yyq2[8] = x.ServiceAccountName != "" - yyq2[9] = x.DeprecatedServiceAccount != "" - yyq2[10] = x.AutomountServiceAccountToken != nil - yyq2[11] = x.NodeName != "" - yyq2[12] = x.HostNetwork != false - yyq2[13] = x.HostPID != false - yyq2[14] = x.HostIPC != false - yyq2[15] = x.SecurityContext != nil - yyq2[16] = len(x.ImagePullSecrets) != 0 - yyq2[17] = x.Hostname != "" - yyq2[18] = x.Subdomain != "" - yyq2[19] = x.Affinity != nil - yyq2[20] = x.SchedulerName != "" - yyq2[21] = len(x.Tolerations) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(22) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Volumes == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceVolume(([]Volume)(x.Volumes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Volumes == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceVolume(([]Volume)(x.Volumes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.InitContainers == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if 
false { - } else { - h.encSliceContainer(([]Container)(x.InitContainers), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("initContainers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.InitContainers == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceContainer(([]Container)(x.InitContainers), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Containers == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceContainer(([]Container)(x.Containers), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Containers == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - h.encSliceContainer(([]Container)(x.Containers), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - x.RestartPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("restartPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.RestartPolicy.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.TerminationGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy16 := *x.TerminationGracePeriodSeconds - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(yy16)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminationGracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TerminationGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy18 := *x.TerminationGracePeriodSeconds - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(yy18)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy21 := *x.ActiveDeadlineSeconds - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeInt(int64(yy21)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy23 := *x.ActiveDeadlineSeconds - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeInt(int64(yy23)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - x.DNSPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("dnsPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - 
x.DNSPolicy.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.NodeSelector == nil { - r.EncodeNil() - } else { - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - z.F.EncMapStringStringV(x.NodeSelector, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeSelector == nil { - r.EncodeNil() - } else { - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - z.F.EncMapStringStringV(x.NodeSelector, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceAccountName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym33 := z.EncBinary() - _ = yym33 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceAccount")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.AutomountServiceAccountToken == nil { - r.EncodeNil() - } else { - yy38 := *x.AutomountServiceAccountToken - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - r.EncodeBool(bool(yy38)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("automountServiceAccountToken")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AutomountServiceAccountToken == nil { - r.EncodeNil() - } else { - yy40 := *x.AutomountServiceAccountToken - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - r.EncodeBool(bool(yy40)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - yym43 := z.EncBinary() - _ = yym43 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - 
if yyq2[12] { - yym46 := z.EncBinary() - _ = yym46 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - yym49 := z.EncBinary() - _ = yym49 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym50 := z.EncBinary() - _ = yym50 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - yym52 := z.EncBinary() - _ = yym52 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym53 := z.EncBinary() - _ = yym53 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - if x.SecurityContext == nil { - r.EncodeNil() - } else { - x.SecurityContext.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("securityContext")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecurityContext == nil { - r.EncodeNil() - } else { - x.SecurityContext.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - if x.ImagePullSecrets == nil { - r.EncodeNil() - } else { - yym58 := z.EncBinary() - _ = yym58 - if false { - } else { - h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ImagePullSecrets == nil { - r.EncodeNil() - } else { - yym59 := z.EncBinary() - _ = yym59 - if false { - } else { - h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[17] { - yym61 := z.EncBinary() - _ = yym61 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostname")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym62 := z.EncBinary() - _ = yym62 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[18] { - yym64 := z.EncBinary() - _ = yym64 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[18] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subdomain")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym65 := z.EncBinary() - _ = yym65 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[19] { - if x.Affinity == nil { - r.EncodeNil() - } else { - x.Affinity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[19] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("affinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Affinity == nil { - r.EncodeNil() - } else { - x.Affinity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[20] { - yym70 := z.EncBinary() - _ = yym70 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[20] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("schedulerName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym71 := z.EncBinary() - _ = yym71 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[21] { - if x.Tolerations == nil { - r.EncodeNil() - } else { - yym73 := z.EncBinary() - _ = yym73 - if false { - } else { - h.encSliceToleration(([]Toleration)(x.Tolerations), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[21] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tolerations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Tolerations == nil { - r.EncodeNil() - } else { - yym74 := z.EncBinary() - _ = yym74 - if false { - } else { - h.encSliceToleration(([]Toleration)(x.Tolerations), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) 
- _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "volumes": - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv4 := &x.Volumes - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceVolume((*[]Volume)(yyv4), d) - } - } - case "initContainers": - if r.TryDecodeAsNil() { - x.InitContainers = nil - } else { - yyv6 := &x.InitContainers - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceContainer((*[]Container)(yyv6), d) - } - } - case "containers": - if r.TryDecodeAsNil() { - x.Containers = nil - } else { - yyv8 := &x.Containers - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - h.decSliceContainer((*[]Container)(yyv8), d) - } - } - case "restartPolicy": - if r.TryDecodeAsNil() { - x.RestartPolicy = "" - } else { - yyv10 := &x.RestartPolicy - yyv10.CodecDecodeSelf(d) - } - case "terminationGracePeriodSeconds": - if r.TryDecodeAsNil() { - if x.TerminationGracePeriodSeconds != nil { - x.TerminationGracePeriodSeconds = nil - } - } else { - if x.TerminationGracePeriodSeconds == nil { - x.TerminationGracePeriodSeconds = new(int64) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "activeDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "dnsPolicy": - if r.TryDecodeAsNil() { - x.DNSPolicy = "" - } else { - yyv15 := &x.DNSPolicy - yyv15.CodecDecodeSelf(d) - } - case "nodeSelector": - if r.TryDecodeAsNil() { - x.NodeSelector = nil - } else { - yyv16 := &x.NodeSelector - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - z.F.DecMapStringStringX(yyv16, false, d) - } - } - case "serviceAccountName": - if r.TryDecodeAsNil() { - x.ServiceAccountName = "" - } else { - yyv18 := &x.ServiceAccountName - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*string)(yyv18)) = r.DecodeString() - } - } - case "serviceAccount": - if r.TryDecodeAsNil() { - x.DeprecatedServiceAccount = "" - } else { - yyv20 := &x.DeprecatedServiceAccount - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - *((*string)(yyv20)) = r.DecodeString() - } - } - case "automountServiceAccountToken": - if r.TryDecodeAsNil() { - if x.AutomountServiceAccountToken != nil { - x.AutomountServiceAccountToken = nil - } - } else { - if x.AutomountServiceAccountToken == nil { - x.AutomountServiceAccountToken = new(bool) - } - yym23 := z.DecBinary() - _ = yym23 - if false { - } else { - *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() - } - } - case "nodeName": - if r.TryDecodeAsNil() { - x.NodeName = "" - } else { - yyv24 := &x.NodeName - yym25 := z.DecBinary() - _ = yym25 - if false { - } else { - *((*string)(yyv24)) = r.DecodeString() - } - } - case "hostNetwork": - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - yyv26 := &x.HostNetwork - 
yym27 := z.DecBinary() - _ = yym27 - if false { - } else { - *((*bool)(yyv26)) = r.DecodeBool() - } - } - case "hostPID": - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - yyv28 := &x.HostPID - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - *((*bool)(yyv28)) = r.DecodeBool() - } - } - case "hostIPC": - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - yyv30 := &x.HostIPC - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - *((*bool)(yyv30)) = r.DecodeBool() - } - } - case "securityContext": - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(PodSecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - case "imagePullSecrets": - if r.TryDecodeAsNil() { - x.ImagePullSecrets = nil - } else { - yyv33 := &x.ImagePullSecrets - yym34 := z.DecBinary() - _ = yym34 - if false { - } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv33), d) - } - } - case "hostname": - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - yyv35 := &x.Hostname - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - *((*string)(yyv35)) = r.DecodeString() - } - } - case "subdomain": - if r.TryDecodeAsNil() { - x.Subdomain = "" - } else { - yyv37 := &x.Subdomain - yym38 := z.DecBinary() - _ = yym38 - if false { - } else { - *((*string)(yyv37)) = r.DecodeString() - } - } - case "affinity": - if r.TryDecodeAsNil() { - if x.Affinity != nil { - x.Affinity = nil - } - } else { - if x.Affinity == nil { - x.Affinity = new(Affinity) - } - x.Affinity.CodecDecodeSelf(d) - } - case "schedulerName": - if r.TryDecodeAsNil() { - x.SchedulerName = "" - } else { - yyv40 := &x.SchedulerName - yym41 := z.DecBinary() - _ = yym41 - if false { - } else { - *((*string)(yyv40)) = r.DecodeString() - } - } - case "tolerations": - if r.TryDecodeAsNil() { - x.Tolerations = nil - } else { - yyv42 := &x.Tolerations - yym43 := z.DecBinary() - _ = yym43 - if false { - } else { - h.decSliceToleration((*[]Toleration)(yyv42), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj44 int - var yyb44 bool - var yyhl44 bool = l >= 0 - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv45 := &x.Volumes - yym46 := z.DecBinary() - _ = yym46 - if false { - } else { - h.decSliceVolume((*[]Volume)(yyv45), d) - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.InitContainers = nil - } else { - yyv47 := &x.InitContainers - yym48 := z.DecBinary() - _ = yym48 - if false { - } else { - h.decSliceContainer((*[]Container)(yyv47), d) - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - 
if r.TryDecodeAsNil() { - x.Containers = nil - } else { - yyv49 := &x.Containers - yym50 := z.DecBinary() - _ = yym50 - if false { - } else { - h.decSliceContainer((*[]Container)(yyv49), d) - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RestartPolicy = "" - } else { - yyv51 := &x.RestartPolicy - yyv51.CodecDecodeSelf(d) - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TerminationGracePeriodSeconds != nil { - x.TerminationGracePeriodSeconds = nil - } - } else { - if x.TerminationGracePeriodSeconds == nil { - x.TerminationGracePeriodSeconds = new(int64) - } - yym53 := z.DecBinary() - _ = yym53 - if false { - } else { - *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym55 := z.DecBinary() - _ = yym55 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DNSPolicy = "" - } else { - yyv56 := &x.DNSPolicy - yyv56.CodecDecodeSelf(d) - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeSelector = nil - } else { - yyv57 := &x.NodeSelector - yym58 := z.DecBinary() - _ = yym58 - if false { - } else { - z.F.DecMapStringStringX(yyv57, false, d) - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceAccountName = "" - } else { - yyv59 := &x.ServiceAccountName - yym60 := z.DecBinary() - _ = yym60 - if false { - } else { - *((*string)(yyv59)) = r.DecodeString() - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DeprecatedServiceAccount = "" - } else { - yyv61 := &x.DeprecatedServiceAccount - yym62 := z.DecBinary() - _ = yym62 - if false { - } else { - *((*string)(yyv61)) = r.DecodeString() - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AutomountServiceAccountToken != nil { - x.AutomountServiceAccountToken = nil - } - } else { - if x.AutomountServiceAccountToken == nil { - x.AutomountServiceAccountToken = new(bool) - } - yym64 := z.DecBinary() - _ = yym64 - if false { - } else { - *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeName = "" - } else { - yyv65 := &x.NodeName - yym66 := z.DecBinary() - _ = yym66 - if false { - } else { - *((*string)(yyv65)) = r.DecodeString() - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - yyv67 := &x.HostNetwork - yym68 := z.DecBinary() - _ = yym68 - if false { - } else { - *((*bool)(yyv67)) = r.DecodeBool() - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - yyv69 := &x.HostPID - yym70 := z.DecBinary() - _ = yym70 - if false { - } else { - *((*bool)(yyv69)) = r.DecodeBool() - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - yyv71 := &x.HostIPC - yym72 := z.DecBinary() - _ = yym72 - if false { - } else { - *((*bool)(yyv71)) = r.DecodeBool() - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(PodSecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImagePullSecrets = nil - } else { - yyv74 := &x.ImagePullSecrets - yym75 := z.DecBinary() - _ = yym75 - if false { - } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv74), d) - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - yyv76 := &x.Hostname - yym77 := z.DecBinary() - _ = yym77 - if false { - } else { - *((*string)(yyv76)) = r.DecodeString() - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subdomain = "" - } else { - yyv78 := &x.Subdomain - yym79 := z.DecBinary() - _ = yym79 - if false { - } else { - *((*string)(yyv78)) = r.DecodeString() - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Affinity != nil { - x.Affinity = nil - } - } else { - if x.Affinity == nil { - x.Affinity = new(Affinity) - } - x.Affinity.CodecDecodeSelf(d) - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SchedulerName = "" - } else { - yyv81 := &x.SchedulerName - yym82 := z.DecBinary() - _ = yym82 - if false { - } else { - *((*string)(yyv81)) = r.DecodeString() - } - } - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Tolerations = nil - } else { - yyv83 := &x.Tolerations - yym84 := z.DecBinary() - _ = yym84 - if false { - } else { - h.decSliceToleration((*[]Toleration)(yyv83), d) - } - } - for { - yyj44++ - if yyhl44 { - yyb44 = yyj44 > l - } else { - yyb44 = r.CheckBreak() - } - if yyb44 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj44-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodSecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.SELinuxOptions != nil - yyq2[1] = x.RunAsUser != nil - yyq2[2] = x.RunAsNonRoot != nil - yyq2[3] = len(x.SupplementalGroups) != 0 - yyq2[4] = x.FSGroup != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.RunAsUser == nil { - r.EncodeNil() - } else { - yy7 := *x.RunAsUser - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(yy7)) - } - } - } else { - r.EncodeNil() - } - 
} else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsUser == nil { - r.EncodeNil() - } else { - yy9 := *x.RunAsUser - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.RunAsNonRoot == nil { - r.EncodeNil() - } else { - yy12 := *x.RunAsNonRoot - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(yy12)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsNonRoot == nil { - r.EncodeNil() - } else { - yy14 := *x.RunAsNonRoot - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeBool(bool(yy14)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.SupplementalGroups == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - z.F.EncSliceInt64V(x.SupplementalGroups, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SupplementalGroups == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - z.F.EncSliceInt64V(x.SupplementalGroups, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.FSGroup == nil { - r.EncodeNil() - } else { - yy20 := *x.FSGroup - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeInt(int64(yy20)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FSGroup == nil { - r.EncodeNil() - } else { - yy22 := *x.FSGroup - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeInt(int64(yy22)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityContext) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, 
r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "seLinuxOptions": - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - case "runAsUser": - if r.TryDecodeAsNil() { - if x.RunAsUser != nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - case "runAsNonRoot": - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - case "supplementalGroups": - if r.TryDecodeAsNil() { - x.SupplementalGroups = nil - } else { - yyv9 := &x.SupplementalGroups - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - z.F.DecSliceInt64X(yyv9, false, d) - } - } - case "fsGroup": - if r.TryDecodeAsNil() { - if x.FSGroup != nil { - x.FSGroup = nil - } - } else { - if x.FSGroup == nil { - x.FSGroup = new(int64) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsUser != nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym18 := 
z.DecBinary() - _ = yym18 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SupplementalGroups = nil - } else { - yyv19 := &x.SupplementalGroups - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - z.F.DecSliceInt64X(yyv19, false, d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FSGroup != nil { - x.FSGroup = nil - } - } else { - if x.FSGroup == nil { - x.FSGroup = new(int64) - } - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) - } - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PodQOSClass) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PodQOSClass) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [10]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Phase != "" - yyq2[1] = len(x.Conditions) != 0 - yyq2[2] = x.Message != "" - yyq2[3] = x.Reason != "" - yyq2[4] = x.HostIP != "" - yyq2[5] = x.PodIP != "" - yyq2[6] = x.StartTime != nil - yyq2[7] = len(x.InitContainerStatuses) != 0 - yyq2[8] = len(x.ContainerStatuses) != 0 - yyq2[9] = x.QOSClass != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(10) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - 
h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.StartTime == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym22 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym22 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("startTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartTime == nil { - r.EncodeNil() - } else { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym23 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym23 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.InitContainerStatuses == nil { - r.EncodeNil() - } else { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - h.encSliceContainerStatus(([]ContainerStatus)(x.InitContainerStatuses), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("initContainerStatuses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.InitContainerStatuses == nil { - r.EncodeNil() - } else { - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - h.encSliceContainerStatus(([]ContainerStatus)(x.InitContainerStatuses), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.ContainerStatuses == nil { - r.EncodeNil() - } else { - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerStatuses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ContainerStatuses == nil { - r.EncodeNil() - } else { - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - x.QOSClass.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("qosClass")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.QOSClass.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default 
slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - yyv4 := &x.Phase - yyv4.CodecDecodeSelf(d) - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv5 := &x.Conditions - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicePodCondition((*[]PodCondition)(yyv5), d) - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv7 := &x.Message - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*string)(yyv7)) = r.DecodeString() - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv9 := &x.Reason - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - case "hostIP": - if r.TryDecodeAsNil() { - x.HostIP = "" - } else { - yyv11 := &x.HostIP - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - case "podIP": - if r.TryDecodeAsNil() { - x.PodIP = "" - } else { - yyv13 := &x.PodIP - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - case "startTime": - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg2_v1.Time) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym16 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - case "initContainerStatuses": - if r.TryDecodeAsNil() { - x.InitContainerStatuses = nil - } else { - yyv17 := &x.InitContainerStatuses - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - h.decSliceContainerStatus((*[]ContainerStatus)(yyv17), d) - } - } - case "containerStatuses": - if r.TryDecodeAsNil() { - x.ContainerStatuses = nil - } else { - yyv19 := &x.ContainerStatuses - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceContainerStatus((*[]ContainerStatus)(yyv19), d) - } - } - case "qosClass": - if r.TryDecodeAsNil() { - x.QOSClass = "" - } else { - yyv21 := &x.QOSClass - yyv21.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj22 int - var yyb22 bool - var yyhl22 bool = l >= 0 - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - yyv23 := &x.Phase - yyv23.CodecDecodeSelf(d) - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv24 := &x.Conditions - yym25 := z.DecBinary() - _ = yym25 - if false { - } else { - h.decSlicePodCondition((*[]PodCondition)(yyv24), d) - } - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv26 := &x.Message - yym27 := z.DecBinary() - _ = yym27 - if false { - } else { - *((*string)(yyv26)) = r.DecodeString() - } - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv28 := &x.Reason - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - *((*string)(yyv28)) = r.DecodeString() - } - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIP = "" - } else { - yyv30 := &x.HostIP - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - *((*string)(yyv30)) = r.DecodeString() - } - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodIP = "" - } else { - yyv32 := &x.PodIP - yym33 := z.DecBinary() - _ = yym33 - if false { - } else { - *((*string)(yyv32)) = r.DecodeString() - } - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg2_v1.Time) - } - yym35 := z.DecBinary() - _ = yym35 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym35 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym35 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.InitContainerStatuses = nil - } else { - yyv36 := &x.InitContainerStatuses - yym37 := z.DecBinary() - _ = yym37 - if false { - } else { - h.decSliceContainerStatus((*[]ContainerStatus)(yyv36), d) - } - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerStatuses = nil - } else { - yyv38 := &x.ContainerStatuses - yym39 := z.DecBinary() - _ = yym39 - if false { - } else { - h.decSliceContainerStatus((*[]ContainerStatus)(yyv38), d) - } - } - yyj22++ - if 
yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.QOSClass = "" - } else { - yyv40 := &x.QOSClass - yyv40.CodecDecodeSelf(d) - } - for { - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj22-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodStatusResult) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Status - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Status - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodStatusResult) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodStatusResult) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv10 := &x.Status - yyv10.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodStatusResult) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv12 := &x.Kind - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - 
} else { - yyv14 := &x.APIVersion - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv16 := &x.ObjectMeta - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(yyv16) { - } else { - z.DecFallback(yyv16, false) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv18 := &x.Status - yyv18.CodecDecodeSelf(d) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Pod) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Pod) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Pod) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if 
r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Pod) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSlicePod(([]Pod)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSlicePod(([]Pod)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r 
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSlicePod((*[]Pod)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSlicePod((*[]Pod)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} 
- } else { - yyv4 := &x.ObjectMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv6 := &x.Spec - yyv6.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodTemplate) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Template - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Template - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodTemplate) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "template": - if r.TryDecodeAsNil() { - 
x.Template = PodTemplateSpec{} - } else { - yyv10 := &x.Template - yyv10.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv12 := &x.Kind - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv14 := &x.APIVersion - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv16 := &x.ObjectMeta - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(yyv16) { - } else { - z.DecFallback(yyv16, false) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = PodTemplateSpec{} - } else { - yyv18 := &x.Template - yyv18.CodecDecodeSelf(d) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodTemplateList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodTemplateList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodTemplateList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, 
true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSlicePodTemplate((*[]PodTemplate)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodTemplateList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSlicePodTemplate((*[]PodTemplate)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - 
if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != nil - yyq2[1] = x.MinReadySeconds != 0 - yyq2[2] = len(x.Selector) != 0 - yyq2[3] = x.Template != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Replicas == nil { - r.EncodeNil() - } else { - yy4 := *x.Replicas - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Replicas == nil { - r.EncodeNil() - } else { - yy6 := *x.Replicas - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Template == nil { - r.EncodeNil() - } else { - x.Template.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Template == nil { - r.EncodeNil() - } else { - x.Template.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == 
codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - case "minReadySeconds": - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - yyv6 := &x.MinReadySeconds - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(yyv6)) = int32(r.DecodeInt(32)) - } - } - case "selector": - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv8 := &x.Selector - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecMapStringStringX(yyv8, false, d) - } - } - case "template": - if r.TryDecodeAsNil() { - if x.Template != nil { - x.Template = nil - } - } else { - if x.Template == nil { - x.Template = new(PodTemplateSpec) - } - x.Template.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - yyv14 := &x.MinReadySeconds - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*int32)(yyv14)) = int32(r.DecodeInt(32)) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv16 := &x.Selector - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - z.F.DecMapStringStringX(yyv16, false, d) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Template != nil { - x.Template = nil - } - } else { - if x.Template == nil { - x.Template = new(PodTemplateSpec) - } - x.Template.CodecDecodeSelf(d) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FullyLabeledReplicas != 0 - yyq2[2] = x.ReadyReplicas != 0 - yyq2[3] = x.AvailableReplicas != 0 - yyq2[4] = x.ObservedGeneration != 0 - yyq2[5] = len(x.Conditions) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.ReadyReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.ReadyReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - 
r.EncodeInt(int64(x.AvailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - h.encSliceReplicationControllerCondition(([]ReplicationControllerCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - h.encSliceReplicationControllerCondition(([]ReplicationControllerCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv4 := &x.Replicas - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(yyv4)) = int32(r.DecodeInt(32)) - } - } - case 
"fullyLabeledReplicas": - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - yyv6 := &x.FullyLabeledReplicas - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(yyv6)) = int32(r.DecodeInt(32)) - } - } - case "readyReplicas": - if r.TryDecodeAsNil() { - x.ReadyReplicas = 0 - } else { - yyv8 := &x.ReadyReplicas - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(yyv8)) = int32(r.DecodeInt(32)) - } - } - case "availableReplicas": - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - yyv10 := &x.AvailableReplicas - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(yyv10)) = int32(r.DecodeInt(32)) - } - } - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - yyv12 := &x.ObservedGeneration - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int64)(yyv12)) = int64(r.DecodeInt(64)) - } - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv14 := &x.Conditions - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - h.decSliceReplicationControllerCondition((*[]ReplicationControllerCondition)(yyv14), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv17 := &x.Replicas - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*int32)(yyv17)) = int32(r.DecodeInt(32)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - yyv19 := &x.FullyLabeledReplicas - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int32)(yyv19)) = int32(r.DecodeInt(32)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadyReplicas = 0 - } else { - yyv21 := &x.ReadyReplicas - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int32)(yyv21)) = int32(r.DecodeInt(32)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - yyv23 := &x.AvailableReplicas - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*int32)(yyv23)) = int32(r.DecodeInt(32)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - yyv25 := &x.ObservedGeneration - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*int64)(yyv25)) = int64(r.DecodeInt(64)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv27 := &x.Conditions - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - h.decSliceReplicationControllerCondition((*[]ReplicationControllerCondition)(yyv27), d) - } - } - for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj16-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ReplicationControllerConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ReplicationControllerConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ReplicationControllerCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = x.Reason != "" - yyq2[4] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Status.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Status.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastTransitionTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastTransitionTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case 
"lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_v1.Time{} - } else { - yyv6 := &x.LastTransitionTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv8 := &x.Reason - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv10 := &x.Message - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv13 := &x.Type - yyv13.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv14 := &x.Status - yyv14.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_v1.Time{} - } else { - yyv15 := &x.LastTransitionTime - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else if yym16 { - z.DecBinaryUnmarshal(yyv15) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv15) - } else { - z.DecFallback(yyv15, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv17 := &x.Reason - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv19 := &x.Message - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationController) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - 
yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationController) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationController) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ReplicationControllerSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ReplicationControllerStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationController) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ReplicationControllerSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ReplicationControllerStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceReplicationController(([]ReplicationController)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceReplicationController(([]ReplicationController)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = 
pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceReplicationController((*[]ReplicationController)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceReplicationController((*[]ReplicationController)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ServiceAffinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ServiceAffinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x ServiceType) CodecEncodeSelf(e *codec1978.Encoder) { - 
var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ServiceType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ServiceStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.LoadBalancer - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.LoadBalancer - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "loadBalancer": - if r.TryDecodeAsNil() { - x.LoadBalancer = LoadBalancerStatus{} - } else { - yyv4 := &x.LoadBalancer - yyv4.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceStatus) codecDecodeSelfFromArray(l 
int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LoadBalancer = LoadBalancerStatus{} - } else { - yyv6 := &x.LoadBalancer - yyv6.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LoadBalancerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Ingress) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Ingress == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ingress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ingress == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LoadBalancerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LoadBalancerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "ingress": - if r.TryDecodeAsNil() { - x.Ingress = nil - } else { - yyv4 := &x.Ingress - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LoadBalancerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ingress = nil - } else { - yyv7 := &x.Ingress - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LoadBalancerIngress) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.IP != "" - yyq2[1] = x.Hostname != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ip")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostname")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LoadBalancerIngress) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LoadBalancerIngress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "ip": - if r.TryDecodeAsNil() { - x.IP = "" - } else { - yyv4 := &x.IP - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "hostname": - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - yyv6 := &x.Hostname - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LoadBalancerIngress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IP = "" - } else { - yyv9 := &x.IP - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - yyv11 := &x.Hostname - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 
- if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [10]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Ports) != 0 - yyq2[1] = len(x.Selector) != 0 - yyq2[2] = x.ClusterIP != "" - yyq2[3] = x.Type != "" - yyq2[4] = len(x.ExternalIPs) != 0 - yyq2[5] = len(x.DeprecatedPublicIPs) != 0 - yyq2[6] = x.SessionAffinity != "" - yyq2[7] = x.LoadBalancerIP != "" - yyq2[8] = len(x.LoadBalancerSourceRanges) != 0 - yyq2[9] = x.ExternalName != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(10) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Ports == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceServicePort(([]ServicePort)(x.Ports), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceServicePort(([]ServicePort)(x.Ports), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.ExternalIPs == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - z.F.EncSliceStringV(x.ExternalIPs, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) 
- r.EncodeString(codecSelferC_UTF81234, string("externalIPs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ExternalIPs == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - z.F.EncSliceStringV(x.ExternalIPs, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.DeprecatedPublicIPs == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - z.F.EncSliceStringV(x.DeprecatedPublicIPs, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deprecatedPublicIPs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DeprecatedPublicIPs == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - z.F.EncSliceStringV(x.DeprecatedPublicIPs, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - x.SessionAffinity.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("sessionAffinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.SessionAffinity.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("loadBalancerIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.LoadBalancerSourceRanges == nil { - r.EncodeNil() - } else { - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("loadBalancerSourceRanges")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LoadBalancerSourceRanges == nil { - r.EncodeNil() - } else { - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ExternalName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("externalName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ExternalName)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv4 := &x.Ports - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceServicePort((*[]ServicePort)(yyv4), d) - } - } - case "selector": - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv6 := &x.Selector - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecMapStringStringX(yyv6, false, d) - } - } - case "clusterIP": - if r.TryDecodeAsNil() { - x.ClusterIP = "" - } else { - yyv8 := &x.ClusterIP - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv10 := &x.Type - yyv10.CodecDecodeSelf(d) - } - case "externalIPs": - if r.TryDecodeAsNil() { - x.ExternalIPs = nil - } else { - yyv11 := &x.ExternalIPs - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - z.F.DecSliceStringX(yyv11, false, d) - } - } - case "deprecatedPublicIPs": - if r.TryDecodeAsNil() { - x.DeprecatedPublicIPs = nil - } else { - yyv13 := &x.DeprecatedPublicIPs - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - z.F.DecSliceStringX(yyv13, false, d) - } - } - case "sessionAffinity": - if r.TryDecodeAsNil() { - x.SessionAffinity = "" - } else { - yyv15 := &x.SessionAffinity - yyv15.CodecDecodeSelf(d) - } - case "loadBalancerIP": - if r.TryDecodeAsNil() { - x.LoadBalancerIP = "" - } else { - yyv16 := &x.LoadBalancerIP - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - case "loadBalancerSourceRanges": - if r.TryDecodeAsNil() { - x.LoadBalancerSourceRanges = nil - } else { - yyv18 := &x.LoadBalancerSourceRanges - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - z.F.DecSliceStringX(yyv18, false, d) - } - } - case "externalName": - if r.TryDecodeAsNil() { - x.ExternalName = "" - } else { - yyv20 := &x.ExternalName - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - *((*string)(yyv20)) = r.DecodeString() - } - } - 
default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj22 int - var yyb22 bool - var yyhl22 bool = l >= 0 - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv23 := &x.Ports - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - h.decSliceServicePort((*[]ServicePort)(yyv23), d) - } - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv25 := &x.Selector - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - z.F.DecMapStringStringX(yyv25, false, d) - } - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterIP = "" - } else { - yyv27 := &x.ClusterIP - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*string)(yyv27)) = r.DecodeString() - } - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv29 := &x.Type - yyv29.CodecDecodeSelf(d) - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExternalIPs = nil - } else { - yyv30 := &x.ExternalIPs - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - z.F.DecSliceStringX(yyv30, false, d) - } - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DeprecatedPublicIPs = nil - } else { - yyv32 := &x.DeprecatedPublicIPs - yym33 := z.DecBinary() - _ = yym33 - if false { - } else { - z.F.DecSliceStringX(yyv32, false, d) - } - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SessionAffinity = "" - } else { - yyv34 := &x.SessionAffinity - yyv34.CodecDecodeSelf(d) - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LoadBalancerIP = "" - } else { - yyv35 := &x.LoadBalancerIP - yym36 := 
z.DecBinary() - _ = yym36 - if false { - } else { - *((*string)(yyv35)) = r.DecodeString() - } - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LoadBalancerSourceRanges = nil - } else { - yyv37 := &x.LoadBalancerSourceRanges - yym38 := z.DecBinary() - _ = yym38 - if false { - } else { - z.F.DecSliceStringX(yyv37, false, d) - } - } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExternalName = "" - } else { - yyv39 := &x.ExternalName - yym40 := z.DecBinary() - _ = yym40 - if false { - } else { - *((*string)(yyv39)) = r.DecodeString() - } - } - for { - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l - } else { - yyb22 = r.CheckBreak() - } - if yyb22 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj22-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServicePort) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[1] = x.Protocol != "" - yyq2[3] = true - yyq2[4] = x.NodePort != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - x.Protocol.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("protocol")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Protocol.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy13 := &x.TargetPort - yym14 := z.EncBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.EncExt(yy13) { - } else if !yym14 && z.IsJSONHandle() { - z.EncJSONMarshal(yy13) - } else { - z.EncFallback(yy13) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy15 := &x.TargetPort - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeInt(int64(x.NodePort)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodePort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.NodePort)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServicePort) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServicePort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "protocol": - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - yyv6 := &x.Protocol - yyv6.CodecDecodeSelf(d) - } - case "port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - yyv7 := &x.Port - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*int32)(yyv7)) = int32(r.DecodeInt(32)) - } - } - case "targetPort": - if r.TryDecodeAsNil() { - x.TargetPort = pkg4_intstr.IntOrString{} - } else { - yyv9 := &x.TargetPort - yym10 := z.DecBinary() - _ = yym10 - if false { - } 
else if z.HasExtensions() && z.DecExt(yyv9) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) - } - } - case "nodePort": - if r.TryDecodeAsNil() { - x.NodePort = 0 - } else { - yyv11 := &x.NodePort - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int32)(yyv11)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv14 := &x.Name - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - yyv16 := &x.Protocol - yyv16.CodecDecodeSelf(d) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - yyv17 := &x.Port - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*int32)(yyv17)) = int32(r.DecodeInt(32)) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetPort = pkg4_intstr.IntOrString{} - } else { - yyv19 := &x.TargetPort - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(yyv19) { - } else if !yym20 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv19) - } else { - z.DecFallback(yyv19, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodePort = 0 - } else { - yyv21 := &x.NodePort - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int32)(yyv21)) = int32(r.DecodeInt(32)) - } - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Service) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := 
z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Service) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if 
z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Service) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ServiceSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ServiceStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Service) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() 
{ - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ServiceSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ServiceStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServiceList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceService(([]Service)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceService(([]Service)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceService((*[]Service)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, 
r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceService((*[]Service)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServiceAccount) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = len(x.Secrets) != 0 - yyq2[4] = len(x.ImagePullSecrets) != 0 - yyq2[5] = x.AutomountServiceAccountToken != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Secrets == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secrets")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Secrets == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.ImagePullSecrets == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ImagePullSecrets == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.AutomountServiceAccountToken == nil { - r.EncodeNil() - } else { - yy21 := *x.AutomountServiceAccountToken - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeBool(bool(yy21)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("automountServiceAccountToken")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AutomountServiceAccountToken == nil { - r.EncodeNil() - } else { - yy23 := *x.AutomountServiceAccountToken - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - 
r.EncodeBool(bool(yy23)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceAccount) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceAccount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "secrets": - if r.TryDecodeAsNil() { - x.Secrets = nil - } else { - yyv10 := &x.Secrets - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceObjectReference((*[]ObjectReference)(yyv10), d) - } - } - case "imagePullSecrets": - if r.TryDecodeAsNil() { - x.ImagePullSecrets = nil - } else { - yyv12 := &x.ImagePullSecrets - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv12), d) - } - } - case "automountServiceAccountToken": - if r.TryDecodeAsNil() { - if x.AutomountServiceAccountToken != nil { - x.AutomountServiceAccountToken = nil - } - } else { - if x.AutomountServiceAccountToken == nil { - x.AutomountServiceAccountToken = new(bool) - } - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceAccount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - 
} else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv17 := &x.Kind - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv19 := &x.APIVersion - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv21 := &x.ObjectMeta - yym22 := z.DecBinary() - _ = yym22 - if false { - } else if z.HasExtensions() && z.DecExt(yyv21) { - } else { - z.DecFallback(yyv21, false) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Secrets = nil - } else { - yyv23 := &x.Secrets - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - h.decSliceObjectReference((*[]ObjectReference)(yyv23), d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImagePullSecrets = nil - } else { - yyv25 := &x.ImagePullSecrets - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv25), d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AutomountServiceAccountToken != nil { - x.AutomountServiceAccountToken = nil - } - } else { - if x.AutomountServiceAccountToken == nil { - x.AutomountServiceAccountToken = new(bool) - } - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() - } - } - for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj16-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServiceAccountList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = 
x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceAccountList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceAccountList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceServiceAccount((*[]ServiceAccount)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceAccountList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceServiceAccount((*[]ServiceAccount)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Endpoints) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Subsets == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subsets")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Subsets == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Endpoints) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Endpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "subsets": - if r.TryDecodeAsNil() { - x.Subsets = nil - } else { - yyv10 := &x.Subsets - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceEndpointSubset((*[]EndpointSubset)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Endpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { 
- z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subsets = nil - } else { - yyv19 := &x.Subsets - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceEndpointSubset((*[]EndpointSubset)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Addresses) != 0 - yyq2[1] = len(x.NotReadyAddresses) != 0 - yyq2[2] = len(x.Ports) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Addresses == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("addresses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Addresses == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.NotReadyAddresses == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("notReadyAddresses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if 
x.NotReadyAddresses == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Ports == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointSubset) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointSubset) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "addresses": - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv4 := &x.Addresses - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv4), d) - } - } - case "notReadyAddresses": - if r.TryDecodeAsNil() { - x.NotReadyAddresses = nil - } else { - yyv6 := &x.NotReadyAddresses - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv6), d) - } - } - case "ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv8 := &x.Ports - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - h.decSliceEndpointPort((*[]EndpointPort)(yyv8), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointSubset) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } 
else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv11 := &x.Addresses - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv11), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NotReadyAddresses = nil - } else { - yyv13 := &x.NotReadyAddresses - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv15 := &x.Ports - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - h.decSliceEndpointPort((*[]EndpointPort)(yyv15), d) - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Hostname != "" - yyq2[2] = x.NodeName != nil - yyq2[3] = x.TargetRef != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IP)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ip")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IP)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostname")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.NodeName == nil { - 
r.EncodeNil() - } else { - yy10 := *x.NodeName - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy10)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeName == nil { - r.EncodeNil() - } else { - yy12 := *x.NodeName - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy12)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.TargetRef == nil { - r.EncodeNil() - } else { - x.TargetRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TargetRef == nil { - r.EncodeNil() - } else { - x.TargetRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointAddress) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "ip": - if r.TryDecodeAsNil() { - x.IP = "" - } else { - yyv4 := &x.IP - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "hostname": - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - yyv6 := &x.Hostname - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "nodeName": - if r.TryDecodeAsNil() { - if x.NodeName != nil { - x.NodeName = nil - } - } else { - if x.NodeName == nil { - x.NodeName = new(string) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(x.NodeName)) = r.DecodeString() - } - } - case "targetRef": - if r.TryDecodeAsNil() { - if x.TargetRef != nil { - x.TargetRef = nil - } - } else { - if x.TargetRef == nil { - x.TargetRef = new(ObjectReference) - } - 
x.TargetRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IP = "" - } else { - yyv12 := &x.IP - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - yyv14 := &x.Hostname - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NodeName != nil { - x.NodeName = nil - } - } else { - if x.NodeName == nil { - x.NodeName = new(string) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(x.NodeName)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TargetRef != nil { - x.TargetRef = nil - } - } else { - if x.TargetRef == nil { - x.TargetRef = new(ObjectReference) - } - x.TargetRef.CodecDecodeSelf(d) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointPort) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[2] = x.Protocol != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - x.Protocol.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("protocol")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Protocol.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointPort) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - yyv6 := &x.Port - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(yyv6)) = int32(r.DecodeInt(32)) - } - } - case "protocol": - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - yyv8 := &x.Protocol - yyv8.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l 
- } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv10 := &x.Name - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - yyv12 := &x.Port - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int32)(yyv12)) = int32(r.DecodeInt(32)) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - yyv14 := &x.Protocol - yyv14.CodecDecodeSelf(d) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointsList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { 
- } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceEndpoints(([]Endpoints)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceEndpoints(([]Endpoints)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointsList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointsList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceEndpoints((*[]Endpoints)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointsList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceEndpoints((*[]Endpoints)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.PodCIDR != "" - yyq2[1] = x.ExternalID != "" - yyq2[2] = x.ProviderID != "" - yyq2[3] = x.Unschedulable != false - yyq2[4] = len(x.Taints) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podCIDR")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("externalID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("providerID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.Unschedulable)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("unschedulable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.Unschedulable)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Taints == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceTaint(([]Taint)(x.Taints), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("taints")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Taints == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - h.encSliceTaint(([]Taint)(x.Taints), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "podCIDR": - if r.TryDecodeAsNil() { - x.PodCIDR = "" - } else { - yyv4 := &x.PodCIDR - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "externalID": - if r.TryDecodeAsNil() { - x.ExternalID = "" - } else { - yyv6 := &x.ExternalID - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "providerID": - if r.TryDecodeAsNil() { - x.ProviderID = "" - } else { - yyv8 := &x.ProviderID - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "unschedulable": - if r.TryDecodeAsNil() { - x.Unschedulable = false - } else { - yyv10 := &x.Unschedulable - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(yyv10)) = r.DecodeBool() - } - } - case "taints": - if r.TryDecodeAsNil() { - x.Taints = nil - } else { - yyv12 := &x.Taints - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - h.decSliceTaint((*[]Taint)(yyv12), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodCIDR = "" - } else { - yyv15 := &x.PodCIDR - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExternalID = "" - } else { - yyv17 := &x.ExternalID - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ProviderID = "" - } else { - yyv19 := &x.ProviderID - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Unschedulable = false - } else { - yyv21 := &x.Unschedulable - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*bool)(yyv21)) = r.DecodeBool() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Taints = nil - } else { - yyv23 := &x.Taints - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - h.decSliceTaint((*[]Taint)(yyv23), d) - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonEndpoint) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonEndpoint) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonEndpoint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - yyv4 := &x.Port - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(yyv4)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonEndpoint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - yyv7 := &x.Port - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*int32)(yyv7)) = int32(r.DecodeInt(32)) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeDaemonEndpoints) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.KubeletEndpoint - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeletEndpoint")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.KubeletEndpoint - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeDaemonEndpoints) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeDaemonEndpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - 
_, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kubeletEndpoint": - if r.TryDecodeAsNil() { - x.KubeletEndpoint = DaemonEndpoint{} - } else { - yyv4 := &x.KubeletEndpoint - yyv4.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeDaemonEndpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeletEndpoint = DaemonEndpoint{} - } else { - yyv6 := &x.KubeletEndpoint - yyv6.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [10]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(10) - } else { - yynn2 = 10 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("machineID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("systemUUID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) - } - } 
else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("bootID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kernelVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("osImage")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerRuntimeVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeletVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeProxyVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("operatingSystem")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("architecture")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSystemInfo) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSystemInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "machineID": - if r.TryDecodeAsNil() { - x.MachineID = "" - } else { - yyv4 := &x.MachineID - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "systemUUID": - if r.TryDecodeAsNil() { - x.SystemUUID = "" - } else { - yyv6 := &x.SystemUUID - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "bootID": - if r.TryDecodeAsNil() { - x.BootID = "" - } else { - yyv8 := &x.BootID - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "kernelVersion": - if r.TryDecodeAsNil() { - x.KernelVersion = "" - } else { - yyv10 := &x.KernelVersion - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "osImage": - if r.TryDecodeAsNil() { - x.OSImage = "" - } else { - yyv12 := &x.OSImage - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - case "containerRuntimeVersion": - if r.TryDecodeAsNil() { - x.ContainerRuntimeVersion = "" - } else { - yyv14 := &x.ContainerRuntimeVersion - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - case "kubeletVersion": - if r.TryDecodeAsNil() { - x.KubeletVersion = "" - } else { - yyv16 := 
&x.KubeletVersion - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - case "kubeProxyVersion": - if r.TryDecodeAsNil() { - x.KubeProxyVersion = "" - } else { - yyv18 := &x.KubeProxyVersion - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*string)(yyv18)) = r.DecodeString() - } - } - case "operatingSystem": - if r.TryDecodeAsNil() { - x.OperatingSystem = "" - } else { - yyv20 := &x.OperatingSystem - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - *((*string)(yyv20)) = r.DecodeString() - } - } - case "architecture": - if r.TryDecodeAsNil() { - x.Architecture = "" - } else { - yyv22 := &x.Architecture - yym23 := z.DecBinary() - _ = yym23 - if false { - } else { - *((*string)(yyv22)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj24 int - var yyb24 bool - var yyhl24 bool = l >= 0 - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MachineID = "" - } else { - yyv25 := &x.MachineID - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*string)(yyv25)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SystemUUID = "" - } else { - yyv27 := &x.SystemUUID - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*string)(yyv27)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.BootID = "" - } else { - yyv29 := &x.BootID - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*string)(yyv29)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KernelVersion = "" - } else { - yyv31 := &x.KernelVersion - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - *((*string)(yyv31)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OSImage = "" - } else { - yyv33 := &x.OSImage - yym34 := z.DecBinary() - _ = yym34 - if false { - } else { - *((*string)(yyv33)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.ContainerRuntimeVersion = "" - } else { - yyv35 := &x.ContainerRuntimeVersion - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - *((*string)(yyv35)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeletVersion = "" - } else { - yyv37 := &x.KubeletVersion - yym38 := z.DecBinary() - _ = yym38 - if false { - } else { - *((*string)(yyv37)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeProxyVersion = "" - } else { - yyv39 := &x.KubeProxyVersion - yym40 := z.DecBinary() - _ = yym40 - if false { - } else { - *((*string)(yyv39)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OperatingSystem = "" - } else { - yyv41 := &x.OperatingSystem - yym42 := z.DecBinary() - _ = yym42 - if false { - } else { - *((*string)(yyv41)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Architecture = "" - } else { - yyv43 := &x.Architecture - yym44 := z.DecBinary() - _ = yym44 - if false { - } else { - *((*string)(yyv43)) = r.DecodeString() - } - } - for { - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj24-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [10]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Capacity) != 0 - yyq2[1] = len(x.Allocatable) != 0 - yyq2[2] = x.Phase != "" - yyq2[3] = len(x.Conditions) != 0 - yyq2[4] = len(x.Addresses) != 0 - yyq2[5] = true - yyq2[6] = true - yyq2[7] = len(x.Images) != 0 - yyq2[8] = len(x.VolumesInUse) != 0 - yyq2[9] = len(x.VolumesAttached) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(10) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capacity")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Allocatable == nil { - r.EncodeNil() - } else { - x.Allocatable.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allocatable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Allocatable == nil { - r.EncodeNil() - } else { - x.Allocatable.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Addresses == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("addresses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Addresses == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yy19 := &x.DaemonEndpoints - yy19.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("daemonEndpoints")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy21 := &x.DaemonEndpoints - yy21.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yy24 := &x.NodeInfo - yy24.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeInfo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy26 := &x.NodeInfo - yy26.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if 
x.Images == nil { - r.EncodeNil() - } else { - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - h.encSliceContainerImage(([]ContainerImage)(x.Images), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("images")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Images == nil { - r.EncodeNil() - } else { - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - h.encSliceContainerImage(([]ContainerImage)(x.Images), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.VolumesInUse == nil { - r.EncodeNil() - } else { - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumesInUse")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumesInUse == nil { - r.EncodeNil() - } else { - yym33 := z.EncBinary() - _ = yym33 - if false { - } else { - h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.VolumesAttached == nil { - r.EncodeNil() - } else { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumesAttached")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumesAttached == nil { - r.EncodeNil() - } else { - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "capacity": - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv4 := &x.Capacity - yyv4.CodecDecodeSelf(d) - } - case "allocatable": - if r.TryDecodeAsNil() { - x.Allocatable = nil - } else { - yyv5 := &x.Allocatable - yyv5.CodecDecodeSelf(d) - } - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - yyv6 := &x.Phase - yyv6.CodecDecodeSelf(d) - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv7 := &x.Conditions - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceNodeCondition((*[]NodeCondition)(yyv7), d) - } - } - case "addresses": - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv9 := &x.Addresses - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceNodeAddress((*[]NodeAddress)(yyv9), d) - } - } - case "daemonEndpoints": - if r.TryDecodeAsNil() { - x.DaemonEndpoints = NodeDaemonEndpoints{} - } else { - yyv11 := &x.DaemonEndpoints - yyv11.CodecDecodeSelf(d) - } - case "nodeInfo": - if r.TryDecodeAsNil() { - x.NodeInfo = NodeSystemInfo{} - } else { - yyv12 := &x.NodeInfo - yyv12.CodecDecodeSelf(d) - } - case "images": - if r.TryDecodeAsNil() { - x.Images = nil - } else { - yyv13 := &x.Images - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceContainerImage((*[]ContainerImage)(yyv13), d) - } - } - case "volumesInUse": - if r.TryDecodeAsNil() { - x.VolumesInUse = nil - } else { - yyv15 := &x.VolumesInUse - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv15), d) - } - } - case "volumesAttached": - if r.TryDecodeAsNil() { - x.VolumesAttached = nil - } else { - yyv17 := &x.VolumesAttached - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - h.decSliceAttachedVolume((*[]AttachedVolume)(yyv17), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj19 int - var yyb19 bool - var yyhl19 bool = l >= 0 - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv20 := &x.Capacity - yyv20.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Allocatable = nil - } else { - yyv21 := &x.Allocatable - yyv21.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - yyv22 := &x.Phase - yyv22.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) 
- if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv23 := &x.Conditions - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - h.decSliceNodeCondition((*[]NodeCondition)(yyv23), d) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv25 := &x.Addresses - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - h.decSliceNodeAddress((*[]NodeAddress)(yyv25), d) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DaemonEndpoints = NodeDaemonEndpoints{} - } else { - yyv27 := &x.DaemonEndpoints - yyv27.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeInfo = NodeSystemInfo{} - } else { - yyv28 := &x.NodeInfo - yyv28.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Images = nil - } else { - yyv29 := &x.Images - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - h.decSliceContainerImage((*[]ContainerImage)(yyv29), d) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumesInUse = nil - } else { - yyv31 := &x.VolumesInUse - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv31), d) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumesAttached = nil - } else { - yyv33 := &x.VolumesAttached - yym34 := z.DecBinary() - _ = yym34 - if false { - } else { - h.decSliceAttachedVolume((*[]AttachedVolume)(yyv33), d) - } - } - for { - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj19-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x UniqueVolumeName) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *UniqueVolumeName) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && 
z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *AttachedVolume) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Name.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Name.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("devicePath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *AttachedVolume) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *AttachedVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yyv4.CodecDecodeSelf(d) - } - case "devicePath": - if r.TryDecodeAsNil() { - x.DevicePath = "" - } else { - yyv5 := &x.DevicePath - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*string)(yyv5)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *AttachedVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv8 := &x.Name - yyv8.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DevicePath = "" - } else { - yyv9 := &x.DevicePath - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *AvoidPods) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.PreferAvoidPods) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.PreferAvoidPods == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSlicePreferAvoidPodsEntry(([]PreferAvoidPodsEntry)(x.PreferAvoidPods), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preferAvoidPods")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreferAvoidPods == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSlicePreferAvoidPodsEntry(([]PreferAvoidPodsEntry)(x.PreferAvoidPods), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *AvoidPods) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - 
} else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *AvoidPods) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "preferAvoidPods": - if r.TryDecodeAsNil() { - x.PreferAvoidPods = nil - } else { - yyv4 := &x.PreferAvoidPods - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSlicePreferAvoidPodsEntry((*[]PreferAvoidPodsEntry)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *AvoidPods) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PreferAvoidPods = nil - } else { - yyv7 := &x.PreferAvoidPods - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSlicePreferAvoidPodsEntry((*[]PreferAvoidPodsEntry)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PreferAvoidPodsEntry) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = true - yyq2[2] = x.Reason != "" - yyq2[3] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.PodSignature - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podSignature")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.PodSignature - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.EvictionTime - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if yym10 { - z.EncBinaryMarshal(yy9) - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - 
} - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("evictionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.EvictionTime - yym12 := z.EncBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.EncExt(yy11) { - } else if yym12 { - z.EncBinaryMarshal(yy11) - } else if !yym12 && z.IsJSONHandle() { - z.EncJSONMarshal(yy11) - } else { - z.EncFallback(yy11) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PreferAvoidPodsEntry) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PreferAvoidPodsEntry) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "podSignature": - if r.TryDecodeAsNil() { - x.PodSignature = PodSignature{} - } else { - yyv4 := &x.PodSignature - yyv4.CodecDecodeSelf(d) - } - case "evictionTime": - if r.TryDecodeAsNil() { - x.EvictionTime = 
pkg2_v1.Time{} - } else { - yyv5 := &x.EvictionTime - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(yyv5) { - } else if yym6 { - z.DecBinaryUnmarshal(yyv5) - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv5) - } else { - z.DecFallback(yyv5, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv7 := &x.Reason - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*string)(yyv7)) = r.DecodeString() - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv9 := &x.Message - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PreferAvoidPodsEntry) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodSignature = PodSignature{} - } else { - yyv12 := &x.PodSignature - yyv12.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvictionTime = pkg2_v1.Time{} - } else { - yyv13 := &x.EvictionTime - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(yyv13) { - } else if yym14 { - z.DecBinaryUnmarshal(yyv13) - } else if !yym14 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv13) - } else { - z.DecFallback(yyv13, false) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv15 := &x.Reason - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv17 := &x.Message - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodSignature) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := 
z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.PodController != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.PodController == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(x.PodController) { - } else { - z.EncFallback(x.PodController) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podController")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PodController == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(x.PodController) { - } else { - z.EncFallback(x.PodController) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSignature) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSignature) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "podController": - if r.TryDecodeAsNil() { - if x.PodController != nil { - x.PodController = nil - } - } else { - if x.PodController == nil { - x.PodController = new(pkg2_v1.OwnerReference) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(x.PodController) { - } else { - z.DecFallback(x.PodController, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSignature) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PodController != nil { - x.PodController = nil - } - } else { - if x.PodController == nil { - x.PodController = new(pkg2_v1.OwnerReference) - } - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(x.PodController) { - } else { - z.DecFallback(x.PodController, false) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerImage) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.SizeBytes != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Names == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Names, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("names")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Names == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Names, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.SizeBytes)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("sizeBytes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.SizeBytes)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerImage) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerImage) codecDecodeSelfFromMap(l 
int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "names": - if r.TryDecodeAsNil() { - x.Names = nil - } else { - yyv4 := &x.Names - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "sizeBytes": - if r.TryDecodeAsNil() { - x.SizeBytes = 0 - } else { - yyv6 := &x.SizeBytes - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int64)(yyv6)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerImage) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Names = nil - } else { - yyv9 := &x.Names - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - z.F.DecSliceStringX(yyv9, false, d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SizeBytes = 0 - } else { - yyv11 := &x.SizeBytes - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int64)(yyv11)) = int64(r.DecodeInt(64)) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NodePhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodePhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x NodeConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodeConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - 
if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *NodeCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Status.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Status.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastHeartbeatTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastHeartbeatTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastHeartbeatTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "lastHeartbeatTime": - if r.TryDecodeAsNil() { - x.LastHeartbeatTime = pkg2_v1.Time{} - } else { - yyv6 := &x.LastHeartbeatTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_v1.Time{} - } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - 
z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv10 := &x.Reason - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv12 := &x.Message - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv15 := &x.Type - yyv15.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv16 := &x.Status - yyv16.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastHeartbeatTime = pkg2_v1.Time{} - } else { - yyv17 := &x.LastHeartbeatTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_v1.Time{} - } else { - yyv19 := &x.LastTransitionTime - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(yyv19) { - } else if yym20 { - z.DecBinaryUnmarshal(yyv19) - } else if !yym20 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv19) - } else { - z.DecFallback(yyv19, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv21 := &x.Reason - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv23 := &x.Message - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - 
*((*string)(yyv23)) = r.DecodeString() - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NodeAddressType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodeAddressType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *NodeAddress) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("address")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeAddress) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var 
yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "address": - if r.TryDecodeAsNil() { - x.Address = "" - } else { - yyv5 := &x.Address - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*string)(yyv5)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv8 := &x.Type - yyv8.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Address = "" - } else { - yyv9 := &x.Address - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ResourceName) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ResourceName) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x ResourceList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encResourceList((ResourceList)(x), e) - } - } -} - -func (x *ResourceList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decResourceList((*ResourceList)(x), d) - } -} - -func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if 
x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Node) CodecDecodeSelf(d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = NodeSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = NodeStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = NodeSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = NodeStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } 
- } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceNode(([]Node)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceNode(([]Node)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceNode((*[]Node)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x 
*NodeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceNode((*[]Node)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x FinalizerName) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *FinalizerName) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *NamespaceSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Finalizers) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("finalizers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NamespaceSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NamespaceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "finalizers": - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv4 := &x.Finalizers - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceFinalizerName((*[]FinalizerName)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NamespaceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv7 := &x.Finalizers - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceFinalizerName((*[]FinalizerName)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func 
(x *NamespaceStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Phase != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NamespaceStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NamespaceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - yyv4 := &x.Phase - yyv4.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NamespaceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - yyv6 := &x.Phase - yyv6.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { 
- break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NamespacePhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NamespacePhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Namespace) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - 
yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Namespace) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Namespace) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = NamespaceSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = NamespaceStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Namespace) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var 
yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = NamespaceSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = NamespaceStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NamespaceList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := 
z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceNamespace(([]Namespace)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceNamespace(([]Namespace)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NamespaceList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NamespaceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch 
yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceNamespace((*[]Namespace)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NamespaceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceNamespace((*[]Namespace)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Binding) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && 
z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy15 := &x.Target - yy15.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("target")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Target - yy17.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Binding) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, 
d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Binding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "target": - if r.TryDecodeAsNil() { - x.Target = ObjectReference{} - } else { - yyv10 := &x.Target - yyv10.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Binding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv12 := &x.Kind - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv14 := &x.APIVersion - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv16 := &x.ObjectMeta - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(yyv16) { - } else { - z.DecFallback(yyv16, false) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Target = ObjectReference{} - } else { - yyv18 := &x.Target - yyv18.CodecDecodeSelf(d) - } - for { - yyj11++ - if 
yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Preconditions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.UID != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.UID == nil { - r.EncodeNil() - } else { - yy4 := *x.UID - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.UID == nil { - r.EncodeNil() - } else { - yy6 := *x.UID - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Preconditions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Preconditions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "uid": - if r.TryDecodeAsNil() { - if x.UID != nil { - x.UID = nil - } - } else { - if x.UID == nil { - x.UID = new(pkg1_types.UID) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(x.UID) { - } else { - *((*string)(x.UID)) = 
r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Preconditions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.UID != nil { - x.UID = nil - } - } else { - if x.UID == nil { - x.UID = new(pkg1_types.UID) - } - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(x.UID) { - } else { - *((*string)(x.UID)) = r.DecodeString() - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x DeletionPropagation) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DeletionPropagation) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = x.GracePeriodSeconds != nil - yyq2[3] = x.Preconditions != nil - yyq2[4] = x.OrphanDependents != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.GracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy10 := *x.GracePeriodSeconds - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(yy10)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy12 := *x.GracePeriodSeconds - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(yy12)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Preconditions == nil { - r.EncodeNil() - } else { - x.Preconditions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preconditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Preconditions == nil { - r.EncodeNil() - } else { - x.Preconditions.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.OrphanDependents == nil { - r.EncodeNil() - } else { - yy18 := *x.OrphanDependents - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(yy18)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("orphanDependents")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.OrphanDependents == nil { - r.EncodeNil() - } else { - yy20 := *x.OrphanDependents - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeBool(bool(yy20)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.PropagationPolicy == nil { - r.EncodeNil() - } else { - yy23 := *x.PropagationPolicy - yy23.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("PropagationPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PropagationPolicy == nil { - r.EncodeNil() - } else { - yy25 := *x.PropagationPolicy - yy25.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == 
codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "gracePeriodSeconds": - if r.TryDecodeAsNil() { - if x.GracePeriodSeconds != nil { - x.GracePeriodSeconds = nil - } - } else { - if x.GracePeriodSeconds == nil { - x.GracePeriodSeconds = new(int64) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "preconditions": - if r.TryDecodeAsNil() { - if x.Preconditions != nil { - x.Preconditions = nil - } - } else { - if x.Preconditions == nil { - x.Preconditions = new(Preconditions) - } - x.Preconditions.CodecDecodeSelf(d) - } - case "orphanDependents": - if r.TryDecodeAsNil() { - if x.OrphanDependents != nil { - x.OrphanDependents = nil - } - } else { - if x.OrphanDependents == nil { - x.OrphanDependents = new(bool) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(x.OrphanDependents)) = r.DecodeBool() - } - } - case "PropagationPolicy": - if r.TryDecodeAsNil() { - if x.PropagationPolicy != nil { - x.PropagationPolicy = nil - } - } else { - if x.PropagationPolicy == nil { - x.PropagationPolicy = new(DeletionPropagation) - } - x.PropagationPolicy.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv15 := &x.Kind - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv17 := &x.APIVersion - yym18 := z.DecBinary() 
- _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GracePeriodSeconds != nil { - x.GracePeriodSeconds = nil - } - } else { - if x.GracePeriodSeconds == nil { - x.GracePeriodSeconds = new(int64) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Preconditions != nil { - x.Preconditions = nil - } - } else { - if x.Preconditions == nil { - x.Preconditions = new(Preconditions) - } - x.Preconditions.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.OrphanDependents != nil { - x.OrphanDependents = nil - } - } else { - if x.OrphanDependents == nil { - x.OrphanDependents = new(bool) - } - yym23 := z.DecBinary() - _ = yym23 - if false { - } else { - *((*bool)(x.OrphanDependents)) = r.DecodeBool() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PropagationPolicy != nil { - x.PropagationPolicy = nil - } - } else { - if x.PropagationPolicy == nil { - x.PropagationPolicy = new(DeletionPropagation) - } - x.PropagationPolicy.CodecDecodeSelf(d) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = x.LabelSelector != "" - yyq2[3] = x.FieldSelector != "" - yyq2[4] = x.Watch != false - yyq2[5] = x.ResourceVersion != "" - yyq2[6] = x.TimeoutSeconds != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeBool(bool(x.Watch)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("watch")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeBool(bool(x.Watch)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.TimeoutSeconds == nil { - r.EncodeNil() - } else { - yy22 := 
*x.TimeoutSeconds - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeInt(int64(yy22)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TimeoutSeconds == nil { - r.EncodeNil() - } else { - yy24 := *x.TimeoutSeconds - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeInt(int64(yy24)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ListOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ListOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "labelSelector": - if r.TryDecodeAsNil() { - x.LabelSelector = "" - } else { - yyv8 := &x.LabelSelector - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "fieldSelector": - if r.TryDecodeAsNil() { - x.FieldSelector = "" - } else { - yyv10 := &x.FieldSelector - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "watch": - if r.TryDecodeAsNil() { - x.Watch = false - } else { - yyv12 := &x.Watch - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*bool)(yyv12)) = r.DecodeBool() - } - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - yyv14 := &x.ResourceVersion - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - case "timeoutSeconds": - if r.TryDecodeAsNil() { - if x.TimeoutSeconds != nil { - x.TimeoutSeconds = nil - } - } else { - if x.TimeoutSeconds == nil { - x.TimeoutSeconds = new(int64) - } - yym17 := 
z.DecBinary() - _ = yym17 - if false { - } else { - *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv19 := &x.Kind - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv21 := &x.APIVersion - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LabelSelector = "" - } else { - yyv23 := &x.LabelSelector - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*string)(yyv23)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FieldSelector = "" - } else { - yyv25 := &x.FieldSelector - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*string)(yyv25)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Watch = false - } else { - yyv27 := &x.Watch - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*bool)(yyv27)) = r.DecodeBool() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - yyv29 := &x.ResourceVersion - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*string)(yyv29)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TimeoutSeconds != nil { - x.TimeoutSeconds = nil - } - } else { - if x.TimeoutSeconds == nil { - x.TimeoutSeconds = new(int64) - } - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - *((*int64)(x.TimeoutSeconds)) = 
int64(r.DecodeInt(64)) - } - } - for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [10]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = x.Container != "" - yyq2[3] = x.Follow != false - yyq2[4] = x.Previous != false - yyq2[5] = x.SinceSeconds != nil - yyq2[6] = x.SinceTime != nil - yyq2[7] = x.Timestamps != false - yyq2[8] = x.TailLines != nil - yyq2[9] = x.LimitBytes != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(10) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("container")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.Follow)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("follow")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.Follow)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeBool(bool(x.Previous)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("previous")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeBool(bool(x.Previous)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.SinceSeconds == nil { - r.EncodeNil() - } else { - yy19 := *x.SinceSeconds - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(yy19)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("sinceSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SinceSeconds == nil { - r.EncodeNil() - } else { - yy21 := *x.SinceSeconds - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeInt(int64(yy21)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.SinceTime == nil { - r.EncodeNil() - } else { - yym24 := z.EncBinary() - _ = yym24 - if false { - } else if z.HasExtensions() && z.EncExt(x.SinceTime) { - } else if yym24 { - z.EncBinaryMarshal(x.SinceTime) - } else if !yym24 && z.IsJSONHandle() { - z.EncJSONMarshal(x.SinceTime) - } else { - z.EncFallback(x.SinceTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("sinceTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SinceTime == nil { - r.EncodeNil() - } else { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else if z.HasExtensions() && z.EncExt(x.SinceTime) { - } else if yym25 { - z.EncBinaryMarshal(x.SinceTime) - } else if !yym25 && z.IsJSONHandle() { - z.EncJSONMarshal(x.SinceTime) - } else { - z.EncFallback(x.SinceTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeBool(bool(x.Timestamps)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("timestamps")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeBool(bool(x.Timestamps)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.TailLines == nil { - r.EncodeNil() - } else { - yy30 := *x.TailLines - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - r.EncodeInt(int64(yy30)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tailLines")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TailLines == nil { - r.EncodeNil() - } else { - yy32 
:= *x.TailLines - yym33 := z.EncBinary() - _ = yym33 - if false { - } else { - r.EncodeInt(int64(yy32)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.LimitBytes == nil { - r.EncodeNil() - } else { - yy35 := *x.LimitBytes - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeInt(int64(yy35)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("limitBytes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LimitBytes == nil { - r.EncodeNil() - } else { - yy37 := *x.LimitBytes - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - r.EncodeInt(int64(yy37)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodLogOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "container": - if r.TryDecodeAsNil() { - x.Container = "" - } else { - yyv8 := &x.Container - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "follow": - if r.TryDecodeAsNil() { - x.Follow = false - } else { - yyv10 := &x.Follow - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(yyv10)) = r.DecodeBool() - } - } - case "previous": - if r.TryDecodeAsNil() { - x.Previous = false - } else { - yyv12 := &x.Previous - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*bool)(yyv12)) = r.DecodeBool() - } - } - case "sinceSeconds": - if r.TryDecodeAsNil() { - if x.SinceSeconds != nil { - x.SinceSeconds = nil - } - } else { - if x.SinceSeconds == nil { - x.SinceSeconds = new(int64) - } - 
yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64)) - } - } - case "sinceTime": - if r.TryDecodeAsNil() { - if x.SinceTime != nil { - x.SinceTime = nil - } - } else { - if x.SinceTime == nil { - x.SinceTime = new(pkg2_v1.Time) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(x.SinceTime) { - } else if yym17 { - z.DecBinaryUnmarshal(x.SinceTime) - } else if !yym17 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.SinceTime) - } else { - z.DecFallback(x.SinceTime, false) - } - } - case "timestamps": - if r.TryDecodeAsNil() { - x.Timestamps = false - } else { - yyv18 := &x.Timestamps - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*bool)(yyv18)) = r.DecodeBool() - } - } - case "tailLines": - if r.TryDecodeAsNil() { - if x.TailLines != nil { - x.TailLines = nil - } - } else { - if x.TailLines == nil { - x.TailLines = new(int64) - } - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - *((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) - } - } - case "limitBytes": - if r.TryDecodeAsNil() { - if x.LimitBytes != nil { - x.LimitBytes = nil - } - } else { - if x.LimitBytes == nil { - x.LimitBytes = new(int64) - } - yym23 := z.DecBinary() - _ = yym23 - if false { - } else { - *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj24 int - var yyb24 bool - var yyhl24 bool = l >= 0 - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv25 := &x.Kind - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*string)(yyv25)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv27 := &x.APIVersion - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*string)(yyv27)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Container = "" - } else { - yyv29 := &x.Container - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*string)(yyv29)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Follow = false - } else { - yyv31 := &x.Follow - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - *((*bool)(yyv31)) = r.DecodeBool() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Previous = false - } else { - yyv33 := &x.Previous - yym34 := z.DecBinary() - _ = yym34 - if false { - } else { - *((*bool)(yyv33)) = r.DecodeBool() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SinceSeconds != nil { - x.SinceSeconds = nil - } - } else { - if x.SinceSeconds == nil { - x.SinceSeconds = new(int64) - } - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SinceTime != nil { - x.SinceTime = nil - } - } else { - if x.SinceTime == nil { - x.SinceTime = new(pkg2_v1.Time) - } - yym38 := z.DecBinary() - _ = yym38 - if false { - } else if z.HasExtensions() && z.DecExt(x.SinceTime) { - } else if yym38 { - z.DecBinaryUnmarshal(x.SinceTime) - } else if !yym38 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.SinceTime) - } else { - z.DecFallback(x.SinceTime, false) - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Timestamps = false - } else { - yyv39 := &x.Timestamps - yym40 := z.DecBinary() - _ = yym40 - if false { - } else { - *((*bool)(yyv39)) = r.DecodeBool() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TailLines != nil { - x.TailLines = nil - } - } else { - if x.TailLines == nil { - x.TailLines = new(int64) - } - yym42 := z.DecBinary() - _ = yym42 - if false { - } else { - *((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LimitBytes != nil { - x.LimitBytes = nil - } - } else { - if x.LimitBytes == nil { - x.LimitBytes = new(int64) - } - yym44 := z.DecBinary() - _ = yym44 - if false { - } else { - *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) - } - } - for { - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj24-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - 
yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = x.Stdin != false - yyq2[3] = x.Stdout != false - yyq2[4] = x.Stderr != false - yyq2[5] = x.TTY != false - yyq2[6] = x.Container != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdin")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stderr")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - 
} else { - r.EncodeBool(false) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tty")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("container")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAttachOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAttachOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "stdin": - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - yyv8 := &x.Stdin - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(yyv8)) = r.DecodeBool() - } - } - case "stdout": - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - yyv10 := &x.Stdout - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(yyv10)) = r.DecodeBool() - } - } - case "stderr": - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - yyv12 := &x.Stderr - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*bool)(yyv12)) = r.DecodeBool() - } - } - case "tty": - if r.TryDecodeAsNil() { - 
x.TTY = false - } else { - yyv14 := &x.TTY - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*bool)(yyv14)) = r.DecodeBool() - } - } - case "container": - if r.TryDecodeAsNil() { - x.Container = "" - } else { - yyv16 := &x.Container - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv19 := &x.Kind - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv21 := &x.APIVersion - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - yyv23 := &x.Stdin - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*bool)(yyv23)) = r.DecodeBool() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - yyv25 := &x.Stdout - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*bool)(yyv25)) = r.DecodeBool() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - yyv27 := &x.Stderr - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*bool)(yyv27)) = r.DecodeBool() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TTY = false - } else { - yyv29 := &x.TTY - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*bool)(yyv29)) = r.DecodeBool() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Container = "" - } else { - yyv31 := &x.Container - 
yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - *((*string)(yyv31)) = r.DecodeString() - } - } - for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = x.Stdin != false - yyq2[3] = x.Stdout != false - yyq2[4] = x.Stderr != false - yyq2[5] = x.TTY != false - yyq2[6] = x.Container != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdin")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } - } - if yyr2 
|| yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stderr")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tty")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("container")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Command == nil { - r.EncodeNil() - } else { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("command")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Command == nil { - r.EncodeNil() - } else { - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodExecOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodExecOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { 
- if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "stdin": - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - yyv8 := &x.Stdin - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(yyv8)) = r.DecodeBool() - } - } - case "stdout": - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - yyv10 := &x.Stdout - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(yyv10)) = r.DecodeBool() - } - } - case "stderr": - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - yyv12 := &x.Stderr - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*bool)(yyv12)) = r.DecodeBool() - } - } - case "tty": - if r.TryDecodeAsNil() { - x.TTY = false - } else { - yyv14 := &x.TTY - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*bool)(yyv14)) = r.DecodeBool() - } - } - case "container": - if r.TryDecodeAsNil() { - x.Container = "" - } else { - yyv16 := &x.Container - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - case "command": - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv18 := &x.Command - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - z.F.DecSliceStringX(yyv18, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj20 int - var yyb20 bool - var yyhl20 bool = l >= 0 - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv21 := &x.Kind - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv23 := &x.APIVersion - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*string)(yyv23)) = r.DecodeString() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - yyv25 := &x.Stdin - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*bool)(yyv25)) = r.DecodeBool() - } - } - yyj20++ - if yyhl20 { - yyb20 = 
yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - yyv27 := &x.Stdout - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*bool)(yyv27)) = r.DecodeBool() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - yyv29 := &x.Stderr - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*bool)(yyv29)) = r.DecodeBool() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TTY = false - } else { - yyv31 := &x.TTY - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - *((*bool)(yyv31)) = r.DecodeBool() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Container = "" - } else { - yyv33 := &x.Container - yym34 := z.DecBinary() - _ = yym34 - if false { - } else { - *((*string)(yyv33)) = r.DecodeString() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv35 := &x.Command - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - z.F.DecSliceStringX(yyv35, false, d) - } - } - for { - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj20-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodPortForwardOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = len(x.Ports) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := 
z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Ports == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncSliceInt32V(x.Ports, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncSliceInt32V(x.Ports, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodPortForwardOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodPortForwardOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv8 := &x.Ports - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecSliceInt32X(yyv8, false, d) - } - } 
- default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodPortForwardOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv11 := &x.Kind - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv13 := &x.APIVersion - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv15 := &x.Ports - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - z.F.DecSliceInt32X(yyv15, false, d) - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = x.Path != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if 
yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv8 := &x.Path - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 
{ - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv11 := &x.Kind - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv13 := &x.APIVersion - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv15 := &x.Path - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = x.Path != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { 
- r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv8 := &x.Path - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv11 := &x.Kind - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - 
return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv13 := &x.APIVersion - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv15 := &x.Path - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = x.Path != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv8 := &x.Path - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv11 := &x.Kind - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv13 := &x.APIVersion - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv15 := &x.Path - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.Namespace != "" - yyq2[2] = x.Name != "" - yyq2[3] = x.UID != "" - yyq2[4] = x.APIVersion != "" - yyq2[5] = x.ResourceVersion != "" - yyq2[6] = x.FieldPath != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if 
yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } 
- } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - yyv6 := &x.Namespace - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv8 := &x.Name - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - yyv10 := &x.UID - yym11 := z.DecBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.DecExt(yyv10) { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv12 := &x.APIVersion - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - yyv14 := &x.ResourceVersion - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - case "fieldPath": - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - yyv16 := &x.FieldPath - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv19 := &x.Kind - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - yyv21 := &x.Namespace - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv23 := &x.Name - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*string)(yyv23)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - yyv25 := &x.UID - yym26 := z.DecBinary() - _ = yym26 - if false { - } else if z.HasExtensions() && z.DecExt(yyv25) { - } else { - *((*string)(yyv25)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv27 := &x.APIVersion - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*string)(yyv27)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - yyv29 := &x.ResourceVersion - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*string)(yyv29)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - yyv31 := &x.FieldPath - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - *((*string)(yyv31)) = r.DecodeString() - } - } - for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LocalObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LocalObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { 
- } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LocalObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LocalObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv7 := &x.Name - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*string)(yyv7)) = r.DecodeString() - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SerializedReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.Reference - yy10.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reference")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.Reference - yy12.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SerializedReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SerializedReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "reference": - if r.TryDecodeAsNil() { - x.Reference = ObjectReference{} - } else { - yyv8 := &x.Reference - yyv8.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SerializedReference) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv10 := &x.Kind - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv12 := &x.APIVersion - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reference = ObjectReference{} - } else { - yyv14 := &x.Reference - yyv14.CodecDecodeSelf(d) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EventSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Component != "" - yyq2[1] = x.Host != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Component)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("component")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Component)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("host")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.Host)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EventSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EventSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "component": - if r.TryDecodeAsNil() { - x.Component = "" - } else { - yyv4 := &x.Component - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "host": - if r.TryDecodeAsNil() { - x.Host = "" - } else { - yyv6 := &x.Host - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EventSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Component = "" - } else { - yyv9 := &x.Component - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Host = "" - } else { - yyv11 := &x.Host - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [11]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - yyq2[6] = true - yyq2[7] = true - yyq2[8] = true - yyq2[9] = x.Count != 0 - yyq2[10] = x.Type != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(11) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy15 := &x.InvolvedObject - yy15.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("involvedObject")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.InvolvedObject - yy17.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yy26 := &x.Source - yy26.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("source")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy28 := &x.Source - yy28.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yy31 := &x.FirstTimestamp - yym32 := z.EncBinary() - _ = yym32 - if false { - } else if z.HasExtensions() && z.EncExt(yy31) { - } else if yym32 { - z.EncBinaryMarshal(yy31) - } else if !yym32 && z.IsJSONHandle() { - z.EncJSONMarshal(yy31) - } else { - z.EncFallback(yy31) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("firstTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy33 := &x.FirstTimestamp - yym34 := z.EncBinary() - _ = yym34 - if false { - } else if z.HasExtensions() && z.EncExt(yy33) { - } else if yym34 { - z.EncBinaryMarshal(yy33) - } else if !yym34 && z.IsJSONHandle() { - z.EncJSONMarshal(yy33) - } else { - z.EncFallback(yy33) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yy36 := &x.LastTimestamp - yym37 := z.EncBinary() - _ = yym37 - if false { - } else if z.HasExtensions() && z.EncExt(yy36) { - } else if yym37 { - z.EncBinaryMarshal(yy36) - } else if !yym37 && z.IsJSONHandle() { - z.EncJSONMarshal(yy36) - } else { - z.EncFallback(yy36) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy38 := &x.LastTimestamp - yym39 := z.EncBinary() - _ = yym39 - if false { - } else if z.HasExtensions() && z.EncExt(yy38) { - } else if yym39 { - z.EncBinaryMarshal(yy38) - } else if !yym39 && z.IsJSONHandle() { - z.EncJSONMarshal(yy38) - } else { - z.EncFallback(yy38) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - r.EncodeInt(int64(x.Count)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("count")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeInt(int64(x.Count)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - 
if yyq2[10] { - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Event) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Event) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "involvedObject": - if r.TryDecodeAsNil() { - x.InvolvedObject = ObjectReference{} - } else { - yyv10 := &x.InvolvedObject - yyv10.CodecDecodeSelf(d) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv11 := &x.Reason - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv13 := &x.Message - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - case "source": - if r.TryDecodeAsNil() { - x.Source = EventSource{} - } else { - yyv15 := &x.Source - yyv15.CodecDecodeSelf(d) - } - case "firstTimestamp": - if r.TryDecodeAsNil() { - x.FirstTimestamp = pkg2_v1.Time{} - } else { - yyv16 := &x.FirstTimestamp - yym17 
:= z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(yyv16) { - } else if yym17 { - z.DecBinaryUnmarshal(yyv16) - } else if !yym17 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv16) - } else { - z.DecFallback(yyv16, false) - } - } - case "lastTimestamp": - if r.TryDecodeAsNil() { - x.LastTimestamp = pkg2_v1.Time{} - } else { - yyv18 := &x.LastTimestamp - yym19 := z.DecBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.DecExt(yyv18) { - } else if yym19 { - z.DecBinaryUnmarshal(yyv18) - } else if !yym19 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv18) - } else { - z.DecFallback(yyv18, false) - } - } - case "count": - if r.TryDecodeAsNil() { - x.Count = 0 - } else { - yyv20 := &x.Count - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - *((*int32)(yyv20)) = int32(r.DecodeInt(32)) - } - } - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv22 := &x.Type - yym23 := z.DecBinary() - _ = yym23 - if false { - } else { - *((*string)(yyv22)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj24 int - var yyb24 bool - var yyhl24 bool = l >= 0 - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv25 := &x.Kind - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*string)(yyv25)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv27 := &x.APIVersion - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*string)(yyv27)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv29 := &x.ObjectMeta - yym30 := z.DecBinary() - _ = yym30 - if false { - } else if z.HasExtensions() && z.DecExt(yyv29) { - } else { - z.DecFallback(yyv29, false) - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.InvolvedObject = ObjectReference{} - } else { - yyv31 := &x.InvolvedObject - yyv31.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv32 := &x.Reason - yym33 := z.DecBinary() - _ = yym33 - if false { - } else { - *((*string)(yyv32)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { 
- yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv34 := &x.Message - yym35 := z.DecBinary() - _ = yym35 - if false { - } else { - *((*string)(yyv34)) = r.DecodeString() - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Source = EventSource{} - } else { - yyv36 := &x.Source - yyv36.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FirstTimestamp = pkg2_v1.Time{} - } else { - yyv37 := &x.FirstTimestamp - yym38 := z.DecBinary() - _ = yym38 - if false { - } else if z.HasExtensions() && z.DecExt(yyv37) { - } else if yym38 { - z.DecBinaryUnmarshal(yyv37) - } else if !yym38 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv37) - } else { - z.DecFallback(yyv37, false) - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTimestamp = pkg2_v1.Time{} - } else { - yyv39 := &x.LastTimestamp - yym40 := z.DecBinary() - _ = yym40 - if false { - } else if z.HasExtensions() && z.DecExt(yyv39) { - } else if yym40 { - z.DecBinaryUnmarshal(yyv39) - } else if !yym40 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv39) - } else { - z.DecFallback(yyv39, false) - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Count = 0 - } else { - yyv41 := &x.Count - yym42 := z.DecBinary() - _ = yym42 - if false { - } else { - *((*int32)(yyv41)) = int32(r.DecodeInt(32)) - } - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv43 := &x.Type - yym44 := z.DecBinary() - _ = yym44 - if false { - } else { - *((*string)(yyv43)) = r.DecodeString() - } - } - for { - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj24-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EventList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = 
x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceEvent(([]Event)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceEvent(([]Event)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EventList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EventList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceEvent((*[]Event)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EventList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - 
yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceEvent((*[]Event)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *List) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = 
yym16 - if false { - } else { - h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *List) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *List) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *List) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) 
- if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x LimitType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *LimitType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Type != "" - yyq2[1] = len(x.Max) != 0 - yyq2[2] = len(x.Min) != 0 - yyq2[3] = len(x.Default) != 0 - yyq2[4] = len(x.DefaultRequest) != 0 - yyq2[5] = len(x.MaxLimitRequestRatio) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Max == nil { - r.EncodeNil() - } else { - x.Max.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("max")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Max == nil { - r.EncodeNil() - } else { - x.Max.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Min == nil { - r.EncodeNil() - } else { - x.Min.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("min")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Min == nil { - r.EncodeNil() - } else { - x.Min.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Default == nil { - r.EncodeNil() - } else { - x.Default.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("default")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Default == nil { - r.EncodeNil() - } else { - x.Default.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.DefaultRequest == nil { - r.EncodeNil() - } else { - x.DefaultRequest.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("defaultRequest")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DefaultRequest == nil { - r.EncodeNil() - } else { - x.DefaultRequest.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.MaxLimitRequestRatio == nil { - r.EncodeNil() - } else { - x.MaxLimitRequestRatio.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxLimitRequestRatio")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MaxLimitRequestRatio == nil { - r.EncodeNil() - } else { - x.MaxLimitRequestRatio.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRangeItem) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRangeItem) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // 
default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "max": - if r.TryDecodeAsNil() { - x.Max = nil - } else { - yyv5 := &x.Max - yyv5.CodecDecodeSelf(d) - } - case "min": - if r.TryDecodeAsNil() { - x.Min = nil - } else { - yyv6 := &x.Min - yyv6.CodecDecodeSelf(d) - } - case "default": - if r.TryDecodeAsNil() { - x.Default = nil - } else { - yyv7 := &x.Default - yyv7.CodecDecodeSelf(d) - } - case "defaultRequest": - if r.TryDecodeAsNil() { - x.DefaultRequest = nil - } else { - yyv8 := &x.DefaultRequest - yyv8.CodecDecodeSelf(d) - } - case "maxLimitRequestRatio": - if r.TryDecodeAsNil() { - x.MaxLimitRequestRatio = nil - } else { - yyv9 := &x.MaxLimitRequestRatio - yyv9.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv11 := &x.Type - yyv11.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Max = nil - } else { - yyv12 := &x.Max - yyv12.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Min = nil - } else { - yyv13 := &x.Min - yyv13.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Default = nil - } else { - yyv14 := &x.Default - yyv14.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DefaultRequest = nil - } else { - yyv15 := &x.DefaultRequest - yyv15.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxLimitRequestRatio = nil - } else { - yyv16 
:= &x.MaxLimitRequestRatio - yyv16.CodecDecodeSelf(d) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LimitRangeSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Limits == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("limits")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Limits == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRangeSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRangeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "limits": - if r.TryDecodeAsNil() { - x.Limits = nil - } else { - yyv4 := &x.Limits - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x 
*LimitRangeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Limits = nil - } else { - yyv7 := &x.Limits - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LimitRange) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRange) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = LimitRangeSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LimitRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv12 := &x.Kind - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - yyj11++ - if 
yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv14 := &x.APIVersion - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv16 := &x.ObjectMeta - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(yyv16) { - } else { - z.DecFallback(yyv16, false) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = LimitRangeSpec{} - } else { - yyv18 := &x.Spec - yyv18.CodecDecodeSelf(d) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LimitRangeList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - 
if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceLimitRange(([]LimitRange)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceLimitRange(([]LimitRange)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRangeList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRangeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - 
h.decSliceLimitRange((*[]LimitRange)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LimitRangeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceLimitRange((*[]LimitRange)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ResourceQuotaScope) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ResourceQuotaScope) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] 
= len(x.Hard) != 0 - yyq2[1] = len(x.Scopes) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hard")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Scopes == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scopes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Scopes == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceQuotaSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuotaSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "hard": - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv4 := &x.Hard - yyv4.CodecDecodeSelf(d) - } - case "scopes": - if r.TryDecodeAsNil() { - x.Scopes = nil - } else { - yyv5 := &x.Scopes - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, 
d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv8 := &x.Hard - yyv8.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Scopes = nil - } else { - yyv9 := &x.Scopes - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceQuotaStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Hard) != 0 - yyq2[1] = len(x.Used) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hard")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Used == nil { - r.EncodeNil() - } else { - x.Used.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("used")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Used == nil { - r.EncodeNil() - } else { - x.Used.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceQuotaStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - 
x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuotaStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "hard": - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv4 := &x.Hard - yyv4.CodecDecodeSelf(d) - } - case "used": - if r.TryDecodeAsNil() { - x.Used = nil - } else { - yyv5 := &x.Used - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuotaStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv7 := &x.Hard - yyv7.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Used = nil - } else { - yyv8 := &x.Used - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceQuota) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceQuota) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuota) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, 
_ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ResourceQuotaSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ResourceQuotaStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuota) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ResourceQuotaSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ResourceQuotaStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceQuotaList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceQuotaList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuotaList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceResourceQuota((*[]ResourceQuota)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuotaList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if 
yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceResourceQuota((*[]ResourceQuota)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = len(x.Data) != 0 - yyq2[4] = len(x.StringData) != 0 - yyq2[5] = x.Type != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - 
if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Data == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.StringData == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - z.F.EncMapStringStringV(x.StringData, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stringData")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StringData == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - z.F.EncMapStringStringV(x.StringData, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Secret) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Secret) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - 
_, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv10 := &x.Data - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decMapstringSliceuint8((*map[string][]uint8)(yyv10), d) - } - } - case "stringData": - if r.TryDecodeAsNil() { - x.StringData = nil - } else { - yyv12 := &x.StringData - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecMapStringStringX(yyv12, false, d) - } - } - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv14 := &x.Type - yyv14.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Secret) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj15 int - var yyb15 bool - var yyhl15 bool = l >= 0 - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv16 := &x.Kind - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv18 := &x.APIVersion - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*string)(yyv18)) = r.DecodeString() - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv20 := &x.ObjectMeta - yym21 := z.DecBinary() - _ = yym21 - if false { - } else if z.HasExtensions() && z.DecExt(yyv20) { - } else { - z.DecFallback(yyv20, false) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv22 := &x.Data - yym23 := z.DecBinary() - _ = yym23 - if false { - } else { - h.decMapstringSliceuint8((*map[string][]uint8)(yyv22), d) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StringData = nil - } else { - yyv24 := &x.StringData - yym25 := z.DecBinary() - _ = yym25 - if false { - } else { - z.F.DecMapStringStringX(yyv24, false, d) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv26 := &x.Type - yyv26.CodecDecodeSelf(d) - } - for { - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj15-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x SecretType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *SecretType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *SecretList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceSecret(([]Secret)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceSecret(([]Secret)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecretList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecretList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if 
r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceSecret((*[]Secret)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecretList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceSecret((*[]Secret)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ConfigMap) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = len(x.Data) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 
= 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Data == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - z.F.EncMapStringStringV(x.Data, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - z.F.EncMapStringStringV(x.Data, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ConfigMap) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ConfigMap) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv10 := &x.Data - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - z.F.DecMapStringStringX(yyv10, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ConfigMap) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv19 := &x.Data - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - z.F.DecMapStringStringX(yyv19, false, d) - } - } - 
for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceConfigMap(([]ConfigMap)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceConfigMap(([]ConfigMap)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ConfigMapList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ConfigMapList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceConfigMap((*[]ConfigMap)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ConfigMapList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { 
- *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceConfigMap((*[]ConfigMap)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ComponentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ComponentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ComponentCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.Message != "" - yyq2[3] = x.Error != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Status.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Status.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Error)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("error")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Error)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ComponentCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ComponentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv6 := &x.Message - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "error": - if r.TryDecodeAsNil() { - x.Error = "" - } else { - yyv8 := &x.Error - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ComponentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 
= r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv11 := &x.Type - yyv11.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv13 := &x.Message - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Error = "" - } else { - yyv15 := &x.Error - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ComponentStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = len(x.Conditions) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ComponentStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ComponentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case 
"metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv10 := &x.Conditions - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceComponentCondition((*[]ComponentCondition)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ComponentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv19 := &x.Conditions - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceComponentCondition((*[]ComponentCondition)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ComponentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 
- for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ComponentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ComponentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceComponentStatus((*[]ComponentStatus)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ComponentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := 
&x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceComponentStatus((*[]ComponentStatus)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DownwardAPIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Items) != 0 - yyq2[1] = x.DefaultMode != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Items == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.DefaultMode == nil { - r.EncodeNil() - } else { - yy7 := *x.DefaultMode - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(yy7)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DefaultMode == nil { - r.EncodeNil() - } else { - yy9 := *x.DefaultMode - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DownwardAPIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DownwardAPIVolumeSource) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4 := &x.Items - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4), d) - } - } - case "defaultMode": - if r.TryDecodeAsNil() { - if x.DefaultMode != nil { - x.DefaultMode = nil - } - } else { - if x.DefaultMode == nil { - x.DefaultMode = new(int32) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DownwardAPIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv9 := &x.Items - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DefaultMode != nil { - x.DefaultMode = nil - } - } else { - if x.DefaultMode == nil { - x.DefaultMode = new(int32) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FieldRef != nil - yyq2[2] = x.ResourceFieldRef != nil - yyq2[3] = x.Mode != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = 
yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.FieldRef == nil { - r.EncodeNil() - } else { - x.FieldRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FieldRef == nil { - r.EncodeNil() - } else { - x.FieldRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Mode == nil { - r.EncodeNil() - } else { - yy13 := *x.Mode - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(yy13)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("mode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Mode == nil { - r.EncodeNil() - } else { - yy15 := *x.Mode - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(yy15)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DownwardAPIVolumeFile) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DownwardAPIVolumeFile) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv4 := &x.Path - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "fieldRef": - if r.TryDecodeAsNil() { - if x.FieldRef != nil { - x.FieldRef = nil - } - } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) - } - x.FieldRef.CodecDecodeSelf(d) - } - case "resourceFieldRef": - if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } - } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) - } - x.ResourceFieldRef.CodecDecodeSelf(d) - } - case "mode": - if r.TryDecodeAsNil() { - if x.Mode != nil { - x.Mode = nil - } - } else { - if x.Mode == nil { - x.Mode = new(int32) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv11 := &x.Path - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FieldRef != nil { - x.FieldRef = nil - } - } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) - } - x.FieldRef.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } - } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) - } - x.ResourceFieldRef.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Mode != nil { - x.Mode = nil - } - } else { - if x.Mode == nil { - x.Mode = new(int32) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DownwardAPIProjection) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Items) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Items == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DownwardAPIProjection) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DownwardAPIProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4 := &x.Items - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DownwardAPIProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, 
_, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv7 := &x.Items - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Capabilities != nil - yyq2[1] = x.Privileged != nil - yyq2[2] = x.SELinuxOptions != nil - yyq2[3] = x.RunAsUser != nil - yyq2[4] = x.RunAsNonRoot != nil - yyq2[5] = x.ReadOnlyRootFilesystem != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Capabilities == nil { - r.EncodeNil() - } else { - x.Capabilities.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capabilities == nil { - r.EncodeNil() - } else { - x.Capabilities.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Privileged == nil { - r.EncodeNil() - } else { - yy7 := *x.Privileged - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(yy7)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("privileged")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Privileged == nil { - r.EncodeNil() - } else { - yy9 := *x.Privileged - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(yy9)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.RunAsUser == nil { - 
r.EncodeNil() - } else { - yy15 := *x.RunAsUser - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(yy15)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsUser == nil { - r.EncodeNil() - } else { - yy17 := *x.RunAsUser - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeInt(int64(yy17)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.RunAsNonRoot == nil { - r.EncodeNil() - } else { - yy20 := *x.RunAsNonRoot - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeBool(bool(yy20)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsNonRoot == nil { - r.EncodeNil() - } else { - yy22 := *x.RunAsNonRoot - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeBool(bool(yy22)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.ReadOnlyRootFilesystem == nil { - r.EncodeNil() - } else { - yy25 := *x.ReadOnlyRootFilesystem - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeBool(bool(yy25)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ReadOnlyRootFilesystem == nil { - r.EncodeNil() - } else { - yy27 := *x.ReadOnlyRootFilesystem - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeBool(bool(yy27)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecurityContext) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "capabilities": - if r.TryDecodeAsNil() { - if x.Capabilities != nil { - x.Capabilities = nil - } - } else { - if x.Capabilities == nil { - x.Capabilities = new(Capabilities) - } - x.Capabilities.CodecDecodeSelf(d) - } - case "privileged": - if r.TryDecodeAsNil() { - if x.Privileged != nil { - x.Privileged = nil - } - } else { - if x.Privileged == nil { - x.Privileged = new(bool) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*bool)(x.Privileged)) = r.DecodeBool() - } - } - case "seLinuxOptions": - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - case "runAsUser": - if r.TryDecodeAsNil() { - if x.RunAsUser != nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - case "runAsNonRoot": - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - case "readOnlyRootFilesystem": - if r.TryDecodeAsNil() { - if x.ReadOnlyRootFilesystem != nil { - x.ReadOnlyRootFilesystem = nil - } - } else { - if x.ReadOnlyRootFilesystem == nil { - x.ReadOnlyRootFilesystem = new(bool) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Capabilities != nil { - x.Capabilities = nil - } - } else { - if x.Capabilities == nil { - x.Capabilities = new(Capabilities) - } - x.Capabilities.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Privileged != nil { - x.Privileged = nil - } - } else { - if x.Privileged == nil { - x.Privileged = new(bool) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*bool)(x.Privileged)) = r.DecodeBool() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - 
} else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsUser != nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ReadOnlyRootFilesystem != nil { - x.ReadOnlyRootFilesystem = nil - } - } else { - if x.ReadOnlyRootFilesystem == nil { - x.ReadOnlyRootFilesystem = new(bool) - } - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SELinuxOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.User != "" - yyq2[1] = x.Role != "" - yyq2[2] = x.Type != "" - yyq2[3] = x.Level != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Role)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("role")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Role)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Level)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("level")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Level)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SELinuxOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SELinuxOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "user": - if r.TryDecodeAsNil() { - x.User = "" - } else { - yyv4 := &x.User - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "role": - if r.TryDecodeAsNil() { - x.Role = "" - } else { - yyv6 := &x.Role - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv8 := &x.Type - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - 
case "level": - if r.TryDecodeAsNil() { - x.Level = "" - } else { - yyv10 := &x.Level - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SELinuxOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = "" - } else { - yyv13 := &x.User - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Role = "" - } else { - yyv15 := &x.Role - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv17 := &x.Type - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Level = "" - } else { - yyv19 := &x.Level - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RangeAllocation) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Range)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("range")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Range)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RangeAllocation) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RangeAllocation) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "range": - if r.TryDecodeAsNil() { - x.Range = "" - } else { - yyv10 := &x.Range - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv12 := &x.Data - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *yyv12 = r.DecodeBytes(*(*[]byte)(yyv12), false, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv15 := &x.Kind - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv17 := &x.APIVersion - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv19 := &x.ObjectMeta - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(yyv19) { - } else { - 
z.DecFallback(yyv19, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Range = "" - } else { - yyv21 := &x.Range - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv23 := &x.Data - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *yyv23 = r.DecodeBytes(*(*[]byte)(yyv23), false, false) - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSlicev1_OwnerReference(v []pkg2_v1.OwnerReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yym3 := z.EncBinary() - _ = yym3 - if false { - } else if z.HasExtensions() && z.EncExt(yy2) { - } else { - z.EncFallback(yy2) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicev1_OwnerReference(v *[]pkg2_v1.OwnerReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []pkg2_v1.OwnerReference{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 80) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]pkg2_v1.OwnerReference, yyrl1) - } - } else { - yyv1 = make([]pkg2_v1.OwnerReference, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg2_v1.OwnerReference{} - } else { - yyv2 := &yyv1[yyj1] - yym3 := z.DecBinary() - _ = yym3 - if false { - } else if z.HasExtensions() && z.DecExt(yyv2) { - } else { - z.DecFallback(yyv2, false) - } - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, pkg2_v1.OwnerReference{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg2_v1.OwnerReference{} - } else { - yyv4 := &yyv1[yyj1] - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, pkg2_v1.OwnerReference{}) // var yyz1 
pkg2_v1.OwnerReference - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg2_v1.OwnerReference{} - } else { - yyv6 := &yyv1[yyj1] - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else { - z.DecFallback(yyv6, false) - } - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg2_v1.OwnerReference{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePersistentVolumeAccessMode(v []PersistentVolumeAccessMode, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePersistentVolumeAccessMode(v *[]PersistentVolumeAccessMode, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PersistentVolumeAccessMode{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PersistentVolumeAccessMode, yyrl1) - } - } else { - yyv1 = make([]PersistentVolumeAccessMode, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 PersistentVolumeAccessMode - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PersistentVolumeAccessMode{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePersistentVolume(v []PersistentVolume, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePersistentVolume(v *[]PersistentVolume, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := 
z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PersistentVolume{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 528) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PersistentVolume, yyrl1) - } - } else { - yyv1 = make([]PersistentVolume, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolume{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PersistentVolume{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolume{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PersistentVolume{}) // var yyz1 PersistentVolume - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolume{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PersistentVolume{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePersistentVolumeClaim(v []PersistentVolumeClaim, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePersistentVolumeClaim(v *[]PersistentVolumeClaim, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PersistentVolumeClaim{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 376) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PersistentVolumeClaim, yyrl1) - } - } else { - yyv1 = make([]PersistentVolumeClaim, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolumeClaim{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PersistentVolumeClaim{}) - 
yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolumeClaim{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PersistentVolumeClaim{}) // var yyz1 PersistentVolumeClaim - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolumeClaim{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PersistentVolumeClaim{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceKeyToPath(v []KeyToPath, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []KeyToPath{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]KeyToPath, yyrl1) - } - } else { - yyv1 = make([]KeyToPath, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = KeyToPath{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, KeyToPath{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = KeyToPath{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, KeyToPath{}) // var yyz1 KeyToPath - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = KeyToPath{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []KeyToPath{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceVolumeProjection(v []VolumeProjection, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) 
decSliceVolumeProjection(v *[]VolumeProjection, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []VolumeProjection{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]VolumeProjection, yyrl1) - } - } else { - yyv1 = make([]VolumeProjection, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = VolumeProjection{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, VolumeProjection{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = VolumeProjection{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, VolumeProjection{}) // var yyz1 VolumeProjection - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = VolumeProjection{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []VolumeProjection{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceHTTPHeader(v []HTTPHeader, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHTTPHeader(v *[]HTTPHeader, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []HTTPHeader{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]HTTPHeader, yyrl1) - } - } else { - yyv1 = make([]HTTPHeader, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPHeader{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for 
; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HTTPHeader{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPHeader{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HTTPHeader{}) // var yyz1 HTTPHeader - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPHeader{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HTTPHeader{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceCapability(v []Capability, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCapability(v *[]Capability, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Capability{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Capability, yyrl1) - } - } else { - yyv1 = make([]Capability, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 Capability - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Capability{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceContainerPort(v []ContainerPort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainerPort(v *[]ContainerPort, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, 
_ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ContainerPort{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ContainerPort, yyrl1) - } - } else { - yyv1 = make([]ContainerPort, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerPort{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ContainerPort{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerPort{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ContainerPort{}) // var yyz1 ContainerPort - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerPort{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ContainerPort{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEnvFromSource(v []EnvFromSource, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEnvFromSource(v *[]EnvFromSource, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []EnvFromSource{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]EnvFromSource, yyrl1) - } - } else { - yyv1 = make([]EnvFromSource, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EnvFromSource{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, EnvFromSource{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = 
EnvFromSource{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, EnvFromSource{}) // var yyz1 EnvFromSource - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = EnvFromSource{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []EnvFromSource{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEnvVar(v []EnvVar, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEnvVar(v *[]EnvVar, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []EnvVar{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]EnvVar, yyrl1) - } - } else { - yyv1 = make([]EnvVar, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EnvVar{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, EnvVar{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EnvVar{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, EnvVar{}) // var yyz1 EnvVar - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = EnvVar{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []EnvVar{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceVolumeMount(v []VolumeMount, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceVolumeMount(v *[]VolumeMount, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 
:= z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []VolumeMount{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]VolumeMount, yyrl1) - } - } else { - yyv1 = make([]VolumeMount, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = VolumeMount{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, VolumeMount{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = VolumeMount{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, VolumeMount{}) // var yyz1 VolumeMount - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = VolumeMount{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []VolumeMount{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNodeSelectorTerm(v []NodeSelectorTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNodeSelectorTerm(v *[]NodeSelectorTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NodeSelectorTerm{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NodeSelectorTerm, yyrl1) - } - } else { - yyv1 = make([]NodeSelectorTerm, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorTerm{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NodeSelectorTerm{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorTerm{} - } else { - yyv3 := 
&yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NodeSelectorTerm{}) // var yyz1 NodeSelectorTerm - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorTerm{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NodeSelectorTerm{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNodeSelectorRequirement(v []NodeSelectorRequirement, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNodeSelectorRequirement(v *[]NodeSelectorRequirement, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NodeSelectorRequirement{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NodeSelectorRequirement, yyrl1) - } - } else { - yyv1 = make([]NodeSelectorRequirement, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorRequirement{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NodeSelectorRequirement{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorRequirement{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NodeSelectorRequirement{}) // var yyz1 NodeSelectorRequirement - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorRequirement{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NodeSelectorRequirement{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePodAffinityTerm(v []PodAffinityTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodAffinityTerm(v *[]PodAffinityTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PodAffinityTerm{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PodAffinityTerm, yyrl1) - } - } else { - yyv1 = make([]PodAffinityTerm, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodAffinityTerm{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodAffinityTerm{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodAffinityTerm{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodAffinityTerm{}) // var yyz1 PodAffinityTerm - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodAffinityTerm{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodAffinityTerm{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceWeightedPodAffinityTerm(v []WeightedPodAffinityTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceWeightedPodAffinityTerm(v *[]WeightedPodAffinityTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []WeightedPodAffinityTerm{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]WeightedPodAffinityTerm, yyrl1) - } - } else { - yyv1 = make([]WeightedPodAffinityTerm, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - 
yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = WeightedPodAffinityTerm{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, WeightedPodAffinityTerm{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = WeightedPodAffinityTerm{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, WeightedPodAffinityTerm{}) // var yyz1 WeightedPodAffinityTerm - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = WeightedPodAffinityTerm{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []WeightedPodAffinityTerm{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePreferredSchedulingTerm(v []PreferredSchedulingTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePreferredSchedulingTerm(v *[]PreferredSchedulingTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PreferredSchedulingTerm{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PreferredSchedulingTerm, yyrl1) - } - } else { - yyv1 = make([]PreferredSchedulingTerm, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PreferredSchedulingTerm{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PreferredSchedulingTerm{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PreferredSchedulingTerm{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PreferredSchedulingTerm{}) // var yyz1 PreferredSchedulingTerm - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PreferredSchedulingTerm{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PreferredSchedulingTerm{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { 
- *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceVolume(v []Volume, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceVolume(v *[]Volume, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Volume{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 224) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Volume, yyrl1) - } - } else { - yyv1 = make([]Volume, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Volume{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Volume{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Volume{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Volume{}) // var yyz1 Volume - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Volume{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Volume{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceContainer(v []Container, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainer(v *[]Container, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Container{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Container, yyrl1) - } - } else { - yyv1 = make([]Container, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - 
copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Container{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Container{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Container{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Container{}) // var yyz1 Container - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Container{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Container{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceLocalObjectReference(v []LocalObjectReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLocalObjectReference(v *[]LocalObjectReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []LocalObjectReference{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]LocalObjectReference, yyrl1) - } - } else { - yyv1 = make([]LocalObjectReference, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LocalObjectReference{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LocalObjectReference{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LocalObjectReference{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LocalObjectReference{}) // var yyz1 LocalObjectReference - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = LocalObjectReference{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LocalObjectReference{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - 
*v = yyv1 - } -} - -func (x codecSelfer1234) encSliceToleration(v []Toleration, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceToleration(v *[]Toleration, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Toleration{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Toleration, yyrl1) - } - } else { - yyv1 = make([]Toleration, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Toleration{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Toleration{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Toleration{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Toleration{}) // var yyz1 Toleration - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Toleration{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Toleration{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePodCondition(v []PodCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodCondition(v *[]PodCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PodCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PodCondition, yyrl1) - } - } else { - yyv1 = 
make([]PodCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodCondition{}) // var yyz1 PodCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceContainerStatus(v []ContainerStatus, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainerStatus(v *[]ContainerStatus, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ContainerStatus{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ContainerStatus, yyrl1) - } - } else { - yyv1 = make([]ContainerStatus, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerStatus{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ContainerStatus{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerStatus{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ContainerStatus{}) // var yyz1 ContainerStatus - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerStatus{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ContainerStatus{} - yyc1 = true - 
} - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePod(v []Pod, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Pod{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 736) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Pod, yyrl1) - } - } else { - yyv1 = make([]Pod, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Pod{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Pod{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Pod{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Pod{}) // var yyz1 Pod - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Pod{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Pod{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePodTemplate(v []PodTemplate, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PodTemplate{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 784) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PodTemplate, yyrl1) - } - } else { - yyv1 = make([]PodTemplate, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 
{ - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodTemplate{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodTemplate{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodTemplate{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodTemplate{}) // var yyz1 PodTemplate - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodTemplate{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodTemplate{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceReplicationControllerCondition(v []ReplicationControllerCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceReplicationControllerCondition(v *[]ReplicationControllerCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ReplicationControllerCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 88) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ReplicationControllerCondition, yyrl1) - } - } else { - yyv1 = make([]ReplicationControllerCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicationControllerCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ReplicationControllerCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicationControllerCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ReplicationControllerCondition{}) // var yyz1 ReplicationControllerCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicationControllerCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = 
yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ReplicationControllerCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceReplicationController(v []ReplicationController, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceReplicationController(v *[]ReplicationController, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ReplicationController{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 336) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ReplicationController, yyrl1) - } - } else { - yyv1 = make([]ReplicationController, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicationController{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ReplicationController{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicationController{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ReplicationController{}) // var yyz1 ReplicationController - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicationController{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ReplicationController{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceLoadBalancerIngress(v []LoadBalancerIngress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLoadBalancerIngress(v *[]LoadBalancerIngress, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []LoadBalancerIngress{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 
int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]LoadBalancerIngress, yyrl1) - } - } else { - yyv1 = make([]LoadBalancerIngress, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LoadBalancerIngress{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LoadBalancerIngress{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LoadBalancerIngress{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LoadBalancerIngress{}) // var yyz1 LoadBalancerIngress - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = LoadBalancerIngress{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LoadBalancerIngress{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceServicePort(v []ServicePort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceServicePort(v *[]ServicePort, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ServicePort{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 80) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ServicePort, yyrl1) - } - } else { - yyv1 = make([]ServicePort, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServicePort{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ServicePort{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServicePort{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ServicePort{}) // var yyz1 ServicePort - yyc1 = true - } - 
yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServicePort{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ServicePort{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceService(v []Service, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceService(v *[]Service, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Service{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 464) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Service, yyrl1) - } - } else { - yyv1 = make([]Service, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Service{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Service{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Service{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Service{}) // var yyz1 Service - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Service{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Service{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceObjectReference(v []ObjectReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceObjectReference(v *[]ObjectReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ObjectReference{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var 
yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ObjectReference, yyrl1) - } - } else { - yyv1 = make([]ObjectReference, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ObjectReference{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ObjectReference{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ObjectReference{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ObjectReference{}) // var yyz1 ObjectReference - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ObjectReference{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ObjectReference{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceServiceAccount(v []ServiceAccount, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceServiceAccount(v *[]ServiceAccount, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ServiceAccount{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 312) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ServiceAccount, yyrl1) - } - } else { - yyv1 = make([]ServiceAccount, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServiceAccount{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ServiceAccount{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServiceAccount{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ServiceAccount{}) // var yyz1 ServiceAccount - yyc1 
= true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServiceAccount{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ServiceAccount{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEndpointSubset(v []EndpointSubset, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpointSubset(v *[]EndpointSubset, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []EndpointSubset{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]EndpointSubset, yyrl1) - } - } else { - yyv1 = make([]EndpointSubset, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointSubset{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, EndpointSubset{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointSubset{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, EndpointSubset{}) // var yyz1 EndpointSubset - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointSubset{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []EndpointSubset{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEndpointAddress(v []EndpointAddress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpointAddress(v *[]EndpointAddress, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = 
[]EndpointAddress{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]EndpointAddress, yyrl1) - } - } else { - yyv1 = make([]EndpointAddress, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointAddress{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, EndpointAddress{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointAddress{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, EndpointAddress{}) // var yyz1 EndpointAddress - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointAddress{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []EndpointAddress{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEndpointPort(v []EndpointPort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpointPort(v *[]EndpointPort, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []EndpointPort{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]EndpointPort, yyrl1) - } - } else { - yyv1 = make([]EndpointPort, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointPort{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, EndpointPort{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointPort{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); 
yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, EndpointPort{}) // var yyz1 EndpointPort - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointPort{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []EndpointPort{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEndpoints(v []Endpoints, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpoints(v *[]Endpoints, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Endpoints{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Endpoints, yyrl1) - } - } else { - yyv1 = make([]Endpoints, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Endpoints{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Endpoints{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Endpoints{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Endpoints{}) // var yyz1 Endpoints - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Endpoints{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Endpoints{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceTaint(v []Taint, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceTaint(v *[]Taint, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Taint{} - yyc1 = true 
- } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Taint, yyrl1) - } - } else { - yyv1 = make([]Taint, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Taint{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Taint{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Taint{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Taint{}) // var yyz1 Taint - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Taint{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Taint{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNodeCondition(v []NodeCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNodeCondition(v *[]NodeCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NodeCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NodeCondition, yyrl1) - } - } else { - yyv1 = make([]NodeCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NodeCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NodeCondition{}) // var yyz1 NodeCondition - yyc1 = true - 
} - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NodeCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNodeAddress(v []NodeAddress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNodeAddress(v *[]NodeAddress, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NodeAddress{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NodeAddress, yyrl1) - } - } else { - yyv1 = make([]NodeAddress, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeAddress{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NodeAddress{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeAddress{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NodeAddress{}) // var yyz1 NodeAddress - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeAddress{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NodeAddress{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceContainerImage(v []ContainerImage, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainerImage(v *[]ContainerImage, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ContainerImage{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = 
yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ContainerImage, yyrl1) - } - } else { - yyv1 = make([]ContainerImage, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerImage{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ContainerImage{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerImage{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ContainerImage{}) // var yyz1 ContainerImage - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerImage{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ContainerImage{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceUniqueVolumeName(v []UniqueVolumeName, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceUniqueVolumeName(v *[]UniqueVolumeName, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []UniqueVolumeName{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]UniqueVolumeName, yyrl1) - } - } else { - yyv1 = make([]UniqueVolumeName, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 UniqueVolumeName - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if 
r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []UniqueVolumeName{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceAttachedVolume(v []AttachedVolume, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceAttachedVolume(v *[]AttachedVolume, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []AttachedVolume{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]AttachedVolume, yyrl1) - } - } else { - yyv1 = make([]AttachedVolume, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = AttachedVolume{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, AttachedVolume{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = AttachedVolume{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, AttachedVolume{}) // var yyz1 AttachedVolume - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = AttachedVolume{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []AttachedVolume{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePreferAvoidPodsEntry(v []PreferAvoidPodsEntry, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePreferAvoidPodsEntry(v *[]PreferAvoidPodsEntry, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PreferAvoidPodsEntry{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = 
yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PreferAvoidPodsEntry, yyrl1) - } - } else { - yyv1 = make([]PreferAvoidPodsEntry, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PreferAvoidPodsEntry{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PreferAvoidPodsEntry{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PreferAvoidPodsEntry{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PreferAvoidPodsEntry{}) // var yyz1 PreferAvoidPodsEntry - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PreferAvoidPodsEntry{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PreferAvoidPodsEntry{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encResourceList(v ResourceList, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yyk1.CodecEncodeSelf(e) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3 := &yyv1 - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(yy3) { - } else if !yym4 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3) - } else { - z.EncFallback(yy3) - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decResourceList(v *ResourceList, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyl1 := r.ReadMapStart() - yybh1 := z.DecBasicHandle() - if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 72) - yyv1 = make(map[ResourceName]pkg3_resource.Quantity, yyrl1) - *v = yyv1 - } - var yymk1 ResourceName - var yymv1 pkg3_resource.Quantity - var yymg1 bool - if yybh1.MapValueReset { - yymg1 = true - } - if yyl1 > 0 { - for yyj1 := 0; yyj1 < yyl1; yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yyv2 := &yymk1 - yyv2.CodecDecodeSelf(d) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = pkg3_resource.Quantity{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = pkg3_resource.Quantity{} - } else { - yyv3 := &yymv1 - yym4 := z.DecBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3) { - } else if !yym4 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3) - } else { - z.DecFallback(yyv3, false) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } 
else if yyl1 < 0 { - for yyj1 := 0; !r.CheckBreak(); yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yyv5 := &yymk1 - yyv5.CodecDecodeSelf(d) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = pkg3_resource.Quantity{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = pkg3_resource.Quantity{} - } else { - yyv6 := &yymv1 - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } // else len==0: TODO: Should we clear map entries? - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encSliceNode(v []Node, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNode(v *[]Node, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Node{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 656) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Node, yyrl1) - } - } else { - yyv1 = make([]Node, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Node{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Node{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Node{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Node{}) // var yyz1 Node - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Node{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Node{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceFinalizerName(v []FinalizerName, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceFinalizerName(v 
*[]FinalizerName, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []FinalizerName{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]FinalizerName, yyrl1) - } - } else { - yyv1 = make([]FinalizerName, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 FinalizerName - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []FinalizerName{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNamespace(v []Namespace, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNamespace(v *[]Namespace, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Namespace{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Namespace, yyrl1) - } - } else { - yyv1 = make([]Namespace, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Namespace{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Namespace{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Namespace{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - 
} - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Namespace{}) // var yyz1 Namespace - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Namespace{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Namespace{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEvent(v []Event, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEvent(v *[]Event, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Event{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 504) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Event, yyrl1) - } - } else { - yyv1 = make([]Event, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Event{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Event{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Event{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Event{}) // var yyz1 Event - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Event{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Event{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceruntime_RawExtension(v []pkg5_runtime.RawExtension, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yym3 := z.EncBinary() - _ = yym3 - if false { - } else if z.HasExtensions() && z.EncExt(yy2) { - } else if !yym3 && z.IsJSONHandle() { - z.EncJSONMarshal(yy2) - } else { - z.EncFallback(yy2) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg5_runtime.RawExtension, d *codec1978.Decoder) { - var 
h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []pkg5_runtime.RawExtension{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]pkg5_runtime.RawExtension, yyrl1) - } - } else { - yyv1 = make([]pkg5_runtime.RawExtension, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg5_runtime.RawExtension{} - } else { - yyv2 := &yyv1[yyj1] - yym3 := z.DecBinary() - _ = yym3 - if false { - } else if z.HasExtensions() && z.DecExt(yyv2) { - } else if !yym3 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv2) - } else { - z.DecFallback(yyv2, false) - } - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, pkg5_runtime.RawExtension{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg5_runtime.RawExtension{} - } else { - yyv4 := &yyv1[yyj1] - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4) - } else { - z.DecFallback(yyv4, false) - } - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, pkg5_runtime.RawExtension{}) // var yyz1 pkg5_runtime.RawExtension - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg5_runtime.RawExtension{} - } else { - yyv6 := &yyv1[yyj1] - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg5_runtime.RawExtension{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceLimitRangeItem(v []LimitRangeItem, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLimitRangeItem(v *[]LimitRangeItem, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []LimitRangeItem{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, 
yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]LimitRangeItem, yyrl1) - } - } else { - yyv1 = make([]LimitRangeItem, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRangeItem{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LimitRangeItem{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRangeItem{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LimitRangeItem{}) // var yyz1 LimitRangeItem - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRangeItem{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LimitRangeItem{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceLimitRange(v []LimitRange, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLimitRange(v *[]LimitRange, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []LimitRange{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]LimitRange, yyrl1) - } - } else { - yyv1 = make([]LimitRange, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRange{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LimitRange{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRange{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LimitRange{}) // var yyz1 LimitRange - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRange{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } 
- - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LimitRange{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceResourceQuotaScope(v []ResourceQuotaScope, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceResourceQuotaScope(v *[]ResourceQuotaScope, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ResourceQuotaScope{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ResourceQuotaScope, yyrl1) - } - } else { - yyv1 = make([]ResourceQuotaScope, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 ResourceQuotaScope - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ResourceQuotaScope{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceResourceQuota(v []ResourceQuota, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceResourceQuota(v *[]ResourceQuota, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ResourceQuota{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 304) - if yyrt1 { - if yyrl1 <= 
cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ResourceQuota, yyrl1) - } - } else { - yyv1 = make([]ResourceQuota, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ResourceQuota{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ResourceQuota{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ResourceQuota{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ResourceQuota{}) // var yyz1 ResourceQuota - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ResourceQuota{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ResourceQuota{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encMapstringSliceuint8(v map[string][]uint8, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk1)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv1 == nil { - r.EncodeNil() - } else { - yym3 := z.EncBinary() - _ = yym3 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(yyv1)) - } - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringSliceuint8(v *map[string][]uint8, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyl1 := r.ReadMapStart() - yybh1 := z.DecBasicHandle() - if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) - yyv1 = make(map[string][]uint8, yyrl1) - *v = yyv1 - } - var yymk1 string - var yymv1 []uint8 - var yymg1 bool - if yybh1.MapValueReset { - yymg1 = true - } - if yyl1 > 0 { - for yyj1 := 0; yyj1 < yyl1; yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yyv2 := &yymk1 - yym3 := z.DecBinary() - _ = yym3 - if false { - } else { - *((*string)(yyv2)) = r.DecodeString() - } - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv4 := &yymv1 - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *yyv4 = r.DecodeBytes(*(*[]byte)(yyv4), false, false) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } else if yyl1 < 0 { - for yyj1 := 0; !r.CheckBreak(); yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yyv6 := &yymk1 - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv8 := &yymv1 - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *yyv8 = r.DecodeBytes(*(*[]byte)(yyv8), false, false) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } // else len==0: TODO: Should we clear map entries? - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encSliceuint8(v []uint8, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(v)) -} - -func (x codecSelfer1234) decSliceuint8(v *[]uint8, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - *v = r.DecodeBytes(*((*[]byte)(v)), false, false) -} - -func (x codecSelfer1234) encSliceSecret(v []Secret, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceSecret(v *[]Secret, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Secret{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Secret, yyrl1) - } - } else { - yyv1 = make([]Secret, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Secret{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Secret{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Secret{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Secret{}) // var yyz1 Secret - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Secret{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Secret{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceConfigMap(v []ConfigMap, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x 
codecSelfer1234) decSliceConfigMap(v *[]ConfigMap, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ConfigMap{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ConfigMap, yyrl1) - } - } else { - yyv1 = make([]ConfigMap, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ConfigMap{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ConfigMap{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ConfigMap{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ConfigMap{}) // var yyz1 ConfigMap - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ConfigMap{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ConfigMap{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceComponentCondition(v []ComponentCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceComponentCondition(v *[]ComponentCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ComponentCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ComponentCondition, yyrl1) - } - } else { - yyv1 = make([]ComponentCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; 
yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ComponentCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ComponentCondition{}) // var yyz1 ComponentCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ComponentCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceComponentStatus(v []ComponentStatus, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceComponentStatus(v *[]ComponentStatus, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ComponentStatus{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ComponentStatus, yyrl1) - } - } else { - yyv1 = make([]ComponentStatus, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentStatus{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ComponentStatus{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentStatus{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ComponentStatus{}) // var yyz1 ComponentStatus - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentStatus{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ComponentStatus{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceDownwardAPIVolumeFile(v []DownwardAPIVolumeFile, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFile, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []DownwardAPIVolumeFile{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]DownwardAPIVolumeFile, yyrl1) - } - } else { - yyv1 = make([]DownwardAPIVolumeFile, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DownwardAPIVolumeFile{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, DownwardAPIVolumeFile{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DownwardAPIVolumeFile{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, DownwardAPIVolumeFile{}) // var yyz1 DownwardAPIVolumeFile - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = DownwardAPIVolumeFile{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []DownwardAPIVolumeFile{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go deleted file mode 100644 index 7d534c8c7..000000000 --- a/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go +++ /dev/null @@ -1,4702 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! 
- -package v1 - -import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - types "k8s.io/apimachinery/pkg/types" - api "k8s.io/client-go/pkg/api" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, - Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, - Convert_v1_Affinity_To_api_Affinity, - Convert_api_Affinity_To_v1_Affinity, - Convert_v1_AttachedVolume_To_api_AttachedVolume, - Convert_api_AttachedVolume_To_v1_AttachedVolume, - Convert_v1_AvoidPods_To_api_AvoidPods, - Convert_api_AvoidPods_To_v1_AvoidPods, - Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource, - Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource, - Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource, - Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource, - Convert_v1_Binding_To_api_Binding, - Convert_api_Binding_To_v1_Binding, - Convert_v1_Capabilities_To_api_Capabilities, - Convert_api_Capabilities_To_v1_Capabilities, - Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource, - Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource, - Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource, - Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource, - Convert_v1_ComponentCondition_To_api_ComponentCondition, - Convert_api_ComponentCondition_To_v1_ComponentCondition, - Convert_v1_ComponentStatus_To_api_ComponentStatus, - Convert_api_ComponentStatus_To_v1_ComponentStatus, - Convert_v1_ComponentStatusList_To_api_ComponentStatusList, - Convert_api_ComponentStatusList_To_v1_ComponentStatusList, - Convert_v1_ConfigMap_To_api_ConfigMap, - Convert_api_ConfigMap_To_v1_ConfigMap, - Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource, - Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource, - Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector, - Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, - Convert_v1_ConfigMapList_To_api_ConfigMapList, - Convert_api_ConfigMapList_To_v1_ConfigMapList, - Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection, - Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection, - Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource, - Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, - Convert_v1_Container_To_api_Container, - Convert_api_Container_To_v1_Container, - Convert_v1_ContainerImage_To_api_ContainerImage, - Convert_api_ContainerImage_To_v1_ContainerImage, - Convert_v1_ContainerPort_To_api_ContainerPort, - Convert_api_ContainerPort_To_v1_ContainerPort, - Convert_v1_ContainerState_To_api_ContainerState, - Convert_api_ContainerState_To_v1_ContainerState, - Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning, - Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning, - Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated, - Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated, - Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting, - Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting, - Convert_v1_ContainerStatus_To_api_ContainerStatus, - 
Convert_api_ContainerStatus_To_v1_ContainerStatus, - Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint, - Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint, - Convert_v1_DeleteOptions_To_api_DeleteOptions, - Convert_api_DeleteOptions_To_v1_DeleteOptions, - Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection, - Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection, - Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile, - Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, - Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource, - Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource, - Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource, - Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, - Convert_v1_EndpointAddress_To_api_EndpointAddress, - Convert_api_EndpointAddress_To_v1_EndpointAddress, - Convert_v1_EndpointPort_To_api_EndpointPort, - Convert_api_EndpointPort_To_v1_EndpointPort, - Convert_v1_EndpointSubset_To_api_EndpointSubset, - Convert_api_EndpointSubset_To_v1_EndpointSubset, - Convert_v1_Endpoints_To_api_Endpoints, - Convert_api_Endpoints_To_v1_Endpoints, - Convert_v1_EndpointsList_To_api_EndpointsList, - Convert_api_EndpointsList_To_v1_EndpointsList, - Convert_v1_EnvFromSource_To_api_EnvFromSource, - Convert_api_EnvFromSource_To_v1_EnvFromSource, - Convert_v1_EnvVar_To_api_EnvVar, - Convert_api_EnvVar_To_v1_EnvVar, - Convert_v1_EnvVarSource_To_api_EnvVarSource, - Convert_api_EnvVarSource_To_v1_EnvVarSource, - Convert_v1_Event_To_api_Event, - Convert_api_Event_To_v1_Event, - Convert_v1_EventList_To_api_EventList, - Convert_api_EventList_To_v1_EventList, - Convert_v1_EventSource_To_api_EventSource, - Convert_api_EventSource_To_v1_EventSource, - Convert_v1_ExecAction_To_api_ExecAction, - Convert_api_ExecAction_To_v1_ExecAction, - Convert_v1_FCVolumeSource_To_api_FCVolumeSource, - Convert_api_FCVolumeSource_To_v1_FCVolumeSource, - Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource, - Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource, - Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource, - Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource, - Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource, - Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, - Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource, - Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, - Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource, - Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, - Convert_v1_HTTPGetAction_To_api_HTTPGetAction, - Convert_api_HTTPGetAction_To_v1_HTTPGetAction, - Convert_v1_HTTPHeader_To_api_HTTPHeader, - Convert_api_HTTPHeader_To_v1_HTTPHeader, - Convert_v1_Handler_To_api_Handler, - Convert_api_Handler_To_v1_Handler, - Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource, - Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource, - Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource, - Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, - Convert_v1_KeyToPath_To_api_KeyToPath, - Convert_api_KeyToPath_To_v1_KeyToPath, - Convert_v1_Lifecycle_To_api_Lifecycle, - Convert_api_Lifecycle_To_v1_Lifecycle, - Convert_v1_LimitRange_To_api_LimitRange, - Convert_api_LimitRange_To_v1_LimitRange, - Convert_v1_LimitRangeItem_To_api_LimitRangeItem, - Convert_api_LimitRangeItem_To_v1_LimitRangeItem, - Convert_v1_LimitRangeList_To_api_LimitRangeList, - Convert_api_LimitRangeList_To_v1_LimitRangeList, - 
Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec, - Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec, - Convert_v1_List_To_api_List, - Convert_api_List_To_v1_List, - Convert_v1_ListOptions_To_api_ListOptions, - Convert_api_ListOptions_To_v1_ListOptions, - Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress, - Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress, - Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus, - Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus, - Convert_v1_LocalObjectReference_To_api_LocalObjectReference, - Convert_api_LocalObjectReference_To_v1_LocalObjectReference, - Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource, - Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource, - Convert_v1_Namespace_To_api_Namespace, - Convert_api_Namespace_To_v1_Namespace, - Convert_v1_NamespaceList_To_api_NamespaceList, - Convert_api_NamespaceList_To_v1_NamespaceList, - Convert_v1_NamespaceSpec_To_api_NamespaceSpec, - Convert_api_NamespaceSpec_To_v1_NamespaceSpec, - Convert_v1_NamespaceStatus_To_api_NamespaceStatus, - Convert_api_NamespaceStatus_To_v1_NamespaceStatus, - Convert_v1_Node_To_api_Node, - Convert_api_Node_To_v1_Node, - Convert_v1_NodeAddress_To_api_NodeAddress, - Convert_api_NodeAddress_To_v1_NodeAddress, - Convert_v1_NodeAffinity_To_api_NodeAffinity, - Convert_api_NodeAffinity_To_v1_NodeAffinity, - Convert_v1_NodeCondition_To_api_NodeCondition, - Convert_api_NodeCondition_To_v1_NodeCondition, - Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints, - Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints, - Convert_v1_NodeList_To_api_NodeList, - Convert_api_NodeList_To_v1_NodeList, - Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions, - Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions, - Convert_v1_NodeResources_To_api_NodeResources, - Convert_api_NodeResources_To_v1_NodeResources, - Convert_v1_NodeSelector_To_api_NodeSelector, - Convert_api_NodeSelector_To_v1_NodeSelector, - Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement, - Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement, - Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm, - Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm, - Convert_v1_NodeSpec_To_api_NodeSpec, - Convert_api_NodeSpec_To_v1_NodeSpec, - Convert_v1_NodeStatus_To_api_NodeStatus, - Convert_api_NodeStatus_To_v1_NodeStatus, - Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo, - Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo, - Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector, - Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector, - Convert_v1_ObjectMeta_To_api_ObjectMeta, - Convert_api_ObjectMeta_To_v1_ObjectMeta, - Convert_v1_ObjectReference_To_api_ObjectReference, - Convert_api_ObjectReference_To_v1_ObjectReference, - Convert_v1_PersistentVolume_To_api_PersistentVolume, - Convert_api_PersistentVolume_To_v1_PersistentVolume, - Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim, - Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim, - Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList, - Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList, - Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec, - Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec, - Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus, - Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus, - 
Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource, - Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, - Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList, - Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList, - Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource, - Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource, - Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec, - Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec, - Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus, - Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, - Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource, - Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource, - Convert_v1_Pod_To_api_Pod, - Convert_api_Pod_To_v1_Pod, - Convert_v1_PodAffinity_To_api_PodAffinity, - Convert_api_PodAffinity_To_v1_PodAffinity, - Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm, - Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm, - Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity, - Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity, - Convert_v1_PodAttachOptions_To_api_PodAttachOptions, - Convert_api_PodAttachOptions_To_v1_PodAttachOptions, - Convert_v1_PodCondition_To_api_PodCondition, - Convert_api_PodCondition_To_v1_PodCondition, - Convert_v1_PodExecOptions_To_api_PodExecOptions, - Convert_api_PodExecOptions_To_v1_PodExecOptions, - Convert_v1_PodList_To_api_PodList, - Convert_api_PodList_To_v1_PodList, - Convert_v1_PodLogOptions_To_api_PodLogOptions, - Convert_api_PodLogOptions_To_v1_PodLogOptions, - Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions, - Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions, - Convert_v1_PodProxyOptions_To_api_PodProxyOptions, - Convert_api_PodProxyOptions_To_v1_PodProxyOptions, - Convert_v1_PodSecurityContext_To_api_PodSecurityContext, - Convert_api_PodSecurityContext_To_v1_PodSecurityContext, - Convert_v1_PodSignature_To_api_PodSignature, - Convert_api_PodSignature_To_v1_PodSignature, - Convert_v1_PodSpec_To_api_PodSpec, - Convert_api_PodSpec_To_v1_PodSpec, - Convert_v1_PodStatus_To_api_PodStatus, - Convert_api_PodStatus_To_v1_PodStatus, - Convert_v1_PodStatusResult_To_api_PodStatusResult, - Convert_api_PodStatusResult_To_v1_PodStatusResult, - Convert_v1_PodTemplate_To_api_PodTemplate, - Convert_api_PodTemplate_To_v1_PodTemplate, - Convert_v1_PodTemplateList_To_api_PodTemplateList, - Convert_api_PodTemplateList_To_v1_PodTemplateList, - Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec, - Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec, - Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource, - Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource, - Convert_v1_Preconditions_To_api_Preconditions, - Convert_api_Preconditions_To_v1_Preconditions, - Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry, - Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry, - Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm, - Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm, - Convert_v1_Probe_To_api_Probe, - Convert_api_Probe_To_v1_Probe, - Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource, - Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource, - Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource, - Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource, - 
Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource, - Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource, - Convert_v1_RangeAllocation_To_api_RangeAllocation, - Convert_api_RangeAllocation_To_v1_RangeAllocation, - Convert_v1_ReplicationController_To_api_ReplicationController, - Convert_api_ReplicationController_To_v1_ReplicationController, - Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition, - Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition, - Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList, - Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList, - Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, - Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, - Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus, - Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus, - Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector, - Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector, - Convert_v1_ResourceQuota_To_api_ResourceQuota, - Convert_api_ResourceQuota_To_v1_ResourceQuota, - Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList, - Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList, - Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec, - Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec, - Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus, - Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus, - Convert_v1_ResourceRequirements_To_api_ResourceRequirements, - Convert_api_ResourceRequirements_To_v1_ResourceRequirements, - Convert_v1_SELinuxOptions_To_api_SELinuxOptions, - Convert_api_SELinuxOptions_To_v1_SELinuxOptions, - Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource, - Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource, - Convert_v1_Secret_To_api_Secret, - Convert_api_Secret_To_v1_Secret, - Convert_v1_SecretEnvSource_To_api_SecretEnvSource, - Convert_api_SecretEnvSource_To_v1_SecretEnvSource, - Convert_v1_SecretKeySelector_To_api_SecretKeySelector, - Convert_api_SecretKeySelector_To_v1_SecretKeySelector, - Convert_v1_SecretList_To_api_SecretList, - Convert_api_SecretList_To_v1_SecretList, - Convert_v1_SecretProjection_To_api_SecretProjection, - Convert_api_SecretProjection_To_v1_SecretProjection, - Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource, - Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource, - Convert_v1_SecurityContext_To_api_SecurityContext, - Convert_api_SecurityContext_To_v1_SecurityContext, - Convert_v1_SerializedReference_To_api_SerializedReference, - Convert_api_SerializedReference_To_v1_SerializedReference, - Convert_v1_Service_To_api_Service, - Convert_api_Service_To_v1_Service, - Convert_v1_ServiceAccount_To_api_ServiceAccount, - Convert_api_ServiceAccount_To_v1_ServiceAccount, - Convert_v1_ServiceAccountList_To_api_ServiceAccountList, - Convert_api_ServiceAccountList_To_v1_ServiceAccountList, - Convert_v1_ServiceList_To_api_ServiceList, - Convert_api_ServiceList_To_v1_ServiceList, - Convert_v1_ServicePort_To_api_ServicePort, - Convert_api_ServicePort_To_v1_ServicePort, - Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions, - Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions, - Convert_v1_ServiceSpec_To_api_ServiceSpec, - Convert_api_ServiceSpec_To_v1_ServiceSpec, - Convert_v1_ServiceStatus_To_api_ServiceStatus, - Convert_api_ServiceStatus_To_v1_ServiceStatus, - Convert_v1_Sysctl_To_api_Sysctl, - 
Convert_api_Sysctl_To_v1_Sysctl, - Convert_v1_TCPSocketAction_To_api_TCPSocketAction, - Convert_api_TCPSocketAction_To_v1_TCPSocketAction, - Convert_v1_Taint_To_api_Taint, - Convert_api_Taint_To_v1_Taint, - Convert_v1_Toleration_To_api_Toleration, - Convert_api_Toleration_To_v1_Toleration, - Convert_v1_Volume_To_api_Volume, - Convert_api_Volume_To_v1_Volume, - Convert_v1_VolumeMount_To_api_VolumeMount, - Convert_api_VolumeMount_To_v1_VolumeMount, - Convert_v1_VolumeProjection_To_api_VolumeProjection, - Convert_api_VolumeProjection_To_v1_VolumeProjection, - Convert_v1_VolumeSource_To_api_VolumeSource, - Convert_api_VolumeSource_To_v1_VolumeSource, - Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource, - Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource, - Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm, - Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm, - ) -} - -func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in, out, s) -} - -func autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - return autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s) -} - -func autoConvert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s conversion.Scope) error { - out.NodeAffinity = (*api.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) - out.PodAffinity = (*api.PodAffinity)(unsafe.Pointer(in.PodAffinity)) - out.PodAntiAffinity = (*api.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) - return nil -} - -func Convert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s conversion.Scope) error { - return autoConvert_v1_Affinity_To_api_Affinity(in, out, s) -} - -func autoConvert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s conversion.Scope) error { - out.NodeAffinity = (*NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) - out.PodAffinity = (*PodAffinity)(unsafe.Pointer(in.PodAffinity)) - out.PodAntiAffinity = (*PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) - return nil -} - -func Convert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s conversion.Scope) error { - return autoConvert_api_Affinity_To_v1_Affinity(in, out, s) -} - -func autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in *AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error { - out.Name = api.UniqueVolumeName(in.Name) - out.DevicePath = in.DevicePath - return nil -} - -func 
Convert_v1_AttachedVolume_To_api_AttachedVolume(in *AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error { - return autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in, out, s) -} - -func autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *AttachedVolume, s conversion.Scope) error { - out.Name = UniqueVolumeName(in.Name) - out.DevicePath = in.DevicePath - return nil -} - -func Convert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *AttachedVolume, s conversion.Scope) error { - return autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in, out, s) -} - -func autoConvert_v1_AvoidPods_To_api_AvoidPods(in *AvoidPods, out *api.AvoidPods, s conversion.Scope) error { - out.PreferAvoidPods = *(*[]api.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) - return nil -} - -func Convert_v1_AvoidPods_To_api_AvoidPods(in *AvoidPods, out *api.AvoidPods, s conversion.Scope) error { - return autoConvert_v1_AvoidPods_To_api_AvoidPods(in, out, s) -} - -func autoConvert_api_AvoidPods_To_v1_AvoidPods(in *api.AvoidPods, out *AvoidPods, s conversion.Scope) error { - out.PreferAvoidPods = *(*[]PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) - return nil -} - -func Convert_api_AvoidPods_To_v1_AvoidPods(in *api.AvoidPods, out *AvoidPods, s conversion.Scope) error { - return autoConvert_api_AvoidPods_To_v1_AvoidPods(in, out, s) -} - -func autoConvert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in *AzureDiskVolumeSource, out *api.AzureDiskVolumeSource, s conversion.Scope) error { - out.DiskName = in.DiskName - out.DataDiskURI = in.DataDiskURI - out.CachingMode = (*api.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) - out.FSType = (*string)(unsafe.Pointer(in.FSType)) - out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) - return nil -} - -func Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in *AzureDiskVolumeSource, out *api.AzureDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in, out, s) -} - -func autoConvert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *api.AzureDiskVolumeSource, out *AzureDiskVolumeSource, s conversion.Scope) error { - out.DiskName = in.DiskName - out.DataDiskURI = in.DataDiskURI - out.CachingMode = (*AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) - out.FSType = (*string)(unsafe.Pointer(in.FSType)) - out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) - return nil -} - -func Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *api.AzureDiskVolumeSource, out *AzureDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in, out, s) -} - -func autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *AzureFileVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - return nil -} - 
-func Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *AzureFileVolumeSource, s conversion.Scope) error { - return autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s) -} - -func autoConvert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error { - return autoConvert_v1_Binding_To_api_Binding(in, out, s) -} - -func autoConvert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - return nil -} - -func Convert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversion.Scope) error { - return autoConvert_api_Binding_To_v1_Binding(in, out, s) -} - -func autoConvert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error { - out.Add = *(*[]api.Capability)(unsafe.Pointer(&in.Add)) - out.Drop = *(*[]api.Capability)(unsafe.Pointer(&in.Drop)) - return nil -} - -func Convert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error { - return autoConvert_v1_Capabilities_To_api_Capabilities(in, out, s) -} - -func autoConvert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capabilities, s conversion.Scope) error { - out.Add = *(*[]Capability)(unsafe.Pointer(&in.Add)) - out.Drop = *(*[]Capability)(unsafe.Pointer(&in.Drop)) - return nil -} - -func Convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capabilities, s conversion.Scope) error { - return autoConvert_api_Capabilities_To_v1_Capabilities(in, out, s) -} - -func autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { - out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { - return autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in, out, s) -} - -func autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error { - if in.Monitors == nil { - out.Monitors = make([]string, 0) - } else { - out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) - } - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error { - return autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s) -} - -func autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = 
in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { - return autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in, out, s) -} - -func autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error { - return autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s) -} - -func autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { - out.Type = api.ComponentConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.Message = in.Message - out.Error = in.Error - return nil -} - -func Convert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { - return autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in, out, s) -} - -func autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error { - out.Type = ComponentConditionType(in.Type) - out.Status = ConditionStatus(in.Status) - out.Message = in.Message - out.Error = in.Error - return nil -} - -func Convert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error { - return autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in, out, s) -} - -func autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Conditions = *(*[]api.ComponentCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -func Convert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { - return autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in, out, s) -} - -func autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *ComponentStatus, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Conditions = *(*[]ComponentCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -func Convert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *ComponentStatus, s conversion.Scope) error { - return autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in, out, s) -} - -func autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ComponentStatus)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { - return autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in, out, s) -} - -func autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]ComponentStatus, 0) - } else { - out.Items = 
*(*[]ComponentStatus)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error { - return autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in, out, s) -} - -func autoConvert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) - return nil -} - -func Convert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error { - return autoConvert_v1_ConfigMap_To_api_ConfigMap(in, out, s) -} - -func autoConvert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) - return nil -} - -func Convert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s conversion.Scope) error { - return autoConvert_api_ConfigMap_To_v1_ConfigMap(in, out, s) -} - -func autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -func Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { - return autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in, out, s) -} - -func autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *ConfigMapEnvSource, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -func Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *ConfigMapEnvSource, s conversion.Scope) error { - return autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in, out, s) -} - -func autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -func Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { - return autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in, out, s) -} - -func autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *ConfigMapKeySelector, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -func Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *ConfigMapKeySelector, s conversion.Scope) error { - return 
autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s) -} - -func autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ConfigMap)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { - return autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in, out, s) -} - -func autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]ConfigMap, 0) - } else { - out.Items = *(*[]ConfigMap)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error { - return autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in, out, s) -} - -func autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -func Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { - return autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in, out, s) -} - -func autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *ConfigMapProjection, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -func Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *ConfigMapProjection, s conversion.Scope) error { - return autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in, out, s) -} - -func autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -func Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in, out, s) -} - -func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = 
(*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -func Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error { - return autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) -} - -func autoConvert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error { - out.Name = in.Name - out.Image = in.Image - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) - out.WorkingDir = in.WorkingDir - out.Ports = *(*[]api.ContainerPort)(unsafe.Pointer(&in.Ports)) - out.EnvFrom = *(*[]api.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) - out.Env = *(*[]api.EnvVar)(unsafe.Pointer(&in.Env)) - if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeMounts = *(*[]api.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) - out.LivenessProbe = (*api.Probe)(unsafe.Pointer(in.LivenessProbe)) - out.ReadinessProbe = (*api.Probe)(unsafe.Pointer(in.ReadinessProbe)) - out.Lifecycle = (*api.Lifecycle)(unsafe.Pointer(in.Lifecycle)) - out.TerminationMessagePath = in.TerminationMessagePath - out.TerminationMessagePolicy = api.TerminationMessagePolicy(in.TerminationMessagePolicy) - out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy) - out.SecurityContext = (*api.SecurityContext)(unsafe.Pointer(in.SecurityContext)) - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY - return nil -} - -func Convert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error { - return autoConvert_v1_Container_To_api_Container(in, out, s) -} - -func autoConvert_api_Container_To_v1_Container(in *api.Container, out *Container, s conversion.Scope) error { - out.Name = in.Name - out.Image = in.Image - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) - out.WorkingDir = in.WorkingDir - out.Ports = *(*[]ContainerPort)(unsafe.Pointer(&in.Ports)) - out.EnvFrom = *(*[]EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) - out.Env = *(*[]EnvVar)(unsafe.Pointer(&in.Env)) - if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeMounts = *(*[]VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) - out.LivenessProbe = (*Probe)(unsafe.Pointer(in.LivenessProbe)) - out.ReadinessProbe = (*Probe)(unsafe.Pointer(in.ReadinessProbe)) - out.Lifecycle = (*Lifecycle)(unsafe.Pointer(in.Lifecycle)) - out.TerminationMessagePath = in.TerminationMessagePath - out.TerminationMessagePolicy = TerminationMessagePolicy(in.TerminationMessagePolicy) - out.ImagePullPolicy = PullPolicy(in.ImagePullPolicy) - out.SecurityContext = (*SecurityContext)(unsafe.Pointer(in.SecurityContext)) - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY - return nil -} - -func Convert_api_Container_To_v1_Container(in *api.Container, out *Container, s conversion.Scope) error { - return autoConvert_api_Container_To_v1_Container(in, out, s) -} - -func autoConvert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *api.ContainerImage, s conversion.Scope) error { - out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) - out.SizeBytes = in.SizeBytes - return nil -} - -func Convert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out 
*api.ContainerImage, s conversion.Scope) error { - return autoConvert_v1_ContainerImage_To_api_ContainerImage(in, out, s) -} - -func autoConvert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *ContainerImage, s conversion.Scope) error { - if in.Names == nil { - out.Names = make([]string, 0) - } else { - out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) - } - out.SizeBytes = in.SizeBytes - return nil -} - -func Convert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *ContainerImage, s conversion.Scope) error { - return autoConvert_api_ContainerImage_To_v1_ContainerImage(in, out, s) -} - -func autoConvert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error { - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = api.Protocol(in.Protocol) - out.HostIP = in.HostIP - return nil -} - -func Convert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error { - return autoConvert_v1_ContainerPort_To_api_ContainerPort(in, out, s) -} - -func autoConvert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *ContainerPort, s conversion.Scope) error { - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = Protocol(in.Protocol) - out.HostIP = in.HostIP - return nil -} - -func Convert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *ContainerPort, s conversion.Scope) error { - return autoConvert_api_ContainerPort_To_v1_ContainerPort(in, out, s) -} - -func autoConvert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { - out.Waiting = (*api.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) - out.Running = (*api.ContainerStateRunning)(unsafe.Pointer(in.Running)) - out.Terminated = (*api.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) - return nil -} - -func Convert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { - return autoConvert_v1_ContainerState_To_api_ContainerState(in, out, s) -} - -func autoConvert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error { - out.Waiting = (*ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) - out.Running = (*ContainerStateRunning)(unsafe.Pointer(in.Running)) - out.Terminated = (*ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) - return nil -} - -func Convert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error { - return autoConvert_api_ContainerState_To_v1_ContainerState(in, out, s) -} - -func autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { - out.StartedAt = in.StartedAt - return nil -} - -func Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { - return autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in, out, s) -} - -func autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *ContainerStateRunning, s conversion.Scope) error { - out.StartedAt = in.StartedAt - return nil -} - -func Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out 
*ContainerStateRunning, s conversion.Scope) error { - return autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s) -} - -func autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message - out.StartedAt = in.StartedAt - out.FinishedAt = in.FinishedAt - out.ContainerID = in.ContainerID - return nil -} - -func Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { - return autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in, out, s) -} - -func autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error { - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message - out.StartedAt = in.StartedAt - out.FinishedAt = in.FinishedAt - out.ContainerID = in.ContainerID - return nil -} - -func Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error { - return autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s) -} - -func autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { - return autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in, out, s) -} - -func autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *ContainerStateWaiting, s conversion.Scope) error { - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *ContainerStateWaiting, s conversion.Scope) error { - return autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s) -} - -func autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_v1_ContainerState_To_api_ContainerState(&in.State, &out.State, s); err != nil { - return err - } - if err := Convert_v1_ContainerState_To_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { - return err - } - out.Ready = in.Ready - out.RestartCount = in.RestartCount - out.Image = in.Image - out.ImageID = in.ImageID - out.ContainerID = in.ContainerID - return nil -} - -func Convert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { - return autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in, out, s) -} - -func autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *ContainerStatus, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_api_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil { - return err - } - if err := 
Convert_api_ContainerState_To_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { - return err - } - out.Ready = in.Ready - out.RestartCount = in.RestartCount - out.Image = in.Image - out.ImageID = in.ImageID - out.ContainerID = in.ContainerID - return nil -} - -func Convert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *ContainerStatus, s conversion.Scope) error { - return autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in, out, s) -} - -func autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { - out.Port = in.Port - return nil -} - -func Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { - return autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in, out, s) -} - -func autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error { - out.Port = in.Port - return nil -} - -func Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error { - return autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s) -} - -func autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { - out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) - out.Preconditions = (*api.Preconditions)(unsafe.Pointer(in.Preconditions)) - out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) - out.PropagationPolicy = (*api.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) - return nil -} - -func Convert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { - return autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in, out, s) -} - -func autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error { - out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) - out.Preconditions = (*Preconditions)(unsafe.Pointer(in.Preconditions)) - out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) - out.PropagationPolicy = (*DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) - return nil -} - -func Convert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error { - return autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in, out, s) -} - -func autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error { - out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error { - return autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in, out, s) -} - -func autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *DownwardAPIProjection, s conversion.Scope) error { - out.Items = *(*[]DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *DownwardAPIProjection, s conversion.Scope) error { - return autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in, out, s) -} - 
-func autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { - out.Path = in.Path - out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*api.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -func Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { - return autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in, out, s) -} - -func autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error { - out.Path = in.Path - out.FieldRef = (*ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -func Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error { - return autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) -} - -func autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { - out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - return nil -} - -func Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { - return autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in, out, s) -} - -func autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, s conversion.Scope) error { - out.Items = *(*[]DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - return nil -} - -func Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, s conversion.Scope) error { - return autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) -} - -func autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { - out.Medium = api.StorageMedium(in.Medium) - return nil -} - -func Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { - return autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in, out, s) -} - -func autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error { - out.Medium = StorageMedium(in.Medium) - return nil -} - -func Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error { - return autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s) -} - -func autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - 
out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) - out.TargetRef = (*api.ObjectReference)(unsafe.Pointer(in.TargetRef)) - return nil -} - -func Convert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { - return autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in, out, s) -} - -func autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *EndpointAddress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) - out.TargetRef = (*ObjectReference)(unsafe.Pointer(in.TargetRef)) - return nil -} - -func Convert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *EndpointAddress, s conversion.Scope) error { - return autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in, out, s) -} - -func autoConvert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error { - out.Name = in.Name - out.Port = in.Port - out.Protocol = api.Protocol(in.Protocol) - return nil -} - -func Convert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error { - return autoConvert_v1_EndpointPort_To_api_EndpointPort(in, out, s) -} - -func autoConvert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *EndpointPort, s conversion.Scope) error { - out.Name = in.Name - out.Port = in.Port - out.Protocol = Protocol(in.Protocol) - return nil -} - -func Convert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *EndpointPort, s conversion.Scope) error { - return autoConvert_api_EndpointPort_To_v1_EndpointPort(in, out, s) -} - -func autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { - out.Addresses = *(*[]api.EndpointAddress)(unsafe.Pointer(&in.Addresses)) - out.NotReadyAddresses = *(*[]api.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) - out.Ports = *(*[]api.EndpointPort)(unsafe.Pointer(&in.Ports)) - return nil -} - -func Convert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { - return autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in, out, s) -} - -func autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *EndpointSubset, s conversion.Scope) error { - out.Addresses = *(*[]EndpointAddress)(unsafe.Pointer(&in.Addresses)) - out.NotReadyAddresses = *(*[]EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) - out.Ports = *(*[]EndpointPort)(unsafe.Pointer(&in.Ports)) - return nil -} - -func Convert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *EndpointSubset, s conversion.Scope) error { - return autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in, out, s) -} - -func autoConvert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Subsets = *(*[]api.EndpointSubset)(unsafe.Pointer(&in.Subsets)) - return nil -} - -func Convert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error { - return autoConvert_v1_Endpoints_To_api_Endpoints(in, out, s) -} - -func autoConvert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if in.Subsets == nil { - out.Subsets = make([]EndpointSubset, 0) - } else { - out.Subsets = 
*(*[]EndpointSubset)(unsafe.Pointer(&in.Subsets)) - } - return nil -} - -func Convert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error { - return autoConvert_api_Endpoints_To_v1_Endpoints(in, out, s) -} - -func autoConvert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Endpoints)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error { - return autoConvert_v1_EndpointsList_To_api_EndpointsList(in, out, s) -} - -func autoConvert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]Endpoints, 0) - } else { - out.Items = *(*[]Endpoints)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error { - return autoConvert_api_EndpointsList_To_v1_EndpointsList(in, out, s) -} - -func autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in *EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { - out.Prefix = in.Prefix - out.ConfigMapRef = (*api.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) - out.SecretRef = (*api.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) - return nil -} - -func Convert_v1_EnvFromSource_To_api_EnvFromSource(in *EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { - return autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in, out, s) -} - -func autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *EnvFromSource, s conversion.Scope) error { - out.Prefix = in.Prefix - out.ConfigMapRef = (*ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) - out.SecretRef = (*SecretEnvSource)(unsafe.Pointer(in.SecretRef)) - return nil -} - -func Convert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *EnvFromSource, s conversion.Scope) error { - return autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in, out, s) -} - -func autoConvert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - out.ValueFrom = (*api.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) - return nil -} - -func Convert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error { - return autoConvert_v1_EnvVar_To_api_EnvVar(in, out, s) -} - -func autoConvert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - out.ValueFrom = (*EnvVarSource)(unsafe.Pointer(in.ValueFrom)) - return nil -} - -func Convert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.Scope) error { - return autoConvert_api_EnvVar_To_v1_EnvVar(in, out, s) -} - -func autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { - out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*api.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.ConfigMapKeyRef = (*api.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) - out.SecretKeyRef = (*api.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) - return nil -} - -func Convert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s 
conversion.Scope) error { - return autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in, out, s) -} - -func autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvVarSource, s conversion.Scope) error { - out.FieldRef = (*ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.ConfigMapKeyRef = (*ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) - out.SecretKeyRef = (*SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) - return nil -} - -func Convert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvVarSource, s conversion.Scope) error { - return autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in, out, s) -} - -func autoConvert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - if err := Convert_v1_EventSource_To_api_EventSource(&in.Source, &out.Source, s); err != nil { - return err - } - out.FirstTimestamp = in.FirstTimestamp - out.LastTimestamp = in.LastTimestamp - out.Count = in.Count - out.Type = in.Type - return nil -} - -func Convert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error { - return autoConvert_v1_Event_To_api_Event(in, out, s) -} - -func autoConvert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - if err := Convert_api_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil { - return err - } - out.FirstTimestamp = in.FirstTimestamp - out.LastTimestamp = in.LastTimestamp - out.Count = in.Count - out.Type = in.Type - return nil -} - -func Convert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { - return autoConvert_api_Event_To_v1_Event(in, out, s) -} - -func autoConvert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Event)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error { - return autoConvert_v1_EventList_To_api_EventList(in, out, s) -} - -func autoConvert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]Event, 0) - } else { - out.Items = *(*[]Event)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error { - return autoConvert_api_EventList_To_v1_EventList(in, out, s) -} - -func autoConvert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error { - out.Component = in.Component - out.Host = in.Host - return nil -} - -func Convert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error { - return autoConvert_v1_EventSource_To_api_EventSource(in, out, s) -} - -func autoConvert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *EventSource, s conversion.Scope) error { - out.Component = 
in.Component - out.Host = in.Host - return nil -} - -func Convert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *EventSource, s conversion.Scope) error { - return autoConvert_api_EventSource_To_v1_EventSource(in, out, s) -} - -func autoConvert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error { - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - return nil -} - -func Convert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error { - return autoConvert_v1_ExecAction_To_api_ExecAction(in, out, s) -} - -func autoConvert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction, s conversion.Scope) error { - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - return nil -} - -func Convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction, s conversion.Scope) error { - return autoConvert_api_ExecAction_To_v1_ExecAction(in, out, s) -} - -func autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { - out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) - out.Lun = (*int32)(unsafe.Pointer(in.Lun)) - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in, out, s) -} - -func autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error { - if in.TargetWWNs == nil { - out.TargetWWNs = make([]string, 0) - } else { - out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) - } - out.Lun = (*int32)(unsafe.Pointer(in.Lun)) - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error { - return autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) -} - -func autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { - out.Driver = in.Driver - out.FSType = in.FSType - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) - return nil -} - -func Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in, out, s) -} - -func autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *FlexVolumeSource, s conversion.Scope) error { - out.Driver = in.Driver - out.FSType = in.FSType - out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) - return nil -} - -func Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *FlexVolumeSource, s conversion.Scope) error { - return autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) -} - -func autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { - out.DatasetName = in.DatasetName - out.DatasetUUID = in.DatasetUUID - return nil -} - -func 
Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in, out, s) -} - -func autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *FlockerVolumeSource, s conversion.Scope) error { - out.DatasetName = in.DatasetName - out.DatasetUUID = in.DatasetUUID - return nil -} - -func Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *FlockerVolumeSource, s conversion.Scope) error { - return autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s) -} - -func autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, s conversion.Scope) error { - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory - return nil -} - -func Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in, out, s) -} - -func autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *GitRepoVolumeSource, s conversion.Scope) error { - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory - return nil -} - -func Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *GitRepoVolumeSource, s conversion.Scope) error { - return autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) -} - -func autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in, out, s) -} - -func autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out 
*GlusterfsVolumeSource, s conversion.Scope) error { - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *GlusterfsVolumeSource, s conversion.Scope) error { - return autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) -} - -func autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { - out.Path = in.Path - out.Port = in.Port - out.Host = in.Host - out.Scheme = api.URIScheme(in.Scheme) - out.HTTPHeaders = *(*[]api.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) - return nil -} - -func Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { - return autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in, out, s) -} - -func autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *HTTPGetAction, s conversion.Scope) error { - out.Path = in.Path - out.Port = in.Port - out.Host = in.Host - out.Scheme = URIScheme(in.Scheme) - out.HTTPHeaders = *(*[]HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) - return nil -} - -func Convert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *HTTPGetAction, s conversion.Scope) error { - return autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) -} - -func autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -func Convert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { - return autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in, out, s) -} - -func autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *HTTPHeader, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -func Convert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *HTTPHeader, s conversion.Scope) error { - return autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in, out, s) -} - -func autoConvert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error { - out.Exec = (*api.ExecAction)(unsafe.Pointer(in.Exec)) - out.HTTPGet = (*api.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) - out.TCPSocket = (*api.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) - return nil -} - -func Convert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error { - return autoConvert_v1_Handler_To_api_Handler(in, out, s) -} - -func autoConvert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversion.Scope) error { - out.Exec = (*ExecAction)(unsafe.Pointer(in.Exec)) - out.HTTPGet = (*HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) - out.TCPSocket = (*TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) - return nil -} - -func Convert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversion.Scope) error { - return autoConvert_api_Handler_To_v1_Handler(in, out, s) -} - -func autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -func Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { - return autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in, out, s) -} - -func 
autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *HostPathVolumeSource, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -func Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *HostPathVolumeSource, s conversion.Scope) error { - return autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) -} - -func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) - return nil -} - -func Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in, out, s) -} - -func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error { - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) - return nil -} - -func Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error { - return autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) -} - -func autoConvert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error { - out.Key = in.Key - out.Path = in.Path - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -func Convert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error { - return autoConvert_v1_KeyToPath_To_api_KeyToPath(in, out, s) -} - -func autoConvert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *KeyToPath, s conversion.Scope) error { - out.Key = in.Key - out.Path = in.Path - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -func Convert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *KeyToPath, s conversion.Scope) error { - return autoConvert_api_KeyToPath_To_v1_KeyToPath(in, out, s) -} - -func autoConvert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error { - out.PostStart = (*api.Handler)(unsafe.Pointer(in.PostStart)) - out.PreStop = (*api.Handler)(unsafe.Pointer(in.PreStop)) - return nil -} - -func Convert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error { - return autoConvert_v1_Lifecycle_To_api_Lifecycle(in, out, s) -} - -func autoConvert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s conversion.Scope) error { - out.PostStart = (*Handler)(unsafe.Pointer(in.PostStart)) - out.PreStop = (*Handler)(unsafe.Pointer(in.PreStop)) - return nil -} - -func Convert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s conversion.Scope) error { - return autoConvert_api_Lifecycle_To_v1_Lifecycle(in, out, s) -} - -func autoConvert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil 
-} - -func Convert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { - return autoConvert_v1_LimitRange_To_api_LimitRange(in, out, s) -} - -func autoConvert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange, s conversion.Scope) error { - return autoConvert_api_LimitRange_To_v1_LimitRange(in, out, s) -} - -func autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { - out.Type = api.LimitType(in.Type) - out.Max = *(*api.ResourceList)(unsafe.Pointer(&in.Max)) - out.Min = *(*api.ResourceList)(unsafe.Pointer(&in.Min)) - out.Default = *(*api.ResourceList)(unsafe.Pointer(&in.Default)) - out.DefaultRequest = *(*api.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) - out.MaxLimitRequestRatio = *(*api.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) - return nil -} - -func Convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { - return autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in, out, s) -} - -func autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *LimitRangeItem, s conversion.Scope) error { - out.Type = LimitType(in.Type) - out.Max = *(*ResourceList)(unsafe.Pointer(&in.Max)) - out.Min = *(*ResourceList)(unsafe.Pointer(&in.Min)) - out.Default = *(*ResourceList)(unsafe.Pointer(&in.Default)) - out.DefaultRequest = *(*ResourceList)(unsafe.Pointer(&in.DefaultRequest)) - out.MaxLimitRequestRatio = *(*ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) - return nil -} - -func Convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *LimitRangeItem, s conversion.Scope) error { - return autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in, out, s) -} - -func autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.LimitRange)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { - return autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in, out, s) -} - -func autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]LimitRange, 0) - } else { - out.Items = *(*[]LimitRange)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error { - return autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in, out, s) -} - -func autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { - out.Limits = *(*[]api.LimitRangeItem)(unsafe.Pointer(&in.Limits)) - return nil -} - -func Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { - return autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in, out, s) -} - -func autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in 
*api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error { - if in.Limits == nil { - out.Limits = make([]LimitRangeItem, 0) - } else { - out.Limits = *(*[]LimitRangeItem)(unsafe.Pointer(&in.Limits)) - } - return nil -} - -func Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error { - return autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s) -} - -func autoConvert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.Object, len(*in)) - for i := range *in { - if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error { - return autoConvert_v1_List_To_api_List(in, out, s) -} - -func autoConvert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.RawExtension, len(*in)) - for i := range *in { - if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]runtime.RawExtension, 0) - } - return nil -} - -func Convert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) error { - return autoConvert_api_List_To_v1_List(in, out, s) -} - -func autoConvert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { - if err := meta_v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - if err := meta_v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) - return nil -} - -func Convert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { - return autoConvert_v1_ListOptions_To_api_ListOptions(in, out, s) -} - -func autoConvert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error { - if err := meta_v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - if err := meta_v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) - return nil -} - -func Convert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error { - return autoConvert_api_ListOptions_To_v1_ListOptions(in, out, s) -} - -func autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - return nil -} - -func Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { - return autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in, out, s) -} - -func autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in 
*api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - return nil -} - -func Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error { - return autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s) -} - -func autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { - out.Ingress = *(*[]api.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) - return nil -} - -func Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { - return autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in, out, s) -} - -func autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error { - out.Ingress = *(*[]LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) - return nil -} - -func Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error { - return autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) -} - -func autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { - out.Name = in.Name - return nil -} - -func Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { - return autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in, out, s) -} - -func autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error { - out.Name = in.Name - return nil -} - -func Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error { - return autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) -} - -func autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { - return autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in, out, s) -} - -func autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *NFSVolumeSource, s conversion.Scope) error { - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *NFSVolumeSource, s conversion.Scope) error { - return autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) -} - -func autoConvert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_NamespaceSpec_To_api_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_NamespaceStatus_To_api_NamespaceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s 
conversion.Scope) error { - return autoConvert_v1_Namespace_To_api_Namespace(in, out, s) -} - -func autoConvert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_NamespaceStatus_To_v1_NamespaceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s conversion.Scope) error { - return autoConvert_api_Namespace_To_v1_Namespace(in, out, s) -} - -func autoConvert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Namespace)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error { - return autoConvert_v1_NamespaceList_To_api_NamespaceList(in, out, s) -} - -func autoConvert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]Namespace, 0) - } else { - out.Items = *(*[]Namespace)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { - return autoConvert_api_NamespaceList_To_v1_NamespaceList(in, out, s) -} - -func autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { - out.Finalizers = *(*[]api.FinalizerName)(unsafe.Pointer(&in.Finalizers)) - return nil -} - -func Convert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { - return autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in, out, s) -} - -func autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error { - out.Finalizers = *(*[]FinalizerName)(unsafe.Pointer(&in.Finalizers)) - return nil -} - -func Convert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error { - return autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in, out, s) -} - -func autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { - out.Phase = api.NamespacePhase(in.Phase) - return nil -} - -func Convert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { - return autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in, out, s) -} - -func autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *NamespaceStatus, s conversion.Scope) error { - out.Phase = NamespacePhase(in.Phase) - return nil -} - -func Convert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *NamespaceStatus, s conversion.Scope) error { - return autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in, out, s) -} - -func autoConvert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_NodeSpec_To_api_NodeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_NodeStatus_To_api_NodeStatus(&in.Status, &out.Status, s); 
err != nil { - return err - } - return nil -} - -func Convert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { - return autoConvert_v1_Node_To_api_Node(in, out, s) -} - -func autoConvert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_NodeStatus_To_v1_NodeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) error { - return autoConvert_api_Node_To_v1_Node(in, out, s) -} - -func autoConvert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error { - out.Type = api.NodeAddressType(in.Type) - out.Address = in.Address - return nil -} - -func Convert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error { - return autoConvert_v1_NodeAddress_To_api_NodeAddress(in, out, s) -} - -func autoConvert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *NodeAddress, s conversion.Scope) error { - out.Type = NodeAddressType(in.Type) - out.Address = in.Address - return nil -} - -func Convert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *NodeAddress, s conversion.Scope) error { - return autoConvert_api_NodeAddress_To_v1_NodeAddress(in, out, s) -} - -func autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in *NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = (*api.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -func Convert_v1_NodeAffinity_To_api_NodeAffinity(in *NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { - return autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in, out, s) -} - -func autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *NodeAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = (*NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -func Convert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *NodeAffinity, s conversion.Scope) error { - return autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in, out, s) -} - -func autoConvert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error { - out.Type = api.NodeConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastHeartbeatTime = in.LastHeartbeatTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error { - return autoConvert_v1_NodeCondition_To_api_NodeCondition(in, out, s) -} - -func autoConvert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *NodeCondition, s conversion.Scope) error { - out.Type = NodeConditionType(in.Type) - out.Status = ConditionStatus(in.Status) - out.LastHeartbeatTime = 
in.LastHeartbeatTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *NodeCondition, s conversion.Scope) error { - return autoConvert_api_NodeCondition_To_v1_NodeCondition(in, out, s) -} - -func autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { - if err := Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { - return err - } - return nil -} - -func Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { - return autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in, out, s) -} - -func autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *NodeDaemonEndpoints, s conversion.Scope) error { - if err := Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { - return err - } - return nil -} - -func Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *NodeDaemonEndpoints, s conversion.Scope) error { - return autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s) -} - -func autoConvert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Node)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error { - return autoConvert_v1_NodeList_To_api_NodeList(in, out, s) -} - -func autoConvert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]Node, 0) - } else { - out.Items = *(*[]Node)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error { - return autoConvert_api_NodeList_To_v1_NodeList(in, out, s) -} - -func autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -func Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { - return autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in, out, s) -} - -func autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *NodeProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -func Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *NodeProxyOptions, s conversion.Scope) error { - return autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) -} - -func autoConvert_v1_NodeResources_To_api_NodeResources(in *NodeResources, out *api.NodeResources, s conversion.Scope) error { - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - return nil -} - -func Convert_v1_NodeResources_To_api_NodeResources(in *NodeResources, out *api.NodeResources, s conversion.Scope) error { - return autoConvert_v1_NodeResources_To_api_NodeResources(in, out, s) -} - -func autoConvert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *NodeResources, s 
conversion.Scope) error { - out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) - return nil -} - -func Convert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *NodeResources, s conversion.Scope) error { - return autoConvert_api_NodeResources_To_v1_NodeResources(in, out, s) -} - -func autoConvert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.NodeSelector, s conversion.Scope) error { - out.NodeSelectorTerms = *(*[]api.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) - return nil -} - -func Convert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.NodeSelector, s conversion.Scope) error { - return autoConvert_v1_NodeSelector_To_api_NodeSelector(in, out, s) -} - -func autoConvert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error { - if in.NodeSelectorTerms == nil { - out.NodeSelectorTerms = make([]NodeSelectorTerm, 0) - } else { - out.NodeSelectorTerms = *(*[]NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) - } - return nil -} - -func Convert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error { - return autoConvert_api_NodeSelector_To_v1_NodeSelector(in, out, s) -} - -func autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { - out.Key = in.Key - out.Operator = api.NodeSelectorOperator(in.Operator) - out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) - return nil -} - -func Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { - return autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in, out, s) -} - -func autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *NodeSelectorRequirement, s conversion.Scope) error { - out.Key = in.Key - out.Operator = NodeSelectorOperator(in.Operator) - out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) - return nil -} - -func Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *NodeSelectorRequirement, s conversion.Scope) error { - return autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s) -} - -func autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { - out.MatchExpressions = *(*[]api.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) - return nil -} - -func Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { - return autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in, out, s) -} - -func autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error { - if in.MatchExpressions == nil { - out.MatchExpressions = make([]NodeSelectorRequirement, 0) - } else { - out.MatchExpressions = *(*[]NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) - } - return nil -} - -func Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error { - return autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s) -} - -func autoConvert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error 
{ - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable - out.Taints = *(*[]api.Taint)(unsafe.Pointer(&in.Taints)) - return nil -} - -func Convert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error { - return autoConvert_v1_NodeSpec_To_api_NodeSpec(in, out, s) -} - -func autoConvert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error { - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable - out.Taints = *(*[]Taint)(unsafe.Pointer(&in.Taints)) - return nil -} - -func Convert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error { - return autoConvert_api_NodeSpec_To_v1_NodeSpec(in, out, s) -} - -func autoConvert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error { - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Allocatable = *(*api.ResourceList)(unsafe.Pointer(&in.Allocatable)) - out.Phase = api.NodePhase(in.Phase) - out.Conditions = *(*[]api.NodeCondition)(unsafe.Pointer(&in.Conditions)) - out.Addresses = *(*[]api.NodeAddress)(unsafe.Pointer(&in.Addresses)) - if err := Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { - return err - } - if err := Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { - return err - } - out.Images = *(*[]api.ContainerImage)(unsafe.Pointer(&in.Images)) - out.VolumesInUse = *(*[]api.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) - out.VolumesAttached = *(*[]api.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) - return nil -} - -func Convert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error { - return autoConvert_v1_NodeStatus_To_api_NodeStatus(in, out, s) -} - -func autoConvert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error { - out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Allocatable = *(*ResourceList)(unsafe.Pointer(&in.Allocatable)) - out.Phase = NodePhase(in.Phase) - out.Conditions = *(*[]NodeCondition)(unsafe.Pointer(&in.Conditions)) - out.Addresses = *(*[]NodeAddress)(unsafe.Pointer(&in.Addresses)) - if err := Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { - return err - } - if err := Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { - return err - } - out.Images = *(*[]ContainerImage)(unsafe.Pointer(&in.Images)) - out.VolumesInUse = *(*[]UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) - out.VolumesAttached = *(*[]AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) - return nil -} - -func Convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error { - return autoConvert_api_NodeStatus_To_v1_NodeStatus(in, out, s) -} - -func autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { - out.MachineID = in.MachineID - out.SystemUUID = in.SystemUUID - out.BootID = in.BootID - out.KernelVersion = in.KernelVersion - out.OSImage = in.OSImage - out.ContainerRuntimeVersion = in.ContainerRuntimeVersion - out.KubeletVersion = in.KubeletVersion - out.KubeProxyVersion = in.KubeProxyVersion - 
out.OperatingSystem = in.OperatingSystem - out.Architecture = in.Architecture - return nil -} - -func Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { - return autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in, out, s) -} - -func autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error { - out.MachineID = in.MachineID - out.SystemUUID = in.SystemUUID - out.BootID = in.BootID - out.KernelVersion = in.KernelVersion - out.OSImage = in.OSImage - out.ContainerRuntimeVersion = in.ContainerRuntimeVersion - out.KubeletVersion = in.KubeletVersion - out.KubeProxyVersion = in.KubeProxyVersion - out.OperatingSystem = in.OperatingSystem - out.Architecture = in.Architecture - return nil -} - -func Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error { - return autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s) -} - -func autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -func Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { - return autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in, out, s) -} - -func autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error { - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -func Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error { - return autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) -} - -func autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = types.UID(in.UID) - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - out.CreationTimestamp = in.CreationTimestamp - out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) - out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) - out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) - out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) - out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) - out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) - out.ClusterName = in.ClusterName - return nil -} - -func Convert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { - return autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in, out, s) -} - -func autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error { - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = types.UID(in.UID) - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - out.CreationTimestamp = in.CreationTimestamp - out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) - out.DeletionGracePeriodSeconds 
= (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) - out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) - out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) - out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) - out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) - out.ClusterName = in.ClusterName - return nil -} - -func Convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error { - return autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in, out, s) -} - -func autoConvert_v1_ObjectReference_To_api_ObjectReference(in *ObjectReference, out *api.ObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Namespace = in.Namespace - out.Name = in.Name - out.UID = types.UID(in.UID) - out.APIVersion = in.APIVersion - out.ResourceVersion = in.ResourceVersion - out.FieldPath = in.FieldPath - return nil -} - -func Convert_v1_ObjectReference_To_api_ObjectReference(in *ObjectReference, out *api.ObjectReference, s conversion.Scope) error { - return autoConvert_v1_ObjectReference_To_api_ObjectReference(in, out, s) -} - -func autoConvert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Namespace = in.Namespace - out.Name = in.Name - out.UID = types.UID(in.UID) - out.APIVersion = in.APIVersion - out.ResourceVersion = in.ResourceVersion - out.FieldPath = in.FieldPath - return nil -} - -func Convert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error { - return autoConvert_api_ObjectReference_To_v1_ObjectReference(in, out, s) -} - -func autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { - return autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in, out, s) -} - -func autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error { - return autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return 
nil -} - -func Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]PersistentVolumeClaim, 0) - } else { - out.Items = *(*[]PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { - out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeName = in.VolumeName - out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) - return nil -} - -func Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error { - out.AccessModes = *(*[]PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - 
out.VolumeName = in.VolumeName - out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) - return nil -} - -func Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { - out.Phase = api.PersistentVolumeClaimPhase(in.Phase) - out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - return nil -} - -func Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error { - out.Phase = PersistentVolumeClaimPhase(in.Phase) - out.AccessModes = *(*[]PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) - return nil -} - -func Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) -} - -func autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.PersistentVolume, len(*in)) - for i := range *in { - if err := Convert_v1_PersistentVolume_To_api_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in 
*PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in, out, s) -} - -func autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PersistentVolume, len(*in)) - for i := range *in { - if err := Convert_api_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]PersistentVolume, 0) - } - return nil -} - -func Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s) -} - -func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { - out.GCEPersistentDisk = (*api.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*api.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.Glusterfs = (*api.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.NFS = (*api.NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.RBD = (*api.RBDVolumeSource)(unsafe.Pointer(in.RBD)) - out.ISCSI = (*api.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.Cinder = (*api.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*api.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) - out.FC = (*api.FCVolumeSource)(unsafe.Pointer(in.FC)) - out.Flocker = (*api.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.FlexVolume = (*api.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.AzureFile = (*api.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.VsphereVolume = (*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) - return nil -} - -func Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in, out, s) -} - -func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error { - out.GCEPersistentDisk = (*GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.HostPath = (*HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.Glusterfs = (*GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.NFS = (*NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.RBD = (*RBDVolumeSource)(unsafe.Pointer(in.RBD)) - out.Quobyte = (*QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.ISCSI = 
(*ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.FlexVolume = (*FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.Cinder = (*CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) - out.FC = (*FCVolumeSource)(unsafe.Pointer(in.FC)) - out.Flocker = (*FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.AzureFile = (*AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.VsphereVolume = (*VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.AzureDisk = (*AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.PortworxVolume = (*PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) - return nil -} - -func Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - if err := Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { - return err - } - out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.ClaimRef = (*api.ObjectReference)(unsafe.Pointer(in.ClaimRef)) - out.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) - out.StorageClassName = in.StorageClassName - return nil -} - -func Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in, out, s) -} - -func autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error { - out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) - if err := Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { - return err - } - out.AccessModes = *(*[]PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.ClaimRef = (*ObjectReference)(unsafe.Pointer(in.ClaimRef)) - out.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) - out.StorageClassName = in.StorageClassName - return nil -} - -func Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s) -} - -func autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { - out.Phase = api.PersistentVolumePhase(in.Phase) - out.Message = in.Message - out.Reason = in.Reason - return nil -} - -func Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in, 
out, s) -} - -func autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error { - out.Phase = PersistentVolumePhase(in.Phase) - out.Message = in.Message - out.Reason = in.Reason - return nil -} - -func Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s) -} - -func autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - out.PdID = in.PdID - out.FSType = in.FSType - return nil -} - -func Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - out.PdID = in.PdID - out.FSType = in.FSType - return nil -} - -func Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func autoConvert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]api.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -func Convert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error { - return autoConvert_v1_PodAffinity_To_api_PodAffinity(in, out, s) -} - -func autoConvert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -func Convert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out 
*PodAffinity, s conversion.Scope) error { - return autoConvert_api_PodAffinity_To_v1_PodAffinity(in, out, s) -} - -func autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { - out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) - out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) - out.TopologyKey = in.TopologyKey - return nil -} - -func Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { - return autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in, out, s) -} - -func autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error { - out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) - out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) - out.TopologyKey = in.TopologyKey - return nil -} - -func Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error { - return autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s) -} - -func autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]api.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -func Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { - return autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in, out, s) -} - -func autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -func Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error { - return autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s) -} - -func autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - return nil -} - -func Convert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { - return autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in, out, s) -} - -func autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - return nil -} - -func Convert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { - return autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) -} - -func 
autoConvert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodCondition, s conversion.Scope) error { - out.Type = api.PodConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodCondition, s conversion.Scope) error { - return autoConvert_v1_PodCondition_To_api_PodCondition(in, out, s) -} - -func autoConvert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { - out.Type = PodConditionType(in.Type) - out.Status = ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { - return autoConvert_api_PodCondition_To_v1_PodCondition(in, out, s) -} - -func autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in *PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - return nil -} - -func Convert_v1_PodExecOptions_To_api_PodExecOptions(in *PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { - return autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in, out, s) -} - -func autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - if in.Command == nil { - out.Command = make([]string, 0) - } else { - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - } - return nil -} - -func Convert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error { - return autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in, out, s) -} - -func autoConvert_v1_PodList_To_api_PodList(in *PodList, out *api.PodList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Pod, len(*in)) - for i := range *in { - if err := Convert_v1_Pod_To_api_Pod(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_PodList_To_api_PodList(in *PodList, out *api.PodList, s conversion.Scope) error { - return autoConvert_v1_PodList_To_api_PodList(in, out, s) -} - -func autoConvert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Pod, len(*in)) - for i := range *in { - if err := Convert_api_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]Pod, 0) - } - return nil -} - -func Convert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error { - return autoConvert_api_PodList_To_v1_PodList(in, out, s) -} - -func autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { - out.Container = in.Container - 
out.Follow = in.Follow - out.Previous = in.Previous - out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) - out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) - out.Timestamps = in.Timestamps - out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) - out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) - return nil -} - -func Convert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { - return autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in, out, s) -} - -func autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error { - out.Container = in.Container - out.Follow = in.Follow - out.Previous = in.Previous - out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) - out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) - out.Timestamps = in.Timestamps - out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) - out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) - return nil -} - -func Convert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error { - return autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in, out, s) -} - -func autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { - out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) - return nil -} - -func Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { - return autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in, out, s) -} - -func autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out *PodPortForwardOptions, s conversion.Scope) error { - out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) - return nil -} - -func Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out *PodPortForwardOptions, s conversion.Scope) error { - return autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in, out, s) -} - -func autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -func Convert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { - return autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in, out, s) -} - -func autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -func Convert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error { - return autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in, out, s) -} - -func autoConvert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { - out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) - out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) - return nil -} - -func autoConvert_api_PodSecurityContext_To_v1_PodSecurityContext(in 
*api.PodSecurityContext, out *PodSecurityContext, s conversion.Scope) error { - // INFO: in.HostNetwork opted out of conversion generation - // INFO: in.HostPID opted out of conversion generation - // INFO: in.HostIPC opted out of conversion generation - out.SELinuxOptions = (*SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) - out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) - return nil -} - -func autoConvert_v1_PodSignature_To_api_PodSignature(in *PodSignature, out *api.PodSignature, s conversion.Scope) error { - out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) - return nil -} - -func Convert_v1_PodSignature_To_api_PodSignature(in *PodSignature, out *api.PodSignature, s conversion.Scope) error { - return autoConvert_v1_PodSignature_To_api_PodSignature(in, out, s) -} - -func autoConvert_api_PodSignature_To_v1_PodSignature(in *api.PodSignature, out *PodSignature, s conversion.Scope) error { - out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) - return nil -} - -func Convert_api_PodSignature_To_v1_PodSignature(in *api.PodSignature, out *PodSignature, s conversion.Scope) error { - return autoConvert_api_PodSignature_To_v1_PodSignature(in, out, s) -} - -func autoConvert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversion.Scope) error { - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]api.Volume, len(*in)) - for i := range *in { - if err := Convert_v1_Volume_To_api_Volume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - out.InitContainers = *(*[]api.Container)(unsafe.Pointer(&in.InitContainers)) - out.Containers = *(*[]api.Container)(unsafe.Pointer(&in.Containers)) - out.RestartPolicy = api.RestartPolicy(in.RestartPolicy) - out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) - out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) - out.DNSPolicy = api.DNSPolicy(in.DNSPolicy) - out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) - out.ServiceAccountName = in.ServiceAccountName - // INFO: in.DeprecatedServiceAccount opted out of conversion generation - out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) - out.NodeName = in.NodeName - // INFO: in.HostNetwork opted out of conversion generation - // INFO: in.HostPID opted out of conversion generation - // INFO: in.HostIPC opted out of conversion generation - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(api.PodSecurityContext) - if err := Convert_v1_PodSecurityContext_To_api_PodSecurityContext(*in, *out, s); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) - out.Hostname = in.Hostname - out.Subdomain = in.Subdomain - out.Affinity = (*api.Affinity)(unsafe.Pointer(in.Affinity)) - out.SchedulerName = in.SchedulerName - out.Tolerations = *(*[]api.Toleration)(unsafe.Pointer(&in.Tolerations)) - return nil -} - -func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error { - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]Volume, 
len(*in)) - for i := range *in { - if err := Convert_api_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - out.InitContainers = *(*[]Container)(unsafe.Pointer(&in.InitContainers)) - if in.Containers == nil { - out.Containers = make([]Container, 0) - } else { - out.Containers = *(*[]Container)(unsafe.Pointer(&in.Containers)) - } - out.RestartPolicy = RestartPolicy(in.RestartPolicy) - out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) - out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) - out.DNSPolicy = DNSPolicy(in.DNSPolicy) - out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) - out.ServiceAccountName = in.ServiceAccountName - out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) - out.NodeName = in.NodeName - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(PodSecurityContext) - if err := Convert_api_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.ImagePullSecrets = *(*[]LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) - out.Hostname = in.Hostname - out.Subdomain = in.Subdomain - out.Affinity = (*Affinity)(unsafe.Pointer(in.Affinity)) - out.SchedulerName = in.SchedulerName - out.Tolerations = *(*[]Toleration)(unsafe.Pointer(&in.Tolerations)) - return nil -} - -func autoConvert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s conversion.Scope) error { - out.Phase = api.PodPhase(in.Phase) - out.Conditions = *(*[]api.PodCondition)(unsafe.Pointer(&in.Conditions)) - out.Message = in.Message - out.Reason = in.Reason - out.HostIP = in.HostIP - out.PodIP = in.PodIP - out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) - out.InitContainerStatuses = *(*[]api.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) - out.ContainerStatuses = *(*[]api.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) - out.QOSClass = api.PodQOSClass(in.QOSClass) - return nil -} - -func Convert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s conversion.Scope) error { - return autoConvert_v1_PodStatus_To_api_PodStatus(in, out, s) -} - -func autoConvert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error { - out.Phase = PodPhase(in.Phase) - out.Conditions = *(*[]PodCondition)(unsafe.Pointer(&in.Conditions)) - out.Message = in.Message - out.Reason = in.Reason - out.HostIP = in.HostIP - out.PodIP = in.PodIP - out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) - out.QOSClass = PodQOSClass(in.QOSClass) - out.InitContainerStatuses = *(*[]ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) - out.ContainerStatuses = *(*[]ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) - return nil -} - -func Convert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error { - return autoConvert_api_PodStatus_To_v1_PodStatus(in, out, s) -} - -func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s 
conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { - return autoConvert_v1_PodTemplate_To_api_PodTemplate(in, out, s) -} - -func autoConvert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error { - return autoConvert_api_PodTemplate_To_v1_PodTemplate(in, out, s) -} - -func autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.PodTemplate, len(*in)) - for i := range *in { - if err := Convert_v1_PodTemplate_To_api_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { - return autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in, out, s) -} - -func autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodTemplate, len(*in)) - for i := range *in { - if err := Convert_api_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]PodTemplate, 0) - } - return nil -} - -func Convert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error { - return autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in, out, s) -} - -func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in, 
out, s) -} - -func autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *PortworxVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *PortworxVolumeSource, s conversion.Scope) error { - return autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in, out, s) -} - -func autoConvert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error { - out.UID = (*types.UID)(unsafe.Pointer(in.UID)) - return nil -} - -func Convert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error { - return autoConvert_v1_Preconditions_To_api_Preconditions(in, out, s) -} - -func autoConvert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *Preconditions, s conversion.Scope) error { - out.UID = (*types.UID)(unsafe.Pointer(in.UID)) - return nil -} - -func Convert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *Preconditions, s conversion.Scope) error { - return autoConvert_api_Preconditions_To_v1_Preconditions(in, out, s) -} - -func autoConvert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in *PreferAvoidPodsEntry, out *api.PreferAvoidPodsEntry, s conversion.Scope) error { - if err := Convert_v1_PodSignature_To_api_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { - return err - } - out.EvictionTime = in.EvictionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in *PreferAvoidPodsEntry, out *api.PreferAvoidPodsEntry, s conversion.Scope) error { - return autoConvert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in, out, s) -} - -func autoConvert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *api.PreferAvoidPodsEntry, out *PreferAvoidPodsEntry, s conversion.Scope) error { - if err := Convert_api_PodSignature_To_v1_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { - return err - } - out.EvictionTime = in.EvictionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *api.PreferAvoidPodsEntry, out *PreferAvoidPodsEntry, s conversion.Scope) error { - return autoConvert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in, out, s) -} - -func autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { - out.Weight = in.Weight - if err := Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { - return err - } - return nil -} - -func Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { - return autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in, out, s) -} - -func autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *PreferredSchedulingTerm, s conversion.Scope) error { - out.Weight = in.Weight - if err := Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { - return err - } - return nil -} - -func 
Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *PreferredSchedulingTerm, s conversion.Scope) error { - return autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s) -} - -func autoConvert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope) error { - if err := Convert_v1_Handler_To_api_Handler(&in.Handler, &out.Handler, s); err != nil { - return err - } - out.InitialDelaySeconds = in.InitialDelaySeconds - out.TimeoutSeconds = in.TimeoutSeconds - out.PeriodSeconds = in.PeriodSeconds - out.SuccessThreshold = in.SuccessThreshold - out.FailureThreshold = in.FailureThreshold - return nil -} - -func Convert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope) error { - return autoConvert_v1_Probe_To_api_Probe(in, out, s) -} - -func autoConvert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error { - if err := Convert_api_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil { - return err - } - out.InitialDelaySeconds = in.InitialDelaySeconds - out.TimeoutSeconds = in.TimeoutSeconds - out.PeriodSeconds = in.PeriodSeconds - out.SuccessThreshold = in.SuccessThreshold - out.FailureThreshold = in.FailureThreshold - return nil -} - -func Convert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error { - return autoConvert_api_Probe_To_v1_Probe(in, out, s) -} - -func autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error { - out.Sources = *(*[]api.VolumeProjection)(unsafe.Pointer(&in.Sources)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - return nil -} - -func Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in, out, s) -} - -func autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *ProjectedVolumeSource, s conversion.Scope) error { - if in.Sources == nil { - out.Sources = make([]VolumeProjection, 0) - } else { - out.Sources = *(*[]VolumeProjection)(unsafe.Pointer(&in.Sources)) - } - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - return nil -} - -func Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *ProjectedVolumeSource, s conversion.Scope) error { - return autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in, out, s) -} - -func autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error { - out.Registry = in.Registry - out.Volume = in.Volume - out.ReadOnly = in.ReadOnly - out.User = in.User - out.Group = in.Group - return nil -} - -func Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error { - return autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in, out, s) -} - -func autoConvert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *api.QuobyteVolumeSource, out *QuobyteVolumeSource, s conversion.Scope) error { - out.Registry = in.Registry - out.Volume = in.Volume - out.ReadOnly = in.ReadOnly - out.User = in.User - out.Group = in.Group - return nil -} - -func Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *api.QuobyteVolumeSource, out 
*QuobyteVolumeSource, s conversion.Scope) error { - return autoConvert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in, out, s) -} - -func autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { - out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { - return autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in, out, s) -} - -func autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error { - if in.CephMonitors == nil { - out.CephMonitors = make([]string, 0) - } else { - out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) - } - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring - out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error { - return autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s) -} - -func autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Range = in.Range - out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) - return nil -} - -func Convert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { - return autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in, out, s) -} - -func autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Range = in.Range - if in.Data == nil { - out.Data = make([]byte, 0) - } else { - out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) - } - return nil -} - -func Convert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error { - return autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in, out, s) -} - -func autoConvert_v1_ReplicationController_To_api_ReplicationController(in *ReplicationController, out *api.ReplicationController, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_ReplicationController_To_api_ReplicationController(in *ReplicationController, out *api.ReplicationController, s conversion.Scope) error { - return autoConvert_v1_ReplicationController_To_api_ReplicationController(in, out, s) -} - -func autoConvert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := 
Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error { - return autoConvert_api_ReplicationController_To_v1_ReplicationController(in, out, s) -} - -func autoConvert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in *ReplicationControllerCondition, out *api.ReplicationControllerCondition, s conversion.Scope) error { - out.Type = api.ReplicationControllerConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in *ReplicationControllerCondition, out *api.ReplicationControllerCondition, s conversion.Scope) error { - return autoConvert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in, out, s) -} - -func autoConvert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *api.ReplicationControllerCondition, out *ReplicationControllerCondition, s conversion.Scope) error { - out.Type = ReplicationControllerConditionType(in.Type) - out.Status = ConditionStatus(in.Status) - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *api.ReplicationControllerCondition, out *ReplicationControllerCondition, s conversion.Scope) error { - return autoConvert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in, out, s) -} - -func autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.ReplicationController, len(*in)) - for i := range *in { - if err := Convert_v1_ReplicationController_To_api_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { - return autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in, out, s) -} - -func autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicationController, len(*in)) - for i := range *in { - if err := Convert_api_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]ReplicationController, 0) - } - return nil -} - -func Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error { - return 
autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s) -} - -func autoConvert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { - if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(api.PodTemplateSpec) - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(*in, *out, s); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func autoConvert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *ReplicationControllerSpec, s conversion.Scope) error { - if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(PodTemplateSpec) - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(*in, *out, s); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*[]api.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -func Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { - return autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in, out, s) -} - -func autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*[]ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -func Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error { - return autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s) -} - -func autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { - out.ContainerName = in.ContainerName - out.Resource = in.Resource - out.Divisor = in.Divisor - return nil -} - -func Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { - return autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in, out, s) -} - -func 
autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *ResourceFieldSelector, s conversion.Scope) error { - out.ContainerName = in.ContainerName - out.Resource = in.Resource - out.Divisor = in.Divisor - return nil -} - -func Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *ResourceFieldSelector, s conversion.Scope) error { - return autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s) -} - -func autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { - return autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in, out, s) -} - -func autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error { - return autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in, out, s) -} - -func autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ResourceQuota)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { - return autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in, out, s) -} - -func autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]ResourceQuota, 0) - } else { - out.Items = *(*[]ResourceQuota)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error { - return autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s) -} - -func autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { - out.Hard = *(*api.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Scopes = *(*[]api.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) - return nil -} - -func Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { - return autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in, out, s) -} - -func autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error { - out.Hard = *(*ResourceList)(unsafe.Pointer(&in.Hard)) - out.Scopes = 
*(*[]ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) - return nil -} - -func Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error { - return autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s) -} - -func autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { - out.Hard = *(*api.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Used = *(*api.ResourceList)(unsafe.Pointer(&in.Used)) - return nil -} - -func Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { - return autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in, out, s) -} - -func autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error { - out.Hard = *(*ResourceList)(unsafe.Pointer(&in.Hard)) - out.Used = *(*ResourceList)(unsafe.Pointer(&in.Used)) - return nil -} - -func Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error { - return autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s) -} - -func autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { - out.Limits = *(*api.ResourceList)(unsafe.Pointer(&in.Limits)) - out.Requests = *(*api.ResourceList)(unsafe.Pointer(&in.Requests)) - return nil -} - -func Convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { - return autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in, out, s) -} - -func autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error { - out.Limits = *(*ResourceList)(unsafe.Pointer(&in.Limits)) - out.Requests = *(*ResourceList)(unsafe.Pointer(&in.Requests)) - return nil -} - -func Convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error { - return autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in, out, s) -} - -func autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in *SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level - return nil -} - -func Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in *SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { - return autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in, out, s) -} - -func autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error { - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level - return nil -} - -func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error { - return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s) -} - -func autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { - out.Gateway = in.Gateway - out.System = in.System - out.SecretRef = 
(*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.SSLEnabled = in.SSLEnabled - out.ProtectionDomain = in.ProtectionDomain - out.StoragePool = in.StoragePool - out.StorageMode = in.StorageMode - out.VolumeName = in.VolumeName - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in, out, s) -} - -func autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *ScaleIOVolumeSource, s conversion.Scope) error { - out.Gateway = in.Gateway - out.System = in.System - out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.SSLEnabled = in.SSLEnabled - out.ProtectionDomain = in.ProtectionDomain - out.StoragePool = in.StoragePool - out.StorageMode = in.StorageMode - out.VolumeName = in.VolumeName - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *ScaleIOVolumeSource, s conversion.Scope) error { - return autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in, out, s) -} - -func autoConvert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) - // INFO: in.StringData opted out of conversion generation - out.Type = api.SecretType(in.Type) - return nil -} - -func autoConvert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) - out.Type = SecretType(in.Type) - return nil -} - -func Convert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error { - return autoConvert_api_Secret_To_v1_Secret(in, out, s) -} - -func autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in *SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -func Convert_v1_SecretEnvSource_To_api_SecretEnvSource(in *SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { - return autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in, out, s) -} - -func autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *SecretEnvSource, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -func Convert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *SecretEnvSource, s conversion.Scope) error { - return autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in, out, s) -} - -func autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - 
return nil -} - -func Convert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { - return autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in, out, s) -} - -func autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -func Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error { - return autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in, out, s) -} - -func autoConvert_v1_SecretList_To_api_SecretList(in *SecretList, out *api.SecretList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Secret, len(*in)) - for i := range *in { - if err := Convert_v1_Secret_To_api_Secret(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_SecretList_To_api_SecretList(in *SecretList, out *api.SecretList, s conversion.Scope) error { - return autoConvert_v1_SecretList_To_api_SecretList(in, out, s) -} - -func autoConvert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Secret, len(*in)) - for i := range *in { - if err := Convert_api_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]Secret, 0) - } - return nil -} - -func Convert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error { - return autoConvert_api_SecretList_To_v1_SecretList(in, out, s) -} - -func autoConvert_v1_SecretProjection_To_api_SecretProjection(in *SecretProjection, out *api.SecretProjection, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -func Convert_v1_SecretProjection_To_api_SecretProjection(in *SecretProjection, out *api.SecretProjection, s conversion.Scope) error { - return autoConvert_v1_SecretProjection_To_api_SecretProjection(in, out, s) -} - -func autoConvert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *SecretProjection, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -func Convert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *SecretProjection, s conversion.Scope) error { - return autoConvert_api_SecretProjection_To_v1_SecretProjection(in, out, s) -} - -func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.Items = 
*(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -func Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { - return autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in, out, s) -} - -func autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -func Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error { - return autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s) -} - -func autoConvert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error { - out.Capabilities = (*api.Capabilities)(unsafe.Pointer(in.Capabilities)) - out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) - out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) - return nil -} - -func Convert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error { - return autoConvert_v1_SecurityContext_To_api_SecurityContext(in, out, s) -} - -func autoConvert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error { - out.Capabilities = (*Capabilities)(unsafe.Pointer(in.Capabilities)) - out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) - out.SELinuxOptions = (*SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) - return nil -} - -func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error { - return autoConvert_api_SecurityContext_To_v1_SecurityContext(in, out, s) -} - -func autoConvert_v1_SerializedReference_To_api_SerializedReference(in *SerializedReference, out *api.SerializedReference, s conversion.Scope) error { - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Reference, &out.Reference, s); err != nil { - return err - } - return nil -} - -func Convert_v1_SerializedReference_To_api_SerializedReference(in *SerializedReference, out *api.SerializedReference, s conversion.Scope) error { - return autoConvert_v1_SerializedReference_To_api_SerializedReference(in, out, s) -} - -func autoConvert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error { - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil { - return err - } - return nil -} - -func Convert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error { - return 
autoConvert_api_SerializedReference_To_v1_SerializedReference(in, out, s) -} - -func autoConvert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ServiceSpec_To_api_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ServiceStatus_To_api_ServiceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversion.Scope) error { - return autoConvert_v1_Service_To_api_Service(in, out, s) -} - -func autoConvert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error { - return autoConvert_api_Service_To_v1_Service(in, out, s) -} - -func autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Secrets = *(*[]api.ObjectReference)(unsafe.Pointer(&in.Secrets)) - out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) - out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) - return nil -} - -func Convert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { - return autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in, out, s) -} - -func autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Secrets = *(*[]ObjectReference)(unsafe.Pointer(&in.Secrets)) - out.ImagePullSecrets = *(*[]LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) - out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) - return nil -} - -func Convert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error { - return autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in, out, s) -} - -func autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ServiceAccount)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { - return autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in, out, s) -} - -func autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]ServiceAccount, 0) - } else { - out.Items = *(*[]ServiceAccount)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error { - return autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in, out, s) -} - -func 
autoConvert_v1_ServiceList_To_api_ServiceList(in *ServiceList, out *api.ServiceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Service, len(*in)) - for i := range *in { - if err := Convert_v1_Service_To_api_Service(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_ServiceList_To_api_ServiceList(in *ServiceList, out *api.ServiceList, s conversion.Scope) error { - return autoConvert_v1_ServiceList_To_api_ServiceList(in, out, s) -} - -func autoConvert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Service, len(*in)) - for i := range *in { - if err := Convert_api_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]Service, 0) - } - return nil -} - -func Convert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error { - return autoConvert_api_ServiceList_To_v1_ServiceList(in, out, s) -} - -func autoConvert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.ServicePort, s conversion.Scope) error { - out.Name = in.Name - out.Protocol = api.Protocol(in.Protocol) - out.Port = in.Port - out.TargetPort = in.TargetPort - out.NodePort = in.NodePort - return nil -} - -func Convert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.ServicePort, s conversion.Scope) error { - return autoConvert_v1_ServicePort_To_api_ServicePort(in, out, s) -} - -func autoConvert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error { - out.Name = in.Name - out.Protocol = Protocol(in.Protocol) - out.Port = in.Port - out.TargetPort = in.TargetPort - out.NodePort = in.NodePort - return nil -} - -func Convert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error { - return autoConvert_api_ServicePort_To_v1_ServicePort(in, out, s) -} - -func autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -func Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { - return autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in, out, s) -} - -func autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -func Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error { - return autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s) -} - -func autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { - out.Ports = *(*[]api.ServicePort)(unsafe.Pointer(&in.Ports)) - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - out.ClusterIP = in.ClusterIP - out.Type = api.ServiceType(in.Type) - out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) - // INFO: in.DeprecatedPublicIPs opted out of conversion generation - out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity) - 
out.LoadBalancerIP = in.LoadBalancerIP - out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) - out.ExternalName = in.ExternalName - return nil -} - -func autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error { - out.Type = ServiceType(in.Type) - out.Ports = *(*[]ServicePort)(unsafe.Pointer(&in.Ports)) - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - out.ClusterIP = in.ClusterIP - out.ExternalName = in.ExternalName - out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) - out.LoadBalancerIP = in.LoadBalancerIP - out.SessionAffinity = ServiceAffinity(in.SessionAffinity) - out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) - return nil -} - -func autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { - if err := Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { - return err - } - return nil -} - -func Convert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { - return autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in, out, s) -} - -func autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error { - if err := Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { - return err - } - return nil -} - -func Convert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error { - return autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in, out, s) -} - -func autoConvert_v1_Sysctl_To_api_Sysctl(in *Sysctl, out *api.Sysctl, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -func Convert_v1_Sysctl_To_api_Sysctl(in *Sysctl, out *api.Sysctl, s conversion.Scope) error { - return autoConvert_v1_Sysctl_To_api_Sysctl(in, out, s) -} - -func autoConvert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *Sysctl, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -func Convert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *Sysctl, s conversion.Scope) error { - return autoConvert_api_Sysctl_To_v1_Sysctl(in, out, s) -} - -func autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { - out.Port = in.Port - return nil -} - -func Convert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { - return autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in, out, s) -} - -func autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error { - out.Port = in.Port - return nil -} - -func Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error { - return autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in, out, s) -} - -func autoConvert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error { - out.Key = in.Key - out.Value = in.Value - out.Effect = api.TaintEffect(in.Effect) - out.TimeAdded = in.TimeAdded - return nil -} - -func Convert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error { - return autoConvert_v1_Taint_To_api_Taint(in, out, 
s) -} - -func autoConvert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error { - out.Key = in.Key - out.Value = in.Value - out.Effect = TaintEffect(in.Effect) - out.TimeAdded = in.TimeAdded - return nil -} - -func Convert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error { - return autoConvert_api_Taint_To_v1_Taint(in, out, s) -} - -func autoConvert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error { - out.Key = in.Key - out.Operator = api.TolerationOperator(in.Operator) - out.Value = in.Value - out.Effect = api.TaintEffect(in.Effect) - out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) - return nil -} - -func Convert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error { - return autoConvert_v1_Toleration_To_api_Toleration(in, out, s) -} - -func autoConvert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error { - out.Key = in.Key - out.Operator = TolerationOperator(in.Operator) - out.Value = in.Value - out.Effect = TaintEffect(in.Effect) - out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) - return nil -} - -func Convert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error { - return autoConvert_api_Toleration_To_v1_Toleration(in, out, s) -} - -func autoConvert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_v1_VolumeSource_To_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error { - return autoConvert_v1_Volume_To_api_Volume(in, out, s) -} - -func autoConvert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_api_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { - return err - } - return nil -} - -func Convert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error { - return autoConvert_api_Volume_To_v1_Volume(in, out, s) -} - -func autoConvert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeMount, s conversion.Scope) error { - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - out.SubPath = in.SubPath - return nil -} - -func Convert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeMount, s conversion.Scope) error { - return autoConvert_v1_VolumeMount_To_api_VolumeMount(in, out, s) -} - -func autoConvert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error { - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - out.SubPath = in.SubPath - return nil -} - -func Convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error { - return autoConvert_api_VolumeMount_To_v1_VolumeMount(in, out, s) -} - -func autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in *VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { - out.Secret = (*api.SecretProjection)(unsafe.Pointer(in.Secret)) - out.DownwardAPI = (*api.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) - out.ConfigMap = (*api.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) - return nil -} - -func 
Convert_v1_VolumeProjection_To_api_VolumeProjection(in *VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { - return autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in, out, s) -} - -func autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *VolumeProjection, s conversion.Scope) error { - out.Secret = (*SecretProjection)(unsafe.Pointer(in.Secret)) - out.DownwardAPI = (*DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) - out.ConfigMap = (*ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) - return nil -} - -func Convert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *VolumeProjection, s conversion.Scope) error { - return autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in, out, s) -} - -func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error { - out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.EmptyDir = (*api.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) - out.GCEPersistentDisk = (*api.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*api.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.GitRepo = (*api.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) - out.Secret = (*api.SecretVolumeSource)(unsafe.Pointer(in.Secret)) - out.NFS = (*api.NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.ISCSI = (*api.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.Glusterfs = (*api.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.PersistentVolumeClaim = (*api.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) - out.RBD = (*api.RBDVolumeSource)(unsafe.Pointer(in.RBD)) - out.FlexVolume = (*api.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.Cinder = (*api.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*api.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) - out.Flocker = (*api.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.DownwardAPI = (*api.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) - out.FC = (*api.FCVolumeSource)(unsafe.Pointer(in.FC)) - out.AzureFile = (*api.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.ConfigMap = (*api.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) - out.VsphereVolume = (*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.Projected = (*api.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) - out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) - return nil -} - -func Convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error { - return autoConvert_v1_VolumeSource_To_api_VolumeSource(in, out, s) -} - -func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error { - out.HostPath = (*HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.EmptyDir = (*EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) - out.GCEPersistentDisk = (*GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = 
(*AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.GitRepo = (*GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) - out.Secret = (*SecretVolumeSource)(unsafe.Pointer(in.Secret)) - out.NFS = (*NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.ISCSI = (*ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.Glusterfs = (*GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.PersistentVolumeClaim = (*PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) - out.RBD = (*RBDVolumeSource)(unsafe.Pointer(in.RBD)) - out.Quobyte = (*QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.FlexVolume = (*FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.Cinder = (*CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) - out.Flocker = (*FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.DownwardAPI = (*DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) - out.FC = (*FCVolumeSource)(unsafe.Pointer(in.FC)) - out.AzureFile = (*AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.ConfigMap = (*ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) - out.VsphereVolume = (*VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.AzureDisk = (*AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.Projected = (*ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) - out.PortworxVolume = (*PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) - return nil -} - -func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error { - return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s) -} - -func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - out.VolumePath = in.VolumePath - out.FSType = in.FSType - return nil -} - -func Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in, out, s) -} - -func autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - out.VolumePath = in.VolumePath - out.FSType = in.FSType - return nil -} - -func Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { - out.Weight = in.Weight - if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { - return err - } - return nil -} - -func Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { - return 
autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in, out, s)
-}
-
-func autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error {
-	out.Weight = in.Weight
-	if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error {
-	return autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s)
-}
diff --git a/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go
deleted file mode 100644
index 463e94680..000000000
--- a/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,3500 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// This file was autogenerated by deepcopy-gen. Do not edit it manually!
-
-package v1
-
-import (
-	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	conversion "k8s.io/apimachinery/pkg/conversion"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-	types "k8s.io/apimachinery/pkg/types"
-	reflect "reflect"
-)
-
-func init() {
-	SchemeBuilder.Register(RegisterDeepCopies)
-}
-
-// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
-// to allow building arbitrary schemes.
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AWSElasticBlockStoreVolumeSource, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Affinity, InType: reflect.TypeOf(&Affinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AttachedVolume, InType: reflect.TypeOf(&AttachedVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AvoidPods, InType: reflect.TypeOf(&AvoidPods{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AzureDiskVolumeSource, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AzureFileVolumeSource, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Binding, InType: reflect.TypeOf(&Binding{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Capabilities, InType: reflect.TypeOf(&Capabilities{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CephFSVolumeSource, InType: reflect.TypeOf(&CephFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CinderVolumeSource, InType: reflect.TypeOf(&CinderVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ComponentCondition, InType: reflect.TypeOf(&ComponentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ComponentStatus, InType: reflect.TypeOf(&ComponentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ComponentStatusList, InType: reflect.TypeOf(&ComponentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMap, InType: reflect.TypeOf(&ConfigMap{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapEnvSource, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapKeySelector, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapList, InType: reflect.TypeOf(&ConfigMapList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapProjection, InType: reflect.TypeOf(&ConfigMapProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapVolumeSource, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Container, InType: reflect.TypeOf(&Container{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerImage, InType: reflect.TypeOf(&ContainerImage{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerPort, InType: reflect.TypeOf(&ContainerPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerState, InType: reflect.TypeOf(&ContainerState{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStateRunning, InType: reflect.TypeOf(&ContainerStateRunning{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStateTerminated, InType: reflect.TypeOf(&ContainerStateTerminated{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStateWaiting, InType: reflect.TypeOf(&ContainerStateWaiting{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStatus, InType: reflect.TypeOf(&ContainerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DaemonEndpoint, InType: reflect.TypeOf(&DaemonEndpoint{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeleteOptions, InType: reflect.TypeOf(&DeleteOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIProjection, InType: 
reflect.TypeOf(&DownwardAPIProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIVolumeFile, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIVolumeSource, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EmptyDirVolumeSource, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointAddress, InType: reflect.TypeOf(&EndpointAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointPort, InType: reflect.TypeOf(&EndpointPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointSubset, InType: reflect.TypeOf(&EndpointSubset{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Endpoints, InType: reflect.TypeOf(&Endpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointsList, InType: reflect.TypeOf(&EndpointsList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EnvFromSource, InType: reflect.TypeOf(&EnvFromSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EnvVar, InType: reflect.TypeOf(&EnvVar{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EnvVarSource, InType: reflect.TypeOf(&EnvVarSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Event, InType: reflect.TypeOf(&Event{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EventList, InType: reflect.TypeOf(&EventList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EventSource, InType: reflect.TypeOf(&EventSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ExecAction, InType: reflect.TypeOf(&ExecAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_FCVolumeSource, InType: reflect.TypeOf(&FCVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_FlexVolumeSource, InType: reflect.TypeOf(&FlexVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_FlockerVolumeSource, InType: reflect.TypeOf(&FlockerVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GCEPersistentDiskVolumeSource, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GitRepoVolumeSource, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GlusterfsVolumeSource, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HTTPGetAction, InType: reflect.TypeOf(&HTTPGetAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HTTPHeader, InType: reflect.TypeOf(&HTTPHeader{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Handler, InType: reflect.TypeOf(&Handler{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HostPathVolumeSource, InType: reflect.TypeOf(&HostPathVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ISCSIVolumeSource, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_KeyToPath, InType: reflect.TypeOf(&KeyToPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Lifecycle, InType: reflect.TypeOf(&Lifecycle{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRange, InType: reflect.TypeOf(&LimitRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRangeItem, InType: reflect.TypeOf(&LimitRangeItem{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRangeList, InType: reflect.TypeOf(&LimitRangeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRangeSpec, 
InType: reflect.TypeOf(&LimitRangeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_List, InType: reflect.TypeOf(&List{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ListOptions, InType: reflect.TypeOf(&ListOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LoadBalancerIngress, InType: reflect.TypeOf(&LoadBalancerIngress{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LoadBalancerStatus, InType: reflect.TypeOf(&LoadBalancerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LocalObjectReference, InType: reflect.TypeOf(&LocalObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NFSVolumeSource, InType: reflect.TypeOf(&NFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Namespace, InType: reflect.TypeOf(&Namespace{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NamespaceList, InType: reflect.TypeOf(&NamespaceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NamespaceSpec, InType: reflect.TypeOf(&NamespaceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NamespaceStatus, InType: reflect.TypeOf(&NamespaceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Node, InType: reflect.TypeOf(&Node{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeAddress, InType: reflect.TypeOf(&NodeAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeAffinity, InType: reflect.TypeOf(&NodeAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeCondition, InType: reflect.TypeOf(&NodeCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeDaemonEndpoints, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeList, InType: reflect.TypeOf(&NodeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeProxyOptions, InType: reflect.TypeOf(&NodeProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeResources, InType: reflect.TypeOf(&NodeResources{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSelector, InType: reflect.TypeOf(&NodeSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSelectorRequirement, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSelectorTerm, InType: reflect.TypeOf(&NodeSelectorTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSpec, InType: reflect.TypeOf(&NodeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeStatus, InType: reflect.TypeOf(&NodeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSystemInfo, InType: reflect.TypeOf(&NodeSystemInfo{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectFieldSelector, InType: reflect.TypeOf(&ObjectFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectReference, InType: reflect.TypeOf(&ObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolume, InType: reflect.TypeOf(&PersistentVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaim, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaimList, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaimSpec, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_v1_PersistentVolumeClaimStatus, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaimVolumeSource, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeList, InType: reflect.TypeOf(&PersistentVolumeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeSource, InType: reflect.TypeOf(&PersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeSpec, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeStatus, InType: reflect.TypeOf(&PersistentVolumeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PhotonPersistentDiskVolumeSource, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Pod, InType: reflect.TypeOf(&Pod{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAffinity, InType: reflect.TypeOf(&PodAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAffinityTerm, InType: reflect.TypeOf(&PodAffinityTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAntiAffinity, InType: reflect.TypeOf(&PodAntiAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAttachOptions, InType: reflect.TypeOf(&PodAttachOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodCondition, InType: reflect.TypeOf(&PodCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodExecOptions, InType: reflect.TypeOf(&PodExecOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodList, InType: reflect.TypeOf(&PodList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodLogOptions, InType: reflect.TypeOf(&PodLogOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodPortForwardOptions, InType: reflect.TypeOf(&PodPortForwardOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodProxyOptions, InType: reflect.TypeOf(&PodProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodSecurityContext, InType: reflect.TypeOf(&PodSecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodSignature, InType: reflect.TypeOf(&PodSignature{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodSpec, InType: reflect.TypeOf(&PodSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodStatus, InType: reflect.TypeOf(&PodStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodStatusResult, InType: reflect.TypeOf(&PodStatusResult{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplate, InType: reflect.TypeOf(&PodTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplateList, InType: reflect.TypeOf(&PodTemplateList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplateSpec, InType: reflect.TypeOf(&PodTemplateSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PortworxVolumeSource, InType: reflect.TypeOf(&PortworxVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Preconditions, InType: reflect.TypeOf(&Preconditions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PreferAvoidPodsEntry, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PreferredSchedulingTerm, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Probe, InType: reflect.TypeOf(&Probe{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ProjectedVolumeSource, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_QuobyteVolumeSource, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_RBDVolumeSource, InType: reflect.TypeOf(&RBDVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_RangeAllocation, InType: reflect.TypeOf(&RangeAllocation{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationController, InType: reflect.TypeOf(&ReplicationController{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerCondition, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerList, InType: reflect.TypeOf(&ReplicationControllerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerSpec, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerStatus, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceFieldSelector, InType: reflect.TypeOf(&ResourceFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuota, InType: reflect.TypeOf(&ResourceQuota{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuotaList, InType: reflect.TypeOf(&ResourceQuotaList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuotaSpec, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuotaStatus, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceRequirements, InType: reflect.TypeOf(&ResourceRequirements{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SELinuxOptions, InType: reflect.TypeOf(&SELinuxOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ScaleIOVolumeSource, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Secret, InType: reflect.TypeOf(&Secret{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretEnvSource, InType: reflect.TypeOf(&SecretEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretKeySelector, InType: reflect.TypeOf(&SecretKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretList, InType: reflect.TypeOf(&SecretList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretProjection, InType: reflect.TypeOf(&SecretProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretVolumeSource, InType: reflect.TypeOf(&SecretVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecurityContext, InType: reflect.TypeOf(&SecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SerializedReference, InType: reflect.TypeOf(&SerializedReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Service, InType: reflect.TypeOf(&Service{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceAccount, InType: reflect.TypeOf(&ServiceAccount{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceAccountList, InType: reflect.TypeOf(&ServiceAccountList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceList, InType: reflect.TypeOf(&ServiceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServicePort, InType: reflect.TypeOf(&ServicePort{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceProxyOptions, InType: reflect.TypeOf(&ServiceProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceSpec, InType: reflect.TypeOf(&ServiceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceStatus, InType: reflect.TypeOf(&ServiceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Sysctl, InType: reflect.TypeOf(&Sysctl{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TCPSocketAction, InType: reflect.TypeOf(&TCPSocketAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Taint, InType: reflect.TypeOf(&Taint{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Toleration, InType: reflect.TypeOf(&Toleration{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Volume, InType: reflect.TypeOf(&Volume{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeMount, InType: reflect.TypeOf(&VolumeMount{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeProjection, InType: reflect.TypeOf(&VolumeProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeSource, InType: reflect.TypeOf(&VolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VsphereVirtualDiskVolumeSource, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_WeightedPodAffinityTerm, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, - ) -} - -func DeepCopy_v1_AWSElasticBlockStoreVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*AWSElasticBlockStoreVolumeSource) - out := out.(*AWSElasticBlockStoreVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_Affinity(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Affinity) - out := out.(*Affinity) - *out = *in - if in.NodeAffinity != nil { - in, out := &in.NodeAffinity, &out.NodeAffinity - *out = new(NodeAffinity) - if err := DeepCopy_v1_NodeAffinity(*in, *out, c); err != nil { - return err - } - } - if in.PodAffinity != nil { - in, out := &in.PodAffinity, &out.PodAffinity - *out = new(PodAffinity) - if err := DeepCopy_v1_PodAffinity(*in, *out, c); err != nil { - return err - } - } - if in.PodAntiAffinity != nil { - in, out := &in.PodAntiAffinity, &out.PodAntiAffinity - *out = new(PodAntiAffinity) - if err := DeepCopy_v1_PodAntiAffinity(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1_AttachedVolume(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*AttachedVolume) - out := out.(*AttachedVolume) - *out = *in - return nil - } -} - -func DeepCopy_v1_AvoidPods(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*AvoidPods) - out := out.(*AvoidPods) - *out = *in - if in.PreferAvoidPods != nil { - in, out := &in.PreferAvoidPods, &out.PreferAvoidPods - *out = make([]PreferAvoidPodsEntry, len(*in)) - for i := range *in { - if err := DeepCopy_v1_PreferAvoidPodsEntry(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_AzureDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*AzureDiskVolumeSource) - out := out.(*AzureDiskVolumeSource) - *out = *in - if in.CachingMode != nil { - in, out := &in.CachingMode, &out.CachingMode - *out = new(AzureDataDiskCachingMode) - **out = **in - } - if in.FSType != nil { - in, out := &in.FSType, &out.FSType - *out = new(string) - **out = **in - } - if in.ReadOnly != nil { - 
in, out := &in.ReadOnly, &out.ReadOnly - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_AzureFileVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*AzureFileVolumeSource) - out := out.(*AzureFileVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_Binding(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Binding) - out := out.(*Binding) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - return nil - } -} - -func DeepCopy_v1_Capabilities(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Capabilities) - out := out.(*Capabilities) - *out = *in - if in.Add != nil { - in, out := &in.Add, &out.Add - *out = make([]Capability, len(*in)) - copy(*out, *in) - } - if in.Drop != nil { - in, out := &in.Drop, &out.Drop - *out = make([]Capability, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_CephFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CephFSVolumeSource) - out := out.(*CephFSVolumeSource) - *out = *in - if in.Monitors != nil { - in, out := &in.Monitors, &out.Monitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_CinderVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CinderVolumeSource) - out := out.(*CinderVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_ComponentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ComponentCondition) - out := out.(*ComponentCondition) - *out = *in - return nil - } -} - -func DeepCopy_v1_ComponentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ComponentStatus) - out := out.(*ComponentStatus) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ComponentCondition, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_ComponentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ComponentStatusList) - out := out.(*ComponentStatusList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ComponentStatus, len(*in)) - for i := range *in { - if err := DeepCopy_v1_ComponentStatus(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_ConfigMap(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ConfigMap) - out := out.(*ConfigMap) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - return nil - } -} - -func DeepCopy_v1_ConfigMapEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ConfigMapEnvSource) - out := out.(*ConfigMapEnvSource) - *out = *in - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = 
new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_ConfigMapKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ConfigMapKeySelector) - out := out.(*ConfigMapKeySelector) - *out = *in - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_ConfigMapList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ConfigMapList) - out := out.(*ConfigMapList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ConfigMap, len(*in)) - for i := range *in { - if err := DeepCopy_v1_ConfigMap(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_ConfigMapProjection(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ConfigMapProjection) - out := out.(*ConfigMapProjection) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_ConfigMapVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ConfigMapVolumeSource) - out := out.(*ConfigMapVolumeSource) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_Container(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Container) - out := out.(*Container) - *out = *in - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ContainerPort, len(*in)) - copy(*out, *in) - } - if in.EnvFrom != nil { - in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]EnvFromSource, len(*in)) - for i := range *in { - if err := DeepCopy_v1_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - for i := range *in { - if err := DeepCopy_v1_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if err := DeepCopy_v1_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { - return err - } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]VolumeMount, len(*in)) - copy(*out, *in) - } - if in.LivenessProbe != nil { - in, out := &in.LivenessProbe, &out.LivenessProbe - *out = new(Probe) - if err := DeepCopy_v1_Probe(*in, *out, c); err != nil { - return err - } - } - if in.ReadinessProbe != nil { - in, out := &in.ReadinessProbe, &out.ReadinessProbe - *out = new(Probe) - if err := DeepCopy_v1_Probe(*in, *out, c); err != nil { - return err - } - } - if in.Lifecycle != nil { - 
in, out := &in.Lifecycle, &out.Lifecycle - *out = new(Lifecycle) - if err := DeepCopy_v1_Lifecycle(*in, *out, c); err != nil { - return err - } - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(SecurityContext) - if err := DeepCopy_v1_SecurityContext(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1_ContainerImage(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ContainerImage) - out := out.(*ContainerImage) - *out = *in - if in.Names != nil { - in, out := &in.Names, &out.Names - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_ContainerPort(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ContainerPort) - out := out.(*ContainerPort) - *out = *in - return nil - } -} - -func DeepCopy_v1_ContainerState(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ContainerState) - out := out.(*ContainerState) - *out = *in - if in.Waiting != nil { - in, out := &in.Waiting, &out.Waiting - *out = new(ContainerStateWaiting) - **out = **in - } - if in.Running != nil { - in, out := &in.Running, &out.Running - *out = new(ContainerStateRunning) - if err := DeepCopy_v1_ContainerStateRunning(*in, *out, c); err != nil { - return err - } - } - if in.Terminated != nil { - in, out := &in.Terminated, &out.Terminated - *out = new(ContainerStateTerminated) - if err := DeepCopy_v1_ContainerStateTerminated(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1_ContainerStateRunning(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ContainerStateRunning) - out := out.(*ContainerStateRunning) - *out = *in - out.StartedAt = in.StartedAt.DeepCopy() - return nil - } -} - -func DeepCopy_v1_ContainerStateTerminated(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ContainerStateTerminated) - out := out.(*ContainerStateTerminated) - *out = *in - out.StartedAt = in.StartedAt.DeepCopy() - out.FinishedAt = in.FinishedAt.DeepCopy() - return nil - } -} - -func DeepCopy_v1_ContainerStateWaiting(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ContainerStateWaiting) - out := out.(*ContainerStateWaiting) - *out = *in - return nil - } -} - -func DeepCopy_v1_ContainerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ContainerStatus) - out := out.(*ContainerStatus) - *out = *in - if err := DeepCopy_v1_ContainerState(&in.State, &out.State, c); err != nil { - return err - } - if err := DeepCopy_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_DaemonEndpoint(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DaemonEndpoint) - out := out.(*DaemonEndpoint) - *out = *in - return nil - } -} - -func DeepCopy_v1_DeleteOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeleteOptions) - out := out.(*DeleteOptions) - *out = *in - if in.GracePeriodSeconds != nil { - in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds - *out = new(int64) - **out = **in - } - if in.Preconditions != nil { - in, out := &in.Preconditions, &out.Preconditions - *out = new(Preconditions) - if err := DeepCopy_v1_Preconditions(*in, *out, c); err != nil { - return err - } - } - if in.OrphanDependents != nil { - in, out := 
&in.OrphanDependents, &out.OrphanDependents - *out = new(bool) - **out = **in - } - if in.PropagationPolicy != nil { - in, out := &in.PropagationPolicy, &out.PropagationPolicy - *out = new(DeletionPropagation) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_DownwardAPIProjection(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DownwardAPIProjection) - out := out.(*DownwardAPIProjection) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DownwardAPIVolumeFile, len(*in)) - for i := range *in { - if err := DeepCopy_v1_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_DownwardAPIVolumeFile(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DownwardAPIVolumeFile) - out := out.(*DownwardAPIVolumeFile) - *out = *in - if in.FieldRef != nil { - in, out := &in.FieldRef, &out.FieldRef - *out = new(ObjectFieldSelector) - **out = **in - } - if in.ResourceFieldRef != nil { - in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - *out = new(ResourceFieldSelector) - if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil { - return err - } - } - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_DownwardAPIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DownwardAPIVolumeSource) - out := out.(*DownwardAPIVolumeSource) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DownwardAPIVolumeFile, len(*in)) - for i := range *in { - if err := DeepCopy_v1_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_EmptyDirVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EmptyDirVolumeSource) - out := out.(*EmptyDirVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_EndpointAddress(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EndpointAddress) - out := out.(*EndpointAddress) - *out = *in - if in.NodeName != nil { - in, out := &in.NodeName, &out.NodeName - *out = new(string) - **out = **in - } - if in.TargetRef != nil { - in, out := &in.TargetRef, &out.TargetRef - *out = new(ObjectReference) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_EndpointPort(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EndpointPort) - out := out.(*EndpointPort) - *out = *in - return nil - } -} - -func DeepCopy_v1_EndpointSubset(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EndpointSubset) - out := out.(*EndpointSubset) - *out = *in - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]EndpointAddress, len(*in)) - for i := range *in { - if err := DeepCopy_v1_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.NotReadyAddresses != nil { - in, out := &in.NotReadyAddresses, &out.NotReadyAddresses - *out = make([]EndpointAddress, len(*in)) - for i := range *in { - if err := DeepCopy_v1_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]EndpointPort, len(*in)) - copy(*out, 
*in) - } - return nil - } -} - -func DeepCopy_v1_Endpoints(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Endpoints) - out := out.(*Endpoints) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if in.Subsets != nil { - in, out := &in.Subsets, &out.Subsets - *out = make([]EndpointSubset, len(*in)) - for i := range *in { - if err := DeepCopy_v1_EndpointSubset(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_EndpointsList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EndpointsList) - out := out.(*EndpointsList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Endpoints, len(*in)) - for i := range *in { - if err := DeepCopy_v1_Endpoints(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_EnvFromSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EnvFromSource) - out := out.(*EnvFromSource) - *out = *in - if in.ConfigMapRef != nil { - in, out := &in.ConfigMapRef, &out.ConfigMapRef - *out = new(ConfigMapEnvSource) - if err := DeepCopy_v1_ConfigMapEnvSource(*in, *out, c); err != nil { - return err - } - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretEnvSource) - if err := DeepCopy_v1_SecretEnvSource(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1_EnvVar(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EnvVar) - out := out.(*EnvVar) - *out = *in - if in.ValueFrom != nil { - in, out := &in.ValueFrom, &out.ValueFrom - *out = new(EnvVarSource) - if err := DeepCopy_v1_EnvVarSource(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1_EnvVarSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EnvVarSource) - out := out.(*EnvVarSource) - *out = *in - if in.FieldRef != nil { - in, out := &in.FieldRef, &out.FieldRef - *out = new(ObjectFieldSelector) - **out = **in - } - if in.ResourceFieldRef != nil { - in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - *out = new(ResourceFieldSelector) - if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil { - return err - } - } - if in.ConfigMapKeyRef != nil { - in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef - *out = new(ConfigMapKeySelector) - if err := DeepCopy_v1_ConfigMapKeySelector(*in, *out, c); err != nil { - return err - } - } - if in.SecretKeyRef != nil { - in, out := &in.SecretKeyRef, &out.SecretKeyRef - *out = new(SecretKeySelector) - if err := DeepCopy_v1_SecretKeySelector(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1_Event(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Event) - out := out.(*Event) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - out.FirstTimestamp = in.FirstTimestamp.DeepCopy() - out.LastTimestamp = in.LastTimestamp.DeepCopy() - return nil - } -} - -func DeepCopy_v1_EventList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EventList) - out := out.(*EventList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Event, len(*in)) - for i := range 
*in { - if err := DeepCopy_v1_Event(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_EventSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EventSource) - out := out.(*EventSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_ExecAction(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ExecAction) - out := out.(*ExecAction) - *out = *in - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_FCVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*FCVolumeSource) - out := out.(*FCVolumeSource) - *out = *in - if in.TargetWWNs != nil { - in, out := &in.TargetWWNs, &out.TargetWWNs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Lun != nil { - in, out := &in.Lun, &out.Lun - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_FlexVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*FlexVolumeSource) - out := out.(*FlexVolumeSource) - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - if in.Options != nil { - in, out := &in.Options, &out.Options - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - return nil - } -} - -func DeepCopy_v1_FlockerVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*FlockerVolumeSource) - out := out.(*FlockerVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_GCEPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GCEPersistentDiskVolumeSource) - out := out.(*GCEPersistentDiskVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_GitRepoVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GitRepoVolumeSource) - out := out.(*GitRepoVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_GlusterfsVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GlusterfsVolumeSource) - out := out.(*GlusterfsVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_HTTPGetAction(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HTTPGetAction) - out := out.(*HTTPGetAction) - *out = *in - if in.HTTPHeaders != nil { - in, out := &in.HTTPHeaders, &out.HTTPHeaders - *out = make([]HTTPHeader, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_HTTPHeader(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HTTPHeader) - out := out.(*HTTPHeader) - *out = *in - return nil - } -} - -func DeepCopy_v1_Handler(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Handler) - out := out.(*Handler) - *out = *in - if in.Exec != nil { - in, out := &in.Exec, &out.Exec - *out = new(ExecAction) - if err := DeepCopy_v1_ExecAction(*in, *out, c); err != nil { - return err - } - } - if in.HTTPGet != nil { - in, out := &in.HTTPGet, &out.HTTPGet - *out = new(HTTPGetAction) - if err := DeepCopy_v1_HTTPGetAction(*in, *out, c); err != nil { - return err - } - } - if in.TCPSocket != nil { - in, out := &in.TCPSocket, &out.TCPSocket - *out = new(TCPSocketAction) - **out = **in - } - return nil - } -} - -func 
DeepCopy_v1_HostPathVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HostPathVolumeSource) - out := out.(*HostPathVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_ISCSIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ISCSIVolumeSource) - out := out.(*ISCSIVolumeSource) - *out = *in - if in.Portals != nil { - in, out := &in.Portals, &out.Portals - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_KeyToPath(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KeyToPath) - out := out.(*KeyToPath) - *out = *in - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_Lifecycle(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Lifecycle) - out := out.(*Lifecycle) - *out = *in - if in.PostStart != nil { - in, out := &in.PostStart, &out.PostStart - *out = new(Handler) - if err := DeepCopy_v1_Handler(*in, *out, c); err != nil { - return err - } - } - if in.PreStop != nil { - in, out := &in.PreStop, &out.PreStop - *out = new(Handler) - if err := DeepCopy_v1_Handler(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1_LimitRange(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LimitRange) - out := out.(*LimitRange) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_LimitRangeSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_LimitRangeItem(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LimitRangeItem) - out := out.(*LimitRangeItem) - *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Default != nil { - in, out := &in.Default, &out.Default - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.DefaultRequest != nil { - in, out := &in.DefaultRequest, &out.DefaultRequest - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.MaxLimitRequestRatio != nil { - in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return nil - } -} - -func DeepCopy_v1_LimitRangeList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LimitRangeList) - out := out.(*LimitRangeList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]LimitRange, len(*in)) - for i := range *in { - if err := DeepCopy_v1_LimitRange(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_LimitRangeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LimitRangeSpec) - out := out.(*LimitRangeSpec) - *out = *in - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = make([]LimitRangeItem, len(*in)) - for i := range *in { - if err := DeepCopy_v1_LimitRangeItem(&(*in)[i], &(*out)[i], c); 
err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_List(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*List) - out := out.(*List) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.RawExtension, len(*in)) - for i := range *in { - if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { - return err - } else { - (*out)[i] = *newVal.(*runtime.RawExtension) - } - } - } - return nil - } -} - -func DeepCopy_v1_ListOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ListOptions) - out := out.(*ListOptions) - *out = *in - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int64) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_LoadBalancerIngress(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LoadBalancerIngress) - out := out.(*LoadBalancerIngress) - *out = *in - return nil - } -} - -func DeepCopy_v1_LoadBalancerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LoadBalancerStatus) - out := out.(*LoadBalancerStatus) - *out = *in - if in.Ingress != nil { - in, out := &in.Ingress, &out.Ingress - *out = make([]LoadBalancerIngress, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_LocalObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LocalObjectReference) - out := out.(*LocalObjectReference) - *out = *in - return nil - } -} - -func DeepCopy_v1_NFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NFSVolumeSource) - out := out.(*NFSVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_Namespace(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Namespace) - out := out.(*Namespace) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_NamespaceSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_NamespaceList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NamespaceList) - out := out.(*NamespaceList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Namespace, len(*in)) - for i := range *in { - if err := DeepCopy_v1_Namespace(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_NamespaceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NamespaceSpec) - out := out.(*NamespaceSpec) - *out = *in - if in.Finalizers != nil { - in, out := &in.Finalizers, &out.Finalizers - *out = make([]FinalizerName, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_NamespaceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NamespaceStatus) - out := out.(*NamespaceStatus) - *out = *in - return nil - } -} - -func DeepCopy_v1_Node(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Node) - out := out.(*Node) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_NodeSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_NodeStatus(&in.Status, &out.Status, c); err != nil { 
- return err - } - return nil - } -} - -func DeepCopy_v1_NodeAddress(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeAddress) - out := out.(*NodeAddress) - *out = *in - return nil - } -} - -func DeepCopy_v1_NodeAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeAffinity) - out := out.(*NodeAffinity) - *out = *in - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = new(NodeSelector) - if err := DeepCopy_v1_NodeSelector(*in, *out, c); err != nil { - return err - } - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]PreferredSchedulingTerm, len(*in)) - for i := range *in { - if err := DeepCopy_v1_PreferredSchedulingTerm(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_NodeCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeCondition) - out := out.(*NodeCondition) - *out = *in - out.LastHeartbeatTime = in.LastHeartbeatTime.DeepCopy() - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - return nil - } -} - -func DeepCopy_v1_NodeDaemonEndpoints(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeDaemonEndpoints) - out := out.(*NodeDaemonEndpoints) - *out = *in - return nil - } -} - -func DeepCopy_v1_NodeList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeList) - out := out.(*NodeList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Node, len(*in)) - for i := range *in { - if err := DeepCopy_v1_Node(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_NodeProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeProxyOptions) - out := out.(*NodeProxyOptions) - *out = *in - return nil - } -} - -func DeepCopy_v1_NodeResources(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeResources) - out := out.(*NodeResources) - *out = *in - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return nil - } -} - -func DeepCopy_v1_NodeSelector(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeSelector) - out := out.(*NodeSelector) - *out = *in - if in.NodeSelectorTerms != nil { - in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms - *out = make([]NodeSelectorTerm, len(*in)) - for i := range *in { - if err := DeepCopy_v1_NodeSelectorTerm(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_NodeSelectorRequirement(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeSelectorRequirement) - out := out.(*NodeSelectorRequirement) - *out = *in - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_NodeSelectorTerm(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeSelectorTerm) - out := out.(*NodeSelectorTerm) - *out = *in - if in.MatchExpressions != nil { - in, 
out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]NodeSelectorRequirement, len(*in)) - for i := range *in { - if err := DeepCopy_v1_NodeSelectorRequirement(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_NodeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeSpec) - out := out.(*NodeSpec) - *out = *in - if in.Taints != nil { - in, out := &in.Taints, &out.Taints - *out = make([]Taint, len(*in)) - for i := range *in { - if err := DeepCopy_v1_Taint(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_NodeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeStatus) - out := out.(*NodeStatus) - *out = *in - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Allocatable != nil { - in, out := &in.Allocatable, &out.Allocatable - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]NodeCondition, len(*in)) - for i := range *in { - if err := DeepCopy_v1_NodeCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]NodeAddress, len(*in)) - copy(*out, *in) - } - if in.Images != nil { - in, out := &in.Images, &out.Images - *out = make([]ContainerImage, len(*in)) - for i := range *in { - if err := DeepCopy_v1_ContainerImage(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.VolumesInUse != nil { - in, out := &in.VolumesInUse, &out.VolumesInUse - *out = make([]UniqueVolumeName, len(*in)) - copy(*out, *in) - } - if in.VolumesAttached != nil { - in, out := &in.VolumesAttached, &out.VolumesAttached - *out = make([]AttachedVolume, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_NodeSystemInfo(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeSystemInfo) - out := out.(*NodeSystemInfo) - *out = *in - return nil - } -} - -func DeepCopy_v1_ObjectFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ObjectFieldSelector) - out := out.(*ObjectFieldSelector) - *out = *in - return nil - } -} - -func DeepCopy_v1_ObjectMeta(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ObjectMeta) - out := out.(*ObjectMeta) - *out = *in - out.CreationTimestamp = in.CreationTimestamp.DeepCopy() - if in.DeletionTimestamp != nil { - in, out := &in.DeletionTimestamp, &out.DeletionTimestamp - *out = new(meta_v1.Time) - **out = (*in).DeepCopy() - } - if in.DeletionGracePeriodSeconds != nil { - in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds - *out = new(int64) - **out = **in - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - if in.OwnerReferences != nil { - in, out := &in.OwnerReferences, &out.OwnerReferences - *out = make([]meta_v1.OwnerReference, len(*in)) - for i := range *in { - if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { - return err - 
} else { - (*out)[i] = *newVal.(*meta_v1.OwnerReference) - } - } - } - if in.Finalizers != nil { - in, out := &in.Finalizers, &out.Finalizers - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_ObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ObjectReference) - out := out.(*ObjectReference) - *out = *in - return nil - } -} - -func DeepCopy_v1_PersistentVolume(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolume) - out := out.(*PersistentVolume) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_PersistentVolumeClaim(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeClaim) - out := out.(*PersistentVolumeClaim) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_PersistentVolumeClaimList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeClaimList) - out := out.(*PersistentVolumeClaimList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PersistentVolumeClaim, len(*in)) - for i := range *in { - if err := DeepCopy_v1_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_PersistentVolumeClaimSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeClaimSpec) - out := out.(*PersistentVolumeClaimSpec) - *out = *in - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - copy(*out, *in) - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*meta_v1.LabelSelector) - } - } - if err := DeepCopy_v1_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { - return err - } - if in.StorageClassName != nil { - in, out := &in.StorageClassName, &out.StorageClassName - *out = new(string) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_PersistentVolumeClaimStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeClaimStatus) - out := out.(*PersistentVolumeClaimStatus) - *out = *in - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - copy(*out, *in) - } - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return nil - } -} - -func DeepCopy_v1_PersistentVolumeClaimVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeClaimVolumeSource) - out := out.(*PersistentVolumeClaimVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_PersistentVolumeList(in 
interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeList) - out := out.(*PersistentVolumeList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PersistentVolume, len(*in)) - for i := range *in { - if err := DeepCopy_v1_PersistentVolume(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_PersistentVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeSource) - out := out.(*PersistentVolumeSource) - *out = *in - if in.GCEPersistentDisk != nil { - in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - *out = new(GCEPersistentDiskVolumeSource) - **out = **in - } - if in.AWSElasticBlockStore != nil { - in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - *out = new(AWSElasticBlockStoreVolumeSource) - **out = **in - } - if in.HostPath != nil { - in, out := &in.HostPath, &out.HostPath - *out = new(HostPathVolumeSource) - **out = **in - } - if in.Glusterfs != nil { - in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - **out = **in - } - if in.NFS != nil { - in, out := &in.NFS, &out.NFS - *out = new(NFSVolumeSource) - **out = **in - } - if in.RBD != nil { - in, out := &in.RBD, &out.RBD - *out = new(RBDVolumeSource) - if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.ISCSI != nil { - in, out := &in.ISCSI, &out.ISCSI - *out = new(ISCSIVolumeSource) - if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.Cinder != nil { - in, out := &in.Cinder, &out.Cinder - *out = new(CinderVolumeSource) - **out = **in - } - if in.CephFS != nil { - in, out := &in.CephFS, &out.CephFS - *out = new(CephFSVolumeSource) - if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.FC != nil { - in, out := &in.FC, &out.FC - *out = new(FCVolumeSource) - if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.Flocker != nil { - in, out := &in.Flocker, &out.Flocker - *out = new(FlockerVolumeSource) - **out = **in - } - if in.FlexVolume != nil { - in, out := &in.FlexVolume, &out.FlexVolume - *out = new(FlexVolumeSource) - if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.AzureFile != nil { - in, out := &in.AzureFile, &out.AzureFile - *out = new(AzureFileVolumeSource) - **out = **in - } - if in.VsphereVolume != nil { - in, out := &in.VsphereVolume, &out.VsphereVolume - *out = new(VsphereVirtualDiskVolumeSource) - **out = **in - } - if in.Quobyte != nil { - in, out := &in.Quobyte, &out.Quobyte - *out = new(QuobyteVolumeSource) - **out = **in - } - if in.AzureDisk != nil { - in, out := &in.AzureDisk, &out.AzureDisk - *out = new(AzureDiskVolumeSource) - if err := DeepCopy_v1_AzureDiskVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.PhotonPersistentDisk != nil { - in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk - *out = new(PhotonPersistentDiskVolumeSource) - **out = **in - } - if in.PortworxVolume != nil { - in, out := &in.PortworxVolume, &out.PortworxVolume - *out = new(PortworxVolumeSource) - **out = **in - } - if in.ScaleIO != nil { - in, out := &in.ScaleIO, &out.ScaleIO - *out = new(ScaleIOVolumeSource) - if err := DeepCopy_v1_ScaleIOVolumeSource(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1_PersistentVolumeSpec(in 
interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeSpec) - out := out.(*PersistentVolumeSpec) - *out = *in - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if err := DeepCopy_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil { - return err - } - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - copy(*out, *in) - } - if in.ClaimRef != nil { - in, out := &in.ClaimRef, &out.ClaimRef - *out = new(ObjectReference) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_PersistentVolumeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeStatus) - out := out.(*PersistentVolumeStatus) - *out = *in - return nil - } -} - -func DeepCopy_v1_PhotonPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PhotonPersistentDiskVolumeSource) - out := out.(*PhotonPersistentDiskVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_Pod(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Pod) - out := out.(*Pod) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_PodSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_PodStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_PodAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodAffinity) - out := out.(*PodAffinity) - *out = *in - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(*in)) - for i := range *in { - if err := DeepCopy_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(*in)) - for i := range *in { - if err := DeepCopy_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_PodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodAffinityTerm) - out := out.(*PodAffinityTerm) - *out = *in - if in.LabelSelector != nil { - in, out := &in.LabelSelector, &out.LabelSelector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*meta_v1.LabelSelector) - } - } - if in.Namespaces != nil { - in, out := &in.Namespaces, &out.Namespaces - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_PodAntiAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodAntiAffinity) - out := out.(*PodAntiAffinity) - *out = *in - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(*in)) - for i := range *in { - 
if err := DeepCopy_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(*in)) - for i := range *in { - if err := DeepCopy_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_PodAttachOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodAttachOptions) - out := out.(*PodAttachOptions) - *out = *in - return nil - } -} - -func DeepCopy_v1_PodCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodCondition) - out := out.(*PodCondition) - *out = *in - out.LastProbeTime = in.LastProbeTime.DeepCopy() - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - return nil - } -} - -func DeepCopy_v1_PodExecOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodExecOptions) - out := out.(*PodExecOptions) - *out = *in - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_PodList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodList) - out := out.(*PodList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Pod, len(*in)) - for i := range *in { - if err := DeepCopy_v1_Pod(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_PodLogOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodLogOptions) - out := out.(*PodLogOptions) - *out = *in - if in.SinceSeconds != nil { - in, out := &in.SinceSeconds, &out.SinceSeconds - *out = new(int64) - **out = **in - } - if in.SinceTime != nil { - in, out := &in.SinceTime, &out.SinceTime - *out = new(meta_v1.Time) - **out = (*in).DeepCopy() - } - if in.TailLines != nil { - in, out := &in.TailLines, &out.TailLines - *out = new(int64) - **out = **in - } - if in.LimitBytes != nil { - in, out := &in.LimitBytes, &out.LimitBytes - *out = new(int64) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_PodPortForwardOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodPortForwardOptions) - out := out.(*PodPortForwardOptions) - *out = *in - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]int32, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_PodProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodProxyOptions) - out := out.(*PodProxyOptions) - *out = *in - return nil - } -} - -func DeepCopy_v1_PodSecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodSecurityContext) - out := out.(*PodSecurityContext) - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(SELinuxOptions) - **out = **in - } - if in.RunAsUser != nil { - in, out := &in.RunAsUser, &out.RunAsUser - *out = new(int64) - **out = **in - } - if in.RunAsNonRoot != nil { - in, out := &in.RunAsNonRoot, &out.RunAsNonRoot - *out = new(bool) - **out = **in - } - if in.SupplementalGroups != nil { - in, out := &in.SupplementalGroups, &out.SupplementalGroups - *out = make([]int64, len(*in)) - 
copy(*out, *in) - } - if in.FSGroup != nil { - in, out := &in.FSGroup, &out.FSGroup - *out = new(int64) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_PodSignature(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodSignature) - out := out.(*PodSignature) - *out = *in - if in.PodController != nil { - in, out := &in.PodController, &out.PodController - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*meta_v1.OwnerReference) - } - } - return nil - } -} - -func DeepCopy_v1_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodSpec) - out := out.(*PodSpec) - *out = *in - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]Volume, len(*in)) - for i := range *in { - if err := DeepCopy_v1_Volume(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.InitContainers != nil { - in, out := &in.InitContainers, &out.InitContainers - *out = make([]Container, len(*in)) - for i := range *in { - if err := DeepCopy_v1_Container(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]Container, len(*in)) - for i := range *in { - if err := DeepCopy_v1_Container(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.TerminationGracePeriodSeconds != nil { - in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds - *out = new(int64) - **out = **in - } - if in.ActiveDeadlineSeconds != nil { - in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = **in - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - if in.AutomountServiceAccountToken != nil { - in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - *out = new(bool) - **out = **in - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(PodSecurityContext) - if err := DeepCopy_v1_PodSecurityContext(*in, *out, c); err != nil { - return err - } - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.Affinity != nil { - in, out := &in.Affinity, &out.Affinity - *out = new(Affinity) - if err := DeepCopy_v1_Affinity(*in, *out, c); err != nil { - return err - } - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]Toleration, len(*in)) - for i := range *in { - if err := DeepCopy_v1_Toleration(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_PodStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodStatus) - out := out.(*PodStatus) - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]PodCondition, len(*in)) - for i := range *in { - if err := DeepCopy_v1_PodCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = new(meta_v1.Time) - **out = (*in).DeepCopy() - } - if in.InitContainerStatuses != nil { - in, out := &in.InitContainerStatuses, &out.InitContainerStatuses - *out = make([]ContainerStatus, len(*in)) - for i := range *in { - if err := 
DeepCopy_v1_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.ContainerStatuses != nil { - in, out := &in.ContainerStatuses, &out.ContainerStatuses - *out = make([]ContainerStatus, len(*in)) - for i := range *in { - if err := DeepCopy_v1_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_PodStatusResult(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodStatusResult) - out := out.(*PodStatusResult) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_PodStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_PodTemplate(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodTemplate) - out := out.(*PodTemplate) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_PodTemplateList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodTemplateList) - out := out.(*PodTemplateList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodTemplate, len(*in)) - for i := range *in { - if err := DeepCopy_v1_PodTemplate(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_PodTemplateSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodTemplateSpec) - out := out.(*PodTemplateSpec) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_PodSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_PortworxVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PortworxVolumeSource) - out := out.(*PortworxVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Preconditions) - out := out.(*Preconditions) - *out = *in - if in.UID != nil { - in, out := &in.UID, &out.UID - *out = new(types.UID) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_PreferAvoidPodsEntry(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PreferAvoidPodsEntry) - out := out.(*PreferAvoidPodsEntry) - *out = *in - if err := DeepCopy_v1_PodSignature(&in.PodSignature, &out.PodSignature, c); err != nil { - return err - } - out.EvictionTime = in.EvictionTime.DeepCopy() - return nil - } -} - -func DeepCopy_v1_PreferredSchedulingTerm(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PreferredSchedulingTerm) - out := out.(*PreferredSchedulingTerm) - *out = *in - if err := DeepCopy_v1_NodeSelectorTerm(&in.Preference, &out.Preference, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_Probe(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Probe) - out := out.(*Probe) - *out = *in - if err := DeepCopy_v1_Handler(&in.Handler, &out.Handler, c); err != nil { - return err - } - return nil - } -} - -func 
DeepCopy_v1_ProjectedVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ProjectedVolumeSource) - out := out.(*ProjectedVolumeSource) - *out = *in - if in.Sources != nil { - in, out := &in.Sources, &out.Sources - *out = make([]VolumeProjection, len(*in)) - for i := range *in { - if err := DeepCopy_v1_VolumeProjection(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_QuobyteVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*QuobyteVolumeSource) - out := out.(*QuobyteVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_RBDVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RBDVolumeSource) - out := out.(*RBDVolumeSource) - *out = *in - if in.CephMonitors != nil { - in, out := &in.CephMonitors, &out.CephMonitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_RangeAllocation(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RangeAllocation) - out := out.(*RangeAllocation) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_ReplicationController(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicationController) - out := out.(*ReplicationController) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_ReplicationControllerStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_ReplicationControllerCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicationControllerCondition) - out := out.(*ReplicationControllerCondition) - *out = *in - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - return nil - } -} - -func DeepCopy_v1_ReplicationControllerList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicationControllerList) - out := out.(*ReplicationControllerList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicationController, len(*in)) - for i := range *in { - if err := DeepCopy_v1_ReplicationController(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_ReplicationControllerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicationControllerSpec) - out := out.(*ReplicationControllerSpec) - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Template != nil { - in, out := &in.Template, &out.Template - 
*out = new(PodTemplateSpec) - if err := DeepCopy_v1_PodTemplateSpec(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1_ReplicationControllerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicationControllerStatus) - out := out.(*ReplicationControllerStatus) - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ReplicationControllerCondition, len(*in)) - for i := range *in { - if err := DeepCopy_v1_ReplicationControllerCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_ResourceFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceFieldSelector) - out := out.(*ResourceFieldSelector) - *out = *in - out.Divisor = in.Divisor.DeepCopy() - return nil - } -} - -func DeepCopy_v1_ResourceQuota(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceQuota) - out := out.(*ResourceQuota) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_ResourceQuotaStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_ResourceQuotaList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceQuotaList) - out := out.(*ResourceQuotaList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceQuota, len(*in)) - for i := range *in { - if err := DeepCopy_v1_ResourceQuota(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_ResourceQuotaSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceQuotaSpec) - out := out.(*ResourceQuotaSpec) - *out = *in - if in.Hard != nil { - in, out := &in.Hard, &out.Hard - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Scopes != nil { - in, out := &in.Scopes, &out.Scopes - *out = make([]ResourceQuotaScope, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_ResourceQuotaStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceQuotaStatus) - out := out.(*ResourceQuotaStatus) - *out = *in - if in.Hard != nil { - in, out := &in.Hard, &out.Hard - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Used != nil { - in, out := &in.Used, &out.Used - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return nil - } -} - -func DeepCopy_v1_ResourceRequirements(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceRequirements) - out := out.(*ResourceRequirements) - *out = *in - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Requests != nil { - in, out := &in.Requests, &out.Requests - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return nil - } -} - -func DeepCopy_v1_SELinuxOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SELinuxOptions) - out := out.(*SELinuxOptions) 
- *out = *in - return nil - } -} - -func DeepCopy_v1_ScaleIOVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ScaleIOVolumeSource) - out := out.(*ScaleIOVolumeSource) - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_Secret(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Secret) - out := out.(*Secret) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make(map[string][]byte) - for key, val := range *in { - if newVal, err := c.DeepCopy(&val); err != nil { - return err - } else { - (*out)[key] = *newVal.(*[]byte) - } - } - } - if in.StringData != nil { - in, out := &in.StringData, &out.StringData - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - return nil - } -} - -func DeepCopy_v1_SecretEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SecretEnvSource) - out := out.(*SecretEnvSource) - *out = *in - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_SecretKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SecretKeySelector) - out := out.(*SecretKeySelector) - *out = *in - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_SecretList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SecretList) - out := out.(*SecretList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Secret, len(*in)) - for i := range *in { - if err := DeepCopy_v1_Secret(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_SecretProjection(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SecretProjection) - out := out.(*SecretProjection) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_SecretVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SecretVolumeSource) - out := out.(*SecretVolumeSource) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_SecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SecurityContext) - out := out.(*SecurityContext) - *out = *in - if in.Capabilities != nil { - in, out := &in.Capabilities, &out.Capabilities - *out = new(Capabilities) - if err := 
DeepCopy_v1_Capabilities(*in, *out, c); err != nil { - return err - } - } - if in.Privileged != nil { - in, out := &in.Privileged, &out.Privileged - *out = new(bool) - **out = **in - } - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(SELinuxOptions) - **out = **in - } - if in.RunAsUser != nil { - in, out := &in.RunAsUser, &out.RunAsUser - *out = new(int64) - **out = **in - } - if in.RunAsNonRoot != nil { - in, out := &in.RunAsNonRoot, &out.RunAsNonRoot - *out = new(bool) - **out = **in - } - if in.ReadOnlyRootFilesystem != nil { - in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_SerializedReference(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SerializedReference) - out := out.(*SerializedReference) - *out = *in - return nil - } -} - -func DeepCopy_v1_Service(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Service) - out := out.(*Service) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_ServiceSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_ServiceStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_ServiceAccount(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServiceAccount) - out := out.(*ServiceAccount) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]ObjectReference, len(*in)) - copy(*out, *in) - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.AutomountServiceAccountToken != nil { - in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_ServiceAccountList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServiceAccountList) - out := out.(*ServiceAccountList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ServiceAccount, len(*in)) - for i := range *in { - if err := DeepCopy_v1_ServiceAccount(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_ServiceList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServiceList) - out := out.(*ServiceList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Service, len(*in)) - for i := range *in { - if err := DeepCopy_v1_Service(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_ServicePort(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServicePort) - out := out.(*ServicePort) - *out = *in - return nil - } -} - -func DeepCopy_v1_ServiceProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServiceProxyOptions) - out := out.(*ServiceProxyOptions) - *out = *in - return nil - } -} - -func DeepCopy_v1_ServiceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := 
in.(*ServiceSpec) - out := out.(*ServiceSpec) - *out = *in - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ServicePort, len(*in)) - copy(*out, *in) - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ExternalIPs != nil { - in, out := &in.ExternalIPs, &out.ExternalIPs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DeprecatedPublicIPs != nil { - in, out := &in.DeprecatedPublicIPs, &out.DeprecatedPublicIPs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.LoadBalancerSourceRanges != nil { - in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1_ServiceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServiceStatus) - out := out.(*ServiceStatus) - *out = *in - if err := DeepCopy_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_Sysctl(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Sysctl) - out := out.(*Sysctl) - *out = *in - return nil - } -} - -func DeepCopy_v1_TCPSocketAction(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TCPSocketAction) - out := out.(*TCPSocketAction) - *out = *in - return nil - } -} - -func DeepCopy_v1_Taint(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Taint) - out := out.(*Taint) - *out = *in - out.TimeAdded = in.TimeAdded.DeepCopy() - return nil - } -} - -func DeepCopy_v1_Toleration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Toleration) - out := out.(*Toleration) - *out = *in - if in.TolerationSeconds != nil { - in, out := &in.TolerationSeconds, &out.TolerationSeconds - *out = new(int64) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_Volume(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Volume) - out := out.(*Volume) - *out = *in - if err := DeepCopy_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_VolumeMount(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*VolumeMount) - out := out.(*VolumeMount) - *out = *in - return nil - } -} - -func DeepCopy_v1_VolumeProjection(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*VolumeProjection) - out := out.(*VolumeProjection) - *out = *in - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - *out = new(SecretProjection) - if err := DeepCopy_v1_SecretProjection(*in, *out, c); err != nil { - return err - } - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - *out = new(DownwardAPIProjection) - if err := DeepCopy_v1_DownwardAPIProjection(*in, *out, c); err != nil { - return err - } - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - *out = new(ConfigMapProjection) - if err := DeepCopy_v1_ConfigMapProjection(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*VolumeSource) - out := out.(*VolumeSource) - *out = *in - if in.HostPath != nil { - in, out := &in.HostPath, &out.HostPath - *out = new(HostPathVolumeSource) - **out = **in 
- } - if in.EmptyDir != nil { - in, out := &in.EmptyDir, &out.EmptyDir - *out = new(EmptyDirVolumeSource) - **out = **in - } - if in.GCEPersistentDisk != nil { - in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - *out = new(GCEPersistentDiskVolumeSource) - **out = **in - } - if in.AWSElasticBlockStore != nil { - in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - *out = new(AWSElasticBlockStoreVolumeSource) - **out = **in - } - if in.GitRepo != nil { - in, out := &in.GitRepo, &out.GitRepo - *out = new(GitRepoVolumeSource) - **out = **in - } - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - *out = new(SecretVolumeSource) - if err := DeepCopy_v1_SecretVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.NFS != nil { - in, out := &in.NFS, &out.NFS - *out = new(NFSVolumeSource) - **out = **in - } - if in.ISCSI != nil { - in, out := &in.ISCSI, &out.ISCSI - *out = new(ISCSIVolumeSource) - if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.Glusterfs != nil { - in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - **out = **in - } - if in.PersistentVolumeClaim != nil { - in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim - *out = new(PersistentVolumeClaimVolumeSource) - **out = **in - } - if in.RBD != nil { - in, out := &in.RBD, &out.RBD - *out = new(RBDVolumeSource) - if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.FlexVolume != nil { - in, out := &in.FlexVolume, &out.FlexVolume - *out = new(FlexVolumeSource) - if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.Cinder != nil { - in, out := &in.Cinder, &out.Cinder - *out = new(CinderVolumeSource) - **out = **in - } - if in.CephFS != nil { - in, out := &in.CephFS, &out.CephFS - *out = new(CephFSVolumeSource) - if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.Flocker != nil { - in, out := &in.Flocker, &out.Flocker - *out = new(FlockerVolumeSource) - **out = **in - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - *out = new(DownwardAPIVolumeSource) - if err := DeepCopy_v1_DownwardAPIVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.FC != nil { - in, out := &in.FC, &out.FC - *out = new(FCVolumeSource) - if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.AzureFile != nil { - in, out := &in.AzureFile, &out.AzureFile - *out = new(AzureFileVolumeSource) - **out = **in - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - *out = new(ConfigMapVolumeSource) - if err := DeepCopy_v1_ConfigMapVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.VsphereVolume != nil { - in, out := &in.VsphereVolume, &out.VsphereVolume - *out = new(VsphereVirtualDiskVolumeSource) - **out = **in - } - if in.Quobyte != nil { - in, out := &in.Quobyte, &out.Quobyte - *out = new(QuobyteVolumeSource) - **out = **in - } - if in.AzureDisk != nil { - in, out := &in.AzureDisk, &out.AzureDisk - *out = new(AzureDiskVolumeSource) - if err := DeepCopy_v1_AzureDiskVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.PhotonPersistentDisk != nil { - in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk - *out = new(PhotonPersistentDiskVolumeSource) - **out = **in - } - if in.Projected != nil { - in, out := &in.Projected, &out.Projected - *out = 
new(ProjectedVolumeSource) - if err := DeepCopy_v1_ProjectedVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.PortworxVolume != nil { - in, out := &in.PortworxVolume, &out.PortworxVolume - *out = new(PortworxVolumeSource) - **out = **in - } - if in.ScaleIO != nil { - in, out := &in.ScaleIO, &out.ScaleIO - *out = new(ScaleIOVolumeSource) - if err := DeepCopy_v1_ScaleIOVolumeSource(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1_VsphereVirtualDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*VsphereVirtualDiskVolumeSource) - out := out.(*VsphereVirtualDiskVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_v1_WeightedPodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*WeightedPodAffinityTerm) - out := out.(*WeightedPodAffinityTerm) - *out = *in - if err := DeepCopy_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { - return err - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.defaults.go deleted file mode 100644 index 121b39185..000000000 --- a/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.defaults.go +++ /dev/null @@ -1,631 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by defaulter-gen. Do not edit it manually! - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. 
-func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&ConfigMap{}, func(obj interface{}) { SetObjectDefaults_ConfigMap(obj.(*ConfigMap)) }) - scheme.AddTypeDefaultingFunc(&ConfigMapList{}, func(obj interface{}) { SetObjectDefaults_ConfigMapList(obj.(*ConfigMapList)) }) - scheme.AddTypeDefaultingFunc(&Endpoints{}, func(obj interface{}) { SetObjectDefaults_Endpoints(obj.(*Endpoints)) }) - scheme.AddTypeDefaultingFunc(&EndpointsList{}, func(obj interface{}) { SetObjectDefaults_EndpointsList(obj.(*EndpointsList)) }) - scheme.AddTypeDefaultingFunc(&LimitRange{}, func(obj interface{}) { SetObjectDefaults_LimitRange(obj.(*LimitRange)) }) - scheme.AddTypeDefaultingFunc(&LimitRangeList{}, func(obj interface{}) { SetObjectDefaults_LimitRangeList(obj.(*LimitRangeList)) }) - scheme.AddTypeDefaultingFunc(&Namespace{}, func(obj interface{}) { SetObjectDefaults_Namespace(obj.(*Namespace)) }) - scheme.AddTypeDefaultingFunc(&NamespaceList{}, func(obj interface{}) { SetObjectDefaults_NamespaceList(obj.(*NamespaceList)) }) - scheme.AddTypeDefaultingFunc(&Node{}, func(obj interface{}) { SetObjectDefaults_Node(obj.(*Node)) }) - scheme.AddTypeDefaultingFunc(&NodeList{}, func(obj interface{}) { SetObjectDefaults_NodeList(obj.(*NodeList)) }) - scheme.AddTypeDefaultingFunc(&PersistentVolume{}, func(obj interface{}) { SetObjectDefaults_PersistentVolume(obj.(*PersistentVolume)) }) - scheme.AddTypeDefaultingFunc(&PersistentVolumeClaim{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeClaim(obj.(*PersistentVolumeClaim)) }) - scheme.AddTypeDefaultingFunc(&PersistentVolumeClaimList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeClaimList(obj.(*PersistentVolumeClaimList)) }) - scheme.AddTypeDefaultingFunc(&PersistentVolumeList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeList(obj.(*PersistentVolumeList)) }) - scheme.AddTypeDefaultingFunc(&Pod{}, func(obj interface{}) { SetObjectDefaults_Pod(obj.(*Pod)) }) - scheme.AddTypeDefaultingFunc(&PodAttachOptions{}, func(obj interface{}) { SetObjectDefaults_PodAttachOptions(obj.(*PodAttachOptions)) }) - scheme.AddTypeDefaultingFunc(&PodExecOptions{}, func(obj interface{}) { SetObjectDefaults_PodExecOptions(obj.(*PodExecOptions)) }) - scheme.AddTypeDefaultingFunc(&PodList{}, func(obj interface{}) { SetObjectDefaults_PodList(obj.(*PodList)) }) - scheme.AddTypeDefaultingFunc(&PodTemplate{}, func(obj interface{}) { SetObjectDefaults_PodTemplate(obj.(*PodTemplate)) }) - scheme.AddTypeDefaultingFunc(&PodTemplateList{}, func(obj interface{}) { SetObjectDefaults_PodTemplateList(obj.(*PodTemplateList)) }) - scheme.AddTypeDefaultingFunc(&ReplicationController{}, func(obj interface{}) { SetObjectDefaults_ReplicationController(obj.(*ReplicationController)) }) - scheme.AddTypeDefaultingFunc(&ReplicationControllerList{}, func(obj interface{}) { SetObjectDefaults_ReplicationControllerList(obj.(*ReplicationControllerList)) }) - scheme.AddTypeDefaultingFunc(&ResourceQuota{}, func(obj interface{}) { SetObjectDefaults_ResourceQuota(obj.(*ResourceQuota)) }) - scheme.AddTypeDefaultingFunc(&ResourceQuotaList{}, func(obj interface{}) { SetObjectDefaults_ResourceQuotaList(obj.(*ResourceQuotaList)) }) - scheme.AddTypeDefaultingFunc(&Secret{}, func(obj interface{}) { SetObjectDefaults_Secret(obj.(*Secret)) }) - scheme.AddTypeDefaultingFunc(&SecretList{}, func(obj interface{}) { SetObjectDefaults_SecretList(obj.(*SecretList)) }) - scheme.AddTypeDefaultingFunc(&Service{}, func(obj interface{}) { 
SetObjectDefaults_Service(obj.(*Service)) }) - scheme.AddTypeDefaultingFunc(&ServiceList{}, func(obj interface{}) { SetObjectDefaults_ServiceList(obj.(*ServiceList)) }) - return nil -} - -func SetObjectDefaults_ConfigMap(in *ConfigMap) { - SetDefaults_ConfigMap(in) -} - -func SetObjectDefaults_ConfigMapList(in *ConfigMapList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_ConfigMap(a) - } -} - -func SetObjectDefaults_Endpoints(in *Endpoints) { - SetDefaults_Endpoints(in) -} - -func SetObjectDefaults_EndpointsList(in *EndpointsList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Endpoints(a) - } -} - -func SetObjectDefaults_LimitRange(in *LimitRange) { - for i := range in.Spec.Limits { - a := &in.Spec.Limits[i] - SetDefaults_LimitRangeItem(a) - SetDefaults_ResourceList(&a.Max) - SetDefaults_ResourceList(&a.Min) - SetDefaults_ResourceList(&a.Default) - SetDefaults_ResourceList(&a.DefaultRequest) - SetDefaults_ResourceList(&a.MaxLimitRequestRatio) - } -} - -func SetObjectDefaults_LimitRangeList(in *LimitRangeList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_LimitRange(a) - } -} - -func SetObjectDefaults_Namespace(in *Namespace) { - SetDefaults_NamespaceStatus(&in.Status) -} - -func SetObjectDefaults_NamespaceList(in *NamespaceList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Namespace(a) - } -} - -func SetObjectDefaults_Node(in *Node) { - SetDefaults_Node(in) - SetDefaults_NodeStatus(&in.Status) - SetDefaults_ResourceList(&in.Status.Capacity) - SetDefaults_ResourceList(&in.Status.Allocatable) -} - -func SetObjectDefaults_NodeList(in *NodeList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Node(a) - } -} - -func SetObjectDefaults_PersistentVolume(in *PersistentVolume) { - SetDefaults_PersistentVolume(in) - SetDefaults_ResourceList(&in.Spec.Capacity) - if in.Spec.PersistentVolumeSource.RBD != nil { - SetDefaults_RBDVolumeSource(in.Spec.PersistentVolumeSource.RBD) - } - if in.Spec.PersistentVolumeSource.ISCSI != nil { - SetDefaults_ISCSIVolumeSource(in.Spec.PersistentVolumeSource.ISCSI) - } - if in.Spec.PersistentVolumeSource.AzureDisk != nil { - SetDefaults_AzureDiskVolumeSource(in.Spec.PersistentVolumeSource.AzureDisk) - } - if in.Spec.PersistentVolumeSource.ScaleIO != nil { - SetDefaults_ScaleIOVolumeSource(in.Spec.PersistentVolumeSource.ScaleIO) - } -} - -func SetObjectDefaults_PersistentVolumeClaim(in *PersistentVolumeClaim) { - SetDefaults_PersistentVolumeClaim(in) - SetDefaults_ResourceList(&in.Spec.Resources.Limits) - SetDefaults_ResourceList(&in.Spec.Resources.Requests) - SetDefaults_ResourceList(&in.Status.Capacity) -} - -func SetObjectDefaults_PersistentVolumeClaimList(in *PersistentVolumeClaimList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_PersistentVolumeClaim(a) - } -} - -func SetObjectDefaults_PersistentVolumeList(in *PersistentVolumeList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_PersistentVolume(a) - } -} - -func SetObjectDefaults_Pod(in *Pod) { - SetDefaults_Pod(in) - SetDefaults_PodSpec(&in.Spec) - for i := range in.Spec.Volumes { - a := &in.Spec.Volumes[i] - SetDefaults_Volume(a) - if a.VolumeSource.Secret != nil { - SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - 
SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - if a.VolumeSource.Projected != nil { - SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) - for j := range a.VolumeSource.Projected.Sources { - b := &a.VolumeSource.Projected.Sources[j] - if b.DownwardAPI != nil { - for k := range b.DownwardAPI.Items { - c := &b.DownwardAPI.Items[k] - if c.FieldRef != nil { - SetDefaults_ObjectFieldSelector(c.FieldRef) - } - } - } - } - } - if a.VolumeSource.ScaleIO != nil { - SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) - } - } - for i := range in.Spec.InitContainers { - a := &in.Spec.InitContainers[i] - SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - SetDefaults_ResourceList(&a.Resources.Limits) - SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Spec.Containers { - a := &in.Spec.Containers[i] - SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - SetDefaults_ResourceList(&a.Resources.Limits) - SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } -} - -func SetObjectDefaults_PodAttachOptions(in *PodAttachOptions) { - SetDefaults_PodAttachOptions(in) -} - -func SetObjectDefaults_PodExecOptions(in *PodExecOptions) { - SetDefaults_PodExecOptions(in) -} - -func SetObjectDefaults_PodList(in *PodList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Pod(a) - } 
-} - -func SetObjectDefaults_PodTemplate(in *PodTemplate) { - SetDefaults_PodSpec(&in.Template.Spec) - for i := range in.Template.Spec.Volumes { - a := &in.Template.Spec.Volumes[i] - SetDefaults_Volume(a) - if a.VolumeSource.Secret != nil { - SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - if a.VolumeSource.Projected != nil { - SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) - for j := range a.VolumeSource.Projected.Sources { - b := &a.VolumeSource.Projected.Sources[j] - if b.DownwardAPI != nil { - for k := range b.DownwardAPI.Items { - c := &b.DownwardAPI.Items[k] - if c.FieldRef != nil { - SetDefaults_ObjectFieldSelector(c.FieldRef) - } - } - } - } - } - if a.VolumeSource.ScaleIO != nil { - SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) - } - } - for i := range in.Template.Spec.InitContainers { - a := &in.Template.Spec.InitContainers[i] - SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - SetDefaults_ResourceList(&a.Resources.Limits) - SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Template.Spec.Containers { - a := &in.Template.Spec.Containers[i] - SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - SetDefaults_ResourceList(&a.Resources.Limits) - SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet 
!= nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } -} - -func SetObjectDefaults_PodTemplateList(in *PodTemplateList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_PodTemplate(a) - } -} - -func SetObjectDefaults_ReplicationController(in *ReplicationController) { - SetDefaults_ReplicationController(in) - if in.Spec.Template != nil { - SetDefaults_PodSpec(&in.Spec.Template.Spec) - for i := range in.Spec.Template.Spec.Volumes { - a := &in.Spec.Template.Spec.Volumes[i] - SetDefaults_Volume(a) - if a.VolumeSource.Secret != nil { - SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - if a.VolumeSource.Projected != nil { - SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) - for j := range a.VolumeSource.Projected.Sources { - b := &a.VolumeSource.Projected.Sources[j] - if b.DownwardAPI != nil { - for k := range b.DownwardAPI.Items { - c := &b.DownwardAPI.Items[k] - if c.FieldRef != nil { - SetDefaults_ObjectFieldSelector(c.FieldRef) - } - } - } - } - } - if a.VolumeSource.ScaleIO != nil { - SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) - } - } - for i := range in.Spec.Template.Spec.InitContainers { - a := &in.Spec.Template.Spec.InitContainers[i] - SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - SetDefaults_ResourceList(&a.Resources.Limits) - SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Spec.Template.Spec.Containers { - a := &in.Spec.Template.Spec.Containers[i] - SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - SetDefaults_ResourceList(&a.Resources.Limits) - 
SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - } -} - -func SetObjectDefaults_ReplicationControllerList(in *ReplicationControllerList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_ReplicationController(a) - } -} - -func SetObjectDefaults_ResourceQuota(in *ResourceQuota) { - SetDefaults_ResourceList(&in.Spec.Hard) - SetDefaults_ResourceList(&in.Status.Hard) - SetDefaults_ResourceList(&in.Status.Used) -} - -func SetObjectDefaults_ResourceQuotaList(in *ResourceQuotaList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_ResourceQuota(a) - } -} - -func SetObjectDefaults_Secret(in *Secret) { - SetDefaults_Secret(in) -} - -func SetObjectDefaults_SecretList(in *SecretList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Secret(a) - } -} - -func SetObjectDefaults_Service(in *Service) { - SetDefaults_ServiceSpec(&in.Spec) -} - -func SetObjectDefaults_ServiceList(in *ServiceList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Service(a) - } -} diff --git a/vendor/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go deleted file mode 100644 index c018bcc4e..000000000 --- a/vendor/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go +++ /dev/null @@ -1,3527 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package api - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - fields "k8s.io/apimachinery/pkg/fields" - labels "k8s.io/apimachinery/pkg/labels" - runtime "k8s.io/apimachinery/pkg/runtime" - types "k8s.io/apimachinery/pkg/types" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AWSElasticBlockStoreVolumeSource, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Affinity, InType: reflect.TypeOf(&Affinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AttachedVolume, InType: reflect.TypeOf(&AttachedVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AvoidPods, InType: reflect.TypeOf(&AvoidPods{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AzureDiskVolumeSource, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AzureFileVolumeSource, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Binding, InType: reflect.TypeOf(&Binding{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Capabilities, InType: reflect.TypeOf(&Capabilities{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_CephFSVolumeSource, InType: reflect.TypeOf(&CephFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_CinderVolumeSource, InType: reflect.TypeOf(&CinderVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentCondition, InType: reflect.TypeOf(&ComponentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentStatus, InType: reflect.TypeOf(&ComponentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentStatusList, InType: reflect.TypeOf(&ComponentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMap, InType: reflect.TypeOf(&ConfigMap{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapEnvSource, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapKeySelector, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapList, InType: reflect.TypeOf(&ConfigMapList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapProjection, InType: reflect.TypeOf(&ConfigMapProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapVolumeSource, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Container, InType: reflect.TypeOf(&Container{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerImage, InType: reflect.TypeOf(&ContainerImage{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerPort, InType: reflect.TypeOf(&ContainerPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerState, InType: reflect.TypeOf(&ContainerState{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStateRunning, InType: reflect.TypeOf(&ContainerStateRunning{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStateTerminated, InType: reflect.TypeOf(&ContainerStateTerminated{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStateWaiting, InType: reflect.TypeOf(&ContainerStateWaiting{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStatus, InType: reflect.TypeOf(&ContainerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConversionError, InType: reflect.TypeOf(&ConversionError{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DaemonEndpoint, InType: reflect.TypeOf(&DaemonEndpoint{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeleteOptions, InType: 
reflect.TypeOf(&DeleteOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIProjection, InType: reflect.TypeOf(&DownwardAPIProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIVolumeFile, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIVolumeSource, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EmptyDirVolumeSource, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointAddress, InType: reflect.TypeOf(&EndpointAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointPort, InType: reflect.TypeOf(&EndpointPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointSubset, InType: reflect.TypeOf(&EndpointSubset{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Endpoints, InType: reflect.TypeOf(&Endpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointsList, InType: reflect.TypeOf(&EndpointsList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvFromSource, InType: reflect.TypeOf(&EnvFromSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvVar, InType: reflect.TypeOf(&EnvVar{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvVarSource, InType: reflect.TypeOf(&EnvVarSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Event, InType: reflect.TypeOf(&Event{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EventList, InType: reflect.TypeOf(&EventList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EventSource, InType: reflect.TypeOf(&EventSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ExecAction, InType: reflect.TypeOf(&ExecAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FCVolumeSource, InType: reflect.TypeOf(&FCVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FlexVolumeSource, InType: reflect.TypeOf(&FlexVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FlockerVolumeSource, InType: reflect.TypeOf(&FlockerVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GCEPersistentDiskVolumeSource, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GitRepoVolumeSource, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GlusterfsVolumeSource, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HTTPGetAction, InType: reflect.TypeOf(&HTTPGetAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HTTPHeader, InType: reflect.TypeOf(&HTTPHeader{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Handler, InType: reflect.TypeOf(&Handler{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HostPathVolumeSource, InType: reflect.TypeOf(&HostPathVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ISCSIVolumeSource, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_KeyToPath, InType: reflect.TypeOf(&KeyToPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Lifecycle, InType: reflect.TypeOf(&Lifecycle{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRange, InType: reflect.TypeOf(&LimitRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRangeItem, InType: reflect.TypeOf(&LimitRangeItem{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRangeList, InType: reflect.TypeOf(&LimitRangeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRangeSpec, InType: reflect.TypeOf(&LimitRangeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_List, InType: reflect.TypeOf(&List{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ListOptions, InType: reflect.TypeOf(&ListOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LoadBalancerIngress, InType: reflect.TypeOf(&LoadBalancerIngress{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LoadBalancerStatus, InType: reflect.TypeOf(&LoadBalancerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LocalObjectReference, InType: reflect.TypeOf(&LocalObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NFSVolumeSource, InType: reflect.TypeOf(&NFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Namespace, InType: reflect.TypeOf(&Namespace{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NamespaceList, InType: reflect.TypeOf(&NamespaceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NamespaceSpec, InType: reflect.TypeOf(&NamespaceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NamespaceStatus, InType: reflect.TypeOf(&NamespaceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Node, InType: reflect.TypeOf(&Node{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeAddress, InType: reflect.TypeOf(&NodeAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeAffinity, InType: reflect.TypeOf(&NodeAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeCondition, InType: reflect.TypeOf(&NodeCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeDaemonEndpoints, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeList, InType: reflect.TypeOf(&NodeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeProxyOptions, InType: reflect.TypeOf(&NodeProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeResources, InType: reflect.TypeOf(&NodeResources{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSelector, InType: reflect.TypeOf(&NodeSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSelectorRequirement, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSelectorTerm, InType: reflect.TypeOf(&NodeSelectorTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSpec, InType: reflect.TypeOf(&NodeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeStatus, InType: reflect.TypeOf(&NodeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSystemInfo, InType: reflect.TypeOf(&NodeSystemInfo{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectFieldSelector, InType: reflect.TypeOf(&ObjectFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectReference, InType: reflect.TypeOf(&ObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolume, InType: reflect.TypeOf(&PersistentVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaim, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimList, InType: 
reflect.TypeOf(&PersistentVolumeClaimList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimSpec, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimStatus, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimVolumeSource, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeList, InType: reflect.TypeOf(&PersistentVolumeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeSource, InType: reflect.TypeOf(&PersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeSpec, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeStatus, InType: reflect.TypeOf(&PersistentVolumeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PhotonPersistentDiskVolumeSource, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Pod, InType: reflect.TypeOf(&Pod{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAffinity, InType: reflect.TypeOf(&PodAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAffinityTerm, InType: reflect.TypeOf(&PodAffinityTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAntiAffinity, InType: reflect.TypeOf(&PodAntiAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAttachOptions, InType: reflect.TypeOf(&PodAttachOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodCondition, InType: reflect.TypeOf(&PodCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodExecOptions, InType: reflect.TypeOf(&PodExecOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodList, InType: reflect.TypeOf(&PodList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodLogOptions, InType: reflect.TypeOf(&PodLogOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodPortForwardOptions, InType: reflect.TypeOf(&PodPortForwardOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodProxyOptions, InType: reflect.TypeOf(&PodProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSecurityContext, InType: reflect.TypeOf(&PodSecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSignature, InType: reflect.TypeOf(&PodSignature{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSpec, InType: reflect.TypeOf(&PodSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodStatus, InType: reflect.TypeOf(&PodStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodStatusResult, InType: reflect.TypeOf(&PodStatusResult{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplate, InType: reflect.TypeOf(&PodTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplateList, InType: reflect.TypeOf(&PodTemplateList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplateSpec, InType: reflect.TypeOf(&PodTemplateSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PortworxVolumeSource, InType: reflect.TypeOf(&PortworxVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Preconditions, InType: reflect.TypeOf(&Preconditions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PreferAvoidPodsEntry, InType: 
reflect.TypeOf(&PreferAvoidPodsEntry{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PreferredSchedulingTerm, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Probe, InType: reflect.TypeOf(&Probe{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ProjectedVolumeSource, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_QuobyteVolumeSource, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_RBDVolumeSource, InType: reflect.TypeOf(&RBDVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_RangeAllocation, InType: reflect.TypeOf(&RangeAllocation{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationController, InType: reflect.TypeOf(&ReplicationController{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerCondition, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerList, InType: reflect.TypeOf(&ReplicationControllerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerSpec, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerStatus, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceFieldSelector, InType: reflect.TypeOf(&ResourceFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuota, InType: reflect.TypeOf(&ResourceQuota{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaList, InType: reflect.TypeOf(&ResourceQuotaList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaSpec, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaStatus, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceRequirements, InType: reflect.TypeOf(&ResourceRequirements{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SELinuxOptions, InType: reflect.TypeOf(&SELinuxOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ScaleIOVolumeSource, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Secret, InType: reflect.TypeOf(&Secret{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretEnvSource, InType: reflect.TypeOf(&SecretEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretKeySelector, InType: reflect.TypeOf(&SecretKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretList, InType: reflect.TypeOf(&SecretList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretProjection, InType: reflect.TypeOf(&SecretProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretVolumeSource, InType: reflect.TypeOf(&SecretVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecurityContext, InType: reflect.TypeOf(&SecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SerializedReference, InType: reflect.TypeOf(&SerializedReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Service, InType: reflect.TypeOf(&Service{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceAccount, InType: reflect.TypeOf(&ServiceAccount{})}, - conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_api_ServiceAccountList, InType: reflect.TypeOf(&ServiceAccountList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceList, InType: reflect.TypeOf(&ServiceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServicePort, InType: reflect.TypeOf(&ServicePort{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceProxyOptions, InType: reflect.TypeOf(&ServiceProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceSpec, InType: reflect.TypeOf(&ServiceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceStatus, InType: reflect.TypeOf(&ServiceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Sysctl, InType: reflect.TypeOf(&Sysctl{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_TCPSocketAction, InType: reflect.TypeOf(&TCPSocketAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Taint, InType: reflect.TypeOf(&Taint{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Toleration, InType: reflect.TypeOf(&Toleration{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Volume, InType: reflect.TypeOf(&Volume{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeMount, InType: reflect.TypeOf(&VolumeMount{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeProjection, InType: reflect.TypeOf(&VolumeProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeSource, InType: reflect.TypeOf(&VolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VsphereVirtualDiskVolumeSource, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_WeightedPodAffinityTerm, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, - ) -} - -func DeepCopy_api_AWSElasticBlockStoreVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*AWSElasticBlockStoreVolumeSource) - out := out.(*AWSElasticBlockStoreVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_api_Affinity(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Affinity) - out := out.(*Affinity) - *out = *in - if in.NodeAffinity != nil { - in, out := &in.NodeAffinity, &out.NodeAffinity - *out = new(NodeAffinity) - if err := DeepCopy_api_NodeAffinity(*in, *out, c); err != nil { - return err - } - } - if in.PodAffinity != nil { - in, out := &in.PodAffinity, &out.PodAffinity - *out = new(PodAffinity) - if err := DeepCopy_api_PodAffinity(*in, *out, c); err != nil { - return err - } - } - if in.PodAntiAffinity != nil { - in, out := &in.PodAntiAffinity, &out.PodAntiAffinity - *out = new(PodAntiAffinity) - if err := DeepCopy_api_PodAntiAffinity(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_api_AttachedVolume(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*AttachedVolume) - out := out.(*AttachedVolume) - *out = *in - return nil - } -} - -func DeepCopy_api_AvoidPods(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*AvoidPods) - out := out.(*AvoidPods) - *out = *in - if in.PreferAvoidPods != nil { - in, out := &in.PreferAvoidPods, &out.PreferAvoidPods - *out = make([]PreferAvoidPodsEntry, len(*in)) - for i := range *in { - if err := DeepCopy_api_PreferAvoidPodsEntry(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_AzureDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := 
in.(*AzureDiskVolumeSource) - out := out.(*AzureDiskVolumeSource) - *out = *in - if in.CachingMode != nil { - in, out := &in.CachingMode, &out.CachingMode - *out = new(AzureDataDiskCachingMode) - **out = **in - } - if in.FSType != nil { - in, out := &in.FSType, &out.FSType - *out = new(string) - **out = **in - } - if in.ReadOnly != nil { - in, out := &in.ReadOnly, &out.ReadOnly - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_api_AzureFileVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*AzureFileVolumeSource) - out := out.(*AzureFileVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_api_Binding(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Binding) - out := out.(*Binding) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - return nil - } -} - -func DeepCopy_api_Capabilities(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Capabilities) - out := out.(*Capabilities) - *out = *in - if in.Add != nil { - in, out := &in.Add, &out.Add - *out = make([]Capability, len(*in)) - copy(*out, *in) - } - if in.Drop != nil { - in, out := &in.Drop, &out.Drop - *out = make([]Capability, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_CephFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CephFSVolumeSource) - out := out.(*CephFSVolumeSource) - *out = *in - if in.Monitors != nil { - in, out := &in.Monitors, &out.Monitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return nil - } -} - -func DeepCopy_api_CinderVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CinderVolumeSource) - out := out.(*CinderVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_api_ComponentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ComponentCondition) - out := out.(*ComponentCondition) - *out = *in - return nil - } -} - -func DeepCopy_api_ComponentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ComponentStatus) - out := out.(*ComponentStatus) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ComponentCondition, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_ComponentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ComponentStatusList) - out := out.(*ComponentStatusList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ComponentStatus, len(*in)) - for i := range *in { - if err := DeepCopy_api_ComponentStatus(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_ConfigMap(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ConfigMap) - out := out.(*ConfigMap) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make(map[string]string) - for 
key, val := range *in { - (*out)[key] = val - } - } - return nil - } -} - -func DeepCopy_api_ConfigMapEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ConfigMapEnvSource) - out := out.(*ConfigMapEnvSource) - *out = *in - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_api_ConfigMapKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ConfigMapKeySelector) - out := out.(*ConfigMapKeySelector) - *out = *in - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_api_ConfigMapList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ConfigMapList) - out := out.(*ConfigMapList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ConfigMap, len(*in)) - for i := range *in { - if err := DeepCopy_api_ConfigMap(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_ConfigMapProjection(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ConfigMapProjection) - out := out.(*ConfigMapProjection) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_api_ConfigMapVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ConfigMapVolumeSource) - out := out.(*ConfigMapVolumeSource) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_api_Container(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Container) - out := out.(*Container) - *out = *in - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ContainerPort, len(*in)) - copy(*out, *in) - } - if in.EnvFrom != nil { - in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]EnvFromSource, len(*in)) - for i := range *in { - if err := DeepCopy_api_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - for i := range *in { - if err := DeepCopy_api_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if err := DeepCopy_api_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { - return err - } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]VolumeMount, len(*in)) - copy(*out, *in) - } - if in.LivenessProbe != nil { - in, out := 
&in.LivenessProbe, &out.LivenessProbe - *out = new(Probe) - if err := DeepCopy_api_Probe(*in, *out, c); err != nil { - return err - } - } - if in.ReadinessProbe != nil { - in, out := &in.ReadinessProbe, &out.ReadinessProbe - *out = new(Probe) - if err := DeepCopy_api_Probe(*in, *out, c); err != nil { - return err - } - } - if in.Lifecycle != nil { - in, out := &in.Lifecycle, &out.Lifecycle - *out = new(Lifecycle) - if err := DeepCopy_api_Lifecycle(*in, *out, c); err != nil { - return err - } - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(SecurityContext) - if err := DeepCopy_api_SecurityContext(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_api_ContainerImage(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ContainerImage) - out := out.(*ContainerImage) - *out = *in - if in.Names != nil { - in, out := &in.Names, &out.Names - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_ContainerPort(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ContainerPort) - out := out.(*ContainerPort) - *out = *in - return nil - } -} - -func DeepCopy_api_ContainerState(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ContainerState) - out := out.(*ContainerState) - *out = *in - if in.Waiting != nil { - in, out := &in.Waiting, &out.Waiting - *out = new(ContainerStateWaiting) - **out = **in - } - if in.Running != nil { - in, out := &in.Running, &out.Running - *out = new(ContainerStateRunning) - if err := DeepCopy_api_ContainerStateRunning(*in, *out, c); err != nil { - return err - } - } - if in.Terminated != nil { - in, out := &in.Terminated, &out.Terminated - *out = new(ContainerStateTerminated) - if err := DeepCopy_api_ContainerStateTerminated(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_api_ContainerStateRunning(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ContainerStateRunning) - out := out.(*ContainerStateRunning) - *out = *in - out.StartedAt = in.StartedAt.DeepCopy() - return nil - } -} - -func DeepCopy_api_ContainerStateTerminated(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ContainerStateTerminated) - out := out.(*ContainerStateTerminated) - *out = *in - out.StartedAt = in.StartedAt.DeepCopy() - out.FinishedAt = in.FinishedAt.DeepCopy() - return nil - } -} - -func DeepCopy_api_ContainerStateWaiting(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ContainerStateWaiting) - out := out.(*ContainerStateWaiting) - *out = *in - return nil - } -} - -func DeepCopy_api_ContainerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ContainerStatus) - out := out.(*ContainerStatus) - *out = *in - if err := DeepCopy_api_ContainerState(&in.State, &out.State, c); err != nil { - return err - } - if err := DeepCopy_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_api_ConversionError(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ConversionError) - out := out.(*ConversionError) - *out = *in - // in.In is kind 'Interface' - if in.In != nil { - if newVal, err := c.DeepCopy(&in.In); err != nil { - return err - } else { - out.In = *newVal.(*interface{}) - } - } - // in.Out is kind 'Interface' - if 
in.Out != nil { - if newVal, err := c.DeepCopy(&in.Out); err != nil { - return err - } else { - out.Out = *newVal.(*interface{}) - } - } - return nil - } -} - -func DeepCopy_api_DaemonEndpoint(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DaemonEndpoint) - out := out.(*DaemonEndpoint) - *out = *in - return nil - } -} - -func DeepCopy_api_DeleteOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeleteOptions) - out := out.(*DeleteOptions) - *out = *in - if in.GracePeriodSeconds != nil { - in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds - *out = new(int64) - **out = **in - } - if in.Preconditions != nil { - in, out := &in.Preconditions, &out.Preconditions - *out = new(Preconditions) - if err := DeepCopy_api_Preconditions(*in, *out, c); err != nil { - return err - } - } - if in.OrphanDependents != nil { - in, out := &in.OrphanDependents, &out.OrphanDependents - *out = new(bool) - **out = **in - } - if in.PropagationPolicy != nil { - in, out := &in.PropagationPolicy, &out.PropagationPolicy - *out = new(DeletionPropagation) - **out = **in - } - return nil - } -} - -func DeepCopy_api_DownwardAPIProjection(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DownwardAPIProjection) - out := out.(*DownwardAPIProjection) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DownwardAPIVolumeFile, len(*in)) - for i := range *in { - if err := DeepCopy_api_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_DownwardAPIVolumeFile(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DownwardAPIVolumeFile) - out := out.(*DownwardAPIVolumeFile) - *out = *in - if in.FieldRef != nil { - in, out := &in.FieldRef, &out.FieldRef - *out = new(ObjectFieldSelector) - **out = **in - } - if in.ResourceFieldRef != nil { - in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - *out = new(ResourceFieldSelector) - if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { - return err - } - } - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_api_DownwardAPIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DownwardAPIVolumeSource) - out := out.(*DownwardAPIVolumeSource) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DownwardAPIVolumeFile, len(*in)) - for i := range *in { - if err := DeepCopy_api_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_api_EmptyDirVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EmptyDirVolumeSource) - out := out.(*EmptyDirVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_api_EndpointAddress(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EndpointAddress) - out := out.(*EndpointAddress) - *out = *in - if in.NodeName != nil { - in, out := &in.NodeName, &out.NodeName - *out = new(string) - **out = **in - } - if in.TargetRef != nil { - in, out := &in.TargetRef, &out.TargetRef - *out = new(ObjectReference) - **out = **in - } - return nil - } -} - -func DeepCopy_api_EndpointPort(in interface{}, out interface{}, 
c *conversion.Cloner) error { - { - in := in.(*EndpointPort) - out := out.(*EndpointPort) - *out = *in - return nil - } -} - -func DeepCopy_api_EndpointSubset(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EndpointSubset) - out := out.(*EndpointSubset) - *out = *in - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]EndpointAddress, len(*in)) - for i := range *in { - if err := DeepCopy_api_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.NotReadyAddresses != nil { - in, out := &in.NotReadyAddresses, &out.NotReadyAddresses - *out = make([]EndpointAddress, len(*in)) - for i := range *in { - if err := DeepCopy_api_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]EndpointPort, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_Endpoints(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Endpoints) - out := out.(*Endpoints) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Subsets != nil { - in, out := &in.Subsets, &out.Subsets - *out = make([]EndpointSubset, len(*in)) - for i := range *in { - if err := DeepCopy_api_EndpointSubset(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_EndpointsList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EndpointsList) - out := out.(*EndpointsList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Endpoints, len(*in)) - for i := range *in { - if err := DeepCopy_api_Endpoints(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_EnvFromSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EnvFromSource) - out := out.(*EnvFromSource) - *out = *in - if in.ConfigMapRef != nil { - in, out := &in.ConfigMapRef, &out.ConfigMapRef - *out = new(ConfigMapEnvSource) - if err := DeepCopy_api_ConfigMapEnvSource(*in, *out, c); err != nil { - return err - } - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretEnvSource) - if err := DeepCopy_api_SecretEnvSource(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_api_EnvVar(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EnvVar) - out := out.(*EnvVar) - *out = *in - if in.ValueFrom != nil { - in, out := &in.ValueFrom, &out.ValueFrom - *out = new(EnvVarSource) - if err := DeepCopy_api_EnvVarSource(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_api_EnvVarSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EnvVarSource) - out := out.(*EnvVarSource) - *out = *in - if in.FieldRef != nil { - in, out := &in.FieldRef, &out.FieldRef - *out = new(ObjectFieldSelector) - **out = **in - } - if in.ResourceFieldRef != nil { - in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - *out = new(ResourceFieldSelector) - if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { - return err - } - } - if in.ConfigMapKeyRef != nil { - in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef - *out = new(ConfigMapKeySelector) - if err := DeepCopy_api_ConfigMapKeySelector(*in, *out, c); err != 
nil { - return err - } - } - if in.SecretKeyRef != nil { - in, out := &in.SecretKeyRef, &out.SecretKeyRef - *out = new(SecretKeySelector) - if err := DeepCopy_api_SecretKeySelector(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_api_Event(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Event) - out := out.(*Event) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - out.FirstTimestamp = in.FirstTimestamp.DeepCopy() - out.LastTimestamp = in.LastTimestamp.DeepCopy() - return nil - } -} - -func DeepCopy_api_EventList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EventList) - out := out.(*EventList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Event, len(*in)) - for i := range *in { - if err := DeepCopy_api_Event(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_EventSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*EventSource) - out := out.(*EventSource) - *out = *in - return nil - } -} - -func DeepCopy_api_ExecAction(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ExecAction) - out := out.(*ExecAction) - *out = *in - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_FCVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*FCVolumeSource) - out := out.(*FCVolumeSource) - *out = *in - if in.TargetWWNs != nil { - in, out := &in.TargetWWNs, &out.TargetWWNs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Lun != nil { - in, out := &in.Lun, &out.Lun - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_api_FlexVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*FlexVolumeSource) - out := out.(*FlexVolumeSource) - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - if in.Options != nil { - in, out := &in.Options, &out.Options - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - return nil - } -} - -func DeepCopy_api_FlockerVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*FlockerVolumeSource) - out := out.(*FlockerVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_api_GCEPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GCEPersistentDiskVolumeSource) - out := out.(*GCEPersistentDiskVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_api_GitRepoVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GitRepoVolumeSource) - out := out.(*GitRepoVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_api_GlusterfsVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GlusterfsVolumeSource) - out := out.(*GlusterfsVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_api_HTTPGetAction(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HTTPGetAction) - out := out.(*HTTPGetAction) - *out = *in - if in.HTTPHeaders != nil { - in, out := &in.HTTPHeaders, 
&out.HTTPHeaders - *out = make([]HTTPHeader, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_HTTPHeader(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HTTPHeader) - out := out.(*HTTPHeader) - *out = *in - return nil - } -} - -func DeepCopy_api_Handler(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Handler) - out := out.(*Handler) - *out = *in - if in.Exec != nil { - in, out := &in.Exec, &out.Exec - *out = new(ExecAction) - if err := DeepCopy_api_ExecAction(*in, *out, c); err != nil { - return err - } - } - if in.HTTPGet != nil { - in, out := &in.HTTPGet, &out.HTTPGet - *out = new(HTTPGetAction) - if err := DeepCopy_api_HTTPGetAction(*in, *out, c); err != nil { - return err - } - } - if in.TCPSocket != nil { - in, out := &in.TCPSocket, &out.TCPSocket - *out = new(TCPSocketAction) - **out = **in - } - return nil - } -} - -func DeepCopy_api_HostPathVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HostPathVolumeSource) - out := out.(*HostPathVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_api_ISCSIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ISCSIVolumeSource) - out := out.(*ISCSIVolumeSource) - *out = *in - if in.Portals != nil { - in, out := &in.Portals, &out.Portals - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_KeyToPath(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*KeyToPath) - out := out.(*KeyToPath) - *out = *in - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_api_Lifecycle(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Lifecycle) - out := out.(*Lifecycle) - *out = *in - if in.PostStart != nil { - in, out := &in.PostStart, &out.PostStart - *out = new(Handler) - if err := DeepCopy_api_Handler(*in, *out, c); err != nil { - return err - } - } - if in.PreStop != nil { - in, out := &in.PreStop, &out.PreStop - *out = new(Handler) - if err := DeepCopy_api_Handler(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_api_LimitRange(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LimitRange) - out := out.(*LimitRange) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_api_LimitRangeSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_api_LimitRangeItem(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LimitRangeItem) - out := out.(*LimitRangeItem) - *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Default != nil { - in, out := &in.Default, &out.Default - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.DefaultRequest != nil { - in, out := &in.DefaultRequest, &out.DefaultRequest - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.MaxLimitRequestRatio != nil { - in, out := 
&in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return nil - } -} - -func DeepCopy_api_LimitRangeList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LimitRangeList) - out := out.(*LimitRangeList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]LimitRange, len(*in)) - for i := range *in { - if err := DeepCopy_api_LimitRange(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_LimitRangeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LimitRangeSpec) - out := out.(*LimitRangeSpec) - *out = *in - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = make([]LimitRangeItem, len(*in)) - for i := range *in { - if err := DeepCopy_api_LimitRangeItem(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_List(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*List) - out := out.(*List) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.Object, len(*in)) - for i := range *in { - if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { - return err - } else { - (*out)[i] = *newVal.(*runtime.Object) - } - } - } - return nil - } -} - -func DeepCopy_api_ListOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ListOptions) - out := out.(*ListOptions) - *out = *in - // in.LabelSelector is kind 'Interface' - if in.LabelSelector != nil { - if newVal, err := c.DeepCopy(&in.LabelSelector); err != nil { - return err - } else { - out.LabelSelector = *newVal.(*labels.Selector) - } - } - // in.FieldSelector is kind 'Interface' - if in.FieldSelector != nil { - if newVal, err := c.DeepCopy(&in.FieldSelector); err != nil { - return err - } else { - out.FieldSelector = *newVal.(*fields.Selector) - } - } - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int64) - **out = **in - } - return nil - } -} - -func DeepCopy_api_LoadBalancerIngress(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LoadBalancerIngress) - out := out.(*LoadBalancerIngress) - *out = *in - return nil - } -} - -func DeepCopy_api_LoadBalancerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LoadBalancerStatus) - out := out.(*LoadBalancerStatus) - *out = *in - if in.Ingress != nil { - in, out := &in.Ingress, &out.Ingress - *out = make([]LoadBalancerIngress, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_LocalObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LocalObjectReference) - out := out.(*LocalObjectReference) - *out = *in - return nil - } -} - -func DeepCopy_api_NFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NFSVolumeSource) - out := out.(*NFSVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_api_Namespace(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Namespace) - out := out.(*Namespace) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_api_NamespaceSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - 
-func DeepCopy_api_NamespaceList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NamespaceList) - out := out.(*NamespaceList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Namespace, len(*in)) - for i := range *in { - if err := DeepCopy_api_Namespace(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_NamespaceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NamespaceSpec) - out := out.(*NamespaceSpec) - *out = *in - if in.Finalizers != nil { - in, out := &in.Finalizers, &out.Finalizers - *out = make([]FinalizerName, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_NamespaceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NamespaceStatus) - out := out.(*NamespaceStatus) - *out = *in - return nil - } -} - -func DeepCopy_api_Node(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Node) - out := out.(*Node) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_api_NodeSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_api_NodeStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_api_NodeAddress(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeAddress) - out := out.(*NodeAddress) - *out = *in - return nil - } -} - -func DeepCopy_api_NodeAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeAffinity) - out := out.(*NodeAffinity) - *out = *in - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = new(NodeSelector) - if err := DeepCopy_api_NodeSelector(*in, *out, c); err != nil { - return err - } - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]PreferredSchedulingTerm, len(*in)) - for i := range *in { - if err := DeepCopy_api_PreferredSchedulingTerm(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_NodeCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeCondition) - out := out.(*NodeCondition) - *out = *in - out.LastHeartbeatTime = in.LastHeartbeatTime.DeepCopy() - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - return nil - } -} - -func DeepCopy_api_NodeDaemonEndpoints(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeDaemonEndpoints) - out := out.(*NodeDaemonEndpoints) - *out = *in - return nil - } -} - -func DeepCopy_api_NodeList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeList) - out := out.(*NodeList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Node, len(*in)) - for i := range *in { - if err := DeepCopy_api_Node(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_NodeProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeProxyOptions) - out := out.(*NodeProxyOptions) - *out = *in 
- return nil - } -} - -func DeepCopy_api_NodeResources(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeResources) - out := out.(*NodeResources) - *out = *in - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return nil - } -} - -func DeepCopy_api_NodeSelector(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeSelector) - out := out.(*NodeSelector) - *out = *in - if in.NodeSelectorTerms != nil { - in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms - *out = make([]NodeSelectorTerm, len(*in)) - for i := range *in { - if err := DeepCopy_api_NodeSelectorTerm(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_NodeSelectorRequirement(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeSelectorRequirement) - out := out.(*NodeSelectorRequirement) - *out = *in - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_NodeSelectorTerm(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeSelectorTerm) - out := out.(*NodeSelectorTerm) - *out = *in - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]NodeSelectorRequirement, len(*in)) - for i := range *in { - if err := DeepCopy_api_NodeSelectorRequirement(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_NodeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeSpec) - out := out.(*NodeSpec) - *out = *in - if in.Taints != nil { - in, out := &in.Taints, &out.Taints - *out = make([]Taint, len(*in)) - for i := range *in { - if err := DeepCopy_api_Taint(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_NodeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NodeStatus) - out := out.(*NodeStatus) - *out = *in - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Allocatable != nil { - in, out := &in.Allocatable, &out.Allocatable - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]NodeCondition, len(*in)) - for i := range *in { - if err := DeepCopy_api_NodeCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]NodeAddress, len(*in)) - copy(*out, *in) - } - if in.Images != nil { - in, out := &in.Images, &out.Images - *out = make([]ContainerImage, len(*in)) - for i := range *in { - if err := DeepCopy_api_ContainerImage(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.VolumesInUse != nil { - in, out := &in.VolumesInUse, &out.VolumesInUse - *out = make([]UniqueVolumeName, len(*in)) - copy(*out, *in) - } - if in.VolumesAttached != nil { - in, out := &in.VolumesAttached, &out.VolumesAttached - *out = make([]AttachedVolume, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_NodeSystemInfo(in interface{}, out interface{}, c 
*conversion.Cloner) error { - { - in := in.(*NodeSystemInfo) - out := out.(*NodeSystemInfo) - *out = *in - return nil - } -} - -func DeepCopy_api_ObjectFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ObjectFieldSelector) - out := out.(*ObjectFieldSelector) - *out = *in - return nil - } -} - -func DeepCopy_api_ObjectMeta(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ObjectMeta) - out := out.(*ObjectMeta) - *out = *in - out.CreationTimestamp = in.CreationTimestamp.DeepCopy() - if in.DeletionTimestamp != nil { - in, out := &in.DeletionTimestamp, &out.DeletionTimestamp - *out = new(v1.Time) - **out = (*in).DeepCopy() - } - if in.DeletionGracePeriodSeconds != nil { - in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds - *out = new(int64) - **out = **in - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - if in.OwnerReferences != nil { - in, out := &in.OwnerReferences, &out.OwnerReferences - *out = make([]v1.OwnerReference, len(*in)) - for i := range *in { - if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { - return err - } else { - (*out)[i] = *newVal.(*v1.OwnerReference) - } - } - } - if in.Finalizers != nil { - in, out := &in.Finalizers, &out.Finalizers - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_ObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ObjectReference) - out := out.(*ObjectReference) - *out = *in - return nil - } -} - -func DeepCopy_api_PersistentVolume(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolume) - out := out.(*PersistentVolume) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_api_PersistentVolumeSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_api_PersistentVolumeClaim(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeClaim) - out := out.(*PersistentVolumeClaim) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_api_PersistentVolumeClaimStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_api_PersistentVolumeClaimList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeClaimList) - out := out.(*PersistentVolumeClaimList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PersistentVolumeClaim, len(*in)) - for i := range *in { - if err := DeepCopy_api_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_PersistentVolumeClaimSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeClaimSpec) - out := out.(*PersistentVolumeClaimSpec) - *out = *in - if in.AccessModes != nil { - in, out := 
&in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - copy(*out, *in) - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - if err := DeepCopy_api_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { - return err - } - if in.StorageClassName != nil { - in, out := &in.StorageClassName, &out.StorageClassName - *out = new(string) - **out = **in - } - return nil - } -} - -func DeepCopy_api_PersistentVolumeClaimStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeClaimStatus) - out := out.(*PersistentVolumeClaimStatus) - *out = *in - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - copy(*out, *in) - } - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return nil - } -} - -func DeepCopy_api_PersistentVolumeClaimVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeClaimVolumeSource) - out := out.(*PersistentVolumeClaimVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_api_PersistentVolumeList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeList) - out := out.(*PersistentVolumeList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PersistentVolume, len(*in)) - for i := range *in { - if err := DeepCopy_api_PersistentVolume(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_PersistentVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeSource) - out := out.(*PersistentVolumeSource) - *out = *in - if in.GCEPersistentDisk != nil { - in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - *out = new(GCEPersistentDiskVolumeSource) - **out = **in - } - if in.AWSElasticBlockStore != nil { - in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - *out = new(AWSElasticBlockStoreVolumeSource) - **out = **in - } - if in.HostPath != nil { - in, out := &in.HostPath, &out.HostPath - *out = new(HostPathVolumeSource) - **out = **in - } - if in.Glusterfs != nil { - in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - **out = **in - } - if in.NFS != nil { - in, out := &in.NFS, &out.NFS - *out = new(NFSVolumeSource) - **out = **in - } - if in.RBD != nil { - in, out := &in.RBD, &out.RBD - *out = new(RBDVolumeSource) - if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.Quobyte != nil { - in, out := &in.Quobyte, &out.Quobyte - *out = new(QuobyteVolumeSource) - **out = **in - } - if in.ISCSI != nil { - in, out := &in.ISCSI, &out.ISCSI - *out = new(ISCSIVolumeSource) - if err := DeepCopy_api_ISCSIVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.FlexVolume != nil { - in, out := &in.FlexVolume, &out.FlexVolume - *out = new(FlexVolumeSource) - if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.Cinder != nil { - in, out := &in.Cinder, &out.Cinder - *out = new(CinderVolumeSource) - **out = **in - } - if in.CephFS != nil { - in, out := &in.CephFS, &out.CephFS - *out = new(CephFSVolumeSource) - 
if err := DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.FC != nil { - in, out := &in.FC, &out.FC - *out = new(FCVolumeSource) - if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.Flocker != nil { - in, out := &in.Flocker, &out.Flocker - *out = new(FlockerVolumeSource) - **out = **in - } - if in.AzureFile != nil { - in, out := &in.AzureFile, &out.AzureFile - *out = new(AzureFileVolumeSource) - **out = **in - } - if in.VsphereVolume != nil { - in, out := &in.VsphereVolume, &out.VsphereVolume - *out = new(VsphereVirtualDiskVolumeSource) - **out = **in - } - if in.AzureDisk != nil { - in, out := &in.AzureDisk, &out.AzureDisk - *out = new(AzureDiskVolumeSource) - if err := DeepCopy_api_AzureDiskVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.PhotonPersistentDisk != nil { - in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk - *out = new(PhotonPersistentDiskVolumeSource) - **out = **in - } - if in.PortworxVolume != nil { - in, out := &in.PortworxVolume, &out.PortworxVolume - *out = new(PortworxVolumeSource) - **out = **in - } - if in.ScaleIO != nil { - in, out := &in.ScaleIO, &out.ScaleIO - *out = new(ScaleIOVolumeSource) - if err := DeepCopy_api_ScaleIOVolumeSource(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_api_PersistentVolumeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeSpec) - out := out.(*PersistentVolumeSpec) - *out = *in - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if err := DeepCopy_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil { - return err - } - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - copy(*out, *in) - } - if in.ClaimRef != nil { - in, out := &in.ClaimRef, &out.ClaimRef - *out = new(ObjectReference) - **out = **in - } - return nil - } -} - -func DeepCopy_api_PersistentVolumeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PersistentVolumeStatus) - out := out.(*PersistentVolumeStatus) - *out = *in - return nil - } -} - -func DeepCopy_api_PhotonPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PhotonPersistentDiskVolumeSource) - out := out.(*PhotonPersistentDiskVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_api_Pod(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Pod) - out := out.(*Pod) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_api_PodSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_api_PodStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_api_PodAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodAffinity) - out := out.(*PodAffinity) - *out = *in - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(*in)) - for i := range *in { - if err := 
DeepCopy_api_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(*in)) - for i := range *in { - if err := DeepCopy_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_PodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodAffinityTerm) - out := out.(*PodAffinityTerm) - *out = *in - if in.LabelSelector != nil { - in, out := &in.LabelSelector, &out.LabelSelector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - if in.Namespaces != nil { - in, out := &in.Namespaces, &out.Namespaces - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_PodAntiAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodAntiAffinity) - out := out.(*PodAntiAffinity) - *out = *in - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(*in)) - for i := range *in { - if err := DeepCopy_api_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(*in)) - for i := range *in { - if err := DeepCopy_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_PodAttachOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodAttachOptions) - out := out.(*PodAttachOptions) - *out = *in - return nil - } -} - -func DeepCopy_api_PodCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodCondition) - out := out.(*PodCondition) - *out = *in - out.LastProbeTime = in.LastProbeTime.DeepCopy() - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - return nil - } -} - -func DeepCopy_api_PodExecOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodExecOptions) - out := out.(*PodExecOptions) - *out = *in - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_PodList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodList) - out := out.(*PodList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Pod, len(*in)) - for i := range *in { - if err := DeepCopy_api_Pod(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_PodLogOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodLogOptions) - out := out.(*PodLogOptions) - *out = *in - if in.SinceSeconds != nil { - in, out := &in.SinceSeconds, &out.SinceSeconds - *out = new(int64) - **out = **in - } - if in.SinceTime != nil { - in, out := &in.SinceTime, &out.SinceTime - *out = new(v1.Time) - **out = (*in).DeepCopy() - } - 
if in.TailLines != nil { - in, out := &in.TailLines, &out.TailLines - *out = new(int64) - **out = **in - } - if in.LimitBytes != nil { - in, out := &in.LimitBytes, &out.LimitBytes - *out = new(int64) - **out = **in - } - return nil - } -} - -func DeepCopy_api_PodPortForwardOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodPortForwardOptions) - out := out.(*PodPortForwardOptions) - *out = *in - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]int32, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_PodProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodProxyOptions) - out := out.(*PodProxyOptions) - *out = *in - return nil - } -} - -func DeepCopy_api_PodSecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodSecurityContext) - out := out.(*PodSecurityContext) - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(SELinuxOptions) - **out = **in - } - if in.RunAsUser != nil { - in, out := &in.RunAsUser, &out.RunAsUser - *out = new(int64) - **out = **in - } - if in.RunAsNonRoot != nil { - in, out := &in.RunAsNonRoot, &out.RunAsNonRoot - *out = new(bool) - **out = **in - } - if in.SupplementalGroups != nil { - in, out := &in.SupplementalGroups, &out.SupplementalGroups - *out = make([]int64, len(*in)) - copy(*out, *in) - } - if in.FSGroup != nil { - in, out := &in.FSGroup, &out.FSGroup - *out = new(int64) - **out = **in - } - return nil - } -} - -func DeepCopy_api_PodSignature(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodSignature) - out := out.(*PodSignature) - *out = *in - if in.PodController != nil { - in, out := &in.PodController, &out.PodController - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.OwnerReference) - } - } - return nil - } -} - -func DeepCopy_api_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodSpec) - out := out.(*PodSpec) - *out = *in - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]Volume, len(*in)) - for i := range *in { - if err := DeepCopy_api_Volume(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.InitContainers != nil { - in, out := &in.InitContainers, &out.InitContainers - *out = make([]Container, len(*in)) - for i := range *in { - if err := DeepCopy_api_Container(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]Container, len(*in)) - for i := range *in { - if err := DeepCopy_api_Container(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.TerminationGracePeriodSeconds != nil { - in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds - *out = new(int64) - **out = **in - } - if in.ActiveDeadlineSeconds != nil { - in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = **in - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - if in.AutomountServiceAccountToken != nil { - in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - *out = new(bool) - **out = **in - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, 
&out.SecurityContext - *out = new(PodSecurityContext) - if err := DeepCopy_api_PodSecurityContext(*in, *out, c); err != nil { - return err - } - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.Affinity != nil { - in, out := &in.Affinity, &out.Affinity - *out = new(Affinity) - if err := DeepCopy_api_Affinity(*in, *out, c); err != nil { - return err - } - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]Toleration, len(*in)) - for i := range *in { - if err := DeepCopy_api_Toleration(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_PodStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodStatus) - out := out.(*PodStatus) - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]PodCondition, len(*in)) - for i := range *in { - if err := DeepCopy_api_PodCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = new(v1.Time) - **out = (*in).DeepCopy() - } - if in.InitContainerStatuses != nil { - in, out := &in.InitContainerStatuses, &out.InitContainerStatuses - *out = make([]ContainerStatus, len(*in)) - for i := range *in { - if err := DeepCopy_api_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.ContainerStatuses != nil { - in, out := &in.ContainerStatuses, &out.ContainerStatuses - *out = make([]ContainerStatus, len(*in)) - for i := range *in { - if err := DeepCopy_api_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_PodStatusResult(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodStatusResult) - out := out.(*PodStatusResult) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_api_PodStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_api_PodTemplate(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodTemplate) - out := out.(*PodTemplate) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_api_PodTemplateList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodTemplateList) - out := out.(*PodTemplateList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodTemplate, len(*in)) - for i := range *in { - if err := DeepCopy_api_PodTemplate(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_PodTemplateSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodTemplateSpec) - out := out.(*PodTemplateSpec) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_api_PodSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func 
DeepCopy_api_PortworxVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PortworxVolumeSource) - out := out.(*PortworxVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_api_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Preconditions) - out := out.(*Preconditions) - *out = *in - if in.UID != nil { - in, out := &in.UID, &out.UID - *out = new(types.UID) - **out = **in - } - return nil - } -} - -func DeepCopy_api_PreferAvoidPodsEntry(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PreferAvoidPodsEntry) - out := out.(*PreferAvoidPodsEntry) - *out = *in - if err := DeepCopy_api_PodSignature(&in.PodSignature, &out.PodSignature, c); err != nil { - return err - } - out.EvictionTime = in.EvictionTime.DeepCopy() - return nil - } -} - -func DeepCopy_api_PreferredSchedulingTerm(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PreferredSchedulingTerm) - out := out.(*PreferredSchedulingTerm) - *out = *in - if err := DeepCopy_api_NodeSelectorTerm(&in.Preference, &out.Preference, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_api_Probe(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Probe) - out := out.(*Probe) - *out = *in - if err := DeepCopy_api_Handler(&in.Handler, &out.Handler, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_api_ProjectedVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ProjectedVolumeSource) - out := out.(*ProjectedVolumeSource) - *out = *in - if in.Sources != nil { - in, out := &in.Sources, &out.Sources - *out = make([]VolumeProjection, len(*in)) - for i := range *in { - if err := DeepCopy_api_VolumeProjection(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_api_QuobyteVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*QuobyteVolumeSource) - out := out.(*QuobyteVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_api_RBDVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RBDVolumeSource) - out := out.(*RBDVolumeSource) - *out = *in - if in.CephMonitors != nil { - in, out := &in.CephMonitors, &out.CephMonitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return nil - } -} - -func DeepCopy_api_RangeAllocation(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RangeAllocation) - out := out.(*RangeAllocation) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_ReplicationController(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicationController) - out := out.(*ReplicationController) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_api_ReplicationControllerSpec(&in.Spec, &out.Spec, c); err != 
nil { - return err - } - if err := DeepCopy_api_ReplicationControllerStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_api_ReplicationControllerCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicationControllerCondition) - out := out.(*ReplicationControllerCondition) - *out = *in - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - return nil - } -} - -func DeepCopy_api_ReplicationControllerList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicationControllerList) - out := out.(*ReplicationControllerList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicationController, len(*in)) - for i := range *in { - if err := DeepCopy_api_ReplicationController(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_ReplicationControllerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicationControllerSpec) - out := out.(*ReplicationControllerSpec) - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(PodTemplateSpec) - if err := DeepCopy_api_PodTemplateSpec(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_api_ReplicationControllerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicationControllerStatus) - out := out.(*ReplicationControllerStatus) - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ReplicationControllerCondition, len(*in)) - for i := range *in { - if err := DeepCopy_api_ReplicationControllerCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_ResourceFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceFieldSelector) - out := out.(*ResourceFieldSelector) - *out = *in - out.Divisor = in.Divisor.DeepCopy() - return nil - } -} - -func DeepCopy_api_ResourceQuota(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceQuota) - out := out.(*ResourceQuota) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_api_ResourceQuotaSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_api_ResourceQuotaStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_api_ResourceQuotaList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceQuotaList) - out := out.(*ResourceQuotaList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceQuota, len(*in)) - for i := range *in { - if err := DeepCopy_api_ResourceQuota(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_ResourceQuotaSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceQuotaSpec) - out := out.(*ResourceQuotaSpec) - *out = *in - if in.Hard != nil { - in, out := &in.Hard, &out.Hard - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = 
val.DeepCopy() - } - } - if in.Scopes != nil { - in, out := &in.Scopes, &out.Scopes - *out = make([]ResourceQuotaScope, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_ResourceQuotaStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceQuotaStatus) - out := out.(*ResourceQuotaStatus) - *out = *in - if in.Hard != nil { - in, out := &in.Hard, &out.Hard - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Used != nil { - in, out := &in.Used, &out.Used - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return nil - } -} - -func DeepCopy_api_ResourceRequirements(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceRequirements) - out := out.(*ResourceRequirements) - *out = *in - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Requests != nil { - in, out := &in.Requests, &out.Requests - *out = make(ResourceList) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return nil - } -} - -func DeepCopy_api_SELinuxOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SELinuxOptions) - out := out.(*SELinuxOptions) - *out = *in - return nil - } -} - -func DeepCopy_api_ScaleIOVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ScaleIOVolumeSource) - out := out.(*ScaleIOVolumeSource) - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return nil - } -} - -func DeepCopy_api_Secret(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Secret) - out := out.(*Secret) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make(map[string][]byte) - for key, val := range *in { - if newVal, err := c.DeepCopy(&val); err != nil { - return err - } else { - (*out)[key] = *newVal.(*[]byte) - } - } - } - return nil - } -} - -func DeepCopy_api_SecretEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SecretEnvSource) - out := out.(*SecretEnvSource) - *out = *in - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_api_SecretKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SecretKeySelector) - out := out.(*SecretKeySelector) - *out = *in - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_api_SecretList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SecretList) - out := out.(*SecretList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Secret, len(*in)) - for i := range *in { - if err := DeepCopy_api_Secret(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_SecretProjection(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SecretProjection) - out := out.(*SecretProjection) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out 
= make([]KeyToPath, len(*in)) - for i := range *in { - if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_api_SecretVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SecretVolumeSource) - out := out.(*SecretVolumeSource) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_api_SecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SecurityContext) - out := out.(*SecurityContext) - *out = *in - if in.Capabilities != nil { - in, out := &in.Capabilities, &out.Capabilities - *out = new(Capabilities) - if err := DeepCopy_api_Capabilities(*in, *out, c); err != nil { - return err - } - } - if in.Privileged != nil { - in, out := &in.Privileged, &out.Privileged - *out = new(bool) - **out = **in - } - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(SELinuxOptions) - **out = **in - } - if in.RunAsUser != nil { - in, out := &in.RunAsUser, &out.RunAsUser - *out = new(int64) - **out = **in - } - if in.RunAsNonRoot != nil { - in, out := &in.RunAsNonRoot, &out.RunAsNonRoot - *out = new(bool) - **out = **in - } - if in.ReadOnlyRootFilesystem != nil { - in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_api_SerializedReference(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SerializedReference) - out := out.(*SerializedReference) - *out = *in - return nil - } -} - -func DeepCopy_api_Service(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Service) - out := out.(*Service) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_api_ServiceSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_api_ServiceStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_api_ServiceAccount(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServiceAccount) - out := out.(*ServiceAccount) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]ObjectReference, len(*in)) - copy(*out, *in) - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.AutomountServiceAccountToken != nil { - in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - *out = new(bool) - **out = **in - } - return nil - } -} - -func DeepCopy_api_ServiceAccountList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := 
in.(*ServiceAccountList) - out := out.(*ServiceAccountList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ServiceAccount, len(*in)) - for i := range *in { - if err := DeepCopy_api_ServiceAccount(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_ServiceList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServiceList) - out := out.(*ServiceList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Service, len(*in)) - for i := range *in { - if err := DeepCopy_api_Service(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_api_ServicePort(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServicePort) - out := out.(*ServicePort) - *out = *in - return nil - } -} - -func DeepCopy_api_ServiceProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServiceProxyOptions) - out := out.(*ServiceProxyOptions) - *out = *in - return nil - } -} - -func DeepCopy_api_ServiceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServiceSpec) - out := out.(*ServiceSpec) - *out = *in - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ServicePort, len(*in)) - copy(*out, *in) - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ExternalIPs != nil { - in, out := &in.ExternalIPs, &out.ExternalIPs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.LoadBalancerSourceRanges != nil { - in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_api_ServiceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServiceStatus) - out := out.(*ServiceStatus) - *out = *in - if err := DeepCopy_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_api_Sysctl(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Sysctl) - out := out.(*Sysctl) - *out = *in - return nil - } -} - -func DeepCopy_api_TCPSocketAction(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TCPSocketAction) - out := out.(*TCPSocketAction) - *out = *in - return nil - } -} - -func DeepCopy_api_Taint(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Taint) - out := out.(*Taint) - *out = *in - out.TimeAdded = in.TimeAdded.DeepCopy() - return nil - } -} - -func DeepCopy_api_Toleration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Toleration) - out := out.(*Toleration) - *out = *in - if in.TolerationSeconds != nil { - in, out := &in.TolerationSeconds, &out.TolerationSeconds - *out = new(int64) - **out = **in - } - return nil - } -} - -func DeepCopy_api_Volume(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Volume) - out := out.(*Volume) - *out = *in - if err := DeepCopy_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_api_VolumeMount(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*VolumeMount) - out := out.(*VolumeMount) - *out = *in - return nil 
- } -} - -func DeepCopy_api_VolumeProjection(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*VolumeProjection) - out := out.(*VolumeProjection) - *out = *in - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - *out = new(SecretProjection) - if err := DeepCopy_api_SecretProjection(*in, *out, c); err != nil { - return err - } - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - *out = new(DownwardAPIProjection) - if err := DeepCopy_api_DownwardAPIProjection(*in, *out, c); err != nil { - return err - } - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - *out = new(ConfigMapProjection) - if err := DeepCopy_api_ConfigMapProjection(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_api_VolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*VolumeSource) - out := out.(*VolumeSource) - *out = *in - if in.HostPath != nil { - in, out := &in.HostPath, &out.HostPath - *out = new(HostPathVolumeSource) - **out = **in - } - if in.EmptyDir != nil { - in, out := &in.EmptyDir, &out.EmptyDir - *out = new(EmptyDirVolumeSource) - **out = **in - } - if in.GCEPersistentDisk != nil { - in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - *out = new(GCEPersistentDiskVolumeSource) - **out = **in - } - if in.AWSElasticBlockStore != nil { - in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - *out = new(AWSElasticBlockStoreVolumeSource) - **out = **in - } - if in.GitRepo != nil { - in, out := &in.GitRepo, &out.GitRepo - *out = new(GitRepoVolumeSource) - **out = **in - } - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - *out = new(SecretVolumeSource) - if err := DeepCopy_api_SecretVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.NFS != nil { - in, out := &in.NFS, &out.NFS - *out = new(NFSVolumeSource) - **out = **in - } - if in.ISCSI != nil { - in, out := &in.ISCSI, &out.ISCSI - *out = new(ISCSIVolumeSource) - if err := DeepCopy_api_ISCSIVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.Glusterfs != nil { - in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - **out = **in - } - if in.PersistentVolumeClaim != nil { - in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim - *out = new(PersistentVolumeClaimVolumeSource) - **out = **in - } - if in.RBD != nil { - in, out := &in.RBD, &out.RBD - *out = new(RBDVolumeSource) - if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.Quobyte != nil { - in, out := &in.Quobyte, &out.Quobyte - *out = new(QuobyteVolumeSource) - **out = **in - } - if in.FlexVolume != nil { - in, out := &in.FlexVolume, &out.FlexVolume - *out = new(FlexVolumeSource) - if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.Cinder != nil { - in, out := &in.Cinder, &out.Cinder - *out = new(CinderVolumeSource) - **out = **in - } - if in.CephFS != nil { - in, out := &in.CephFS, &out.CephFS - *out = new(CephFSVolumeSource) - if err := DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.Flocker != nil { - in, out := &in.Flocker, &out.Flocker - *out = new(FlockerVolumeSource) - **out = **in - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - *out = new(DownwardAPIVolumeSource) - if err := DeepCopy_api_DownwardAPIVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.FC != nil { 
- in, out := &in.FC, &out.FC - *out = new(FCVolumeSource) - if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.AzureFile != nil { - in, out := &in.AzureFile, &out.AzureFile - *out = new(AzureFileVolumeSource) - **out = **in - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - *out = new(ConfigMapVolumeSource) - if err := DeepCopy_api_ConfigMapVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.VsphereVolume != nil { - in, out := &in.VsphereVolume, &out.VsphereVolume - *out = new(VsphereVirtualDiskVolumeSource) - **out = **in - } - if in.AzureDisk != nil { - in, out := &in.AzureDisk, &out.AzureDisk - *out = new(AzureDiskVolumeSource) - if err := DeepCopy_api_AzureDiskVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.PhotonPersistentDisk != nil { - in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk - *out = new(PhotonPersistentDiskVolumeSource) - **out = **in - } - if in.Projected != nil { - in, out := &in.Projected, &out.Projected - *out = new(ProjectedVolumeSource) - if err := DeepCopy_api_ProjectedVolumeSource(*in, *out, c); err != nil { - return err - } - } - if in.PortworxVolume != nil { - in, out := &in.PortworxVolume, &out.PortworxVolume - *out = new(PortworxVolumeSource) - **out = **in - } - if in.ScaleIO != nil { - in, out := &in.ScaleIO, &out.ScaleIO - *out = new(ScaleIOVolumeSource) - if err := DeepCopy_api_ScaleIOVolumeSource(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_api_VsphereVirtualDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*VsphereVirtualDiskVolumeSource) - out := out.(*VsphereVirtualDiskVolumeSource) - *out = *in - return nil - } -} - -func DeepCopy_api_WeightedPodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*WeightedPodAffinityTerm) - out := out.(*WeightedPodAffinityTerm) - *out = *in - if err := DeepCopy_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { - return err - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/OWNERS b/vendor/k8s.io/client-go/pkg/apis/apps/OWNERS deleted file mode 100755 index 1cdc56eec..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/apps/OWNERS +++ /dev/null @@ -1,21 +0,0 @@ -reviewers: -- thockin -- lavalamp -- smarterclayton -- deads2k -- caesarxuchao -- bprashanth -- pmorie -- sttts -- saad-ali -- ncdc -- timstclair -- timothysc -- dims -- errordeveloper -- mml -- m1093782566 -- mbohlool -- david-mcmahon -- kevin-wangzefeng -- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/install/install.go b/vendor/k8s.io/client-go/pkg/apis/apps/install/install.go deleted file mode 100644 index ca50f3ea4..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/apps/install/install.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package install installs the apps API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/apis/apps" - "k8s.io/client-go/pkg/apis/apps/v1beta1" -) - -func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: apps.GroupName, - VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, - ImportPrefix: "k8s.io/client-go/pkg/apis/apps", - AddInternalObjectsToScheme: apps.AddToScheme, - }, - announced.VersionToSchemeFunc{ - v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, - }, - ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/register.go b/vendor/k8s.io/client-go/pkg/apis/apps/register.go deleted file mode 100644 index d1d3bab26..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/apps/register.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apps - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/pkg/apis/extensions" -) - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// GroupName is the group name use in this package -const GroupName = "apps" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// Adds the list of known types to api.Scheme. 
-func addKnownTypes(scheme *runtime.Scheme) error { - // TODO this will get cleaned up with the scheme types are fixed - scheme.AddKnownTypes(SchemeGroupVersion, - &extensions.Deployment{}, - &extensions.DeploymentList{}, - &extensions.DeploymentRollback{}, - &extensions.Scale{}, - &StatefulSet{}, - &StatefulSetList{}, - ) - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/types.go b/vendor/k8s.io/client-go/pkg/apis/apps/types.go deleted file mode 100644 index cd5ce8284..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/apps/types.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apps - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api" -) - -// +genclient=true - -// StatefulSet represents a set of pods with consistent identities. -// Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. -// The StatefulSet guarantees that a given network identity will always -// map to the same storage identity. -type StatefulSet struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired identities of pods in this set. - // +optional - Spec StatefulSetSpec - - // Status is the current status of Pods in this StatefulSet. This data - // may be out of date by some window of time. - // +optional - Status StatefulSetStatus -} - -// A StatefulSetSpec is the specification of a StatefulSet. -type StatefulSetSpec struct { - // Replicas is the desired number of replicas of the given Template. - // These are replicas in the sense that they are instantiations of the - // same Template, but individual replicas also have a consistent identity. - // If unspecified, defaults to 1. - // TODO: Consider a rename of this field. - // +optional - Replicas int32 - - // Selector is a label query over pods that should match the replica count. - // If empty, defaulted to labels on the pod template. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - Selector *metav1.LabelSelector - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. Each pod stamped out by the StatefulSet - // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. - Template api.PodTemplateSpec - - // VolumeClaimTemplates is a list of claims that pods are allowed to reference. - // The StatefulSet controller is responsible for mapping network identities to - // claims in a way that maintains the identity of a pod. Every claim in - // this list must have at least one matching (by name) volumeMount in one - // container in the template. A claim in this list takes precedence over - // any volumes in the template, with the same name. - // TODO: Define the behavior if a claim already exists with the same name. 
- // +optional - VolumeClaimTemplates []api.PersistentVolumeClaim - - // ServiceName is the name of the service that governs this StatefulSet. - // This service must exist before the StatefulSet, and is responsible for - // the network identity of the set. Pods get DNS/hostnames that follow the - // pattern: pod-specific-string.serviceName.default.svc.cluster.local - // where "pod-specific-string" is managed by the StatefulSet controller. - ServiceName string -} - -// StatefulSetStatus represents the current state of a StatefulSet. -type StatefulSetStatus struct { - // most recent generation observed by this StatefulSet. - // +optional - ObservedGeneration *int64 - - // Replicas is the number of actual replicas. - Replicas int32 -} - -// StatefulSetList is a collection of StatefulSets. -type StatefulSetList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - Items []StatefulSet -} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/conversion.go deleted file mode 100644 index 96d3330f8..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/conversion.go +++ /dev/null @@ -1,297 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/api/v1" - "k8s.io/client-go/pkg/apis/apps" - "k8s.io/client-go/pkg/apis/extensions" -) - -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions to handle the *int32 -> int32 - // conversion. A pointer is useful in the versioned type so we can default - // it, but a plain int32 is more convenient in the internal type. These - // functions are the same as the autogenerated ones in every other way. - err := scheme.AddConversionFuncs( - Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec, - Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec, - // extensions - // TODO: below conversions should be dropped in favor of auto-generated - // ones, see https://github.com/kubernetes/kubernetextensionsssues/39865 - Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, - Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, - Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, - Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, - Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, - Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, - Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, - Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, - ) - if err != nil { - return err - } - - // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. 
- err = scheme.AddFieldLabelConversionFunc("apps/v1beta1", "StatefulSet", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name", "metadata.namespace", "status.successful": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported for StatefulSet: %s", label) - } - }) - if err != nil { - return err - } - err = api.Scheme.AddFieldLabelConversionFunc("apps/v1beta1", "Deployment", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name", "metadata.namespace": - return label, value, nil - default: - return "", "", fmt.Errorf("field label %q not supported for Deployment", label) - } - }) - if err != nil { - return err - } - - return nil -} - -func Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := s.Convert(&in.Template, &out.Template, 0); err != nil { - return err - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]api.PersistentVolumeClaim, len(*in)) - for i := range *in { - if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { - return err - } - } - } else { - out.VolumeClaimTemplates = nil - } - out.ServiceName = in.ServiceName - return nil -} - -func Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *StatefulSetSpec, s conversion.Scope) error { - out.Replicas = new(int32) - *out.Replicas = in.Replicas - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := s.Convert(&in.Template, &out.Template, 0); err != nil { - return err - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]v1.PersistentVolumeClaim, len(*in)) - for i := range *in { - if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { - return err - } - } - } else { - out.VolumeClaimTemplates = nil - } - out.ServiceName = in.ServiceName - return nil -} - -func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { - out.Replicas = int32(in.Replicas) - - out.Selector = nil - out.TargetSelector = "" - if in.Selector != nil { - if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { - out.Selector = in.Selector.MatchLabels - } - - selector, err := metav1.LabelSelectorAsSelector(in.Selector) - if err != nil { - return fmt.Errorf("invalid label selector: %v", err) - } - out.TargetSelector = selector.String() - } - return nil -} - -func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - - // Normally when 2 fields map to the same internal value we favor the old field, since - // old clients can't be expected to know about new fields but clients that know about the - // new field can be expected to know about the old field (though that's not quite true, due - // to kubectl apply). 
However, these fields are readonly, so any non-nil value should work. - if in.TargetSelector != "" { - labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) - if err != nil { - out.Selector = nil - return fmt.Errorf("failed to parse target selector: %v", err) - } - out.Selector = labelSelector - } else if in.Selector != nil { - out.Selector = new(metav1.LabelSelector) - selector := make(map[string]string) - for key, val := range in.Selector { - selector[key] = val - } - out.Selector.MatchLabels = selector - } else { - out.Selector = nil - } - return nil -} - -func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - out.Selector = in.Selector - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { - return err - } - out.RevisionHistoryLimit = in.RevisionHistoryLimit - out.MinReadySeconds = in.MinReadySeconds - out.Paused = in.Paused - if in.RollbackTo != nil { - out.RollbackTo = new(extensions.RollbackConfig) - out.RollbackTo.Revision = in.RollbackTo.Revision - } else { - out.RollbackTo = nil - } - if in.ProgressDeadlineSeconds != nil { - out.ProgressDeadlineSeconds = new(int32) - *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds - } - return nil -} - -func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { - out.Replicas = &in.Replicas - out.Selector = in.Selector - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { - return err - } - if in.RevisionHistoryLimit != nil { - out.RevisionHistoryLimit = new(int32) - *out.RevisionHistoryLimit = int32(*in.RevisionHistoryLimit) - } - out.MinReadySeconds = int32(in.MinReadySeconds) - out.Paused = in.Paused - if in.RollbackTo != nil { - out.RollbackTo = new(RollbackConfig) - out.RollbackTo.Revision = int64(in.RollbackTo.Revision) - } else { - out.RollbackTo = nil - } - if in.ProgressDeadlineSeconds != nil { - out.ProgressDeadlineSeconds = new(int32) - *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds - } - return nil -} - -func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { - out.Type = DeploymentStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(extensions.RollingUpdateDeployment) - if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } 
else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { - if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { - return err - } - if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil { - return err - } - return nil -} - -func Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { - if out.MaxUnavailable == nil { - out.MaxUnavailable = &intstr.IntOrString{} - } - if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { - return err - } - if out.MaxSurge == nil { - out.MaxSurge = &intstr.IntOrString{} - } - if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil { - return err - } - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/defaults.go deleted file mode 100644 index 004cecd3f..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/defaults.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - RegisterDefaults(scheme) - return scheme.AddDefaultingFuncs( - SetDefaults_StatefulSet, - SetDefaults_Deployment, - ) -} - -func SetDefaults_StatefulSet(obj *StatefulSet) { - labels := obj.Spec.Template.Labels - if labels != nil { - if obj.Spec.Selector == nil { - obj.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: labels, - } - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - if obj.Spec.Replicas == nil { - obj.Spec.Replicas = new(int32) - *obj.Spec.Replicas = 1 - } -} - -// SetDefaults_Deployment sets additional defaults compared to its counterpart -// in extensions. These addons are: -// - MaxUnavailable during rolling update set to 25% (1 in extensions) -// - MaxSurge value during rolling update set to 25% (1 in extensions) -// - RevisionHistoryLimit set to 2 (not set in extensions) -// - ProgressDeadlineSeconds set to 600s (not set in extensions) -func SetDefaults_Deployment(obj *Deployment) { - // Default labels and selector to labels from pod template spec. - labels := obj.Spec.Template.Labels - - if labels != nil { - if obj.Spec.Selector == nil { - obj.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels} - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - // Set DeploymentSpec.Replicas to 1 if it is not set. - if obj.Spec.Replicas == nil { - obj.Spec.Replicas = new(int32) - *obj.Spec.Replicas = 1 - } - strategy := &obj.Spec.Strategy - // Set default DeploymentStrategyType as RollingUpdate. 
- if strategy.Type == "" { - strategy.Type = RollingUpdateDeploymentStrategyType - } - if strategy.Type == RollingUpdateDeploymentStrategyType { - if strategy.RollingUpdate == nil { - rollingUpdate := RollingUpdateDeployment{} - strategy.RollingUpdate = &rollingUpdate - } - if strategy.RollingUpdate.MaxUnavailable == nil { - // Set default MaxUnavailable as 25% by default. - maxUnavailable := intstr.FromString("25%") - strategy.RollingUpdate.MaxUnavailable = &maxUnavailable - } - if strategy.RollingUpdate.MaxSurge == nil { - // Set default MaxSurge as 25% by default. - maxSurge := intstr.FromString("25%") - strategy.RollingUpdate.MaxSurge = &maxSurge - } - } - if obj.Spec.RevisionHistoryLimit == nil { - obj.Spec.RevisionHistoryLimit = new(int32) - *obj.Spec.RevisionHistoryLimit = 2 - } - if obj.Spec.ProgressDeadlineSeconds == nil { - obj.Spec.ProgressDeadlineSeconds = new(int32) - *obj.Spec.ProgressDeadlineSeconds = 600 - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.generated.go deleted file mode 100644 index 409504120..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.generated.go +++ /dev/null @@ -1,6485 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1beta1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg4_resource "k8s.io/apimachinery/pkg/api/resource" - pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkg2_types "k8s.io/apimachinery/pkg/types" - pkg5_intstr "k8s.io/apimachinery/pkg/util/intstr" - pkg3_v1 "k8s.io/client-go/pkg/api/v1" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg4_resource.Quantity - var v1 pkg1_v1.TypeMeta - var v2 pkg2_types.UID - var v3 pkg5_intstr.IntOrString - var v4 pkg3_v1.PodTemplateSpec - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 - } -} - -func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv4 := &x.Replicas - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(yyv4)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv7 := &x.Replicas - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*int32)(yyv7)) = int32(r.DecodeInt(32)) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.Selector) != 0 - yyq2[2] = x.TargetSelector != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { 
- var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv4 := &x.Replicas - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(yyv4)) = int32(r.DecodeInt(32)) - } - } - case "selector": - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv6 := &x.Selector - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecMapStringStringX(yyv6, false, d) - } - } - case "targetSelector": - if r.TryDecodeAsNil() { - x.TargetSelector = "" - } else { - yyv8 := &x.TargetSelector - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv11 := &x.Replicas - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int32)(yyv11)) = int32(r.DecodeInt(32)) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv13 := &x.Selector - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - z.F.DecMapStringStringX(yyv13, false, d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetSelector = "" - } else { - yyv15 := &x.TargetSelector - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - 
*((*string)(yyv15)) = r.DecodeString() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - 
r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > 
l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *StatefulSet) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StatefulSet) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StatefulSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = 
r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = StatefulSetSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = StatefulSetStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StatefulSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = StatefulSetSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = StatefulSetStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *StatefulSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - 
z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != nil - yyq2[1] = x.Selector != nil - yyq2[3] = len(x.VolumeClaimTemplates) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Replicas == nil { - r.EncodeNil() - } else { - yy4 := *x.Replicas - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Replicas == nil { - r.EncodeNil() - } else { - yy6 := *x.Replicas - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy12 := &x.Template - yy12.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Template - yy14.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.VolumeClaimTemplates == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - h.encSlicev1_PersistentVolumeClaim(([]pkg3_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeClaimTemplates")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumeClaimTemplates == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - h.encSlicev1_PersistentVolumeClaim(([]pkg3_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StatefulSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StatefulSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_v1.LabelSelector) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg3_v1.PodTemplateSpec{} - } else { - yyv8 := &x.Template - yyv8.CodecDecodeSelf(d) - } - case "volumeClaimTemplates": - if r.TryDecodeAsNil() { - x.VolumeClaimTemplates = nil - } else { - yyv9 := &x.VolumeClaimTemplates - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSlicev1_PersistentVolumeClaim((*[]pkg3_v1.PersistentVolumeClaim)(yyv9), d) - } - } - case "serviceName": - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - yyv11 := &x.ServiceName - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StatefulSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } 
else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_v1.LabelSelector) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg3_v1.PodTemplateSpec{} - } else { - yyv18 := &x.Template - yyv18.CodecDecodeSelf(d) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeClaimTemplates = nil - } else { - yyv19 := &x.VolumeClaimTemplates - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSlicev1_PersistentVolumeClaim((*[]pkg3_v1.PersistentVolumeClaim)(yyv19), d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - yyv21 := &x.ServiceName - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *StatefulSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ObservedGeneration != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy4 := *x.ObservedGeneration - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - 
} - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy6 := *x.ObservedGeneration - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StatefulSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StatefulSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "observedGeneration": - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv6 := &x.Replicas - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(yyv6)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StatefulSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv11 := &x.Replicas - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int32)(yyv11)) = int32(r.DecodeInt(32)) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *StatefulSetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - 
_ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceStatefulSet(([]StatefulSet)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceStatefulSet(([]StatefulSet)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StatefulSetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StatefulSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceStatefulSet((*[]StatefulSet)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StatefulSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if 
yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceStatefulSet((*[]StatefulSet)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if 
r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = DeploymentSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = DeploymentStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = DeploymentSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = DeploymentStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [9]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != nil - yyq2[1] = x.Selector != nil - yyq2[3] = true - yyq2[4] = x.MinReadySeconds != 0 - yyq2[5] = x.RevisionHistoryLimit != nil - yyq2[6] = x.Paused != false - yyq2[7] = x.RollbackTo != nil - yyq2[8] = x.ProgressDeadlineSeconds != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(9) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Replicas == nil { - r.EncodeNil() - } else { - yy4 := *x.Replicas - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Replicas == nil { - r.EncodeNil() - } else { - yy6 := *x.Replicas - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy12 := &x.Template - yy12.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Template - yy14.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy17 := &x.Strategy - yy17.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("strategy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy19 := &x.Strategy - yy19.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.RevisionHistoryLimit == nil { - r.EncodeNil() - } else { - yy25 := *x.RevisionHistoryLimit - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeInt(int64(yy25)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RevisionHistoryLimit == nil { - r.EncodeNil() - } else { - yy27 := *x.RevisionHistoryLimit - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeInt(int64(yy27)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - r.EncodeBool(bool(x.Paused)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("paused")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - r.EncodeBool(bool(x.Paused)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.RollbackTo == nil { - r.EncodeNil() - } else { - x.RollbackTo.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RollbackTo == nil { - r.EncodeNil() - } else { - x.RollbackTo.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.ProgressDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy36 := *x.ProgressDeadlineSeconds - yym37 := z.EncBinary() - _ = yym37 - if false { - } else { - r.EncodeInt(int64(yy36)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("progressDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ProgressDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy38 := *x.ProgressDeadlineSeconds - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - r.EncodeInt(int64(yy38)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - 
if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_v1.LabelSelector) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg3_v1.PodTemplateSpec{} - } else { - yyv8 := &x.Template - yyv8.CodecDecodeSelf(d) - } - case "strategy": - if r.TryDecodeAsNil() { - x.Strategy = DeploymentStrategy{} - } else { - yyv9 := &x.Strategy - yyv9.CodecDecodeSelf(d) - } - case "minReadySeconds": - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - yyv10 := &x.MinReadySeconds - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(yyv10)) = int32(r.DecodeInt(32)) - } - } - case "revisionHistoryLimit": - if r.TryDecodeAsNil() { - if x.RevisionHistoryLimit != nil { - x.RevisionHistoryLimit = nil - } - } else { - if x.RevisionHistoryLimit == nil { - x.RevisionHistoryLimit = new(int32) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - case "paused": - if r.TryDecodeAsNil() { - x.Paused = false - } else { - yyv14 := &x.Paused - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*bool)(yyv14)) = r.DecodeBool() - } - } - case "rollbackTo": - if r.TryDecodeAsNil() { - if x.RollbackTo != nil { - x.RollbackTo = nil - } - } else { - if x.RollbackTo == nil { - x.RollbackTo = new(RollbackConfig) - } - x.RollbackTo.CodecDecodeSelf(d) - } - case "progressDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ProgressDeadlineSeconds != nil { - x.ProgressDeadlineSeconds = nil - } - } else { - if x.ProgressDeadlineSeconds == nil { - x.ProgressDeadlineSeconds = new(int32) - } - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj19 int - var yyb19 bool - var yyhl19 bool = l >= 0 - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if 
yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_v1.LabelSelector) - } - yym23 := z.DecBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg3_v1.PodTemplateSpec{} - } else { - yyv24 := &x.Template - yyv24.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Strategy = DeploymentStrategy{} - } else { - yyv25 := &x.Strategy - yyv25.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - yyv26 := &x.MinReadySeconds - yym27 := z.DecBinary() - _ = yym27 - if false { - } else { - *((*int32)(yyv26)) = int32(r.DecodeInt(32)) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RevisionHistoryLimit != nil { - x.RevisionHistoryLimit = nil - } - } else { - if x.RevisionHistoryLimit == nil { - x.RevisionHistoryLimit = new(int32) - } - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Paused = false - } else { - yyv30 := &x.Paused - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - *((*bool)(yyv30)) = r.DecodeBool() - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RollbackTo != nil { - x.RollbackTo = nil - } - } else { - if x.RollbackTo == nil { - x.RollbackTo = new(RollbackConfig) - } - x.RollbackTo.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 
> l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ProgressDeadlineSeconds != nil { - x.ProgressDeadlineSeconds = nil - } - } else { - if x.ProgressDeadlineSeconds == nil { - x.ProgressDeadlineSeconds = new(int32) - } - yym34 := z.DecBinary() - _ = yym34 - if false { - } else { - *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) - } - } - for { - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj19-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[3] = len(x.UpdatedAnnotations) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.UpdatedAnnotations == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - 
z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.UpdatedAnnotations == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy16 := &x.RollbackTo - yy16.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy18 := &x.RollbackTo - yy18.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv8 := &x.Name - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "updatedAnnotations": - if r.TryDecodeAsNil() { - x.UpdatedAnnotations = nil - } else { - yyv10 := &x.UpdatedAnnotations - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - z.F.DecMapStringStringX(yyv10, false, d) - } - } - case "rollbackTo": - if r.TryDecodeAsNil() { - x.RollbackTo = RollbackConfig{} - } else { - yyv12 := &x.RollbackTo - yyv12.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv14 := &x.Kind - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv16 := &x.APIVersion - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv18 := &x.Name - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*string)(yyv18)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UpdatedAnnotations = nil - } else { - yyv20 := &x.UpdatedAnnotations - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - z.F.DecMapStringStringX(yyv20, false, d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RollbackTo = RollbackConfig{} - } else { - yyv22 := &x.RollbackTo - yyv22.CodecDecodeSelf(d) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Revision != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Revision)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revision")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Revision)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "revision": - if r.TryDecodeAsNil() { - x.Revision = 0 - } else { - yyv4 := &x.Revision - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(yyv4)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Revision = 0 - } else { - yyv7 := &x.Revision - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*int64)(yyv7)) = int64(r.DecodeInt(64)) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 
bool = false - yyq2[0] = x.Type != "" - yyq2[1] = x.RollingUpdate != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.RollingUpdate == nil { - r.EncodeNil() - } else { - x.RollingUpdate.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RollingUpdate == nil { - r.EncodeNil() - } else { - x.RollingUpdate.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "rollingUpdate": - if r.TryDecodeAsNil() { - if x.RollingUpdate != nil { - x.RollingUpdate = nil - } - } else { - if x.RollingUpdate == nil { - x.RollingUpdate = new(RollingUpdateDeployment) - } - x.RollingUpdate.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = 
yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv7 := &x.Type - yyv7.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RollingUpdate != nil { - x.RollingUpdate = nil - } - } else { - if x.RollingUpdate == nil { - x.RollingUpdate = new(RollingUpdateDeployment) - } - x.RollingUpdate.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.MaxUnavailable != nil - yyq2[1] = x.MaxSurge != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.MaxUnavailable == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { - } else if !yym4 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxUnavailable) - } else { - z.EncFallback(x.MaxUnavailable) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MaxUnavailable == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { - } else if !yym5 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxUnavailable) - } else { - z.EncFallback(x.MaxUnavailable) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.MaxSurge == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && 
z.EncExt(x.MaxSurge) { - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxSurge) - } else { - z.EncFallback(x.MaxSurge) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxSurge")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MaxSurge == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxSurge) - } else { - z.EncFallback(x.MaxSurge) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "maxUnavailable": - if r.TryDecodeAsNil() { - if x.MaxUnavailable != nil { - x.MaxUnavailable = nil - } - } else { - if x.MaxUnavailable == nil { - x.MaxUnavailable = new(pkg5_intstr.IntOrString) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxUnavailable) - } else { - z.DecFallback(x.MaxUnavailable, false) - } - } - case "maxSurge": - if r.TryDecodeAsNil() { - if x.MaxSurge != nil { - x.MaxSurge = nil - } - } else { - if x.MaxSurge == nil { - x.MaxSurge = new(pkg5_intstr.IntOrString) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxSurge) - } else { - z.DecFallback(x.MaxSurge, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - 
} - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MaxUnavailable != nil { - x.MaxUnavailable = nil - } - } else { - if x.MaxUnavailable == nil { - x.MaxUnavailable = new(pkg5_intstr.IntOrString) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxUnavailable) - } else { - z.DecFallback(x.MaxUnavailable, false) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MaxSurge != nil { - x.MaxSurge = nil - } - } else { - if x.MaxSurge == nil { - x.MaxSurge = new(pkg5_intstr.IntOrString) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxSurge) - } else { - z.DecFallback(x.MaxSurge, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ObservedGeneration != 0 - yyq2[1] = x.Replicas != 0 - yyq2[2] = x.UpdatedReplicas != 0 - yyq2[3] = x.ReadyReplicas != 0 - yyq2[4] = x.AvailableReplicas != 0 - yyq2[5] = x.UnavailableReplicas != 0 - yyq2[6] = len(x.Conditions) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.UpdatedReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.UpdatedReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.ReadyReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.ReadyReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.UnavailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("unavailableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.UnavailableReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - yyv4 := &x.ObservedGeneration - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(yyv4)) = int64(r.DecodeInt(64)) - } - } - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv6 := &x.Replicas - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(yyv6)) = int32(r.DecodeInt(32)) - } - } - case "updatedReplicas": - if r.TryDecodeAsNil() { - x.UpdatedReplicas = 0 - } else { - yyv8 := &x.UpdatedReplicas - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(yyv8)) = int32(r.DecodeInt(32)) - } - } - case "readyReplicas": - if r.TryDecodeAsNil() { - x.ReadyReplicas = 0 - } else { - yyv10 := &x.ReadyReplicas - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(yyv10)) = int32(r.DecodeInt(32)) - } - } - case "availableReplicas": - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - yyv12 := &x.AvailableReplicas - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int32)(yyv12)) = int32(r.DecodeInt(32)) - } - } - case "unavailableReplicas": - if r.TryDecodeAsNil() { - x.UnavailableReplicas = 0 - } else { - yyv14 := &x.UnavailableReplicas - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*int32)(yyv14)) = int32(r.DecodeInt(32)) - } - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv16 := &x.Conditions - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv16), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - yyv19 := &x.ObservedGeneration - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int64)(yyv19)) = int64(r.DecodeInt(64)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv21 := &x.Replicas - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int32)(yyv21)) = int32(r.DecodeInt(32)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UpdatedReplicas = 0 - } else { - yyv23 := &x.UpdatedReplicas - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*int32)(yyv23)) = int32(r.DecodeInt(32)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadyReplicas = 0 - } else { - yyv25 := &x.ReadyReplicas - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*int32)(yyv25)) = int32(r.DecodeInt(32)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - yyv27 := &x.AvailableReplicas - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*int32)(yyv27)) = int32(r.DecodeInt(32)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UnavailableReplicas = 0 - } else { - yyv29 := &x.UnavailableReplicas - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*int32)(yyv29)) = int32(r.DecodeInt(32)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv31 := &x.Conditions - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv31), d) - } - } - for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x DeploymentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DeploymentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *DeploymentCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf7 := &x.Status - yysf7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yysf8 := &x.Status - yysf8.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastUpdateTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastUpdateTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "lastUpdateTime": - if r.TryDecodeAsNil() { - x.LastUpdateTime = pkg1_v1.Time{} - } else { - yyv6 := &x.LastUpdateTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_v1.Time{} - } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv10 := &x.Reason - yym11 := 
z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv12 := &x.Message - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv15 := &x.Type - yyv15.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv16 := &x.Status - yyv16.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastUpdateTime = pkg1_v1.Time{} - } else { - yyv17 := &x.LastUpdateTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_v1.Time{} - } else { - yyv19 := &x.LastTransitionTime - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(yyv19) { - } else if yym20 { - z.DecBinaryUnmarshal(yyv19) - } else if !yym20 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv19) - } else { - z.DecFallback(yyv19, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv21 := &x.Reason - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv23 := &x.Message - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*string)(yyv23)) = r.DecodeString() - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceDeployment(([]Deployment)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceDeployment(([]Deployment)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} 
- -func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceDeployment((*[]Deployment)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if 
yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceDeployment((*[]Deployment)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSlicev1_PersistentVolumeClaim(v []pkg3_v1.PersistentVolumeClaim, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicev1_PersistentVolumeClaim(v *[]pkg3_v1.PersistentVolumeClaim, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []pkg3_v1.PersistentVolumeClaim{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 376) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]pkg3_v1.PersistentVolumeClaim, yyrl1) - } - } else { - yyv1 = make([]pkg3_v1.PersistentVolumeClaim, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg3_v1.PersistentVolumeClaim{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, pkg3_v1.PersistentVolumeClaim{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg3_v1.PersistentVolumeClaim{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, pkg3_v1.PersistentVolumeClaim{}) // var yyz1 pkg3_v1.PersistentVolumeClaim - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg3_v1.PersistentVolumeClaim{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true 
- } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg3_v1.PersistentVolumeClaim{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceStatefulSet(v []StatefulSet, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceStatefulSet(v *[]StatefulSet, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []StatefulSet{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 856) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]StatefulSet, yyrl1) - } - } else { - yyv1 = make([]StatefulSet, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = StatefulSet{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, StatefulSet{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = StatefulSet{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, StatefulSet{}) // var yyz1 StatefulSet - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = StatefulSet{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []StatefulSet{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceDeploymentCondition(v []DeploymentCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDeploymentCondition(v *[]DeploymentCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []DeploymentCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = 
z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]DeploymentCondition, yyrl1) - } - } else { - yyv1 = make([]DeploymentCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DeploymentCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, DeploymentCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DeploymentCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, DeploymentCondition{}) // var yyz1 DeploymentCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = DeploymentCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []DeploymentCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Deployment{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 920) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Deployment, yyrl1) - } - } else { - yyv1 = make([]Deployment, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Deployment{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) 
- } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Deployment{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.conversion.go deleted file mode 100644 index 9226c82d9..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,166 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/client-go/pkg/api" - api_v1 "k8s.io/client-go/pkg/api/v1" - apps "k8s.io/client-go/pkg/apis/apps" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_StatefulSet_To_apps_StatefulSet, - Convert_apps_StatefulSet_To_v1beta1_StatefulSet, - Convert_v1beta1_StatefulSetList_To_apps_StatefulSetList, - Convert_apps_StatefulSetList_To_v1beta1_StatefulSetList, - Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec, - Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec, - Convert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus, - Convert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus, - ) -} - -func autoConvert_v1beta1_StatefulSet_To_apps_StatefulSet(in *StatefulSet, out *apps.StatefulSet, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_StatefulSet_To_apps_StatefulSet(in *StatefulSet, out *apps.StatefulSet, s conversion.Scope) error { - return autoConvert_v1beta1_StatefulSet_To_apps_StatefulSet(in, out, s) -} - -func autoConvert_apps_StatefulSet_To_v1beta1_StatefulSet(in *apps.StatefulSet, out *StatefulSet, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_apps_StatefulSet_To_v1beta1_StatefulSet(in *apps.StatefulSet, out *StatefulSet, s conversion.Scope) error { - return 
autoConvert_apps_StatefulSet_To_v1beta1_StatefulSet(in, out, s) -} - -func autoConvert_v1beta1_StatefulSetList_To_apps_StatefulSetList(in *StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]apps.StatefulSet, len(*in)) - for i := range *in { - if err := Convert_v1beta1_StatefulSet_To_apps_StatefulSet(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_StatefulSetList_To_apps_StatefulSetList(in *StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error { - return autoConvert_v1beta1_StatefulSetList_To_apps_StatefulSetList(in, out, s) -} - -func autoConvert_apps_StatefulSetList_To_v1beta1_StatefulSetList(in *apps.StatefulSetList, out *StatefulSetList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StatefulSet, len(*in)) - for i := range *in { - if err := Convert_apps_StatefulSet_To_v1beta1_StatefulSet(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]StatefulSet, 0) - } - return nil -} - -func Convert_apps_StatefulSetList_To_v1beta1_StatefulSetList(in *apps.StatefulSetList, out *StatefulSetList, s conversion.Scope) error { - return autoConvert_apps_StatefulSetList_To_v1beta1_StatefulSetList(in, out, s) -} - -func autoConvert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - out.VolumeClaimTemplates = *(*[]api.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) - out.ServiceName = in.ServiceName - return nil -} - -func autoConvert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *StatefulSetSpec, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - out.VolumeClaimTemplates = *(*[]api_v1.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) - out.ServiceName = in.ServiceName - return nil -} - -func autoConvert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(in *StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error { - out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) - out.Replicas = in.Replicas - return nil -} - -func Convert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(in *StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error { - return autoConvert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(in, out, s) -} - -func autoConvert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(in *apps.StatefulSetStatus, out *StatefulSetStatus, s conversion.Scope) error { - out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) - out.Replicas = in.Replicas - return nil -} - -func Convert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(in *apps.StatefulSetStatus, 
out *StatefulSetStatus, s conversion.Scope) error { - return autoConvert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index d9b9d3d3c..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,355 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" - api_v1 "k8s.io/client-go/pkg/api/v1" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Deployment, InType: reflect.TypeOf(&Deployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentRollback, InType: reflect.TypeOf(&DeploymentRollback{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentSpec, InType: reflect.TypeOf(&DeploymentSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStatus, InType: reflect.TypeOf(&DeploymentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStrategy, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Scale, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSet, InType: reflect.TypeOf(&StatefulSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSetList, InType: reflect.TypeOf(&StatefulSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSetSpec, InType: reflect.TypeOf(&StatefulSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSetStatus, InType: 
reflect.TypeOf(&StatefulSetStatus{})}, - ) -} - -func DeepCopy_v1beta1_Deployment(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Deployment) - out := out.(*Deployment) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_DeploymentStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_DeploymentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentCondition) - out := out.(*DeploymentCondition) - *out = *in - out.LastUpdateTime = in.LastUpdateTime.DeepCopy() - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - return nil - } -} - -func DeepCopy_v1beta1_DeploymentList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentList) - out := out.(*DeploymentList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Deployment, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_Deployment(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_DeploymentRollback(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentRollback) - out := out.(*DeploymentRollback) - *out = *in - if in.UpdatedAnnotations != nil { - in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - return nil - } -} - -func DeepCopy_v1beta1_DeploymentSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentSpec) - out := out.(*DeploymentSpec) - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, c); err != nil { - return err - } - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - if in.RollbackTo != nil { - in, out := &in.RollbackTo, &out.RollbackTo - *out = new(RollbackConfig) - **out = **in - } - if in.ProgressDeadlineSeconds != nil { - in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_v1beta1_DeploymentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentStatus) - out := out.(*DeploymentStatus) - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]DeploymentCondition, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_DeploymentCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_DeploymentStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentStrategy) - out := out.(*DeploymentStrategy) - *out = *in - if in.RollingUpdate != nil { - in, out := 
&in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDeployment) - if err := DeepCopy_v1beta1_RollingUpdateDeployment(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1beta1_RollbackConfig(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RollbackConfig) - out := out.(*RollbackConfig) - *out = *in - return nil - } -} - -func DeepCopy_v1beta1_RollingUpdateDeployment(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RollingUpdateDeployment) - out := out.(*RollingUpdateDeployment) - *out = *in - if in.MaxUnavailable != nil { - in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in - } - if in.MaxSurge != nil { - in, out := &in.MaxSurge, &out.MaxSurge - *out = new(intstr.IntOrString) - **out = **in - } - return nil - } -} - -func DeepCopy_v1beta1_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Scale) - out := out.(*Scale) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_ScaleStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ScaleSpec) - out := out.(*ScaleSpec) - *out = *in - return nil - } -} - -func DeepCopy_v1beta1_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ScaleStatus) - out := out.(*ScaleStatus) - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - return nil - } -} - -func DeepCopy_v1beta1_StatefulSet(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StatefulSet) - out := out.(*StatefulSet) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_StatefulSetSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_StatefulSetStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_StatefulSetList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StatefulSetList) - out := out.(*StatefulSetList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StatefulSet, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_StatefulSet(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_StatefulSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StatefulSetSpec) - out := out.(*StatefulSetSpec) - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]api_v1.PersistentVolumeClaim, len(*in)) - for i := range *in { - if err 
:= api_v1.DeepCopy_v1_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_StatefulSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StatefulSetStatus) - out := out.(*StatefulSetStatus) - *out = *in - if in.ObservedGeneration != nil { - in, out := &in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = **in - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.defaults.go deleted file mode 100644 index 9b822c84c..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.defaults.go +++ /dev/null @@ -1,326 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by defaulter-gen. Do not edit it manually! - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/client-go/pkg/api/v1" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. 
-func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*Deployment)) }) - scheme.AddTypeDefaultingFunc(&DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*DeploymentList)) }) - scheme.AddTypeDefaultingFunc(&StatefulSet{}, func(obj interface{}) { SetObjectDefaults_StatefulSet(obj.(*StatefulSet)) }) - scheme.AddTypeDefaultingFunc(&StatefulSetList{}, func(obj interface{}) { SetObjectDefaults_StatefulSetList(obj.(*StatefulSetList)) }) - return nil -} - -func SetObjectDefaults_Deployment(in *Deployment) { - SetDefaults_Deployment(in) - v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) - for i := range in.Spec.Template.Spec.Volumes { - a := &in.Spec.Template.Spec.Volumes[i] - v1.SetDefaults_Volume(a) - if a.VolumeSource.Secret != nil { - v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - if a.VolumeSource.Projected != nil { - v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) - for j := range a.VolumeSource.Projected.Sources { - b := &a.VolumeSource.Projected.Sources[j] - if b.DownwardAPI != nil { - for k := range b.DownwardAPI.Items { - c := &b.DownwardAPI.Items[k] - if c.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(c.FieldRef) - } - } - } - } - } - if a.VolumeSource.ScaleIO != nil { - v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) - } - } - for i := range in.Spec.Template.Spec.InitContainers { - a := &in.Spec.Template.Spec.InitContainers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Spec.Template.Spec.Containers { - a := &in.Spec.Template.Spec.Containers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := 
range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } -} - -func SetObjectDefaults_DeploymentList(in *DeploymentList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Deployment(a) - } -} - -func SetObjectDefaults_StatefulSet(in *StatefulSet) { - SetDefaults_StatefulSet(in) - v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) - for i := range in.Spec.Template.Spec.Volumes { - a := &in.Spec.Template.Spec.Volumes[i] - v1.SetDefaults_Volume(a) - if a.VolumeSource.Secret != nil { - v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - if a.VolumeSource.Projected != nil { - v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) - for j := range a.VolumeSource.Projected.Sources { - b := &a.VolumeSource.Projected.Sources[j] - if b.DownwardAPI != nil { - for k := range b.DownwardAPI.Items { - c := &b.DownwardAPI.Items[k] - if c.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(c.FieldRef) - } - } - } - } - } - if a.VolumeSource.ScaleIO != nil { - v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) - } - } - for i := range in.Spec.Template.Spec.InitContainers { - a := &in.Spec.Template.Spec.InitContainers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if 
a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Spec.Template.Spec.Containers { - a := &in.Spec.Template.Spec.Containers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Spec.VolumeClaimTemplates { - a := &in.Spec.VolumeClaimTemplates[i] - v1.SetDefaults_PersistentVolumeClaim(a) - v1.SetDefaults_ResourceList(&a.Spec.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Spec.Resources.Requests) - v1.SetDefaults_ResourceList(&a.Status.Capacity) - } -} - -func SetObjectDefaults_StatefulSetList(in *StatefulSetList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_StatefulSet(a) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/apps/zz_generated.deepcopy.go deleted file mode 100644 index 5048531ca..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/apps/zz_generated.deepcopy.go +++ /dev/null @@ -1,125 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package apps - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/client-go/pkg/api" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_apps_StatefulSet, InType: reflect.TypeOf(&StatefulSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_apps_StatefulSetList, InType: reflect.TypeOf(&StatefulSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_apps_StatefulSetSpec, InType: reflect.TypeOf(&StatefulSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_apps_StatefulSetStatus, InType: reflect.TypeOf(&StatefulSetStatus{})}, - ) -} - -func DeepCopy_apps_StatefulSet(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StatefulSet) - out := out.(*StatefulSet) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_apps_StatefulSetSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_apps_StatefulSetStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_apps_StatefulSetList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StatefulSetList) - out := out.(*StatefulSetList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StatefulSet, len(*in)) - for i := range *in { - if err := DeepCopy_apps_StatefulSet(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_apps_StatefulSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StatefulSetSpec) - out := out.(*StatefulSetSpec) - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]api.PersistentVolumeClaim, len(*in)) - for i := range *in { - if err := api.DeepCopy_api_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_apps_StatefulSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StatefulSetStatus) - out := out.(*StatefulSetStatus) - *out = *in - if in.ObservedGeneration != nil { - in, out := &in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = **in - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/authentication/OWNERS deleted file mode 100755 index 4135522b2..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/OWNERS +++ /dev/null @@ -1,9 +0,0 @@ -reviewers: -- liggitt -- lavalamp -- wojtek-t -- deads2k -- sttts -- timothysc -- mbohlool -- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/install/install.go b/vendor/k8s.io/client-go/pkg/apis/authentication/install/install.go deleted file mode 100644 index b8a9521a6..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/install/install.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the experimental API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/apis/authentication" - "k8s.io/client-go/pkg/apis/authentication/v1" - "k8s.io/client-go/pkg/apis/authentication/v1beta1" -) - -func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: authentication.GroupName, - VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v1beta1.SchemeGroupVersion.Version}, - ImportPrefix: "k8s.io/client-go/pkg/apis/authentication", - RootScopedKinds: sets.NewString("TokenReview"), - AddInternalObjectsToScheme: authentication.AddToScheme, - }, - announced.VersionToSchemeFunc{ - v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, - v1.SchemeGroupVersion.Version: v1.AddToScheme, - }, - ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/types.go b/vendor/k8s.io/client-go/pkg/apis/authentication/types.go deleted file mode 100644 index 9c1e66b7b..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/types.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package authentication - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // ImpersonateUserHeader is used to impersonate a particular user during an API server request - ImpersonateUserHeader = "Impersonate-User" - - // ImpersonateGroupHeader is used to impersonate a particular group during an API server request. - // It can be repeated multiplied times for multiple groups. - ImpersonateGroupHeader = "Impersonate-Group" - - // ImpersonateUserExtraHeaderPrefix is a prefix for any header used to impersonate an entry in the - // extra map[string][]string for user.Info. The key will be every after the prefix. 
- // It can be repeated multiplied times for multiple map keys and the same key can be repeated multiple - // times to have multiple elements in the slice under a single key - ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-" -) - -// +genclient=true -// +nonNamespaced=true -// +noMethods=true - -// TokenReview attempts to authenticate a token to a known user. -type TokenReview struct { - metav1.TypeMeta - // ObjectMeta fulfills the metav1.ObjectMetaAccessor interface so that the stock - // REST handler paths work - metav1.ObjectMeta - - // Spec holds information about the request being evaluated - Spec TokenReviewSpec - - // Status is filled in by the server and indicates whether the request can be authenticated. - Status TokenReviewStatus -} - -// TokenReviewSpec is a description of the token authentication request. -type TokenReviewSpec struct { - // Token is the opaque bearer token. - Token string -} - -// TokenReviewStatus is the result of the token authentication request. -// This type mirrors the authentication.Token interface -type TokenReviewStatus struct { - // Authenticated indicates that the token was associated with a known user. - Authenticated bool - // User is the UserInfo associated with the provided token. - User UserInfo - // Error indicates that the token couldn't be checked - Error string -} - -// UserInfo holds the information about the user needed to implement the -// user.Info interface. -type UserInfo struct { - // The name that uniquely identifies this user among all active users. - Username string - // A unique value that identifies this user across time. If this user is - // deleted and another user by the same name is added, they will have - // different UIDs. - UID string - // The names of groups this user is a part of. - Groups []string - // Any additional information provided by the authenticator. - Extra map[string]ExtraValue -} - -// ExtraValue masks the value so protobuf can generate -type ExtraValue []string diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.pb.go deleted file mode 100644 index e3dfb9d3a..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.pb.go +++ /dev/null @@ -1,1281 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto - - It has these top-level messages: - ExtraValue - TokenReview - TokenReviewSpec - TokenReviewStatus - UserInfo -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *TokenReview) Reset() { *m = TokenReview{} } -func (*TokenReview) ProtoMessage() {} -func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } -func (*TokenReviewSpec) ProtoMessage() {} -func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } -func (*TokenReviewStatus) ProtoMessage() {} -func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *UserInfo) Reset() { *m = UserInfo{} } -func (*UserInfo) ProtoMessage() {} -func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func init() { - proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.ExtraValue") - proto.RegisterType((*TokenReview)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReview") - proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReviewSpec") - proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReviewStatus") - proto.RegisterType((*UserInfo)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.UserInfo") -} -func (m ExtraValue) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m ExtraValue) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m) > 0 { - for _, s := range m { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *TokenReview) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *TokenReview) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - data[i] = 0x1a - i++ - i = 
encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *TokenReviewSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *TokenReviewSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Token))) - i += copy(data[i:], m.Token) - return i, nil -} - -func (m *TokenReviewStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *TokenReviewStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - if m.Authenticated { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.User.Size())) - n4, err := m.User.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Error))) - i += copy(data[i:], m.Error) - return i, nil -} - -func (m *UserInfo) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *UserInfo) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Username))) - i += copy(data[i:], m.Username) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.UID))) - i += copy(data[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - data[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.Extra) > 0 { - for k := range m.Extra { - data[i] = 0x22 - i++ - v := m.Extra[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n5, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - } - } - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m ExtraValue) Size() (n int) { - var l int - _ = l - if len(m) > 0 { - for _, s := range m { - l = len(s) - n += 1 
+ l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *TokenReview) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TokenReviewSpec) Size() (n int) { - var l int - _ = l - l = len(m.Token) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TokenReviewStatus) Size() (n int) { - var l int - _ = l - n += 2 - l = m.User.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Error) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *UserInfo) Size() (n int) { - var l int - _ = l - l = len(m.Username) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Extra) > 0 { - for k, v := range m.Extra { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *TokenReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *TokenReviewSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TokenReviewSpec{`, - `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `}`, - }, "") - return s -} -func (this *TokenReviewStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TokenReviewStatus{`, - `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, - `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *UserInfo) String() string { - if this == nil { - return "nil" - } - keysForExtra := make([]string, 0, len(this.Extra)) - for k := range this.Extra { - keysForExtra = append(keysForExtra, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - mapStringForExtra := "map[string]ExtraValue{" - for _, k := range keysForExtra { - mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) - } - mapStringForExtra += "}" - s := strings.Join([]string{`&UserInfo{`, - `Username:` + fmt.Sprintf("%v", this.Username) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, - `Extra:` + mapStringForExtra + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ExtraValue) 
Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - *m = append(*m, string(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TokenReview) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TokenReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TokenReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TokenReviewSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TokenReviewSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TokenReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TokenReviewStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TokenReviewStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TokenReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Authenticated", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Authenticated = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.User.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserInfo) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Username = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.UID = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.Extra == nil { - m.Extra = make(map[string]ExtraValue) - } - m.Extra[mapkey] = *mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - 
var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -var fileDescriptorGenerated = []byte{ - // 655 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x53, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0xb5, 0xf3, 0x53, 0x25, 0x93, 0xaf, 0x1f, 0x65, 0x24, 0xa4, 0x28, 0x12, 0x4e, 0x14, 0x58, - 0x74, 0x51, 0xc6, 0xa4, 0xa0, 0x52, 0x15, 0x10, 0xaa, 0x45, 0x85, 0xba, 0x00, 0xa4, 0x81, 0x22, - 0xc4, 0x06, 0x26, 0xce, 0xad, 0x63, 0x52, 0xff, 0x68, 0x3c, 0x36, 0xed, 0xae, 0x8f, 0xc0, 0x92, - 0x25, 0xaf, 0xc1, 0x1b, 0x74, 0x47, 0x77, 0xb0, 0x40, 0x15, 0x0d, 0x2f, 0x82, 0x66, 0x3c, 0xd4, - 0x2e, 0x69, 0x85, 0xda, 0xdd, 0xcc, 0x99, 0x7b, 0xce, 0xbd, 0xe7, 0xde, 0xb9, 0xe8, 0xc1, 0x64, - 0x35, 0x21, 0x7e, 0x64, 0x4f, 0xd2, 0x21, 0xf0, 0x10, 0x04, 0x24, 0x76, 0x3c, 0xf1, 0x6c, 0x16, - 0xfb, 0x89, 0xcd, 0x52, 0x31, 0x86, 0x50, 0xf8, 0x2e, 0x13, 0x7e, 0x14, 0xda, 0xd9, 0xc0, 0xf6, - 0x20, 0x04, 0xce, 0x04, 0x8c, 0x48, 0xcc, 0x23, 0x11, 0xe1, 0xa5, 0x9c, 0x4d, 0x0a, 0x36, 0x89, - 0x27, 0x1e, 0x91, 0x6c, 0x72, 0x9a, 0x4d, 0xb2, 0x41, 0xe7, 0x96, 0xe7, 0x8b, 0x71, 0x3a, 0x24, - 0x6e, 0x14, 0xd8, 0x5e, 0xe4, 0x45, 0xb6, 0x12, 0x19, 0xa6, 0xdb, 0xea, 0xa6, 0x2e, 0xea, 0x94, - 0x8b, 0x77, 0xee, 0xea, 0xd2, 0x58, 0xec, 0x07, 0xcc, 0x1d, 0xfb, 0x21, 0xf0, 0xbd, 0xa2, 0xb8, - 0x00, 0x04, 0x3b, 0xa3, 0xa4, 0x8e, 0x7d, 0x1e, 0x8b, 0xa7, 0xa1, 0xf0, 0x03, 0x98, 0x21, 0xac, - 0xfc, 0x8b, 0x90, 0xb8, 0x63, 0x08, 0xd8, 0x0c, 0xef, 0xce, 0x79, 0xbc, 0x54, 0xf8, 0x3b, 0xb6, - 0x1f, 0x8a, 0x44, 0xf0, 0x19, 0x52, 0xc9, 0x53, 0x02, 0x3c, 0x03, 0x5e, 0x18, 0x82, 0x5d, 0x16, - 0xc4, 0x3b, 0x70, 0x86, 0xa7, 0xfe, 0x3d, 0x84, 0x36, 0x76, 0x05, 0x67, 0xaf, 0xd8, 0x4e, 0x0a, - 0xb8, 0x8b, 0xea, 0xbe, 0x80, 0x20, 0x69, 
0x9b, 0xbd, 0xea, 0x62, 0xd3, 0x69, 0x4e, 0x8f, 0xba, - 0xf5, 0x4d, 0x09, 0xd0, 0x1c, 0x5f, 0x6b, 0x7c, 0xfa, 0xdc, 0x35, 0xf6, 0x7f, 0xf4, 0x8c, 0xfe, - 0x97, 0x0a, 0x6a, 0xbd, 0x8c, 0x26, 0x10, 0x52, 0xc8, 0x7c, 0xf8, 0x80, 0xdf, 0xa1, 0x86, 0xec, - 0xdb, 0x88, 0x09, 0xd6, 0x36, 0x7b, 0xe6, 0x62, 0x6b, 0xf9, 0x36, 0xd1, 0x23, 0x2c, 0xdb, 0x28, - 0x86, 0x28, 0xa3, 0x49, 0x36, 0x20, 0xcf, 0x87, 0xef, 0xc1, 0x15, 0x4f, 0x41, 0x30, 0x07, 0x1f, - 0x1c, 0x75, 0x8d, 0xe9, 0x51, 0x17, 0x15, 0x18, 0x3d, 0x51, 0xc5, 0x6f, 0x51, 0x2d, 0x89, 0xc1, - 0x6d, 0x57, 0x94, 0xfa, 0x43, 0x72, 0x91, 0x0f, 0x42, 0x4a, 0xa5, 0xbe, 0x88, 0xc1, 0x75, 0xfe, - 0xd3, 0xa9, 0x6a, 0xf2, 0x46, 0x95, 0x30, 0xf6, 0xd0, 0x5c, 0x22, 0x98, 0x48, 0x93, 0x76, 0x55, - 0xa5, 0x78, 0x74, 0xf9, 0x14, 0x4a, 0xc6, 0xf9, 0x5f, 0x27, 0x99, 0xcb, 0xef, 0x54, 0xcb, 0xf7, - 0x57, 0xd0, 0x95, 0xbf, 0xea, 0xc1, 0x37, 0x50, 0x5d, 0x48, 0x48, 0xf5, 0xae, 0xe9, 0xcc, 0x6b, - 0x66, 0x3d, 0x8f, 0xcb, 0xdf, 0xfa, 0x5f, 0x4d, 0x74, 0x75, 0x26, 0x0b, 0xbe, 0x8f, 0xe6, 0x4b, - 0xc5, 0xc0, 0x48, 0x49, 0x34, 0x9c, 0x6b, 0x5a, 0x62, 0x7e, 0xbd, 0xfc, 0x48, 0x4f, 0xc7, 0xe2, - 0xd7, 0xa8, 0x96, 0x26, 0xc0, 0x75, 0x53, 0x57, 0x2e, 0xe6, 0x78, 0x2b, 0x01, 0xbe, 0x19, 0x6e, - 0x47, 0x45, 0x37, 0x25, 0x42, 0x95, 0xa2, 0x74, 0x04, 0x9c, 0x47, 0x5c, 0x35, 0xb3, 0xe4, 0x68, - 0x43, 0x82, 0x34, 0x7f, 0xeb, 0x7f, 0xab, 0xa0, 0xc6, 0x1f, 0x15, 0xbc, 0x84, 0x1a, 0x92, 0x19, - 0xb2, 0x00, 0x74, 0x1b, 0x16, 0x34, 0x49, 0xc5, 0x48, 0x9c, 0x9e, 0x44, 0xe0, 0xeb, 0xa8, 0x9a, - 0xfa, 0x23, 0x55, 0x78, 0xd3, 0x69, 0xe9, 0xc0, 0xea, 0xd6, 0xe6, 0x63, 0x2a, 0x71, 0xdc, 0x47, - 0x73, 0x1e, 0x8f, 0xd2, 0x58, 0x0e, 0x53, 0xfe, 0x65, 0x24, 0xe7, 0xf0, 0x44, 0x21, 0x54, 0xbf, - 0xe0, 0x6d, 0x54, 0x07, 0xf9, 0xf9, 0xdb, 0xb5, 0x5e, 0x75, 0xb1, 0xb5, 0xbc, 0x7e, 0x39, 0xf7, - 0x44, 0x2d, 0xd0, 0x46, 0x28, 0xf8, 0x5e, 0xc9, 0xa5, 0xc4, 0x68, 0x2e, 0xdf, 0xe1, 0x7a, 0xc9, - 0x54, 0x0c, 0x5e, 0x40, 0xd5, 0x09, 0xec, 0xe5, 0x0e, 0xa9, 0x3c, 0xe2, 0x67, 0xa8, 0x9e, 0xc9, - 0xfd, 0xd3, 0x53, 0x58, 0xbd, 0x58, 0x1d, 0xc5, 0xfe, 0xd2, 0x5c, 0x66, 0xad, 0xb2, 0x6a, 0x3a, - 0x37, 0x0f, 0x8e, 0x2d, 0xe3, 0xf0, 0xd8, 0x32, 0xbe, 0x1f, 0x5b, 0xc6, 0xfe, 0xd4, 0x32, 0x0f, - 0xa6, 0x96, 0x79, 0x38, 0xb5, 0xcc, 0x9f, 0x53, 0xcb, 0xfc, 0xf8, 0xcb, 0x32, 0xde, 0x54, 0xb2, - 0xc1, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x6b, 0x11, 0x20, 0xa4, 0x05, 0x00, 0x00, -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types.go deleted file mode 100644 index e6ff58705..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient=true -// +nonNamespaced=true -// +noMethods=true - -// TokenReview attempts to authenticate a token to a known user. 
-// Note: TokenReview requests may be cached by the webhook token authenticator -// plugin in the kube-apiserver. -type TokenReview struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec holds information about the request being evaluated - Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status is filled in by the server and indicates whether the request can be authenticated. - // +optional - Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// TokenReviewSpec is a description of the token authentication request. -type TokenReviewSpec struct { - // Token is the opaque bearer token. - // +optional - Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` -} - -// TokenReviewStatus is the result of the token authentication request. -type TokenReviewStatus struct { - // Authenticated indicates that the token was associated with a known user. - // +optional - Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"` - // User is the UserInfo associated with the provided token. - // +optional - User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` - // Error indicates that the token couldn't be checked - // +optional - Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` -} - -// UserInfo holds the information about the user needed to implement the -// user.Info interface. -type UserInfo struct { - // The name that uniquely identifies this user among all active users. - // +optional - Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` - // A unique value that identifies this user across time. If this user is - // deleted and another user by the same name is added, they will have - // different UIDs. - // +optional - UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` - // The names of groups this user is a part of. - // +optional - Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` - // Any additional information provided by the authenticator. - // +optional - Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` -} - -// ExtraValue masks the value so protobuf can generate -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -type ExtraValue []string - -func (t ExtraValue) String() string { - return fmt.Sprintf("%v", []string(t)) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.conversion.go deleted file mode 100644 index 9c1335e91..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.conversion.go +++ /dev/null @@ -1,145 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1 - -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - authentication "k8s.io/client-go/pkg/apis/authentication" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1_TokenReview_To_authentication_TokenReview, - Convert_authentication_TokenReview_To_v1_TokenReview, - Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec, - Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec, - Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus, - Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus, - Convert_v1_UserInfo_To_authentication_UserInfo, - Convert_authentication_UserInfo_To_v1_UserInfo, - ) -} - -func autoConvert_v1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { - return autoConvert_v1_TokenReview_To_authentication_TokenReview(in, out, s) -} - -func autoConvert_authentication_TokenReview_To_v1_TokenReview(in *authentication.TokenReview, out *TokenReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_authentication_TokenReview_To_v1_TokenReview(in *authentication.TokenReview, out *TokenReview, s conversion.Scope) error { - return autoConvert_authentication_TokenReview_To_v1_TokenReview(in, out, s) -} - -func autoConvert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { - out.Token = in.Token - return nil -} - -func Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { - return autoConvert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in, out, s) -} - -func autoConvert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { - out.Token = in.Token - return nil -} - -func Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { - return autoConvert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in, out, s) -} - -func autoConvert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error { - out.Authenticated = in.Authenticated - if err := 
Convert_v1_UserInfo_To_authentication_UserInfo(&in.User, &out.User, s); err != nil { - return err - } - out.Error = in.Error - return nil -} - -func Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error { - return autoConvert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in, out, s) -} - -func autoConvert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { - out.Authenticated = in.Authenticated - if err := Convert_authentication_UserInfo_To_v1_UserInfo(&in.User, &out.User, s); err != nil { - return err - } - out.Error = in.Error - return nil -} - -func Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { - return autoConvert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in, out, s) -} - -func autoConvert_v1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { - out.Username = in.Username - out.UID = in.UID - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - out.Extra = *(*map[string]authentication.ExtraValue)(unsafe.Pointer(&in.Extra)) - return nil -} - -func Convert_v1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { - return autoConvert_v1_UserInfo_To_authentication_UserInfo(in, out, s) -} - -func autoConvert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { - out.Username = in.Username - out.UID = in.UID - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) - return nil -} - -func Convert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { - return autoConvert_authentication_UserInfo_To_v1_UserInfo(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.deepcopy.go deleted file mode 100644 index 0bc564067..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,106 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1 - -import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TokenReview, InType: reflect.TypeOf(&TokenReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TokenReviewSpec, InType: reflect.TypeOf(&TokenReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TokenReviewStatus, InType: reflect.TypeOf(&TokenReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_UserInfo, InType: reflect.TypeOf(&UserInfo{})}, - ) -} - -func DeepCopy_v1_TokenReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TokenReview) - out := out.(*TokenReview) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_TokenReviewStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_TokenReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TokenReviewSpec) - out := out.(*TokenReviewSpec) - *out = *in - return nil - } -} - -func DeepCopy_v1_TokenReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TokenReviewStatus) - out := out.(*TokenReviewStatus) - *out = *in - if err := DeepCopy_v1_UserInfo(&in.User, &out.User, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_UserInfo(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*UserInfo) - out := out.(*UserInfo) - *out = *in - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue) - for key, val := range *in { - if newVal, err := c.DeepCopy(&val); err != nil { - return err - } else { - (*out)[key] = *newVal.(*ExtraValue) - } - } - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.generated.go deleted file mode 100644 index b8990af18..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.generated.go +++ /dev/null @@ -1,1568 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
-// ************************************************************ - -package v1beta1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkg2_types "k8s.io/apimachinery/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_v1.TypeMeta - var v1 pkg2_types.UID - var v2 time.Time - _, _, _ = v0, v1, v2 - } -} - -func (x *TokenReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - 
z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TokenReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *TokenReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = TokenReviewSpec{} - } else { - yyv10 := &x.Spec - 
yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = TokenReviewStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *TokenReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = TokenReviewSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = TokenReviewStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *TokenReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Token != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Token)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("token")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Token)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TokenReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *TokenReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "token": - if r.TryDecodeAsNil() { - x.Token = "" - } else { - yyv4 := &x.Token - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *TokenReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Token = "" - } else { - yyv7 := &x.Token - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*string)(yyv7)) = r.DecodeString() - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *TokenReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x 
== nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Authenticated != false - yyq2[1] = true - yyq2[2] = x.Error != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Authenticated)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("authenticated")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Authenticated)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy7 := &x.User - yy7.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.User - yy9.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Error)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("error")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Error)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TokenReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *TokenReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "authenticated": - if r.TryDecodeAsNil() { - x.Authenticated = false - } else { - yyv4 := &x.Authenticated - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*bool)(yyv4)) = r.DecodeBool() - } - } - case "user": - if r.TryDecodeAsNil() { - x.User = UserInfo{} - } else { - yyv6 := &x.User - yyv6.CodecDecodeSelf(d) - } - case "error": - if r.TryDecodeAsNil() { - x.Error = "" - } else { - yyv7 := &x.Error - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*string)(yyv7)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *TokenReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Authenticated = false - } else { - yyv10 := &x.Authenticated - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(yyv10)) = r.DecodeBool() - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = UserInfo{} - } else { - yyv12 := &x.User - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Error = "" - } else { - yyv13 := &x.Error - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Username != "" - yyq2[1] = x.UID != "" - yyq2[2] = len(x.Groups) != 0 - yyq2[3] = len(x.Extra) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Username)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("username")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Username)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Groups == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("groups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Extra == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("extra")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Extra == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *UserInfo) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *UserInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 
bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "username": - if r.TryDecodeAsNil() { - x.Username = "" - } else { - yyv4 := &x.Username - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - yyv6 := &x.UID - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "groups": - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv8 := &x.Groups - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecSliceStringX(yyv8, false, d) - } - } - case "extra": - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv10 := &x.Extra - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *UserInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Username = "" - } else { - yyv13 := &x.Username - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - yyv15 := &x.UID - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv17 := &x.Groups - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - z.F.DecSliceStringX(yyv17, false, d) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv19 := &x.Extra - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ExtraValue) CodecEncodeSelf(e 
*codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encExtraValue((ExtraValue)(x), e) - } - } -} - -func (x *ExtraValue) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decExtraValue((*ExtraValue)(x), d) - } -} - -func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk1)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv1 == nil { - r.EncodeNil() - } else { - yyv1.CodecEncodeSelf(e) - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyl1 := r.ReadMapStart() - yybh1 := z.DecBasicHandle() - if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) - yyv1 = make(map[string]ExtraValue, yyrl1) - *v = yyv1 - } - var yymk1 string - var yymv1 ExtraValue - var yymg1 bool - if yybh1.MapValueReset { - yymg1 = true - } - if yyl1 > 0 { - for yyj1 := 0; yyj1 < yyl1; yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yyv2 := &yymk1 - yym3 := z.DecBinary() - _ = yym3 - if false { - } else { - *((*string)(yyv2)) = r.DecodeString() - } - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv4 := &yymv1 - yyv4.CodecDecodeSelf(d) - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } else if yyl1 < 0 { - for yyj1 := 0; !r.CheckBreak(); yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yyv5 := &yymk1 - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*string)(yyv5)) = r.DecodeString() - } - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv7 := &yymv1 - yyv7.CodecDecodeSelf(d) - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } // else len==0: TODO: Should we clear map entries? 
- z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv1)) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]string, yyrl1) - } - } else { - yyv1 = make([]string, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv2 := &yyv1[yyj1] - yym3 := z.DecBinary() - _ = yym3 - if false { - } else { - *((*string)(yyv2)) = r.DecodeString() - } - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv4 := &yyv1[yyj1] - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 string - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv6 := &yyv1[yyj1] - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.conversion.go deleted file mode 100644 index 5fc83362f..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,145 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1beta1 - -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - authentication "k8s.io/client-go/pkg/apis/authentication" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_TokenReview_To_authentication_TokenReview, - Convert_authentication_TokenReview_To_v1beta1_TokenReview, - Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec, - Convert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec, - Convert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus, - Convert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus, - Convert_v1beta1_UserInfo_To_authentication_UserInfo, - Convert_authentication_UserInfo_To_v1beta1_UserInfo, - ) -} - -func autoConvert_v1beta1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { - return autoConvert_v1beta1_TokenReview_To_authentication_TokenReview(in, out, s) -} - -func autoConvert_authentication_TokenReview_To_v1beta1_TokenReview(in *authentication.TokenReview, out *TokenReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_authentication_TokenReview_To_v1beta1_TokenReview(in *authentication.TokenReview, out *TokenReview, s conversion.Scope) error { - return autoConvert_authentication_TokenReview_To_v1beta1_TokenReview(in, out, s) -} - -func autoConvert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { - out.Token = in.Token - return nil -} - -func Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { - return autoConvert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in, out, s) -} - -func autoConvert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { - out.Token = in.Token - return nil -} - -func Convert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { - return autoConvert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in, out, s) -} - -func autoConvert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out 
*authentication.TokenReviewStatus, s conversion.Scope) error { - out.Authenticated = in.Authenticated - if err := Convert_v1beta1_UserInfo_To_authentication_UserInfo(&in.User, &out.User, s); err != nil { - return err - } - out.Error = in.Error - return nil -} - -func Convert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error { - return autoConvert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(in, out, s) -} - -func autoConvert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { - out.Authenticated = in.Authenticated - if err := Convert_authentication_UserInfo_To_v1beta1_UserInfo(&in.User, &out.User, s); err != nil { - return err - } - out.Error = in.Error - return nil -} - -func Convert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { - return autoConvert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in, out, s) -} - -func autoConvert_v1beta1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { - out.Username = in.Username - out.UID = in.UID - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - out.Extra = *(*map[string]authentication.ExtraValue)(unsafe.Pointer(&in.Extra)) - return nil -} - -func Convert_v1beta1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { - return autoConvert_v1beta1_UserInfo_To_authentication_UserInfo(in, out, s) -} - -func autoConvert_authentication_UserInfo_To_v1beta1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { - out.Username = in.Username - out.UID = in.UID - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) - return nil -} - -func Convert_authentication_UserInfo_To_v1beta1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { - return autoConvert_authentication_UserInfo_To_v1beta1_UserInfo(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 01260cc13..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,106 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
- -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_TokenReview, InType: reflect.TypeOf(&TokenReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_TokenReviewSpec, InType: reflect.TypeOf(&TokenReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_TokenReviewStatus, InType: reflect.TypeOf(&TokenReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_UserInfo, InType: reflect.TypeOf(&UserInfo{})}, - ) -} - -func DeepCopy_v1beta1_TokenReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TokenReview) - out := out.(*TokenReview) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_TokenReviewStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_TokenReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TokenReviewSpec) - out := out.(*TokenReviewSpec) - *out = *in - return nil - } -} - -func DeepCopy_v1beta1_TokenReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TokenReviewStatus) - out := out.(*TokenReviewStatus) - *out = *in - if err := DeepCopy_v1beta1_UserInfo(&in.User, &out.User, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_UserInfo(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*UserInfo) - out := out.(*UserInfo) - *out = *in - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue) - for key, val := range *in { - if newVal, err := c.DeepCopy(&val); err != nil { - return err - } else { - (*out)[key] = *newVal.(*ExtraValue) - } - } - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authentication/zz_generated.deepcopy.go deleted file mode 100644 index ec322c5f8..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/zz_generated.deepcopy.go +++ /dev/null @@ -1,106 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
- -package authentication - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_TokenReview, InType: reflect.TypeOf(&TokenReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_TokenReviewSpec, InType: reflect.TypeOf(&TokenReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_TokenReviewStatus, InType: reflect.TypeOf(&TokenReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_UserInfo, InType: reflect.TypeOf(&UserInfo{})}, - ) -} - -func DeepCopy_authentication_TokenReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TokenReview) - out := out.(*TokenReview) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_authentication_TokenReviewStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_authentication_TokenReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TokenReviewSpec) - out := out.(*TokenReviewSpec) - *out = *in - return nil - } -} - -func DeepCopy_authentication_TokenReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TokenReviewStatus) - out := out.(*TokenReviewStatus) - *out = *in - if err := DeepCopy_authentication_UserInfo(&in.User, &out.User, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_authentication_UserInfo(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*UserInfo) - out := out.(*UserInfo) - *out = *in - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue) - for key, val := range *in { - if newVal, err := c.DeepCopy(&val); err != nil { - return err - } else { - (*out)[key] = *newVal.(*ExtraValue) - } - } - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/OWNERS b/vendor/k8s.io/client-go/pkg/apis/authorization/OWNERS deleted file mode 100755 index 2fef50443..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/OWNERS +++ /dev/null @@ -1,17 +0,0 @@ -reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- liggitt -- nikhiljindal -- erictune -- sttts -- ncdc -- timothysc -- dims -- mml -- mbohlool -- david-mcmahon -- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/install/install.go b/vendor/k8s.io/client-go/pkg/apis/authorization/install/install.go deleted file mode 100644 index 33eee6618..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/install/install.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the experimental API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/apis/authorization" - "k8s.io/client-go/pkg/apis/authorization/v1" - "k8s.io/client-go/pkg/apis/authorization/v1beta1" -) - -func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: authorization.GroupName, - VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v1beta1.SchemeGroupVersion.Version}, - ImportPrefix: "k8s.io/client-go/pkg/apis/authorization", - RootScopedKinds: sets.NewString("SubjectAccessReview", "SelfSubjectAccessReview"), - AddInternalObjectsToScheme: authorization.AddToScheme, - }, - announced.VersionToSchemeFunc{ - v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, - v1.SchemeGroupVersion.Version: v1.AddToScheme, - }, - ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/types.go b/vendor/k8s.io/client-go/pkg/apis/authorization/types.go deleted file mode 100644 index d8ccfaf35..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/types.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package authorization - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient=true -// +nonNamespaced=true -// +noMethods=true - -// SubjectAccessReview checks whether or not a user or group can perform an action. Not filling in a -// spec.namespace means "in all namespaces". -type SubjectAccessReview struct { - metav1.TypeMeta - metav1.ObjectMeta - - // Spec holds information about the request being evaluated - Spec SubjectAccessReviewSpec - - // Status is filled in by the server and indicates whether the request is allowed or not - Status SubjectAccessReviewStatus -} - -// +genclient=true -// +nonNamespaced=true -// +noMethods=true - -// SelfSubjectAccessReview checks whether or the current user can perform an action. 
Not filling in a -// spec.namespace means "in all namespaces". Self is a special case, because users should always be able -// to check whether they can perform an action -type SelfSubjectAccessReview struct { - metav1.TypeMeta - metav1.ObjectMeta - - // Spec holds information about the request being evaluated. - Spec SelfSubjectAccessReviewSpec - - // Status is filled in by the server and indicates whether the request is allowed or not - Status SubjectAccessReviewStatus -} - -// +genclient=true -// +noMethods=true - -// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. -// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions -// checking. -type LocalSubjectAccessReview struct { - metav1.TypeMeta - metav1.ObjectMeta - - // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace - // you made the request against. If empty, it is defaulted. - Spec SubjectAccessReviewSpec - - // Status is filled in by the server and indicates whether the request is allowed or not - Status SubjectAccessReviewStatus -} - -// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface -type ResourceAttributes struct { - // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces - // "" (empty) is defaulted for LocalSubjectAccessReviews - // "" (empty) is empty for cluster-scoped resources - // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview - Namespace string - // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. - Verb string - // Group is the API Group of the Resource. "*" means all. - Group string - // Version is the API Version of the Resource. "*" means all. - Version string - // Resource is one of the existing resource types. "*" means all. - Resource string - // Subresource is one of the existing resource types. "" means none. - Subresource string - // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. - Name string -} - -// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface -type NonResourceAttributes struct { - // Path is the URL path of the request - Path string - // Verb is the standard HTTP verb - Verb string -} - -// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAttributes -// and NonResourceAttributes must be set -type SubjectAccessReviewSpec struct { - // ResourceAttributes describes information for a resource access request - ResourceAttributes *ResourceAttributes - // NonResourceAttributes describes information for a non-resource access request - NonResourceAttributes *NonResourceAttributes - - // User is the user you're testing for. - // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups - User string - // Groups is the groups you're testing for. - Groups []string - // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer - // it needs a reflection here. 
- Extra map[string]ExtraValue -} - -// ExtraValue masks the value so protobuf can generate -// +protobuf.nullable=true -type ExtraValue []string - -// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAttributes -// and NonResourceAttributes must be set -type SelfSubjectAccessReviewSpec struct { - // ResourceAttributes describes information for a resource access request - ResourceAttributes *ResourceAttributes - // NonResourceAttributes describes information for a non-resource access request - NonResourceAttributes *NonResourceAttributes -} - -// SubjectAccessReviewStatus -type SubjectAccessReviewStatus struct { - // Allowed is required. True if the action would be allowed, false otherwise. - Allowed bool - // Reason is optional. It indicates why a request was allowed or denied. - Reason string - // EvaluationError is an indication that some error occurred during the authorization check. - // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. - // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. - EvaluationError string -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.pb.go deleted file mode 100644 index 7bacc5169..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.pb.go +++ /dev/null @@ -1,2344 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/authorization/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/apis/authorization/v1/generated.proto - - It has these top-level messages: - ExtraValue - LocalSubjectAccessReview - NonResourceAttributes - ResourceAttributes - SelfSubjectAccessReview - SelfSubjectAccessReviewSpec - SubjectAccessReview - SubjectAccessReviewSpec - SubjectAccessReviewStatus -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } -func (*LocalSubjectAccessReview) ProtoMessage() {} -func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} -} - -func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } -func (*NonResourceAttributes) ProtoMessage() {} -func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } -func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } -func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } -func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} -func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} -} - -func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } -func (*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } -func (*SubjectAccessReviewSpec) ProtoMessage() {} -func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } -func (*SubjectAccessReviewStatus) ProtoMessage() {} -func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} -} - -func init() { - proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.ExtraValue") - proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.LocalSubjectAccessReview") - proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.NonResourceAttributes") - proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.ResourceAttributes") - proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.SelfSubjectAccessReview") - proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.SelfSubjectAccessReviewSpec") - proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.SubjectAccessReview") - proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.SubjectAccessReviewSpec") - proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.SubjectAccessReviewStatus") -} -func (m ExtraValue) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m ExtraValue) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m) > 0 { - for 
_, s := range m { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *LocalSubjectAccessReview) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LocalSubjectAccessReview) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *NonResourceAttributes) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NonResourceAttributes) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Verb))) - i += copy(data[i:], m.Verb) - return i, nil -} - -func (m *ResourceAttributes) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceAttributes) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) - i += copy(data[i:], m.Namespace) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Verb))) - i += copy(data[i:], m.Verb) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Group))) - i += copy(data[i:], m.Group) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Version))) - i += copy(data[i:], m.Version) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) - i += copy(data[i:], m.Resource) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Subresource))) - i += copy(data[i:], m.Subresource) - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - return i, nil -} - -func (m *SelfSubjectAccessReview) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SelfSubjectAccessReview) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - data[i] = 0x1a - i++ - i = 
encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 - return i, nil -} - -func (m *SelfSubjectAccessReviewSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SelfSubjectAccessReviewSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ResourceAttributes != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ResourceAttributes.Size())) - n7, err := m.ResourceAttributes.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.NonResourceAttributes != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.NonResourceAttributes.Size())) - n8, err := m.NonResourceAttributes.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n8 - } - return i, nil -} - -func (m *SubjectAccessReview) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SubjectAccessReview) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n9 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n10 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n11 - return i, nil -} - -func (m *SubjectAccessReviewSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SubjectAccessReviewSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ResourceAttributes != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ResourceAttributes.Size())) - n12, err := m.ResourceAttributes.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n12 - } - if m.NonResourceAttributes != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.NonResourceAttributes.Size())) - n13, err := m.NonResourceAttributes.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n13 - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.User))) - i += copy(data[i:], m.User) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - data[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.Extra) > 0 { - for k := range m.Extra { - data[i] = 0x2a - i++ - v := m.Extra[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n14, err := (&v).MarshalTo(data[i:]) - if err != nil { 
- return 0, err - } - i += n14 - } - } - return i, nil -} - -func (m *SubjectAccessReviewStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SubjectAccessReviewStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - if m.Allowed { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.EvaluationError))) - i += copy(data[i:], m.EvaluationError) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m ExtraValue) Size() (n int) { - var l int - _ = l - if len(m) > 0 { - for _, s := range m { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *LocalSubjectAccessReview) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NonResourceAttributes) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Verb) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceAttributes) Size() (n int) { - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Verb) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Version) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Subresource) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SelfSubjectAccessReview) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SelfSubjectAccessReviewSpec) Size() (n int) { - var l int - _ = l - if m.ResourceAttributes != nil { - l = m.ResourceAttributes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NonResourceAttributes != nil { - l = m.NonResourceAttributes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *SubjectAccessReview) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m 
*SubjectAccessReviewSpec) Size() (n int) { - var l int - _ = l - if m.ResourceAttributes != nil { - l = m.ResourceAttributes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NonResourceAttributes != nil { - l = m.NonResourceAttributes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Extra) > 0 { - for k, v := range m.Extra { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *SubjectAccessReviewStatus) Size() (n int) { - var l int - _ = l - n += 2 - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.EvaluationError) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *LocalSubjectAccessReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LocalSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NonResourceAttributes) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NonResourceAttributes{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceAttributes) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceAttributes{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, - `Group:` + fmt.Sprintf("%v", this.Group) + `,`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *SelfSubjectAccessReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SelfSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SelfSubjectAccessReviewSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", 
"ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, - `}`, - }, "") - return s -} -func (this *SubjectAccessReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SubjectAccessReviewSpec) String() string { - if this == nil { - return "nil" - } - keysForExtra := make([]string, 0, len(this.Extra)) - for k := range this.Extra { - keysForExtra = append(keysForExtra, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - mapStringForExtra := "map[string]ExtraValue{" - for _, k := range keysForExtra { - mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) - } - mapStringForExtra += "}" - s := strings.Join([]string{`&SubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, - `Extra:` + mapStringForExtra + `,`, - `}`, - }, "") - return s -} -func (this *SubjectAccessReviewStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SubjectAccessReviewStatus{`, - `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ExtraValue) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - *m = append(*m, string(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LocalSubjectAccessReview) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NonResourceAttributes) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NonResourceAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NonResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Verb = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceAttributes) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) - } - 
var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Verb = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subresource = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - 
postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SelfSubjectAccessReview) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SelfSubjectAccessReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SelfSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SelfSubjectAccessReviewSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResourceAttributes == nil { - m.ResourceAttributes = &ResourceAttributes{} - } - if err := m.ResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NonResourceAttributes == nil { - m.NonResourceAttributes = &NonResourceAttributes{} - } - if err := m.NonResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SubjectAccessReview) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SubjectAccessReviewSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubjectAccessReviewSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResourceAttributes == nil { - m.ResourceAttributes = &ResourceAttributes{} - } - if err := m.ResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NonResourceAttributes == nil { - m.NonResourceAttributes = &NonResourceAttributes{} - } - if err := m.NonResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { 
- return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.Extra == nil { - m.Extra = make(map[string]ExtraValue) - } - m.Extra[mapkey] = *mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SubjectAccessReviewStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubjectAccessReviewStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubjectAccessReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Allowed = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EvaluationError = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := 
len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -var fileDescriptorGenerated = []byte{ - // 904 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xfa, 0x5f, 0xec, 0x09, 0x90, 0x32, 0x55, 0xc9, 0x36, 0x48, 0xb6, 0x65, 0x10, 0x0a, - 0xa2, 0xec, 0x92, 0xf2, 0xa7, 0x55, 0x39, 0xa0, 0xac, 0x08, 0x7f, 0x24, 0xda, 0xa2, 0x89, 0xc8, - 0x01, 0x2e, 0x8c, 0x37, 0x2f, 0xf6, 0xd6, 0xde, 0x9d, 0x65, 0x66, 0x76, 0xdb, 0x70, 0xea, 0x8d, - 0x2b, 0x12, 0x17, 0x8e, 0x7c, 0x05, 0x3e, 0x00, 0x9c, 0x73, 0xa3, 0x07, 0x24, 0x38, 0x20, 0x8b, - 0x2c, 0x17, 0x3e, 0x06, 0x9a, 0xd9, 0x89, 0x37, 0xc6, 0x6b, 0x2a, 0x43, 0x25, 0x7a, 0xe8, 0x6d, - 0xe7, 0xbd, 0xdf, 0xef, 0xbd, 0xdf, 0xbc, 0x79, 0xb3, 0x6f, 0xd0, 0xdb, 0xe3, 0xeb, 0xc2, 0x09, - 0x98, 0x3b, 0x4e, 0x06, 0xc0, 0x23, 0x90, 0x20, 0xdc, 0x78, 0x3c, 0x74, 0x69, 0x1c, 0x08, 0x97, - 0x26, 0x72, 0xc4, 0x78, 0xf0, 0x25, 0x95, 0x01, 0x8b, 0xdc, 0x74, 0xc7, 0x1d, 0x42, 0x04, 0x9c, - 0x4a, 0x38, 0x74, 0x62, 0xce, 0x24, 0xc3, 0xaf, 0xe4, 0x64, 0xa7, 0x20, 0x3b, 0xf1, 0x78, 0xe8, - 0x28, 0xb2, 0x33, 0x47, 0x76, 0xd2, 0x9d, 0xad, 0x57, 0x87, 0x81, 0x1c, 0x25, 0x03, 0xc7, 0x67, - 0xa1, 0x3b, 0x64, 0x43, 0xe6, 0xea, 0x18, 0x83, 0xe4, 0x48, 0xaf, 0xf4, 0x42, 0x7f, 0xe5, 0xb1, - 0xb7, 0xde, 0x30, 0xc2, 0x68, 0x1c, 0x84, 0xd4, 0x1f, 0x05, 0x11, 0xf0, 0xe3, 0x42, 0x5a, 0x08, - 0x92, 0x96, 0x28, 0xda, 0x72, 0x97, 0xb1, 0x78, 0x12, 0xc9, 0x20, 0x84, 0x05, 0xc2, 0x5b, 0x0f, - 0x23, 0x08, 0x7f, 0x04, 0x21, 0x5d, 0xe0, 0xbd, 0xbe, 0x8c, 0x97, 0xc8, 0x60, 0xe2, 0x06, 0x91, - 0x14, 0x92, 0x2f, 0x90, 0xce, 0xed, 0x49, 0x00, 0x4f, 0x81, 0x17, 0x1b, 0x82, 0x7b, 0x34, 0x8c, - 
0x27, 0x50, 0xb6, 0xa7, 0x2b, 0x4b, 0x8f, 0xa8, 0x04, 0xdd, 0xbf, 0x86, 0xd0, 0xde, 0x3d, 0xc9, - 0xe9, 0x01, 0x9d, 0x24, 0x80, 0xbb, 0xa8, 0x11, 0x48, 0x08, 0x85, 0x6d, 0xf5, 0x6a, 0xdb, 0x6d, - 0xaf, 0x9d, 0x4d, 0xbb, 0x8d, 0x0f, 0x95, 0x81, 0xe4, 0xf6, 0x1b, 0xad, 0x6f, 0xbf, 0xeb, 0x56, - 0xee, 0xff, 0xd6, 0xab, 0xf4, 0x7f, 0xae, 0x22, 0xfb, 0x23, 0xe6, 0xd3, 0xc9, 0x7e, 0x32, 0xb8, - 0x03, 0xbe, 0xdc, 0xf5, 0x7d, 0x10, 0x82, 0x40, 0x1a, 0xc0, 0x5d, 0xfc, 0x39, 0x6a, 0xa9, 0x92, - 0x1f, 0x52, 0x49, 0x6d, 0xab, 0x67, 0x6d, 0xaf, 0x5f, 0x7d, 0xcd, 0x31, 0x87, 0x7f, 0xbe, 0x02, - 0xc5, 0xf1, 0x2b, 0xb4, 0x93, 0xee, 0x38, 0xb7, 0x75, 0xac, 0x9b, 0x20, 0xa9, 0x87, 0x4f, 0xa6, - 0xdd, 0x4a, 0x36, 0xed, 0xa2, 0xc2, 0x46, 0x66, 0x51, 0xf1, 0x11, 0xaa, 0x8b, 0x18, 0x7c, 0xbb, - 0xaa, 0xa3, 0xbf, 0xeb, 0xac, 0xd0, 0x5a, 0x4e, 0x89, 0xe2, 0xfd, 0x18, 0x7c, 0xef, 0x29, 0x93, - 0xb1, 0xae, 0x56, 0x44, 0xc7, 0xc7, 0x11, 0x6a, 0x0a, 0x49, 0x65, 0x22, 0xec, 0x9a, 0xce, 0xf4, - 0xde, 0x7f, 0xce, 0xa4, 0xa3, 0x79, 0xcf, 0x98, 0x5c, 0xcd, 0x7c, 0x4d, 0x4c, 0x96, 0xfe, 0x67, - 0xe8, 0xd2, 0x2d, 0x16, 0x11, 0x10, 0x2c, 0xe1, 0x3e, 0xec, 0x4a, 0xc9, 0x83, 0x41, 0x22, 0x41, - 0xe0, 0x1e, 0xaa, 0xc7, 0x54, 0x8e, 0x74, 0x39, 0xdb, 0x85, 0xd4, 0x8f, 0xa9, 0x1c, 0x11, 0xed, - 0x51, 0x88, 0x14, 0xf8, 0x40, 0x97, 0xe4, 0x1c, 0xe2, 0x00, 0xf8, 0x80, 0x68, 0x4f, 0xff, 0xc7, - 0x2a, 0xc2, 0x25, 0xa1, 0x5d, 0xd4, 0x8e, 0x68, 0x08, 0x22, 0xa6, 0x3e, 0x98, 0xf8, 0xcf, 0x1a, - 0x76, 0xfb, 0xd6, 0x99, 0x83, 0x14, 0x98, 0x87, 0x67, 0xc2, 0x2f, 0xa0, 0xc6, 0x90, 0xb3, 0x24, - 0xd6, 0x55, 0x6b, 0x7b, 0x4f, 0x1b, 0x48, 0xe3, 0x7d, 0x65, 0x24, 0xb9, 0x0f, 0xbf, 0x8c, 0xd6, - 0x52, 0xe0, 0x22, 0x60, 0x91, 0x5d, 0xd7, 0xb0, 0x0d, 0x03, 0x5b, 0x3b, 0xc8, 0xcd, 0xe4, 0xcc, - 0x8f, 0xaf, 0xa0, 0x16, 0x37, 0xc2, 0xed, 0x86, 0xc6, 0x5e, 0x30, 0xd8, 0xd6, 0xd9, 0x86, 0xc8, - 0x0c, 0x81, 0xdf, 0x44, 0xeb, 0x22, 0x19, 0xcc, 0x08, 0x4d, 0x4d, 0xb8, 0x68, 0x08, 0xeb, 0xfb, - 0x85, 0x8b, 0x9c, 0xc7, 0xa9, 0x6d, 0xa9, 0x3d, 0xda, 0x6b, 0xf3, 0xdb, 0x52, 0x25, 0x20, 0xda, - 0xd3, 0xff, 0xa5, 0x8a, 0x36, 0xf7, 0x61, 0x72, 0xf4, 0xff, 0xf4, 0xfc, 0x9d, 0xb9, 0x9e, 0xff, - 0x60, 0xb5, 0x4e, 0x2c, 0x57, 0xfd, 0xd8, 0xf4, 0xfd, 0x0f, 0x55, 0xf4, 0xfc, 0x3f, 0x68, 0xc4, - 0x5f, 0x59, 0x08, 0xf3, 0x85, 0xd6, 0x35, 0x85, 0x7e, 0x67, 0x25, 0x71, 0x8b, 0x37, 0xc0, 0x7b, - 0x2e, 0x9b, 0x76, 0x4b, 0x6e, 0x06, 0x29, 0x49, 0x89, 0xbf, 0xb1, 0xd0, 0xa5, 0xa8, 0xec, 0x8a, - 0x9a, 0x73, 0xf1, 0x56, 0x12, 0x53, 0x7a, 0xd9, 0xbd, 0xcb, 0xd9, 0xb4, 0x5b, 0xfe, 0x1f, 0x20, - 0xe5, 0xb9, 0xfb, 0x3f, 0x55, 0xd1, 0xc5, 0x27, 0x7f, 0xe2, 0x47, 0xd9, 0x91, 0x7f, 0xd6, 0xd1, - 0xe6, 0x93, 0x6e, 0xfc, 0x57, 0xdd, 0x38, 0x1b, 0x10, 0xb5, 0xf9, 0x3f, 0xe9, 0x27, 0x02, 0xb8, - 0x19, 0x10, 0x7d, 0xd4, 0xd4, 0x43, 0x40, 0xd8, 0x75, 0xfd, 0xd4, 0x40, 0xea, 0x04, 0xf4, 0x74, - 0x10, 0xc4, 0x78, 0xb0, 0x44, 0x0d, 0x50, 0x6f, 0x13, 0xbb, 0xd1, 0xab, 0x6d, 0xaf, 0x5f, 0xbd, - 0xfd, 0x28, 0x5a, 0xcb, 0xd1, 0xaf, 0x9d, 0xbd, 0x48, 0xf2, 0xe3, 0x62, 0x2a, 0x69, 0x1b, 0xc9, - 0x93, 0x6d, 0x7d, 0x61, 0x5e, 0x44, 0x1a, 0x83, 0x2f, 0xa0, 0xda, 0x18, 0x8e, 0xf3, 0xa9, 0x48, - 0xd4, 0x27, 0xbe, 0x89, 0x1a, 0xa9, 0x7a, 0x2c, 0x99, 0x02, 0x5f, 0x5b, 0x49, 0x55, 0xf1, 0xd6, - 0x22, 0x79, 0x94, 0x1b, 0xd5, 0xeb, 0x56, 0xff, 0x7b, 0x0b, 0x5d, 0x5e, 0xda, 0xa0, 0x6a, 0x4c, - 0xd2, 0xc9, 0x84, 0xdd, 0x85, 0x43, 0x2d, 0xa3, 0x55, 0x8c, 0xc9, 0xdd, 0xdc, 0x4c, 0xce, 0xfc, - 0xf8, 0x25, 0xd4, 0xe4, 0x40, 0x05, 0x8b, 0xcc, 0x68, 0x9e, 0xf5, 0x36, 0xd1, 0x56, 0x62, 0xbc, - 0x78, 0x17, 0x6d, 0x80, 
0x4a, 0xaf, 0x75, 0xed, 0x71, 0xce, 0xb8, 0x39, 0xaa, 0x4d, 0x43, 0xd8, - 0xd8, 0x9b, 0x77, 0x93, 0xbf, 0xe3, 0xbd, 0x17, 0x4f, 0x4e, 0x3b, 0x95, 0x07, 0xa7, 0x9d, 0xca, - 0xaf, 0xa7, 0x9d, 0xca, 0xfd, 0xac, 0x63, 0x9d, 0x64, 0x1d, 0xeb, 0x41, 0xd6, 0xb1, 0x7e, 0xcf, - 0x3a, 0xd6, 0xd7, 0x7f, 0x74, 0x2a, 0x9f, 0x56, 0xd3, 0x9d, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, - 0x8a, 0xc4, 0x1f, 0xd5, 0x30, 0x0c, 0x00, 0x00, -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.generated.go deleted file mode 100644 index 2528afa8e..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.generated.go +++ /dev/null @@ -1,3233 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkg2_types "k8s.io/apimachinery/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_v1.TypeMeta - var v1 pkg2_types.UID - var v2 time.Time - _, _, _ = v0, v1, v2 - } -} - -func (x *SubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SelfSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SelfSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SelfSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { 
- yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = SelfSubjectAccessReviewSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = SelfSubjectAccessReviewSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LocalSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, 
_, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LocalSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LocalSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 
= r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Namespace != "" - yyq2[1] = x.Verb != "" - yyq2[2] = x.Group != "" - yyq2[3] = x.Version != "" - yyq2[4] = x.Resource != "" - yyq2[5] = x.Subresource != "" - yyq2[6] = x.Name != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("verb")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Group)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("group")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Group)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Version)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("version")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Version)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subresource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == 
codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - yyv4 := &x.Namespace - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "verb": - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - yyv6 := &x.Verb - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "group": - if r.TryDecodeAsNil() { - x.Group = "" - } else { - yyv8 := &x.Group - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "version": - if r.TryDecodeAsNil() { - x.Version = "" - } else { - yyv10 := &x.Version - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "resource": - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - yyv12 := &x.Resource - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - case "subresource": - if r.TryDecodeAsNil() { - x.Subresource = "" - } else { - yyv14 := &x.Subresource - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv16 := &x.Name - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - yyv19 := &x.Namespace - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Verb = "" - } else { - yyv21 := &x.Verb - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Group = "" - } else { - yyv23 := &x.Group - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*string)(yyv23)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Version = "" - } else { - yyv25 := &x.Version - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*string)(yyv25)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - yyv27 := &x.Resource - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*string)(yyv27)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subresource = "" - } else { - yyv29 := &x.Subresource - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*string)(yyv29)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv31 := &x.Name - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - *((*string)(yyv31)) = r.DecodeString() - } - } - for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NonResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Path != "" - yyq2[1] = x.Verb != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("verb")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NonResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NonResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv4 := &x.Path - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "verb": - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - yyv6 := &x.Verb - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NonResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv9 := &x.Path - yym10 := 
z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - yyv11 := &x.Verb - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ResourceAttributes != nil - yyq2[1] = x.NonResourceAttributes != nil - yyq2[2] = x.User != "" - yyq2[3] = len(x.Groups) != 0 - yyq2[4] = len(x.Extra) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Groups == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - 
if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("groups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Extra == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("extra")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Extra == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "resourceAttributes": - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - case "nonResourceAttributes": - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - case "user": - if r.TryDecodeAsNil() { - x.User = "" - } else { - yyv6 := &x.User - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "groups": - if r.TryDecodeAsNil() { - x.Groups = 
nil - } else { - yyv8 := &x.Groups - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecSliceStringX(yyv8, false, d) - } - } - case "extra": - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv10 := &x.Extra - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = "" - } else { - yyv15 := &x.User - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv17 := &x.Groups - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - z.F.DecSliceStringX(yyv17, false, d) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv19 := &x.Extra - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - 
} else { - h.encExtraValue((ExtraValue)(x), e) - } - } -} - -func (x *ExtraValue) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decExtraValue((*ExtraValue)(x), d) - } -} - -func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ResourceAttributes != nil - yyq2[1] = x.NonResourceAttributes != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SelfSubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - 
break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "resourceAttributes": - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - case "nonResourceAttributes": - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SubjectAccessReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Reason != "" - yyq2[2] = x.EvaluationError != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Allowed)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allowed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = 
yym5 - if false { - } else { - r.EncodeBool(bool(x.Allowed)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("evaluationError")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SubjectAccessReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubjectAccessReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "allowed": - if r.TryDecodeAsNil() { - x.Allowed = false - } else { - yyv4 := &x.Allowed - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*bool)(yyv4)) = r.DecodeBool() - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv6 := &x.Reason - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "evaluationError": - if r.TryDecodeAsNil() { - x.EvaluationError = "" - } else { - yyv8 := &x.EvaluationError - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - default: - 
z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Allowed = false - } else { - yyv11 := &x.Allowed - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(yyv11)) = r.DecodeBool() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv13 := &x.Reason - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvaluationError = "" - } else { - yyv15 := &x.EvaluationError - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk1)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv1 == nil { - r.EncodeNil() - } else { - yyv1.CodecEncodeSelf(e) - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyl1 := r.ReadMapStart() - yybh1 := z.DecBasicHandle() - if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) - yyv1 = make(map[string]ExtraValue, yyrl1) - *v = yyv1 - } - var yymk1 string - var yymv1 ExtraValue - var yymg1 bool - if yybh1.MapValueReset { - yymg1 = true - } - if yyl1 > 0 { - for yyj1 := 0; yyj1 < yyl1; yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yyv2 := &yymk1 - yym3 := z.DecBinary() - _ = yym3 - if false { - } else { - *((*string)(yyv2)) = r.DecodeString() - } - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv4 := &yymv1 - 
yyv4.CodecDecodeSelf(d) - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } else if yyl1 < 0 { - for yyj1 := 0; !r.CheckBreak(); yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yyv5 := &yymk1 - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*string)(yyv5)) = r.DecodeString() - } - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv7 := &yymv1 - yyv7.CodecDecodeSelf(d) - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } // else len==0: TODO: Should we clear map entries? - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv1)) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]string, yyrl1) - } - } else { - yyv1 = make([]string, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv2 := &yyv1[yyj1] - yym3 := z.DecBinary() - _ = yym3 - if false { - } else { - *((*string)(yyv2)) = r.DecodeString() - } - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv4 := &yyv1[yyj1] - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 string - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv6 := &yyv1[yyj1] - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.conversion.go deleted file mode 100644 index 92d130844..000000000 --- 
a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.conversion.go +++ /dev/null @@ -1,263 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1 - -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - authorization "k8s.io/client-go/pkg/apis/authorization" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview, - Convert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview, - Convert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes, - Convert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes, - Convert_v1_ResourceAttributes_To_authorization_ResourceAttributes, - Convert_authorization_ResourceAttributes_To_v1_ResourceAttributes, - Convert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview, - Convert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview, - Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec, - Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec, - Convert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview, - Convert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview, - Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec, - Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec, - Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus, - Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus, - ) -} - -func autoConvert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { - return autoConvert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in, out, s) -} - -func autoConvert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s 
conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { - return autoConvert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(in, out, s) -} - -func autoConvert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { - out.Path = in.Path - out.Verb = in.Verb - return nil -} - -func Convert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { - return autoConvert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(in, out, s) -} - -func autoConvert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { - out.Path = in.Path - out.Verb = in.Verb - return nil -} - -func Convert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { - return autoConvert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(in, out, s) -} - -func autoConvert_v1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { - out.Namespace = in.Namespace - out.Verb = in.Verb - out.Group = in.Group - out.Version = in.Version - out.Resource = in.Resource - out.Subresource = in.Subresource - out.Name = in.Name - return nil -} - -func Convert_v1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { - return autoConvert_v1_ResourceAttributes_To_authorization_ResourceAttributes(in, out, s) -} - -func autoConvert_authorization_ResourceAttributes_To_v1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { - out.Namespace = in.Namespace - out.Verb = in.Verb - out.Group = in.Group - out.Version = in.Version - out.Resource = in.Resource - out.Subresource = in.Subresource - out.Name = in.Name - return nil -} - -func Convert_authorization_ResourceAttributes_To_v1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { - return autoConvert_authorization_ResourceAttributes_To_v1_ResourceAttributes(in, out, s) -} - -func autoConvert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func 
Convert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { - return autoConvert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in, out, s) -} - -func autoConvert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { - return autoConvert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(in, out, s) -} - -func autoConvert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { - out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) - out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) - return nil -} - -func Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { - return autoConvert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in, out, s) -} - -func autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { - out.ResourceAttributes = (*ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) - out.NonResourceAttributes = (*NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) - return nil -} - -func Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { - return autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(in, out, s) -} - -func autoConvert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { - return autoConvert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(in, out, s) -} - -func autoConvert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { - out.ObjectMeta = 
in.ObjectMeta - if err := Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { - return autoConvert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(in, out, s) -} - -func autoConvert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { - out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) - out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) - out.User = in.User - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - out.Extra = *(*map[string]authorization.ExtraValue)(unsafe.Pointer(&in.Extra)) - return nil -} - -func Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { - return autoConvert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in, out, s) -} - -func autoConvert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { - out.ResourceAttributes = (*ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) - out.NonResourceAttributes = (*NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) - out.User = in.User - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) - return nil -} - -func Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { - return autoConvert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(in, out, s) -} - -func autoConvert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { - out.Allowed = in.Allowed - out.Reason = in.Reason - out.EvaluationError = in.EvaluationError - return nil -} - -func Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { - return autoConvert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in, out, s) -} - -func autoConvert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { - out.Allowed = in.Allowed - out.Reason = in.Reason - out.EvaluationError = in.EvaluationError - return nil -} - -func Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { - return autoConvert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in, out, s) -} diff --git 
a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.deepcopy.go deleted file mode 100644 index 1f3199a35..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,179 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1 - -import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LocalSubjectAccessReview, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NonResourceAttributes, InType: reflect.TypeOf(&NonResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceAttributes, InType: reflect.TypeOf(&ResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SelfSubjectAccessReview, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SelfSubjectAccessReviewSpec, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SubjectAccessReview, InType: reflect.TypeOf(&SubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SubjectAccessReviewSpec, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SubjectAccessReviewStatus, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, - ) -} - -func DeepCopy_v1_LocalSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LocalSubjectAccessReview) - out := out.(*LocalSubjectAccessReview) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_NonResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NonResourceAttributes) - out := out.(*NonResourceAttributes) - *out = *in - return nil - } -} - -func DeepCopy_v1_ResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceAttributes) - out := out.(*ResourceAttributes) - *out = *in - return nil - } -} - -func DeepCopy_v1_SelfSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := 
in.(*SelfSubjectAccessReview) - out := out.(*SelfSubjectAccessReview) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_SelfSubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SelfSubjectAccessReviewSpec) - out := out.(*SelfSubjectAccessReviewSpec) - *out = *in - if in.ResourceAttributes != nil { - in, out := &in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - **out = **in - } - if in.NonResourceAttributes != nil { - in, out := &in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(NonResourceAttributes) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_SubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SubjectAccessReview) - out := out.(*SubjectAccessReview) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_SubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SubjectAccessReviewSpec) - out := out.(*SubjectAccessReviewSpec) - *out = *in - if in.ResourceAttributes != nil { - in, out := &in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - **out = **in - } - if in.NonResourceAttributes != nil { - in, out := &in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(NonResourceAttributes) - **out = **in - } - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue) - for key, val := range *in { - if newVal, err := c.DeepCopy(&val); err != nil { - return err - } else { - (*out)[key] = *newVal.(*ExtraValue) - } - } - } - return nil - } -} - -func DeepCopy_v1_SubjectAccessReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SubjectAccessReviewStatus) - out := out.(*SubjectAccessReviewStatus) - *out = *in - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.pb.go deleted file mode 100644 index 65877a2ad..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.pb.go +++ /dev/null @@ -1,2344 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto -// DO NOT EDIT! 
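The DeepCopy_v1_* helpers removed above all follow the same deepcopy-gen pattern: value-copy the struct, then re-allocate every pointer, slice, and map field so the copy shares no memory with the original. A minimal, self-contained sketch of that pattern, using simplified stand-in types rather than the real client-go definitions (the type and function names below are illustrative only, not part of the vendored file):

package main

import "fmt"

// Simplified stand-ins for the generated API types (hypothetical, for illustration only).
type ResourceAttributes struct{ Namespace, Verb, Resource string }
type ExtraValue []string

type SubjectAccessReviewSpec struct {
	ResourceAttributes *ResourceAttributes
	Groups             []string
	Extra              map[string]ExtraValue
}

// deepCopySpec mirrors the shape of the generated deep-copy code: value-copy
// the struct, then duplicate each reference-typed field explicitly.
func deepCopySpec(in *SubjectAccessReviewSpec) *SubjectAccessReviewSpec {
	out := new(SubjectAccessReviewSpec)
	*out = *in // copies scalar fields and the pointers/headers themselves
	if in.ResourceAttributes != nil {
		out.ResourceAttributes = new(ResourceAttributes)
		*out.ResourceAttributes = *in.ResourceAttributes
	}
	if in.Groups != nil {
		out.Groups = make([]string, len(in.Groups))
		copy(out.Groups, in.Groups)
	}
	if in.Extra != nil {
		out.Extra = make(map[string]ExtraValue, len(in.Extra))
		for k, v := range in.Extra {
			val := make(ExtraValue, len(v))
			copy(val, v)
			out.Extra[k] = val
		}
	}
	return out
}

func main() {
	orig := &SubjectAccessReviewSpec{
		ResourceAttributes: &ResourceAttributes{Namespace: "default", Verb: "get", Resource: "pods"},
		Groups:             []string{"system:authenticated"},
		Extra:              map[string]ExtraValue{"scopes": {"view"}},
	}
	cp := deepCopySpec(orig)
	cp.ResourceAttributes.Verb = "list"                                    // mutating the copy...
	fmt.Println(orig.ResourceAttributes.Verb, cp.ResourceAttributes.Verb) // ...leaves the original at "get"
}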
- -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto - - It has these top-level messages: - ExtraValue - LocalSubjectAccessReview - NonResourceAttributes - ResourceAttributes - SelfSubjectAccessReview - SelfSubjectAccessReviewSpec - SubjectAccessReview - SubjectAccessReviewSpec - SubjectAccessReviewStatus -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } -func (*LocalSubjectAccessReview) ProtoMessage() {} -func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} -} - -func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } -func (*NonResourceAttributes) ProtoMessage() {} -func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } -func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } -func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } -func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} -func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} -} - -func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } -func (*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } -func (*SubjectAccessReviewSpec) ProtoMessage() {} -func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } -func (*SubjectAccessReviewStatus) ProtoMessage() {} -func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} -} - -func init() { - proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.ExtraValue") - proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.LocalSubjectAccessReview") - proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.NonResourceAttributes") - proto.RegisterType((*ResourceAttributes)(nil), 
"k8s.io.client-go.pkg.apis.authorization.v1beta1.ResourceAttributes") - proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.SelfSubjectAccessReview") - proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.SelfSubjectAccessReviewSpec") - proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.SubjectAccessReview") - proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.SubjectAccessReviewSpec") - proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.SubjectAccessReviewStatus") -} -func (m ExtraValue) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m ExtraValue) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m) > 0 { - for _, s := range m { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *LocalSubjectAccessReview) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LocalSubjectAccessReview) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *NonResourceAttributes) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NonResourceAttributes) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Verb))) - i += copy(data[i:], m.Verb) - return i, nil -} - -func (m *ResourceAttributes) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceAttributes) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) - i += copy(data[i:], m.Namespace) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Verb))) - i += copy(data[i:], m.Verb) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Group))) - i += copy(data[i:], m.Group) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Version))) - i += copy(data[i:], m.Version) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, 
uint64(len(m.Resource))) - i += copy(data[i:], m.Resource) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Subresource))) - i += copy(data[i:], m.Subresource) - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - return i, nil -} - -func (m *SelfSubjectAccessReview) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SelfSubjectAccessReview) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 - return i, nil -} - -func (m *SelfSubjectAccessReviewSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SelfSubjectAccessReviewSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ResourceAttributes != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ResourceAttributes.Size())) - n7, err := m.ResourceAttributes.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.NonResourceAttributes != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.NonResourceAttributes.Size())) - n8, err := m.NonResourceAttributes.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n8 - } - return i, nil -} - -func (m *SubjectAccessReview) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SubjectAccessReview) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n9 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n10 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n11 - return i, nil -} - -func (m *SubjectAccessReviewSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SubjectAccessReviewSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ResourceAttributes != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ResourceAttributes.Size())) - n12, err := m.ResourceAttributes.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n12 - } - if m.NonResourceAttributes != nil { - data[i] = 0x12 - i++ - i = 
encodeVarintGenerated(data, i, uint64(m.NonResourceAttributes.Size())) - n13, err := m.NonResourceAttributes.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n13 - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.User))) - i += copy(data[i:], m.User) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - data[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.Extra) > 0 { - for k := range m.Extra { - data[i] = 0x2a - i++ - v := m.Extra[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n14, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n14 - } - } - return i, nil -} - -func (m *SubjectAccessReviewStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SubjectAccessReviewStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - if m.Allowed { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.EvaluationError))) - i += copy(data[i:], m.EvaluationError) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m ExtraValue) Size() (n int) { - var l int - _ = l - if len(m) > 0 { - for _, s := range m { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *LocalSubjectAccessReview) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NonResourceAttributes) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Verb) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceAttributes) Size() (n int) { - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Verb) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Version) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.Subresource) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SelfSubjectAccessReview) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SelfSubjectAccessReviewSpec) Size() (n int) { - var l int - _ = l - if m.ResourceAttributes != nil { - l = m.ResourceAttributes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NonResourceAttributes != nil { - l = m.NonResourceAttributes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *SubjectAccessReview) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SubjectAccessReviewSpec) Size() (n int) { - var l int - _ = l - if m.ResourceAttributes != nil { - l = m.ResourceAttributes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NonResourceAttributes != nil { - l = m.NonResourceAttributes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Extra) > 0 { - for k, v := range m.Extra { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *SubjectAccessReviewStatus) Size() (n int) { - var l int - _ = l - n += 2 - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.EvaluationError) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *LocalSubjectAccessReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LocalSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NonResourceAttributes) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NonResourceAttributes{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceAttributes) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceAttributes{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, - `Group:` + fmt.Sprintf("%v", this.Group) + `,`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Subresource:` + 
fmt.Sprintf("%v", this.Subresource) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *SelfSubjectAccessReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SelfSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SelfSubjectAccessReviewSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, - `}`, - }, "") - return s -} -func (this *SubjectAccessReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SubjectAccessReviewSpec) String() string { - if this == nil { - return "nil" - } - keysForExtra := make([]string, 0, len(this.Extra)) - for k := range this.Extra { - keysForExtra = append(keysForExtra, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - mapStringForExtra := "map[string]ExtraValue{" - for _, k := range keysForExtra { - mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) - } - mapStringForExtra += "}" - s := strings.Join([]string{`&SubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, - `Extra:` + mapStringForExtra + `,`, - `}`, - }, "") - return s -} -func (this *SubjectAccessReviewStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SubjectAccessReviewStatus{`, - `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ExtraValue) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - *m = append(*m, string(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LocalSubjectAccessReview) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", 
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NonResourceAttributes) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NonResourceAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NonResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Verb = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceAttributes) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire 
>> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Verb = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Subresource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subresource = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SelfSubjectAccessReview) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SelfSubjectAccessReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SelfSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SelfSubjectAccessReviewSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResourceAttributes == nil { - m.ResourceAttributes = &ResourceAttributes{} - } - if err := m.ResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NonResourceAttributes == nil { - m.NonResourceAttributes = &NonResourceAttributes{} - } - if err := m.NonResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SubjectAccessReview) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SubjectAccessReviewSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubjectAccessReviewSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) - } 
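The byte-level loops in the generated Marshal/Unmarshal code here are plain protocol-buffer base-128 varint coding: each byte carries seven payload bits and the high bit marks continuation, which is what encodeVarintGenerated, sovGenerated, and the shift/0x80 decode loops implement. A minimal standalone sketch of that round trip, for reference only and not the vendored code itself:

package main

import (
	"errors"
	"fmt"
)

// putUvarint appends v in base-128 order: low 7 bits first,
// high bit set on every byte except the last.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarint decodes one varint and reports how many bytes were consumed,
// using the same shift/continuation logic as the generated Unmarshal loops.
func uvarint(buf []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); i < len(buf); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		b := buf[i]
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("unexpected end of data")
}

func main() {
	buf := putUvarint(nil, 904) // encode an arbitrary length value
	v, n, err := uvarint(buf)
	fmt.Println(buf, v, n, err) // [136 7] 904 2 <nil>
}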
- var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResourceAttributes == nil { - m.ResourceAttributes = &ResourceAttributes{} - } - if err := m.ResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NonResourceAttributes == nil { - m.NonResourceAttributes = &NonResourceAttributes{} - } - if err := m.NonResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.Extra == nil { - m.Extra = make(map[string]ExtraValue) - } - m.Extra[mapkey] = *mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SubjectAccessReviewStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubjectAccessReviewStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubjectAccessReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Allowed = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EvaluationError = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -var fileDescriptorGenerated = []byte{ - // 904 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0xdc, 0x44, - 0x14, 0x5f, 0xef, 0x9f, 0x64, 0x77, 0x02, 0xa4, 0x4c, 0x55, 0xe2, 0x06, 0x69, 0x77, 0xb5, 0x48, - 0x28, 0x95, 0x8a, 0xdd, 0x94, 0x7f, 0x55, 0xc5, 0x81, 0x58, 0x44, 0x55, 0x05, 0x2d, 0x68, 0x02, - 0x39, 0xc0, 0x85, 0xb1, 0xf3, 0xba, 0x6b, 0x76, 0xed, 0xb1, 0x66, 0xc6, 0x6e, 0xc3, 0xa9, 0x1f, - 0x80, 0x03, 0xc7, 0x1e, 0xf9, 0x0a, 0x7c, 0x01, 0xae, 0xe4, 0xd8, 0x23, 0x48, 0x68, 0x45, 0xcc, - 0xb7, 0xe0, 0x84, 0x66, 0x3c, 0xbb, 0xce, 0xb2, 0x0e, 0xd5, 
0x42, 0x11, 0x1c, 0x72, 0xb3, 0xdf, - 0xfb, 0xbd, 0xf7, 0x7e, 0xef, 0xcd, 0x9b, 0x79, 0x0f, 0xbd, 0x3f, 0xbe, 0x25, 0x9c, 0x90, 0xb9, - 0xe3, 0xd4, 0x07, 0x1e, 0x83, 0x04, 0xe1, 0x26, 0xe3, 0xa1, 0x4b, 0x93, 0x50, 0xb8, 0x34, 0x95, - 0x23, 0xc6, 0xc3, 0xaf, 0xa9, 0x0c, 0x59, 0xec, 0x66, 0xbb, 0x3e, 0x48, 0xba, 0xeb, 0x0e, 0x21, - 0x06, 0x4e, 0x25, 0x1c, 0x39, 0x09, 0x67, 0x92, 0xe1, 0x1b, 0x85, 0x07, 0xa7, 0xf4, 0xe0, 0x24, - 0xe3, 0xa1, 0xa3, 0x3c, 0x38, 0x0b, 0x1e, 0x1c, 0xe3, 0x61, 0xfb, 0x8d, 0x61, 0x28, 0x47, 0xa9, - 0xef, 0x04, 0x2c, 0x72, 0x87, 0x6c, 0xc8, 0x5c, 0xed, 0xc8, 0x4f, 0x1f, 0xe8, 0x3f, 0xfd, 0xa3, - 0xbf, 0x8a, 0x00, 0xdb, 0x6f, 0x19, 0x8a, 0x34, 0x09, 0x23, 0x1a, 0x8c, 0xc2, 0x18, 0xf8, 0x71, - 0x49, 0x32, 0x02, 0x49, 0xdd, 0x6c, 0x89, 0xd6, 0xb6, 0x7b, 0x9e, 0x15, 0x4f, 0x63, 0x19, 0x46, - 0xb0, 0x64, 0xf0, 0xce, 0xb3, 0x0c, 0x44, 0x30, 0x82, 0x88, 0x2e, 0xd9, 0xbd, 0x79, 0x9e, 0x5d, - 0x2a, 0xc3, 0x89, 0x1b, 0xc6, 0x52, 0x48, 0xbe, 0x64, 0x74, 0x26, 0x27, 0x01, 0x3c, 0x03, 0x5e, - 0x26, 0x04, 0x8f, 0x68, 0x94, 0x4c, 0xa0, 0x2a, 0xa7, 0xeb, 0xe7, 0x1e, 0x56, 0x05, 0x7a, 0xf0, - 0x2e, 0x42, 0xfb, 0x8f, 0x24, 0xa7, 0x87, 0x74, 0x92, 0x02, 0xee, 0xa1, 0x56, 0x28, 0x21, 0x12, - 0xb6, 0xd5, 0x6f, 0xec, 0x74, 0xbc, 0x4e, 0x3e, 0xed, 0xb5, 0xee, 0x2a, 0x01, 0x29, 0xe4, 0xb7, - 0xdb, 0x4f, 0xbe, 0xeb, 0xd5, 0x1e, 0xff, 0xd2, 0xaf, 0x0d, 0xa6, 0x75, 0x64, 0x7f, 0xc4, 0x02, - 0x3a, 0x39, 0x48, 0xfd, 0xaf, 0x20, 0x90, 0x7b, 0x41, 0x00, 0x42, 0x10, 0xc8, 0x42, 0x78, 0x88, - 0xbf, 0x44, 0x6d, 0x55, 0xf2, 0x23, 0x2a, 0xa9, 0x6d, 0xf5, 0xad, 0x9d, 0x8d, 0x9b, 0x37, 0x1c, - 0xd3, 0x01, 0x67, 0x2b, 0x50, 0xf6, 0x80, 0x42, 0x3b, 0xd9, 0xae, 0xf3, 0xb1, 0xf6, 0x75, 0x0f, - 0x24, 0xf5, 0xf0, 0xc9, 0xb4, 0x57, 0xcb, 0xa7, 0x3d, 0x54, 0xca, 0xc8, 0xdc, 0x2b, 0x1e, 0xa3, - 0xa6, 0x48, 0x20, 0xb0, 0xeb, 0xda, 0xfb, 0x5d, 0x67, 0xd5, 0xfe, 0x72, 0x2a, 0x68, 0x1f, 0x24, - 0x10, 0x78, 0x2f, 0x98, 0xb0, 0x4d, 0xf5, 0x47, 0x74, 0x10, 0x2c, 0xd0, 0x9a, 0x90, 0x54, 0xa6, - 0xc2, 0x6e, 0xe8, 0x70, 0x1f, 0x3e, 0x9f, 0x70, 0xda, 0xa5, 0xf7, 0x92, 0x09, 0xb8, 0x56, 0xfc, - 0x13, 0x13, 0x6a, 0xf0, 0x05, 0xba, 0x72, 0x9f, 0xc5, 0x04, 0x04, 0x4b, 0x79, 0x00, 0x7b, 0x52, - 0xf2, 0xd0, 0x4f, 0x25, 0x08, 0xdc, 0x47, 0xcd, 0x84, 0xca, 0x91, 0x2e, 0x6c, 0xa7, 0xe4, 0xfb, - 0x09, 0x95, 0x23, 0xa2, 0x35, 0x0a, 0x91, 0x01, 0xf7, 0x75, 0x71, 0xce, 0x20, 0x0e, 0x81, 0xfb, - 0x44, 0x6b, 0x06, 0x3f, 0xd4, 0x11, 0xae, 0x70, 0xed, 0xa2, 0x4e, 0x4c, 0x23, 0x10, 0x09, 0x0d, - 0xc0, 0xf8, 0x7f, 0xd9, 0x58, 0x77, 0xee, 0xcf, 0x14, 0xa4, 0xc4, 0x3c, 0x3b, 0x12, 0x7e, 0x0d, - 0xb5, 0x86, 0x9c, 0xa5, 0x89, 0x2e, 0x5d, 0xc7, 0x7b, 0xd1, 0x40, 0x5a, 0x77, 0x94, 0x90, 0x14, - 0x3a, 0x7c, 0x0d, 0xad, 0x67, 0xc0, 0x45, 0xc8, 0x62, 0xbb, 0xa9, 0x61, 0x9b, 0x06, 0xb6, 0x7e, - 0x58, 0x88, 0xc9, 0x4c, 0x8f, 0xaf, 0xa3, 0x36, 0x37, 0xc4, 0xed, 0x96, 0xc6, 0x5e, 0x32, 0xd8, - 0xf6, 0x2c, 0x21, 0x32, 0x47, 0xe0, 0xb7, 0xd1, 0x86, 0x48, 0xfd, 0xb9, 0xc1, 0x9a, 0x36, 0xb8, - 0x6c, 0x0c, 0x36, 0x0e, 0x4a, 0x15, 0x39, 0x8b, 0x53, 0x69, 0xa9, 0x1c, 0xed, 0xf5, 0xc5, 0xb4, - 0x54, 0x09, 0x88, 0xd6, 0x0c, 0x4e, 0xeb, 0x68, 0xeb, 0x00, 0x26, 0x0f, 0xfe, 0x9b, 0xee, 0x67, - 0x0b, 0xdd, 0x7f, 0xef, 0x6f, 0xb4, 0x63, 0x35, 0xf5, 0xff, 0xd7, 0x0d, 0xf8, 0xb1, 0x8e, 0x5e, - 0xfd, 0x0b, 0xa2, 0xf8, 0x1b, 0x0b, 0x61, 0xbe, 0xd4, 0xc4, 0xa6, 0xe4, 0x1f, 0xac, 0xce, 0x70, - 0xf9, 0x42, 0x78, 0xaf, 0xe4, 0xd3, 0x5e, 0xc5, 0x45, 0x21, 0x15, 0x71, 0xf1, 0x13, 0x0b, 0x5d, - 0x89, 0xab, 0x6e, 0xac, 0x39, 0xa6, 0x3b, 0xab, 0x33, 0xaa, 0x7c, 0x00, 0xbc, 0xab, 
0xf9, 0xb4, - 0x57, 0xfd, 0x36, 0x90, 0x6a, 0x02, 0x83, 0x9f, 0xeb, 0xe8, 0xf2, 0xc5, 0x3b, 0xfd, 0xef, 0x74, - 0xe9, 0xef, 0x4d, 0xb4, 0x75, 0xd1, 0xa1, 0xff, 0xb0, 0x43, 0xe7, 0x83, 0xa4, 0xb1, 0xf8, 0xe2, - 0x7e, 0x26, 0x80, 0x9b, 0x41, 0xd2, 0x9f, 0x0d, 0x92, 0xa6, 0xde, 0x4d, 0x90, 0x3a, 0x0a, 0x3d, - 0x44, 0xc4, 0x6c, 0x8a, 0x1c, 0xa3, 0x16, 0xa8, 0x5d, 0xc6, 0x6e, 0xf5, 0x1b, 0x3b, 0x1b, 0x37, - 0x3f, 0x7d, 0x6e, 0xcd, 0xe6, 0xe8, 0x15, 0x69, 0x3f, 0x96, 0xfc, 0xb8, 0x1c, 0x60, 0x5a, 0x46, - 0x8a, 0x88, 0xdb, 0x99, 0x59, 0xa3, 0x34, 0x06, 0x5f, 0x42, 0x8d, 0x31, 0x1c, 0x17, 0x03, 0x94, - 0xa8, 0x4f, 0x4c, 0x50, 0x2b, 0x53, 0x1b, 0x96, 0x29, 0xf4, 0x7b, 0xab, 0x53, 0x2b, 0xb7, 0x34, - 0x52, 0xb8, 0xba, 0x5d, 0xbf, 0x65, 0x0d, 0xbe, 0xb7, 0xd0, 0xd5, 0x73, 0x5b, 0x56, 0x8d, 0x55, - 0x3a, 0x99, 0xb0, 0x87, 0x70, 0xa4, 0xb9, 0xb4, 0xcb, 0xb1, 0xba, 0x57, 0x88, 0xc9, 0x4c, 0x8f, - 0x5f, 0x47, 0x6b, 0x1c, 0xa8, 0x60, 0xb1, 0x19, 0xe5, 0xf3, 0x6e, 0x27, 0x5a, 0x4a, 0x8c, 0x16, - 0xef, 0xa1, 0x4d, 0x50, 0xe1, 0x35, 0xb9, 0x7d, 0xce, 0x19, 0x37, 0x47, 0xb6, 0x65, 0x0c, 0x36, - 0xf7, 0x17, 0xd5, 0xe4, 0xcf, 0x78, 0xef, 0xda, 0xc9, 0x69, 0xb7, 0xf6, 0xf4, 0xb4, 0x5b, 0xfb, - 0xe9, 0xb4, 0x5b, 0x7b, 0x9c, 0x77, 0xad, 0x93, 0xbc, 0x6b, 0x3d, 0xcd, 0xbb, 0xd6, 0xaf, 0x79, - 0xd7, 0xfa, 0xf6, 0xb7, 0x6e, 0xed, 0xf3, 0x75, 0x93, 0xf4, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, - 0x38, 0xbb, 0x77, 0xc4, 0x79, 0x0c, 0x00, 0x00, -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.generated.go deleted file mode 100644 index 939603c58..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.generated.go +++ /dev/null @@ -1,3233 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
-// ************************************************************ - -package v1beta1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkg2_types "k8s.io/apimachinery/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_v1.TypeMeta - var v1 pkg2_types.UID - var v2 time.Time - _, _, _ = v0, v1, v2 - } -} - -func (x *SubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } 
else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - 
yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SelfSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for 
_, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SelfSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SelfSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = SelfSubjectAccessReviewSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - 
if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = SelfSubjectAccessReviewSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LocalSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } 
- } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LocalSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LocalSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Namespace != "" - yyq2[1] = x.Verb != "" - yyq2[2] = x.Group != "" - yyq2[3] = x.Version != "" - yyq2[4] = x.Resource != "" - yyq2[5] = x.Subresource != "" - yyq2[6] = x.Name != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - 
} else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("verb")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Group)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("group")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Group)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Version)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("version")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Version)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subresource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - yyv4 := &x.Namespace - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "verb": - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - yyv6 := &x.Verb - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "group": - if r.TryDecodeAsNil() { - x.Group = "" - } else { - yyv8 := &x.Group - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "version": - if r.TryDecodeAsNil() { - x.Version = "" - } else { - yyv10 := &x.Version - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "resource": - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - yyv12 := &x.Resource - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - case "subresource": - if r.TryDecodeAsNil() { - x.Subresource = "" - } else { - yyv14 := &x.Subresource - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv16 := &x.Name - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } 
- default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - yyv19 := &x.Namespace - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - yyv21 := &x.Verb - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Group = "" - } else { - yyv23 := &x.Group - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*string)(yyv23)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Version = "" - } else { - yyv25 := &x.Version - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*string)(yyv25)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - yyv27 := &x.Resource - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*string)(yyv27)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subresource = "" - } else { - yyv29 := &x.Subresource - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*string)(yyv29)) = r.DecodeString() - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv31 := &x.Name - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - *((*string)(yyv31)) = r.DecodeString() - } - } - for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NonResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Path != "" - yyq2[1] = x.Verb != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("verb")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NonResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NonResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if 
r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv4 := &x.Path - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "verb": - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - yyv6 := &x.Verb - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NonResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv9 := &x.Path - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - yyv11 := &x.Verb - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ResourceAttributes != nil - yyq2[1] = x.NonResourceAttributes != nil - yyq2[2] = x.User != "" - yyq2[3] = len(x.Groups) != 0 - yyq2[4] = len(x.Extra) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Groups == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("group")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Extra == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("extra")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Extra == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { 
- if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "resourceAttributes": - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - case "nonResourceAttributes": - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - case "user": - if r.TryDecodeAsNil() { - x.User = "" - } else { - yyv6 := &x.User - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "group": - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv8 := &x.Groups - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecSliceStringX(yyv8, false, d) - } - } - case "extra": - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv10 := &x.Extra - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = "" - } else { - yyv15 := &x.User - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv17 := &x.Groups - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - 
z.F.DecSliceStringX(yyv17, false, d) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv19 := &x.Extra - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encExtraValue((ExtraValue)(x), e) - } - } -} - -func (x *ExtraValue) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decExtraValue((*ExtraValue)(x), d) - } -} - -func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ResourceAttributes != nil - yyq2[1] = x.NonResourceAttributes != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SelfSubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - 
var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "resourceAttributes": - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - case "nonResourceAttributes": - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SubjectAccessReviewStatus) 
CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Reason != "" - yyq2[2] = x.EvaluationError != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Allowed)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allowed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Allowed)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("evaluationError")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SubjectAccessReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubjectAccessReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = 
z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "allowed": - if r.TryDecodeAsNil() { - x.Allowed = false - } else { - yyv4 := &x.Allowed - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*bool)(yyv4)) = r.DecodeBool() - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv6 := &x.Reason - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "evaluationError": - if r.TryDecodeAsNil() { - x.EvaluationError = "" - } else { - yyv8 := &x.EvaluationError - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Allowed = false - } else { - yyv11 := &x.Allowed - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(yyv11)) = r.DecodeBool() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv13 := &x.Reason - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvaluationError = "" - } else { - yyv15 := &x.EvaluationError - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk1)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv1 == nil { - r.EncodeNil() - } else { - yyv1.CodecEncodeSelf(e) - } - } - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyl1 := r.ReadMapStart() - yybh1 := z.DecBasicHandle() - if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) - yyv1 = make(map[string]ExtraValue, yyrl1) - *v = yyv1 - } - var yymk1 string - var yymv1 ExtraValue - var yymg1 bool - if yybh1.MapValueReset { - yymg1 = true - } - if yyl1 > 0 { - for yyj1 := 0; yyj1 < yyl1; yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yyv2 := &yymk1 - yym3 := z.DecBinary() - _ = yym3 - if false { - } else { - *((*string)(yyv2)) = r.DecodeString() - } - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv4 := &yymv1 - yyv4.CodecDecodeSelf(d) - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } else if yyl1 < 0 { - for yyj1 := 0; !r.CheckBreak(); yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yyv5 := &yymk1 - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*string)(yyv5)) = r.DecodeString() - } - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv7 := &yymv1 - yyv7.CodecDecodeSelf(d) - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } // else len==0: TODO: Should we clear map entries? - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv1)) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]string, yyrl1) - } - } else { - yyv1 = make([]string, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv2 := &yyv1[yyj1] - yym3 := z.DecBinary() - _ = yym3 - if false { - } else { - *((*string)(yyv2)) = r.DecodeString() - } - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - 
yyv4 := &yyv1[yyj1] - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 string - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv6 := &yyv1[yyj1] - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.conversion.go deleted file mode 100644 index 7702201d5..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,263 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1beta1 - -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - authorization "k8s.io/client-go/pkg/apis/authorization" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview, - Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview, - Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes, - Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes, - Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes, - Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes, - Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview, - Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview, - Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec, - Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec, - Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview, - Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview, - Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec, - Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec, - Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus, - Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus, - ) -} - -func autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { - return autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in, out, s) -} - -func autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { - return autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in, out, s) -} - -func autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { - out.Path = in.Path - out.Verb = in.Verb - return nil -} - -func Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s 
conversion.Scope) error { - return autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in, out, s) -} - -func autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { - out.Path = in.Path - out.Verb = in.Verb - return nil -} - -func Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { - return autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in, out, s) -} - -func autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { - out.Namespace = in.Namespace - out.Verb = in.Verb - out.Group = in.Group - out.Version = in.Version - out.Resource = in.Resource - out.Subresource = in.Subresource - out.Name = in.Name - return nil -} - -func Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { - return autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in, out, s) -} - -func autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { - out.Namespace = in.Namespace - out.Verb = in.Verb - out.Group = in.Group - out.Version = in.Version - out.Resource = in.Resource - out.Subresource = in.Subresource - out.Name = in.Name - return nil -} - -func Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { - return autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in, out, s) -} - -func autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { - return autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in, out, s) -} - -func autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { - 
return autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in, out, s) -} - -func autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { - out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) - out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) - return nil -} - -func Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { - return autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in, out, s) -} - -func autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { - out.ResourceAttributes = (*ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) - out.NonResourceAttributes = (*NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) - return nil -} - -func Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { - return autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in, out, s) -} - -func autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { - return autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in, out, s) -} - -func autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { - return autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in, out, s) -} - -func autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { - out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) - out.NonResourceAttributes = 
(*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) - out.User = in.User - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - out.Extra = *(*map[string]authorization.ExtraValue)(unsafe.Pointer(&in.Extra)) - return nil -} - -func Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { - return autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in, out, s) -} - -func autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { - out.ResourceAttributes = (*ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) - out.NonResourceAttributes = (*NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) - out.User = in.User - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) - return nil -} - -func Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { - return autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in, out, s) -} - -func autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { - out.Allowed = in.Allowed - out.Reason = in.Reason - out.EvaluationError = in.EvaluationError - return nil -} - -func Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { - return autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in, out, s) -} - -func autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { - out.Allowed = in.Allowed - out.Reason = in.Reason - out.EvaluationError = in.EvaluationError - return nil -} - -func Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { - return autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 2bbd414ad..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,179 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_LocalSubjectAccessReview, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NonResourceAttributes, InType: reflect.TypeOf(&NonResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ResourceAttributes, InType: reflect.TypeOf(&ResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SelfSubjectAccessReview, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SelfSubjectAccessReviewSpec, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SubjectAccessReview, InType: reflect.TypeOf(&SubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SubjectAccessReviewSpec, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SubjectAccessReviewStatus, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, - ) -} - -func DeepCopy_v1beta1_LocalSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LocalSubjectAccessReview) - out := out.(*LocalSubjectAccessReview) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_NonResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NonResourceAttributes) - out := out.(*NonResourceAttributes) - *out = *in - return nil - } -} - -func DeepCopy_v1beta1_ResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceAttributes) - out := out.(*ResourceAttributes) - *out = *in - return nil - } -} - -func DeepCopy_v1beta1_SelfSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SelfSubjectAccessReview) - out := out.(*SelfSubjectAccessReview) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SelfSubjectAccessReviewSpec) - out := out.(*SelfSubjectAccessReviewSpec) - *out = *in - if in.ResourceAttributes != nil { - in, out := &in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - **out = **in - } - if in.NonResourceAttributes != nil { - in, out := &in.NonResourceAttributes, 
&out.NonResourceAttributes - *out = new(NonResourceAttributes) - **out = **in - } - return nil - } -} - -func DeepCopy_v1beta1_SubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SubjectAccessReview) - out := out.(*SubjectAccessReview) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_SubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SubjectAccessReviewSpec) - out := out.(*SubjectAccessReviewSpec) - *out = *in - if in.ResourceAttributes != nil { - in, out := &in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - **out = **in - } - if in.NonResourceAttributes != nil { - in, out := &in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(NonResourceAttributes) - **out = **in - } - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue) - for key, val := range *in { - if newVal, err := c.DeepCopy(&val); err != nil { - return err - } else { - (*out)[key] = *newVal.(*ExtraValue) - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_SubjectAccessReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SubjectAccessReviewStatus) - out := out.(*SubjectAccessReviewStatus) - *out = *in - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authorization/zz_generated.deepcopy.go deleted file mode 100644 index 19ccebdaa..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/zz_generated.deepcopy.go +++ /dev/null @@ -1,179 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package authorization - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_LocalSubjectAccessReview, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_NonResourceAttributes, InType: reflect.TypeOf(&NonResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_ResourceAttributes, InType: reflect.TypeOf(&ResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_SelfSubjectAccessReview, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_SelfSubjectAccessReviewSpec, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_SubjectAccessReview, InType: reflect.TypeOf(&SubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_SubjectAccessReviewSpec, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_SubjectAccessReviewStatus, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, - ) -} - -func DeepCopy_authorization_LocalSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LocalSubjectAccessReview) - out := out.(*LocalSubjectAccessReview) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_authorization_NonResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NonResourceAttributes) - out := out.(*NonResourceAttributes) - *out = *in - return nil - } -} - -func DeepCopy_authorization_ResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceAttributes) - out := out.(*ResourceAttributes) - *out = *in - return nil - } -} - -func DeepCopy_authorization_SelfSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SelfSubjectAccessReview) - out := out.(*SelfSubjectAccessReview) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_authorization_SelfSubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SelfSubjectAccessReviewSpec) - out := out.(*SelfSubjectAccessReviewSpec) - *out = *in - if in.ResourceAttributes != nil { - in, out := &in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - **out = **in - } - if in.NonResourceAttributes != nil { - in, out := &in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(NonResourceAttributes) - **out = **in - } - return nil - } -} - -func DeepCopy_authorization_SubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SubjectAccessReview) - out := out.(*SubjectAccessReview) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := 
DeepCopy_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_authorization_SubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SubjectAccessReviewSpec) - out := out.(*SubjectAccessReviewSpec) - *out = *in - if in.ResourceAttributes != nil { - in, out := &in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - **out = **in - } - if in.NonResourceAttributes != nil { - in, out := &in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(NonResourceAttributes) - **out = **in - } - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue) - for key, val := range *in { - if newVal, err := c.DeepCopy(&val); err != nil { - return err - } else { - (*out)[key] = *newVal.(*ExtraValue) - } - } - } - return nil - } -} - -func DeepCopy_authorization_SubjectAccessReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SubjectAccessReviewStatus) - out := out.(*SubjectAccessReviewStatus) - *out = *in - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/OWNERS b/vendor/k8s.io/client-go/pkg/apis/autoscaling/OWNERS deleted file mode 100755 index 9fbc54e93..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/OWNERS +++ /dev/null @@ -1,20 +0,0 @@ -reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- caesarxuchao -- erictune -- sttts -- ncdc -- timothysc -- piosz -- dims -- errordeveloper -- madhusudancs -- krousey -- mml -- mbohlool -- david-mcmahon -- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/annotations.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/annotations.go deleted file mode 100644 index 4c377561b..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/annotations.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package autoscaling - -// MetricSpecsAnnotation is the annotation which holds non-CPU-utilization HPA metric -// specs when converting the `Metrics` field from autoscaling/v2alpha1 -const MetricSpecsAnnotation = "autoscaling.alpha.kubernetes.io/metrics" - -// MetricStatusesAnnotation is the annotation which holds non-CPU-utilization HPA metric -// statuses when converting the `CurrentMetrics` field from autoscaling/v2alpha1 -const MetricStatusesAnnotation = "autoscaling.alpha.kubernetes.io/current-metrics" - -// DefaultCPUUtilization is the default value for CPU utilization, provided no other -// metrics are present. This is here because it's used by both the v2alpha1 defaulting -// logic, and the pseudo-defaulting done in v1 conversion. 
-const DefaultCPUUtilization = 80 diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/doc.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/doc.go deleted file mode 100644 index 83ac82767..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package autoscaling diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/install/install.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/install/install.go deleted file mode 100644 index 0ecc0af9b..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/install/install.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the experimental API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/apis/autoscaling" - "k8s.io/client-go/pkg/apis/autoscaling/v1" - "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1" -) - -func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: autoscaling.GroupName, - VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v2alpha1.SchemeGroupVersion.Version}, - ImportPrefix: "k8s.io/client-go/pkg/apis/autoscaling", - AddInternalObjectsToScheme: autoscaling.AddToScheme, - }, - announced.VersionToSchemeFunc{ - v1.SchemeGroupVersion.Version: v1.AddToScheme, - v2alpha1.SchemeGroupVersion.Version: v2alpha1.AddToScheme, - }, - ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/register.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/register.go deleted file mode 100644 index 2bcea84b9..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package autoscaling - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "autoscaling" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Scale{}, - &HorizontalPodAutoscaler{}, - &HorizontalPodAutoscalerList{}, - ) - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/types.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/types.go deleted file mode 100644 index 39f206b1f..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/types.go +++ /dev/null @@ -1,305 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package autoscaling - -import ( - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api" -) - -// Scale represents a scaling request for a resource. -type Scale struct { - metav1.TypeMeta - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta - - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. - // +optional - Spec ScaleSpec - - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. - // +optional - Status ScaleStatus -} - -// ScaleSpec describes the attributes of a scale subresource. -type ScaleSpec struct { - // desired number of instances for the scaled object. - // +optional - Replicas int32 -} - -// ScaleStatus represents the current status of a scale subresource. 
-type ScaleStatus struct { - // actual number of observed instances of the scaled object. - Replicas int32 - - // label query over pods that should match the replicas count. This is same - // as the label selector but in the string format to avoid introspection - // by clients. The string will be in the same format as the query-param syntax. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - Selector string -} - -// CrossVersionObjectReference contains enough information to let you identify the referred resource. -type CrossVersionObjectReference struct { - // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" - Kind string - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - Name string - // API version of the referent - // +optional - APIVersion string -} - -// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. -type HorizontalPodAutoscalerSpec struct { - // ScaleTargetRef points to the target resource to scale, and is used to the pods for which metrics - // should be collected, as well as to actually change the replica count. - ScaleTargetRef CrossVersionObjectReference - // MinReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. - // +optional - MinReplicas *int32 - // MaxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. - // It cannot be less that minReplicas. - MaxReplicas int32 - // Metrics contains the specifications for which to use to calculate the - // desired replica count (the maximum replica count across all metrics will - // be used). The desired replica count is calculated multiplying the - // ratio between the target value and the current value by the current - // number of pods. Ergo, metrics used must decrease as the pod count is - // increased, and vice-versa. See the individual metric source types for - // more information about how each type of metric must respond. - // +optional - Metrics []MetricSpec -} - -// MetricSourceType indicates the type of metric. -type MetricSourceType string - -var ( - // ObjectMetricSourceType is a metric describing a kubernetes object - // (for example, hits-per-second on an Ingress object). - ObjectMetricSourceType MetricSourceType = "Object" - // PodsMetricSourceType is a metric describing each pod in the current scale - // target (for example, transactions-processed-per-second). The values - // will be averaged together before being compared to the target value. - PodsMetricSourceType MetricSourceType = "Pods" - // ResourceMetricSourceType is a resource metric known to Kubernetes, as - // specified in requests and limits, describing each pod in the current - // scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics (the "pods" source). - ResourceMetricSourceType MetricSourceType = "Resource" -) - -// MetricSpec specifies how to scale based on a single metric -// (only `type` and one other matching field should be set at once). -type MetricSpec struct { - // Type is the type of metric source. It should match one of the fields below. - Type MetricSourceType - - // Object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). 
- // +optional - Object *ObjectMetricSource - // Pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - Pods *PodsMetricSource - // Resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - Resource *ResourceMetricSource -} - -// ObjectMetricSource indicates how to scale on a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). -type ObjectMetricSource struct { - // Target is the described Kubernetes object. - Target CrossVersionObjectReference - - // MetricName is the name of the metric in question. - MetricName string - // TargetValue is the target value of the metric (as a quantity). - TargetValue resource.Quantity -} - -// PodsMetricSource indicates how to scale on a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). -// The values will be averaged together before being compared to the target -// value. -type PodsMetricSource struct { - // MetricName is the name of the metric in question - MetricName string - // TargetAverageValue is the target value of the average of the - // metric across all relevant pods (as a quantity) - TargetAverageValue resource.Quantity -} - -// ResourceMetricSource indicates how to scale on a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). The values will be averaged -// together before being compared to the target. Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. Only one "target" type -// should be set. -type ResourceMetricSource struct { - // Name is the name of the resource in question. - Name api.ResourceName - // TargetAverageUtilization is the target value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. - // +optional - TargetAverageUtilization *int32 - // TargetAverageValue is the the target value of the average of the - // resource metric across all relevant pods, as a raw value (instead of as - // a percentage of the request), similar to the "pods" metric source type. - // +optional - TargetAverageValue *resource.Quantity -} - -// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. -type HorizontalPodAutoscalerStatus struct { - // ObservedGeneration is the most recent generation observed by this autoscaler. - // +optional - ObservedGeneration *int64 - - // LastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, - // used by the autoscaler to control how often the number of pods is changed. - // +optional - LastScaleTime *metav1.Time - - // CurrentReplicas is current number of replicas of pods managed by this autoscaler, - // as last seen by the autoscaler. 
- CurrentReplicas int32 - - // DesiredReplicas is the desired number of replicas of pods managed by this autoscaler, - // as last calculated by the autoscaler. - DesiredReplicas int32 - - // CurrentMetrics is the last read state of the metrics used by this autoscaler. - CurrentMetrics []MetricStatus -} - -// MetricStatus describes the last-read state of a single metric. -type MetricStatus struct { - // Type is the type of metric source. It will match one of the fields below. - Type MetricSourceType - - // Object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). - // +optional - Object *ObjectMetricStatus - // Pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - Pods *PodsMetricStatus - // Resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - Resource *ResourceMetricStatus -} - -// ObjectMetricStatus indicates the current value of a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). -type ObjectMetricStatus struct { - // Target is the described Kubernetes object. - Target CrossVersionObjectReference - - // MetricName is the name of the metric in question. - MetricName string - // CurrentValue is the current value of the metric (as a quantity). - CurrentValue resource.Quantity -} - -// PodsMetricStatus indicates the current value of a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). -type PodsMetricStatus struct { - // MetricName is the name of the metric in question - MetricName string - // CurrentAverageValue is the current value of the average of the - // metric across all relevant pods (as a quantity) - CurrentAverageValue resource.Quantity -} - -// ResourceMetricStatus indicates the current value of a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. -type ResourceMetricStatus struct { - // Name is the name of the resource in question. - Name api.ResourceName - // CurrentAverageUtilization is the current value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. It will only be - // present if `targetAverageValue` was set in the corresponding metric - // specification. - // +optional - CurrentAverageUtilization *int32 - // CurrentAverageValue is the the current value of the average of the - // resource metric across all relevant pods, as a raw value (instead of as - // a percentage of the request), similar to the "pods" metric source type. - // It will always be set, regardless of the corresponding metric specification. 
- CurrentAverageValue resource.Quantity -} - -// +genclient=true - -// HorizontalPodAutoscaler is the configuration for a horizontal pod -// autoscaler, which automatically manages the replica count of any resource -// implementing the scale subresource based on the metrics specified. -type HorizontalPodAutoscaler struct { - metav1.TypeMeta - // Metadata is the standard object metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta - - // Spec is the specification for the behaviour of the autoscaler. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. - // +optional - Spec HorizontalPodAutoscalerSpec - - // Status is the current information about the autoscaler. - // +optional - Status HorizontalPodAutoscalerStatus -} - -// HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects. -type HorizontalPodAutoscalerList struct { - metav1.TypeMeta - // Metadata is the standard list metadata. - // +optional - metav1.ListMeta - - // Items is the list of horizontal pod autoscaler objects. - Items []HorizontalPodAutoscaler -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/conversion.go deleted file mode 100644 index 6b8306922..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/conversion.go +++ /dev/null @@ -1,244 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "encoding/json" - - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/apis/autoscaling" -) - -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler, - Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, - Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, - Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, - Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus, - Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, - ) - if err != nil { - return err - } - - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - if err := autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in, out, s); err != nil { - return err - } - - otherMetrics := make([]MetricSpec, 0, len(in.Spec.Metrics)) - for _, metric := range in.Spec.Metrics { - if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == api.ResourceCPU && metric.Resource.TargetAverageUtilization != nil { - continue - } - - convMetric := MetricSpec{} - if err := Convert_autoscaling_MetricSpec_To_v1_MetricSpec(&metric, &convMetric, s); err != nil { - return err - } - otherMetrics = append(otherMetrics, convMetric) - } - - // NB: we need to save the status even if it maps to a CPU utilization status in order to save the raw value as well - currentMetrics := make([]MetricStatus, len(in.Status.CurrentMetrics)) - for i, currentMetric := range in.Status.CurrentMetrics { - if err := Convert_autoscaling_MetricStatus_To_v1_MetricStatus(¤tMetric, ¤tMetrics[i], s); err != nil { - return err - } - } - - if len(otherMetrics) > 0 || len(in.Status.CurrentMetrics) > 0 { - old := out.Annotations - out.Annotations = make(map[string]string, len(old)+2) - if old != nil { - for k, v := range old { - out.Annotations[k] = v - } - } - } - - if len(otherMetrics) > 0 { - otherMetricsEnc, err := json.Marshal(otherMetrics) - if err != nil { - return err - } - out.Annotations[autoscaling.MetricSpecsAnnotation] = string(otherMetricsEnc) - } - - if len(in.Status.CurrentMetrics) > 0 { - currentMetricsEnc, err := json.Marshal(currentMetrics) - if err != nil { - return err - } - out.Annotations[autoscaling.MetricStatusesAnnotation] = string(currentMetricsEnc) - } - - return nil -} - -func Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { - if err := autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s); err != nil { - return err - } - - if otherMetricsEnc, hasOtherMetrics := out.Annotations[autoscaling.MetricSpecsAnnotation]; hasOtherMetrics { - var otherMetrics []MetricSpec - if err := json.Unmarshal([]byte(otherMetricsEnc), &otherMetrics); err != nil { - return err - } - - // the normal Spec conversion could have populated out.Spec.Metrics with a single element, so deal with that - outMetrics := make([]autoscaling.MetricSpec, len(otherMetrics)+len(out.Spec.Metrics)) - for i, metric := range 
otherMetrics { - if err := Convert_v1_MetricSpec_To_autoscaling_MetricSpec(&metric, &outMetrics[i], s); err != nil { - return err - } - } - if out.Spec.Metrics != nil { - outMetrics[len(otherMetrics)] = out.Spec.Metrics[0] - } - out.Spec.Metrics = outMetrics - delete(out.Annotations, autoscaling.MetricSpecsAnnotation) - } - - if currentMetricsEnc, hasCurrentMetrics := out.Annotations[autoscaling.MetricStatusesAnnotation]; hasCurrentMetrics { - // ignore any existing status values -- the ones here have more information - var currentMetrics []MetricStatus - if err := json.Unmarshal([]byte(currentMetricsEnc), ¤tMetrics); err != nil { - return err - } - - out.Status.CurrentMetrics = make([]autoscaling.MetricStatus, len(currentMetrics)) - for i, currentMetric := range currentMetrics { - if err := Convert_v1_MetricStatus_To_autoscaling_MetricStatus(¤tMetric, &out.Status.CurrentMetrics[i], s); err != nil { - return err - } - } - delete(out.Annotations, autoscaling.MetricStatusesAnnotation) - } - - // autoscaling/v1 formerly had an implicit default applied in the controller. In v2alpha1, we apply it explicitly. - // We apply it here, explicitly, since we have access to the full set of metrics from the annotation. - if len(out.Spec.Metrics) == 0 { - // no other metrics, no explicit CPU value set - out.Spec.Metrics = []autoscaling.MetricSpec{ - { - Type: autoscaling.ResourceMetricSourceType, - Resource: &autoscaling.ResourceMetricSource{ - Name: api.ResourceCPU, - }, - }, - } - out.Spec.Metrics[0].Resource.TargetAverageUtilization = new(int32) - *out.Spec.Metrics[0].Resource.TargetAverageUtilization = autoscaling.DefaultCPUUtilization - } - - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { - return err - } - - out.MinReplicas = in.MinReplicas - out.MaxReplicas = in.MaxReplicas - - for _, metric := range in.Metrics { - if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == api.ResourceCPU { - if metric.Resource.TargetAverageUtilization != nil { - out.TargetCPUUtilizationPercentage = new(int32) - *out.TargetCPUUtilizationPercentage = *metric.Resource.TargetAverageUtilization - } - break - } - } - - return nil -} - -func Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { - return err - } - - out.MinReplicas = in.MinReplicas - out.MaxReplicas = in.MaxReplicas - - if in.TargetCPUUtilizationPercentage != nil { - out.Metrics = []autoscaling.MetricSpec{ - { - Type: autoscaling.ResourceMetricSourceType, - Resource: &autoscaling.ResourceMetricSource{ - Name: api.ResourceCPU, - }, - }, - } - out.Metrics[0].Resource.TargetAverageUtilization = new(int32) - *out.Metrics[0].Resource.TargetAverageUtilization = *in.TargetCPUUtilizationPercentage - } - - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { - 
out.ObservedGeneration = in.ObservedGeneration - out.LastScaleTime = in.LastScaleTime - - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - - for _, metric := range in.CurrentMetrics { - if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == api.ResourceCPU { - if metric.Resource.CurrentAverageUtilization != nil { - - out.CurrentCPUUtilizationPercentage = new(int32) - *out.CurrentCPUUtilizationPercentage = *metric.Resource.CurrentAverageUtilization - } - } - } - return nil -} - -func Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { - out.ObservedGeneration = in.ObservedGeneration - out.LastScaleTime = in.LastScaleTime - - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - - if in.CurrentCPUUtilizationPercentage != nil { - out.CurrentMetrics = []autoscaling.MetricStatus{ - { - Type: autoscaling.ResourceMetricSourceType, - Resource: &autoscaling.ResourceMetricStatus{ - Name: api.ResourceCPU, - }, - }, - } - out.CurrentMetrics[0].Resource.CurrentAverageUtilization = new(int32) - *out.CurrentMetrics[0].Resource.CurrentAverageUtilization = *in.CurrentCPUUtilizationPercentage - } - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/defaults.go deleted file mode 100644 index d423ad125..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/defaults.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - RegisterDefaults(scheme) - return scheme.AddDefaultingFuncs( - SetDefaults_HorizontalPodAutoscaler, - ) -} - -func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) { - if obj.Spec.MinReplicas == nil { - minReplicas := int32(1) - obj.Spec.MinReplicas = &minReplicas - } - - // NB: we apply a default for CPU utilization in conversion because - // we need access to the annotations to properly apply the default. -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.generated.go deleted file mode 100644 index aea5e3831..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.generated.go +++ /dev/null @@ -1,5216 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg3_resource "k8s.io/apimachinery/pkg/api/resource" - pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkg2_types "k8s.io/apimachinery/pkg/types" - pkg4_v1 "k8s.io/client-go/pkg/api/v1" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg3_resource.Quantity - var v1 pkg1_v1.Time - var v2 pkg2_types.UID - var v3 pkg4_v1.ResourceName - var v4 time.Time - _, _, _, _, _ = v0, v1, v2, v3, v4 - } -} - -func (x *CrossVersionObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CrossVersionObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv6 := &x.Name - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv8 := &x.APIVersion - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv11 := &x.Kind - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv13 := &x.Name - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.MinReplicas != nil - yyq2[3] = x.TargetCPUUtilizationPercentage != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.ScaleTargetRef - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scaleTargetRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ScaleTargetRef - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy9 := *x.MinReplicas - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy11 := *x.MinReplicas - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(yy11)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.TargetCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy17 := *x.TargetCPUUtilizationPercentage - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeInt(int64(yy17)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetCPUUtilizationPercentage")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TargetCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy19 := *x.TargetCPUUtilizationPercentage - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(yy19)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "scaleTargetRef": - if r.TryDecodeAsNil() { - x.ScaleTargetRef = CrossVersionObjectReference{} - } else { - yyv4 := &x.ScaleTargetRef - yyv4.CodecDecodeSelf(d) - } - case "minReplicas": - if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } - } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int32) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) - } - } - case "maxReplicas": - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - yyv7 := &x.MaxReplicas - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*int32)(yyv7)) = int32(r.DecodeInt(32)) - } - } - case "targetCPUUtilizationPercentage": - if r.TryDecodeAsNil() { - if x.TargetCPUUtilizationPercentage != nil { - x.TargetCPUUtilizationPercentage = nil - } - } else { - if x.TargetCPUUtilizationPercentage == nil { - x.TargetCPUUtilizationPercentage = new(int32) - } - yym10 
:= z.DecBinary() - _ = yym10 - if false { - } else { - *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ScaleTargetRef = CrossVersionObjectReference{} - } else { - yyv12 := &x.ScaleTargetRef - yyv12.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } - } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int32) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - yyv15 := &x.MaxReplicas - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int32)(yyv15)) = int32(r.DecodeInt(32)) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TargetCPUUtilizationPercentage != nil { - x.TargetCPUUtilizationPercentage = nil - } - } else { - if x.TargetCPUUtilizationPercentage == nil { - x.TargetCPUUtilizationPercentage = new(int32) - } - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ObservedGeneration != nil - yyq2[1] = x.LastScaleTime != nil - yyq2[4] = x.CurrentCPUUtilizationPercentage != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy4 := *x.ObservedGeneration - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy6 := *x.ObservedGeneration - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.LastScaleTime == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym9 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym9 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) - } else { - z.EncFallback(x.LastScaleTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LastScaleTime == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym10 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) - } else { - z.EncFallback(x.LastScaleTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(x.DesiredReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.DesiredReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.CurrentCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy18 := *x.CurrentCPUUtilizationPercentage - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(yy18)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentCPUUtilizationPercentage")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CurrentCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy20 := *x.CurrentCPUUtilizationPercentage - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeInt(int64(yy20)) - } - } - } - } - 
if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "observedGeneration": - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - case "lastScaleTime": - if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } - } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_v1.Time) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym7 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) - } else { - z.DecFallback(x.LastScaleTime, false) - } - } - case "currentReplicas": - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - yyv8 := &x.CurrentReplicas - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(yyv8)) = int32(r.DecodeInt(32)) - } - } - case "desiredReplicas": - if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 - } else { - yyv10 := &x.DesiredReplicas - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(yyv10)) = int32(r.DecodeInt(32)) - } - } - case "currentCPUUtilizationPercentage": - if r.TryDecodeAsNil() { - if x.CurrentCPUUtilizationPercentage != nil { - x.CurrentCPUUtilizationPercentage = nil - } - } else { - if x.CurrentCPUUtilizationPercentage == nil { - x.CurrentCPUUtilizationPercentage = new(int32) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } - } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_v1.Time) - } - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym18 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) - } else { - z.DecFallback(x.LastScaleTime, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - yyv19 := &x.CurrentReplicas - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int32)(yyv19)) = int32(r.DecodeInt(32)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 - } else { - yyv21 := &x.DesiredReplicas - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int32)(yyv21)) = int32(r.DecodeInt(32)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CurrentCPUUtilizationPercentage != nil { - x.CurrentCPUUtilizationPercentage = nil - } - } else { - if x.CurrentCPUUtilizationPercentage == nil { - x.CurrentCPUUtilizationPercentage = new(int32) - } - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 
[5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else 
{ - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = HorizontalPodAutoscalerSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - 
if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = HorizontalPodAutoscalerSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } 
// end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 
:= 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = 
ScaleStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv4 := &x.Replicas - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(yyv4)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv7 := &x.Replicas - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*int32)(yyv7)) = int32(r.DecodeInt(32)) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Selector != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Selector)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Selector)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleStatus) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv4 := &x.Replicas - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(yyv4)) = int32(r.DecodeInt(32)) - } - } - case "selector": - if r.TryDecodeAsNil() { - x.Selector = "" - } else { - yyv6 := &x.Selector - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv9 := &x.Replicas - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*int32)(yyv9)) = int32(r.DecodeInt(32)) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Selector = "" - } else { - yyv11 := &x.Selector - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x MetricSourceType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *MetricSourceType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *MetricSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = 
x.Object != nil - yyq2[2] = x.Pods != nil - yyq2[3] = x.Resource != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Object == nil { - r.EncodeNil() - } else { - x.Object.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("object")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Object == nil { - r.EncodeNil() - } else { - x.Object.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Pods == nil { - r.EncodeNil() - } else { - x.Pods.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pods")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Pods == nil { - r.EncodeNil() - } else { - x.Pods.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Resource == nil { - r.EncodeNil() - } else { - x.Resource.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Resource == nil { - r.EncodeNil() - } else { - x.Resource.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *MetricSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *MetricSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, 
true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "object": - if r.TryDecodeAsNil() { - if x.Object != nil { - x.Object = nil - } - } else { - if x.Object == nil { - x.Object = new(ObjectMetricSource) - } - x.Object.CodecDecodeSelf(d) - } - case "pods": - if r.TryDecodeAsNil() { - if x.Pods != nil { - x.Pods = nil - } - } else { - if x.Pods == nil { - x.Pods = new(PodsMetricSource) - } - x.Pods.CodecDecodeSelf(d) - } - case "resource": - if r.TryDecodeAsNil() { - if x.Resource != nil { - x.Resource = nil - } - } else { - if x.Resource == nil { - x.Resource = new(ResourceMetricSource) - } - x.Resource.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *MetricSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv9 := &x.Type - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Object != nil { - x.Object = nil - } - } else { - if x.Object == nil { - x.Object = new(ObjectMetricSource) - } - x.Object.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Pods != nil { - x.Pods = nil - } - } else { - if x.Pods == nil { - x.Pods = new(PodsMetricSource) - } - x.Pods.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Resource != nil { - x.Resource = nil - } - } else { - if x.Resource == nil { - x.Resource = new(ResourceMetricSource) - } - x.Resource.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ObjectMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 3 - 
for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.Target - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("target")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Target - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metricName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy12 := &x.TargetValue - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetValue")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.TargetValue - yym15 := z.EncBinary() - _ = yym15 - if false { - } else if z.HasExtensions() && z.EncExt(yy14) { - } else if !yym15 && z.IsJSONHandle() { - z.EncJSONMarshal(yy14) - } else { - z.EncFallback(yy14) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "target": - if r.TryDecodeAsNil() { - x.Target = CrossVersionObjectReference{} - } else { - yyv4 := &x.Target - yyv4.CodecDecodeSelf(d) - } - case "metricName": - if r.TryDecodeAsNil() { - x.MetricName = "" - } else { - yyv5 := &x.MetricName - yym6 := 
z.DecBinary() - _ = yym6 - if false { - } else { - *((*string)(yyv5)) = r.DecodeString() - } - } - case "targetValue": - if r.TryDecodeAsNil() { - x.TargetValue = pkg3_resource.Quantity{} - } else { - yyv7 := &x.TargetValue - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Target = CrossVersionObjectReference{} - } else { - yyv10 := &x.Target - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MetricName = "" - } else { - yyv11 := &x.MetricName - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetValue = pkg3_resource.Quantity{} - } else { - yyv13 := &x.TargetValue - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(yyv13) { - } else if !yym14 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv13) - } else { - z.DecFallback(yyv13, false) - } - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodsMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metricName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.TargetAverageValue - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetAverageValue")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.TargetAverageValue - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodsMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodsMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metricName": - if r.TryDecodeAsNil() { - x.MetricName = "" - } else { - yyv4 := &x.MetricName - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "targetAverageValue": - if r.TryDecodeAsNil() { - x.TargetAverageValue = pkg3_resource.Quantity{} - } else { - yyv6 := &x.TargetAverageValue - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodsMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.MetricName = "" - } else { - yyv9 := &x.MetricName - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetAverageValue = pkg3_resource.Quantity{} - } else { - yyv11 := &x.TargetAverageValue - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv11) - } else { - z.DecFallback(yyv11, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.TargetAverageUtilization != nil - yyq2[2] = x.TargetAverageValue != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf4 := &x.Name - yysf4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yysf5 := &x.Name - yysf5.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.TargetAverageUtilization == nil { - r.EncodeNil() - } else { - yy7 := *x.TargetAverageUtilization - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(yy7)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetAverageUtilization")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TargetAverageUtilization == nil { - r.EncodeNil() - } else { - yy9 := *x.TargetAverageUtilization - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.TargetAverageValue == nil { - r.EncodeNil() - } else { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { - } else if !yym12 && z.IsJSONHandle() { - z.EncJSONMarshal(x.TargetAverageValue) - } else { - z.EncFallback(x.TargetAverageValue) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetAverageValue")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if 
x.TargetAverageValue == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(x.TargetAverageValue) - } else { - z.EncFallback(x.TargetAverageValue) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yyv4.CodecDecodeSelf(d) - } - case "targetAverageUtilization": - if r.TryDecodeAsNil() { - if x.TargetAverageUtilization != nil { - x.TargetAverageUtilization = nil - } - } else { - if x.TargetAverageUtilization == nil { - x.TargetAverageUtilization = new(int32) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) - } - } - case "targetAverageValue": - if r.TryDecodeAsNil() { - if x.TargetAverageValue != nil { - x.TargetAverageValue = nil - } - } else { - if x.TargetAverageValue == nil { - x.TargetAverageValue = new(pkg3_resource.Quantity) - } - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.TargetAverageValue) - } else { - z.DecFallback(x.TargetAverageValue, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv10 := &x.Name - 
yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TargetAverageUtilization != nil { - x.TargetAverageUtilization = nil - } - } else { - if x.TargetAverageUtilization == nil { - x.TargetAverageUtilization = new(int32) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TargetAverageValue != nil { - x.TargetAverageValue = nil - } - } else { - if x.TargetAverageValue == nil { - x.TargetAverageValue = new(pkg3_resource.Quantity) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { - } else if !yym14 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.TargetAverageValue) - } else { - z.DecFallback(x.TargetAverageValue, false) - } - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *MetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Object != nil - yyq2[2] = x.Pods != nil - yyq2[3] = x.Resource != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Object == nil { - r.EncodeNil() - } else { - x.Object.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("object")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Object == nil { - r.EncodeNil() - } else { - x.Object.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Pods == nil { - r.EncodeNil() - } else { - x.Pods.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pods")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Pods == 
nil { - r.EncodeNil() - } else { - x.Pods.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Resource == nil { - r.EncodeNil() - } else { - x.Resource.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Resource == nil { - r.EncodeNil() - } else { - x.Resource.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *MetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *MetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "object": - if r.TryDecodeAsNil() { - if x.Object != nil { - x.Object = nil - } - } else { - if x.Object == nil { - x.Object = new(ObjectMetricStatus) - } - x.Object.CodecDecodeSelf(d) - } - case "pods": - if r.TryDecodeAsNil() { - if x.Pods != nil { - x.Pods = nil - } - } else { - if x.Pods == nil { - x.Pods = new(PodsMetricStatus) - } - x.Pods.CodecDecodeSelf(d) - } - case "resource": - if r.TryDecodeAsNil() { - if x.Resource != nil { - x.Resource = nil - } - } else { - if x.Resource == nil { - x.Resource = new(ResourceMetricStatus) - } - x.Resource.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *MetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv9 := &x.Type - yyv9.CodecDecodeSelf(d) - } - yyj8++ - 
if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Object != nil { - x.Object = nil - } - } else { - if x.Object == nil { - x.Object = new(ObjectMetricStatus) - } - x.Object.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Pods != nil { - x.Pods = nil - } - } else { - if x.Pods == nil { - x.Pods = new(PodsMetricStatus) - } - x.Pods.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Resource != nil { - x.Resource = nil - } - } else { - if x.Resource == nil { - x.Resource = new(ResourceMetricStatus) - } - x.Resource.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ObjectMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.Target - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("target")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Target - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metricName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy12 := &x.CurrentValue - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentValue")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := 
&x.CurrentValue - yym15 := z.EncBinary() - _ = yym15 - if false { - } else if z.HasExtensions() && z.EncExt(yy14) { - } else if !yym15 && z.IsJSONHandle() { - z.EncJSONMarshal(yy14) - } else { - z.EncFallback(yy14) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "target": - if r.TryDecodeAsNil() { - x.Target = CrossVersionObjectReference{} - } else { - yyv4 := &x.Target - yyv4.CodecDecodeSelf(d) - } - case "metricName": - if r.TryDecodeAsNil() { - x.MetricName = "" - } else { - yyv5 := &x.MetricName - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*string)(yyv5)) = r.DecodeString() - } - } - case "currentValue": - if r.TryDecodeAsNil() { - x.CurrentValue = pkg3_resource.Quantity{} - } else { - yyv7 := &x.CurrentValue - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Target = CrossVersionObjectReference{} - } else { - yyv10 := &x.Target - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MetricName = "" - } else { - yyv11 := &x.MetricName - yym12 := z.DecBinary() - 
_ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentValue = pkg3_resource.Quantity{} - } else { - yyv13 := &x.CurrentValue - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(yyv13) { - } else if !yym14 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv13) - } else { - z.DecFallback(yyv13, false) - } - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodsMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metricName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.CurrentAverageValue - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.CurrentAverageValue - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodsMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 
:= r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodsMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metricName": - if r.TryDecodeAsNil() { - x.MetricName = "" - } else { - yyv4 := &x.MetricName - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "currentAverageValue": - if r.TryDecodeAsNil() { - x.CurrentAverageValue = pkg3_resource.Quantity{} - } else { - yyv6 := &x.CurrentAverageValue - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodsMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MetricName = "" - } else { - yyv9 := &x.MetricName - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentAverageValue = pkg3_resource.Quantity{} - } else { - yyv11 := &x.CurrentAverageValue - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv11) - } else { - z.DecFallback(yyv11, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.CurrentAverageUtilization 
!= nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf4 := &x.Name - yysf4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yysf5 := &x.Name - yysf5.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.CurrentAverageUtilization == nil { - r.EncodeNil() - } else { - yy7 := *x.CurrentAverageUtilization - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(yy7)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentAverageUtilization")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CurrentAverageUtilization == nil { - r.EncodeNil() - } else { - yy9 := *x.CurrentAverageUtilization - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy12 := &x.CurrentAverageValue - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.CurrentAverageValue - yym15 := z.EncBinary() - _ = yym15 - if false { - } else if z.HasExtensions() && z.EncExt(yy14) { - } else if !yym15 && z.IsJSONHandle() { - z.EncJSONMarshal(yy14) - } else { - z.EncFallback(yy14) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := 
string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yyv4.CodecDecodeSelf(d) - } - case "currentAverageUtilization": - if r.TryDecodeAsNil() { - if x.CurrentAverageUtilization != nil { - x.CurrentAverageUtilization = nil - } - } else { - if x.CurrentAverageUtilization == nil { - x.CurrentAverageUtilization = new(int32) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) - } - } - case "currentAverageValue": - if r.TryDecodeAsNil() { - x.CurrentAverageValue = pkg3_resource.Quantity{} - } else { - yyv7 := &x.CurrentAverageValue - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv10 := &x.Name - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CurrentAverageUtilization != nil { - x.CurrentAverageUtilization = nil - } - } else { - if x.CurrentAverageUtilization == nil { - x.CurrentAverageUtilization = new(int32) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentAverageValue = pkg3_resource.Quantity{} - } else { - yyv13 := &x.CurrentAverageValue - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(yyv13) { - } else if !yym14 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv13) - } else { - z.DecFallback(yyv13, false) - } - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) 
decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []HorizontalPodAutoscaler{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 360) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]HorizontalPodAutoscaler, yyrl1) - } - } else { - yyv1 = make([]HorizontalPodAutoscaler, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HorizontalPodAutoscaler{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HorizontalPodAutoscaler{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.conversion.go deleted file mode 100644 index acd2283cd..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.conversion.go +++ /dev/null @@ -1,449 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! 
- -package v1 - -import ( - resource "k8s.io/apimachinery/pkg/api/resource" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/client-go/pkg/api" - api_v1 "k8s.io/client-go/pkg/api/v1" - autoscaling "k8s.io/client-go/pkg/apis/autoscaling" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference, - Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference, - Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, - Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler, - Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList, - Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList, - Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, - Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, - Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, - Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus, - Convert_v1_MetricSpec_To_autoscaling_MetricSpec, - Convert_autoscaling_MetricSpec_To_v1_MetricSpec, - Convert_v1_MetricStatus_To_autoscaling_MetricStatus, - Convert_autoscaling_MetricStatus_To_v1_MetricStatus, - Convert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource, - Convert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource, - Convert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus, - Convert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus, - Convert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource, - Convert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource, - Convert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus, - Convert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus, - Convert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource, - Convert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource, - Convert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus, - Convert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus, - Convert_v1_Scale_To_autoscaling_Scale, - Convert_autoscaling_Scale_To_v1_Scale, - Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec, - Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec, - Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus, - Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus, - ) -} - -func autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - return nil -} - -func Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { - return autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s) -} - -func autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out 
*CrossVersionObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - return nil -} - -func Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { - return autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in, out, s) -} - -func autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]autoscaling.HorizontalPodAutoscaler, len(*in)) - for i := range *in { - if err := Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { - return autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s) -} - -func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HorizontalPodAutoscaler, len(*in)) - for i := range *in { - if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]HorizontalPodAutoscaler, 0) - } - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in, out, s) -} - -func autoConvert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if err := 
Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { - return err - } - out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) - out.MaxReplicas = in.MaxReplicas - // WARNING: in.TargetCPUUtilizationPercentage requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { - return err - } - out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) - out.MaxReplicas = in.MaxReplicas - // WARNING: in.Metrics requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { - out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) - out.LastScaleTime = (*meta_v1.Time)(unsafe.Pointer(in.LastScaleTime)) - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - // WARNING: in.CurrentCPUUtilizationPercentage requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { - out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) - out.LastScaleTime = (*meta_v1.Time)(unsafe.Pointer(in.LastScaleTime)) - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - // WARNING: in.CurrentMetrics requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error { - out.Type = autoscaling.MetricSourceType(in.Type) - out.Object = (*autoscaling.ObjectMetricSource)(unsafe.Pointer(in.Object)) - out.Pods = (*autoscaling.PodsMetricSource)(unsafe.Pointer(in.Pods)) - out.Resource = (*autoscaling.ResourceMetricSource)(unsafe.Pointer(in.Resource)) - return nil -} - -func Convert_v1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error { - return autoConvert_v1_MetricSpec_To_autoscaling_MetricSpec(in, out, s) -} - -func autoConvert_autoscaling_MetricSpec_To_v1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { - out.Type = MetricSourceType(in.Type) - out.Object = (*ObjectMetricSource)(unsafe.Pointer(in.Object)) - out.Pods = (*PodsMetricSource)(unsafe.Pointer(in.Pods)) - out.Resource = (*ResourceMetricSource)(unsafe.Pointer(in.Resource)) - return nil -} - -func Convert_autoscaling_MetricSpec_To_v1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { - return autoConvert_autoscaling_MetricSpec_To_v1_MetricSpec(in, out, s) -} - -func autoConvert_v1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { - out.Type = autoscaling.MetricSourceType(in.Type) - out.Object = (*autoscaling.ObjectMetricStatus)(unsafe.Pointer(in.Object)) - out.Pods = 
(*autoscaling.PodsMetricStatus)(unsafe.Pointer(in.Pods)) - out.Resource = (*autoscaling.ResourceMetricStatus)(unsafe.Pointer(in.Resource)) - return nil -} - -func Convert_v1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { - return autoConvert_v1_MetricStatus_To_autoscaling_MetricStatus(in, out, s) -} - -func autoConvert_autoscaling_MetricStatus_To_v1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { - out.Type = MetricSourceType(in.Type) - out.Object = (*ObjectMetricStatus)(unsafe.Pointer(in.Object)) - out.Pods = (*PodsMetricStatus)(unsafe.Pointer(in.Pods)) - out.Resource = (*ResourceMetricStatus)(unsafe.Pointer(in.Resource)) - return nil -} - -func Convert_autoscaling_MetricStatus_To_v1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { - return autoConvert_autoscaling_MetricStatus_To_v1_MetricStatus(in, out, s) -} - -func autoConvert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { - if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - out.MetricName = in.MetricName - out.TargetValue = in.TargetValue - return nil -} - -func Convert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { - return autoConvert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in, out, s) -} - -func autoConvert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, s conversion.Scope) error { - if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - out.MetricName = in.MetricName - out.TargetValue = in.TargetValue - return nil -} - -func Convert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, s conversion.Scope) error { - return autoConvert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(in, out, s) -} - -func autoConvert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error { - if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - out.MetricName = in.MetricName - out.CurrentValue = in.CurrentValue - return nil -} - -func Convert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error { - return autoConvert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in, out, s) -} - -func autoConvert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { - if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - out.MetricName = in.MetricName - out.CurrentValue = in.CurrentValue - return nil -} - -func Convert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { - return 
autoConvert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(in, out, s) -} - -func autoConvert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { - out.MetricName = in.MetricName - out.TargetAverageValue = in.TargetAverageValue - return nil -} - -func Convert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { - return autoConvert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(in, out, s) -} - -func autoConvert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { - out.MetricName = in.MetricName - out.TargetAverageValue = in.TargetAverageValue - return nil -} - -func Convert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { - return autoConvert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(in, out, s) -} - -func autoConvert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { - out.MetricName = in.MetricName - out.CurrentAverageValue = in.CurrentAverageValue - return nil -} - -func Convert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { - return autoConvert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in, out, s) -} - -func autoConvert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { - out.MetricName = in.MetricName - out.CurrentAverageValue = in.CurrentAverageValue - return nil -} - -func Convert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { - return autoConvert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(in, out, s) -} - -func autoConvert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { - out.Name = api.ResourceName(in.Name) - out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization)) - out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) - return nil -} - -func Convert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { - return autoConvert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in, out, s) -} - -func autoConvert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { - out.Name = api_v1.ResourceName(in.Name) - out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization)) - out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) - return nil -} - -func Convert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { - return autoConvert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(in, out, s) -} - -func autoConvert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s 
conversion.Scope) error { - out.Name = api.ResourceName(in.Name) - out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) - out.CurrentAverageValue = in.CurrentAverageValue - return nil -} - -func Convert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { - return autoConvert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in, out, s) -} - -func autoConvert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { - out.Name = api_v1.ResourceName(in.Name) - out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) - out.CurrentAverageValue = in.CurrentAverageValue - return nil -} - -func Convert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { - return autoConvert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(in, out, s) -} - -func autoConvert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { - return autoConvert_v1_Scale_To_autoscaling_Scale(in, out, s) -} - -func autoConvert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error { - return autoConvert_autoscaling_Scale_To_v1_Scale(in, out, s) -} - -func autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -func Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { - return autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in, out, s) -} - -func autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -func Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { - return autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in, out, s) -} - -func autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.Selector = in.Selector - return nil -} - -func Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { - return autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in, out, s) -} - -func autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s 
conversion.Scope) error { - out.Replicas = in.Replicas - out.Selector = in.Selector - return nil -} - -func Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { - return autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go deleted file mode 100644 index 07047c2b0..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,312 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1 - -import ( - resource "k8s.io/apimachinery/pkg/api/resource" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CrossVersionObjectReference, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HorizontalPodAutoscaler, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HorizontalPodAutoscalerList, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HorizontalPodAutoscalerSpec, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HorizontalPodAutoscalerStatus, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_MetricSpec, InType: reflect.TypeOf(&MetricSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_MetricStatus, InType: reflect.TypeOf(&MetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectMetricSource, InType: reflect.TypeOf(&ObjectMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectMetricStatus, InType: reflect.TypeOf(&ObjectMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodsMetricSource, InType: reflect.TypeOf(&PodsMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodsMetricStatus, InType: reflect.TypeOf(&PodsMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceMetricSource, InType: reflect.TypeOf(&ResourceMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceMetricStatus, InType: reflect.TypeOf(&ResourceMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Scale, InType: reflect.TypeOf(&Scale{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, - ) -} - -func DeepCopy_v1_CrossVersionObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CrossVersionObjectReference) - out := out.(*CrossVersionObjectReference) - *out = *in - return nil - } -} - -func DeepCopy_v1_HorizontalPodAutoscaler(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscaler) - out := out.(*HorizontalPodAutoscaler) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_HorizontalPodAutoscalerList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscalerList) - out := out.(*HorizontalPodAutoscalerList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HorizontalPodAutoscaler, len(*in)) - for i := range *in { - if err := DeepCopy_v1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_HorizontalPodAutoscalerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscalerSpec) - out := out.(*HorizontalPodAutoscalerSpec) - *out = *in - if in.MinReplicas != nil { - in, out := &in.MinReplicas, &out.MinReplicas - *out = new(int32) - **out = **in - } - if in.TargetCPUUtilizationPercentage != nil { - in, out := &in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_HorizontalPodAutoscalerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscalerStatus) - out := out.(*HorizontalPodAutoscalerStatus) - *out = *in - if in.ObservedGeneration != nil { - in, out := &in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = **in - } - if in.LastScaleTime != nil { - in, out := &in.LastScaleTime, &out.LastScaleTime - *out = new(meta_v1.Time) - **out = (*in).DeepCopy() - } - if in.CurrentCPUUtilizationPercentage != nil { - in, out := &in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_v1_MetricSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*MetricSpec) - out := out.(*MetricSpec) - *out = *in - if in.Object != nil { - in, out := &in.Object, &out.Object - *out = new(ObjectMetricSource) - if err := DeepCopy_v1_ObjectMetricSource(*in, *out, c); err != nil { - return err - } - } - if in.Pods != nil { - in, out := &in.Pods, &out.Pods - *out = new(PodsMetricSource) - if err := DeepCopy_v1_PodsMetricSource(*in, *out, c); err != nil { - return err - } - } - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = new(ResourceMetricSource) - if err := DeepCopy_v1_ResourceMetricSource(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1_MetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - 
{ - in := in.(*MetricStatus) - out := out.(*MetricStatus) - *out = *in - if in.Object != nil { - in, out := &in.Object, &out.Object - *out = new(ObjectMetricStatus) - if err := DeepCopy_v1_ObjectMetricStatus(*in, *out, c); err != nil { - return err - } - } - if in.Pods != nil { - in, out := &in.Pods, &out.Pods - *out = new(PodsMetricStatus) - if err := DeepCopy_v1_PodsMetricStatus(*in, *out, c); err != nil { - return err - } - } - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = new(ResourceMetricStatus) - if err := DeepCopy_v1_ResourceMetricStatus(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1_ObjectMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ObjectMetricSource) - out := out.(*ObjectMetricSource) - *out = *in - out.TargetValue = in.TargetValue.DeepCopy() - return nil - } -} - -func DeepCopy_v1_ObjectMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ObjectMetricStatus) - out := out.(*ObjectMetricStatus) - *out = *in - out.CurrentValue = in.CurrentValue.DeepCopy() - return nil - } -} - -func DeepCopy_v1_PodsMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodsMetricSource) - out := out.(*PodsMetricSource) - *out = *in - out.TargetAverageValue = in.TargetAverageValue.DeepCopy() - return nil - } -} - -func DeepCopy_v1_PodsMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodsMetricStatus) - out := out.(*PodsMetricStatus) - *out = *in - out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() - return nil - } -} - -func DeepCopy_v1_ResourceMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceMetricSource) - out := out.(*ResourceMetricSource) - *out = *in - if in.TargetAverageUtilization != nil { - in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization - *out = new(int32) - **out = **in - } - if in.TargetAverageValue != nil { - in, out := &in.TargetAverageValue, &out.TargetAverageValue - *out = new(resource.Quantity) - **out = (*in).DeepCopy() - } - return nil - } -} - -func DeepCopy_v1_ResourceMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceMetricStatus) - out := out.(*ResourceMetricStatus) - *out = *in - if in.CurrentAverageUtilization != nil { - in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization - *out = new(int32) - **out = **in - } - out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() - return nil - } -} - -func DeepCopy_v1_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Scale) - out := out.(*Scale) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - return nil - } -} - -func DeepCopy_v1_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ScaleSpec) - out := out.(*ScaleSpec) - *out = *in - return nil - } -} - -func DeepCopy_v1_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ScaleStatus) - out := out.(*ScaleStatus) - *out = *in - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.defaults.go deleted file mode 100644 index af20e9884..000000000 --- 
a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.defaults.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by defaulter-gen. Do not edit it manually! - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&HorizontalPodAutoscaler{}, func(obj interface{}) { SetObjectDefaults_HorizontalPodAutoscaler(obj.(*HorizontalPodAutoscaler)) }) - scheme.AddTypeDefaultingFunc(&HorizontalPodAutoscalerList{}, func(obj interface{}) { - SetObjectDefaults_HorizontalPodAutoscalerList(obj.(*HorizontalPodAutoscalerList)) - }) - return nil -} - -func SetObjectDefaults_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler) { - SetDefaults_HorizontalPodAutoscaler(in) -} - -func SetObjectDefaults_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_HorizontalPodAutoscaler(a) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/defaults.go deleted file mode 100644 index 6cc60d298..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/defaults.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v2alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/pkg/api/v1" - "k8s.io/client-go/pkg/apis/autoscaling" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return scheme.AddDefaultingFuncs( - SetDefaults_HorizontalPodAutoscaler, - ) -} - -func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) { - if obj.Spec.MinReplicas == nil { - minReplicas := int32(1) - obj.Spec.MinReplicas = &minReplicas - } - - if len(obj.Spec.Metrics) == 0 { - utilizationDefaultVal := int32(autoscaling.DefaultCPUUtilization) - obj.Spec.Metrics = []MetricSpec{ - { - Type: ResourceMetricSourceType, - Resource: &ResourceMetricSource{ - Name: v1.ResourceCPU, - TargetAverageUtilization: &utilizationDefaultVal, - }, - }, - } - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/doc.go deleted file mode 100644 index a9fe60b1c..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2alpha1 diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.pb.go deleted file mode 100644 index 69e71edd9..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.pb.go +++ /dev/null @@ -1,3062 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v2alpha1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - HorizontalPodAutoscaler - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricSpec - MetricStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus -*/ -package v2alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } -func (*CrossVersionObjectReference) ProtoMessage() {} -func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} -} - -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } -func (*HorizontalPodAutoscalerList) ProtoMessage() {} -func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} - -func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } -func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} -func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} -} - -func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } -func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} -func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} -} - -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } 
-func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func init() { - proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.CrossVersionObjectReference") - proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerList") - proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerSpec") - proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerStatus") - proto.RegisterType((*MetricSpec)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.MetricSpec") - proto.RegisterType((*MetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.MetricStatus") - proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.ObjectMetricSource") - proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.ObjectMetricStatus") - proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.PodsMetricSource") - proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.PodsMetricStatus") - proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.ResourceMetricSource") - proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.ResourceMetricStatus") -} -func (m *CrossVersionObjectReference) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *CrossVersionObjectReference) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - return i, nil -} - -func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) - if 
err != nil { - return 0, err - } - i += n2 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HorizontalPodAutoscalerSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ScaleTargetRef.Size())) - n5, err := m.ScaleTargetRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.MinReplicas != nil { - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.MinReplicas)) - } - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.MaxReplicas)) - if len(m.Metrics) > 0 { - for _, msg := range m.Metrics { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ObservedGeneration != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastScaleTime.Size())) - n6, err := m.LastScaleTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 - } - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentReplicas)) - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.DesiredReplicas)) - if len(m.CurrentMetrics) > 0 { - for _, msg := range m.CurrentMetrics { - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *MetricSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *MetricSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - if m.Object != nil { - 
data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) - n7, err := m.Object.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.Pods != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) - n8, err := m.Pods.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if m.Resource != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) - n9, err := m.Resource.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n9 - } - return i, nil -} - -func (m *MetricStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *MetricStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - if m.Object != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) - n10, err := m.Object.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n10 - } - if m.Pods != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) - n11, err := m.Pods.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n11 - } - if m.Resource != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) - n12, err := m.Resource.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n12 - } - return i, nil -} - -func (m *ObjectMetricSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ObjectMetricSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) - n13, err := m.Target.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n13 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) - i += copy(data[i:], m.MetricName) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.TargetValue.Size())) - n14, err := m.TargetValue.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n14 - return i, nil -} - -func (m *ObjectMetricStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ObjectMetricStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) - n15, err := m.Target.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n15 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) - i += copy(data[i:], m.MetricName) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentValue.Size())) - n16, err := m.CurrentValue.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n16 - return i, nil -} - -func (m *PodsMetricSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodsMetricSource) 
MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) - i += copy(data[i:], m.MetricName) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) - n17, err := m.TargetAverageValue.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n17 - return i, nil -} - -func (m *PodsMetricStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodsMetricStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) - i += copy(data[i:], m.MetricName) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) - n18, err := m.CurrentAverageValue.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n18 - return i, nil -} - -func (m *ResourceMetricSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceMetricSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - if m.TargetAverageUtilization != nil { - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.TargetAverageUtilization)) - } - if m.TargetAverageValue != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) - n19, err := m.TargetAverageValue.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n19 - } - return i, nil -} - -func (m *ResourceMetricStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceMetricStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - if m.CurrentAverageUtilization != nil { - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.CurrentAverageUtilization)) - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) - n20, err := m.CurrentAverageValue.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n20 - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *CrossVersionObjectReference) Size() (n int) { 
- var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HorizontalPodAutoscaler) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HorizontalPodAutoscalerList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HorizontalPodAutoscalerSpec) Size() (n int) { - var l int - _ = l - l = m.ScaleTargetRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.MinReplicas != nil { - n += 1 + sovGenerated(uint64(*m.MinReplicas)) - } - n += 1 + sovGenerated(uint64(m.MaxReplicas)) - if len(m.Metrics) > 0 { - for _, e := range m.Metrics { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HorizontalPodAutoscalerStatus) Size() (n int) { - var l int - _ = l - if m.ObservedGeneration != nil { - n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - l = m.LastScaleTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.CurrentReplicas)) - n += 1 + sovGenerated(uint64(m.DesiredReplicas)) - if len(m.CurrentMetrics) > 0 { - for _, e := range m.CurrentMetrics { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *MetricSpec) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.Object != nil { - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Pods != nil { - l = m.Pods.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *MetricStatus) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.Object != nil { - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Pods != nil { - l = m.Pods.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ObjectMetricSource) Size() (n int) { - var l int - _ = l - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.TargetValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ObjectMetricStatus) Size() (n int) { - var l int - _ = l - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.CurrentValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodsMetricSource) Size() (n int) { - var l int - _ = l - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.TargetAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodsMetricStatus) Size() (n int) { - var l int - _ = l - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.CurrentAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceMetricSource) Size() (n int) { - var l int - _ = l - l = len(m.Name) 
- n += 1 + l + sovGenerated(uint64(l)) - if m.TargetAverageUtilization != nil { - n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) - } - if m.TargetAverageValue != nil { - l = m.TargetAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ResourceMetricStatus) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.CurrentAverageUtilization != nil { - n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) - } - l = m.CurrentAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CrossVersionObjectReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CrossVersionObjectReference{`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscaler) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, - `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, - `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, - `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, - `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, - `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, - `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, - `CurrentMetrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), 
"MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *MetricSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricSpec{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *MetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricStatus{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ObjectMetricSource{`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectMetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ObjectMetricStatus{`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodsMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodsMetricSource{`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodsMetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodsMetricStatus{`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceMetricSource{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, 
"") - return s -} -func (this *ResourceMetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceMetricStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
HorizontalPodAutoscalerList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, HorizontalPodAutoscaler{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ScaleTargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift 
- if b < 0x80 { - break - } - } - m.MinReplicas = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) - } - m.MaxReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metrics = append(m.Metrics, MetricSpec{}) - if err := m.Metrics[len(m.Metrics)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ObservedGeneration = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - } - if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) - } - m.CurrentReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) - } - m.DesiredReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentMetrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CurrentMetrics = append(m.CurrentMetrics, MetricStatus{}) - if err := m.CurrentMetrics[len(m.CurrentMetrics)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = MetricSourceType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Object == nil { - m.Object = &ObjectMetricSource{} - } - if err := 
m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pods == nil { - m.Pods = &PodsMetricSource{} - } - if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resource == nil { - m.Resource = &ResourceMetricSource{} - } - if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = MetricSourceType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Object == nil { - m.Object = &ObjectMetricStatus{} - } - if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pods == nil { - m.Pods = &PodsMetricStatus{} - } - if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resource == nil { - m.Resource = &ResourceMetricStatus{} - } - if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectMetricSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - 
stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectMetricStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - 
msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodsMetricSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodsMetricStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceMetricSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetAverageUtilization = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field TargetAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceMetricStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CurrentAverageUtilization = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - 
return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -var fileDescriptorGenerated = []byte{ - // 1208 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x57, 0x5b, 0x6f, 0x1b, 0x45, - 0x14, 0x8e, 0x2f, 0x49, 0xc3, 0x38, 0x37, 0x26, 0x55, 0xea, 0x26, 0xd4, 0x8e, 0xf6, 0xa9, 0x54, - 0xb0, 0x4b, 0x4c, 0x41, 0x54, 0x08, 0xa1, 0xd8, 0x5c, 0x5a, 0x11, 0xa7, 0x61, 0x1a, 0x2a, 0x04, - 0x48, 0x30, 0x59, 0x4f, 0x9c, 0x21, 0xde, 0x8b, 0x76, 0x66, 0xad, 0x26, 0x52, 0x25, 0x7e, 0x00, - 0x0f, 0xbc, 0xf0, 0x13, 0x90, 0xf8, 0x07, 0x3c, 0x83, 0x84, 0x94, 0xc7, 0xf2, 0xc6, 0x93, 0x45, - 0xdc, 0x37, 0x7e, 0x42, 0x25, 0x2e, 0xda, 0x99, 0xf1, 0x5e, 0xbc, 0x5e, 0x13, 0x87, 0xb4, 0x82, - 0x37, 0xef, 0xcc, 0x39, 0xdf, 0x77, 0xce, 0xf9, 0xce, 0x9c, 0x19, 0x83, 0xb7, 0x0f, 0xdf, 0x60, - 0x3a, 0x75, 0x8c, 0x43, 0x7f, 0x8f, 0x78, 0x36, 0xe1, 0x84, 0x19, 0xee, 0x61, 0xdb, 0xc0, 0x2e, - 0x65, 0x06, 0xf6, 0xb9, 0xc3, 0x4c, 0xdc, 0xa1, 0x76, 0xdb, 0xe8, 0xd6, 0x70, 0xc7, 0x3d, 0xc0, - 0x1b, 0x46, 0x9b, 0xd8, 0xc4, 0xc3, 0x9c, 0xb4, 0x74, 0xd7, 0x73, 0xb8, 0x03, 0x0d, 0x09, 0xa0, - 0x47, 0x00, 0xba, 0x7b, 0xd8, 0xd6, 0x03, 0x00, 0x3d, 0x06, 0xa0, 0x0f, 0x00, 0x56, 0x5f, 0x6e, - 0x53, 0x7e, 0xe0, 0xef, 0xe9, 0xa6, 0x63, 0x19, 0x6d, 0xa7, 0xed, 0x18, 0x02, 0x67, 0xcf, 0xdf, - 0x17, 0x5f, 0xe2, 0x43, 0xfc, 0x92, 0xf8, 0xab, 0x37, 0x55, 0x80, 0xd8, 0xa5, 0x16, 0x36, 0x0f, - 0xa8, 
0x4d, 0xbc, 0xa3, 0x41, 0x88, 0x86, 0x47, 0x98, 0xe3, 0x7b, 0x26, 0x19, 0x8e, 0x6a, 0xac, - 0x17, 0x33, 0x2c, 0xc2, 0xb1, 0xd1, 0x4d, 0xe5, 0xb2, 0x6a, 0x64, 0x79, 0x79, 0xbe, 0xcd, 0xa9, - 0x95, 0xa6, 0x79, 0xfd, 0x9f, 0x1c, 0x98, 0x79, 0x40, 0x2c, 0x9c, 0xf2, 0x7b, 0x35, 0xcb, 0xcf, - 0xe7, 0xb4, 0x63, 0x50, 0x9b, 0x33, 0xee, 0x8d, 0xcb, 0x89, 0x11, 0xaf, 0x4b, 0xbc, 0x28, 0x21, - 0xf2, 0x00, 0x5b, 0x6e, 0x87, 0x8c, 0xca, 0xe9, 0xa5, 0x4c, 0x81, 0x47, 0x59, 0xdf, 0x3a, 0x6b, - 0x3b, 0xa4, 0x5c, 0xb5, 0x6f, 0x73, 0x60, 0xad, 0xe1, 0x39, 0x8c, 0xdd, 0x27, 0x1e, 0xa3, 0x8e, - 0x7d, 0x77, 0xef, 0x4b, 0x62, 0x72, 0x44, 0xf6, 0x89, 0x47, 0x6c, 0x93, 0xc0, 0x75, 0x50, 0x3c, - 0xa4, 0x76, 0xab, 0x9c, 0x5b, 0xcf, 0x5d, 0x7f, 0xae, 0x3e, 0x77, 0xd2, 0xab, 0x4e, 0xf5, 0x7b, - 0xd5, 0xe2, 0x07, 0xd4, 0x6e, 0x21, 0xb1, 0x13, 0x58, 0xd8, 0xd8, 0x22, 0xe5, 0x7c, 0xd2, 0x62, - 0x1b, 0x5b, 0x04, 0x89, 0x1d, 0x58, 0x03, 0x00, 0xbb, 0x54, 0x11, 0x94, 0x0b, 0xc2, 0x0e, 0x2a, - 0x3b, 0xb0, 0xb9, 0x73, 0x47, 0xed, 0xa0, 0x98, 0x95, 0xf6, 0x38, 0x0f, 0xae, 0xdc, 0x76, 0x3c, - 0x7a, 0xec, 0xd8, 0x1c, 0x77, 0x76, 0x9c, 0xd6, 0xa6, 0xca, 0x83, 0x78, 0xf0, 0x0b, 0x30, 0x1b, - 0xf4, 0x42, 0x0b, 0x73, 0x2c, 0xe2, 0x2a, 0xd5, 0x5e, 0xd1, 0x55, 0x3f, 0xc7, 0xa5, 0x89, 0x3a, - 0x3a, 0xb0, 0xd6, 0xbb, 0x1b, 0xba, 0x4c, 0xae, 0x49, 0x38, 0x8e, 0xf8, 0xa3, 0x35, 0x14, 0xa2, - 0x42, 0x1b, 0x14, 0x99, 0x4b, 0x4c, 0x91, 0x53, 0xa9, 0xb6, 0xa5, 0x4f, 0x78, 0x5a, 0xf4, 0x8c, - 0xc8, 0xef, 0xb9, 0xc4, 0x8c, 0x2a, 0x14, 0x7c, 0x21, 0xc1, 0x03, 0xbb, 0x60, 0x86, 0x71, 0xcc, - 0x7d, 0x26, 0xaa, 0x53, 0xaa, 0x6d, 0x5f, 0x18, 0xa3, 0x40, 0xad, 0x2f, 0x28, 0xce, 0x19, 0xf9, - 0x8d, 0x14, 0x9b, 0xf6, 0x7b, 0x0e, 0xac, 0x65, 0x78, 0x6e, 0x51, 0xc6, 0xe1, 0x67, 0xa9, 0x4a, - 0xeb, 0x67, 0xab, 0x74, 0xe0, 0x2d, 0xea, 0xbc, 0xa4, 0x98, 0x67, 0x07, 0x2b, 0xb1, 0x2a, 0x5b, - 0x60, 0x9a, 0x72, 0x62, 0xb1, 0x72, 0x7e, 0xbd, 0x70, 0xbd, 0x54, 0xbb, 0x7d, 0x51, 0x49, 0xd7, - 0xe7, 0x15, 0xe9, 0xf4, 0x9d, 0x00, 0x1e, 0x49, 0x16, 0xed, 0xcf, 0x7c, 0x66, 0xb2, 0x81, 0x14, - 0xf0, 0xeb, 0x1c, 0x58, 0x10, 0x9f, 0xbb, 0xd8, 0x6b, 0x93, 0xe0, 0x0c, 0xa8, 0x9c, 0x27, 0xd7, - 0x7f, 0xcc, 0x89, 0xaa, 0xaf, 0xa8, 0xe0, 0x16, 0xee, 0x25, 0xb8, 0xd0, 0x10, 0x37, 0xdc, 0x00, - 0x25, 0x8b, 0xda, 0x88, 0xb8, 0x1d, 0x6a, 0x62, 0x26, 0x5a, 0x71, 0xba, 0xbe, 0xd8, 0xef, 0x55, - 0x4b, 0xcd, 0x68, 0x19, 0xc5, 0x6d, 0xe0, 0x6b, 0xa0, 0x64, 0xe1, 0x07, 0xa1, 0x4b, 0x41, 0xb8, - 0x2c, 0x2b, 0xbe, 0x52, 0x33, 0xda, 0x42, 0x71, 0x3b, 0xb8, 0x0f, 0x2e, 0x59, 0x84, 0x7b, 0xd4, - 0x64, 0xe5, 0xa2, 0x50, 0xe2, 0xcd, 0x89, 0x13, 0x6e, 0x0a, 0x7f, 0xd1, 0xdf, 0x8b, 0x8a, 0xef, - 0x92, 0x5c, 0x63, 0x68, 0x00, 0xae, 0xfd, 0x52, 0x00, 0xd7, 0xc6, 0xf6, 0x29, 0x7c, 0x0f, 0x40, - 0x67, 0x4f, 0x8c, 0xc9, 0xd6, 0xfb, 0x72, 0x50, 0x05, 0x13, 0x23, 0x50, 0xa1, 0x50, 0x5f, 0xe9, - 0xf7, 0xaa, 0xf0, 0x6e, 0x6a, 0x17, 0x8d, 0xf0, 0x80, 0x26, 0x98, 0xef, 0x60, 0xc6, 0x65, 0x85, - 0xa9, 0x1a, 0x4e, 0xa5, 0xda, 0x8d, 0xb3, 0x35, 0x6f, 0xe0, 0x51, 0x7f, 0xbe, 0xdf, 0xab, 0xce, - 0x6f, 0xc5, 0x41, 0x50, 0x12, 0x13, 0x6e, 0x82, 0x45, 0xd3, 0xf7, 0x3c, 0x62, 0xf3, 0xa1, 0x8a, - 0x5f, 0x51, 0x15, 0x58, 0x6c, 0x24, 0xb7, 0xd1, 0xb0, 0x7d, 0x00, 0xd1, 0x22, 0x8c, 0x7a, 0xa4, - 0x15, 0x42, 0x14, 0x93, 0x10, 0xef, 0x24, 0xb7, 0xd1, 0xb0, 0x3d, 0x7c, 0x08, 0x16, 0x14, 0xaa, - 0xaa, 0x77, 0x79, 0x5a, 0x68, 0xf8, 0xd6, 0x79, 0x35, 0x94, 0x13, 0x23, 0xec, 0xd2, 0x46, 0x02, - 0x1c, 0x0d, 0x91, 0x69, 0x7f, 0xe4, 0x01, 0x88, 0xc4, 0x87, 0x37, 0x41, 0x91, 0x1f, 0xb9, 0x44, - 0x5d, 0x17, 0xeb, 0x83, 0x51, 
0xb7, 0x7b, 0xe4, 0x92, 0x27, 0xbd, 0xea, 0x92, 0xb2, 0x14, 0xb7, - 0x7f, 0xb0, 0x86, 0x84, 0x35, 0x6c, 0x83, 0x19, 0x47, 0x9c, 0x12, 0xa5, 0x53, 0x63, 0xe2, 0xd8, - 0xc3, 0x29, 0x1e, 0xc2, 0xd7, 0x41, 0x30, 0xef, 0xd4, 0xe1, 0x53, 0xf0, 0xf0, 0x73, 0x50, 0x74, - 0x9d, 0xd6, 0x60, 0xca, 0x6e, 0x4e, 0x4c, 0xb3, 0xe3, 0xb4, 0x58, 0x82, 0x64, 0x36, 0xc8, 0x2e, - 0x58, 0x45, 0x02, 0x18, 0x3a, 0x60, 0x76, 0xf0, 0xba, 0x11, 0x4a, 0x96, 0x6a, 0xef, 0x4e, 0x4c, - 0x82, 0x14, 0x40, 0x82, 0x68, 0x2e, 0x98, 0xa1, 0x83, 0x1d, 0x14, 0x92, 0x68, 0x7f, 0xe5, 0xc1, - 0x5c, 0x5c, 0xb8, 0xff, 0x86, 0x02, 0xb2, 0x87, 0x9e, 0xb2, 0x02, 0x92, 0xe4, 0x19, 0x28, 0x20, - 0x89, 0xb2, 0x14, 0xf8, 0x2e, 0x0f, 0x60, 0xba, 0xfd, 0x20, 0x07, 0x33, 0x5c, 0xcc, 0xf2, 0xa7, - 0x72, 0x89, 0x84, 0x17, 0xba, 0xba, 0x2f, 0x14, 0x57, 0xf0, 0xd4, 0x92, 0xd3, 0x76, 0x3b, 0x7a, - 0x92, 0x85, 0x4f, 0x9d, 0x66, 0xb8, 0x83, 0x62, 0x56, 0x90, 0x80, 0x92, 0xf4, 0xbe, 0x8f, 0x3b, - 0x3e, 0x51, 0xca, 0x8c, 0xbd, 0xe7, 0xf5, 0x41, 0xf2, 0xfa, 0x87, 0x3e, 0xb6, 0x39, 0xe5, 0x47, - 0xd1, 0x2d, 0xb3, 0x1b, 0x41, 0xa1, 0x38, 0xae, 0xf6, 0xfd, 0x70, 0x9d, 0x64, 0xbf, 0xfe, 0x7f, - 0xea, 0x74, 0x00, 0xe6, 0xd4, 0xf0, 0xfb, 0x37, 0x85, 0xba, 0xac, 0x58, 0xe6, 0x1a, 0x31, 0x2c, - 0x94, 0x40, 0xd6, 0x7e, 0xca, 0x81, 0xa5, 0xe1, 0x51, 0x33, 0x14, 0x72, 0xee, 0x4c, 0x21, 0x1f, - 0x03, 0x28, 0x13, 0xde, 0xec, 0x12, 0x0f, 0xb7, 0x89, 0x0c, 0x3c, 0x7f, 0xae, 0xc0, 0x57, 0x15, - 0x17, 0xdc, 0x4d, 0x21, 0xa2, 0x11, 0x2c, 0xda, 0xcf, 0xc9, 0x24, 0xa4, 0xda, 0xe7, 0x49, 0xe2, - 0x21, 0x58, 0x56, 0xd5, 0xb9, 0x80, 0x2c, 0xd6, 0x14, 0xd9, 0x72, 0x23, 0x0d, 0x89, 0x46, 0xf1, - 0x68, 0x3f, 0xe4, 0xc1, 0xe5, 0x51, 0x23, 0x19, 0x36, 0xd5, 0x1f, 0x1f, 0x99, 0xc5, 0xad, 0xf8, - 0x1f, 0x9f, 0x27, 0xbd, 0xea, 0x8b, 0xe3, 0xfe, 0xc1, 0x85, 0x13, 0x26, 0xf6, 0x2f, 0xe9, 0x63, - 0x50, 0x4e, 0x54, 0xf1, 0x23, 0x4e, 0x3b, 0xf4, 0x58, 0xbe, 0x80, 0xe4, 0xe3, 0xef, 0x85, 0x7e, - 0xaf, 0x5a, 0xde, 0xcd, 0xb0, 0x41, 0x99, 0xde, 0xb0, 0x3b, 0xb2, 0x0b, 0xce, 0xd7, 0xbe, 0x2b, - 0x13, 0x74, 0xc0, 0x8f, 0xe9, 0xca, 0xc9, 0x2e, 0xb8, 0xe0, 0xca, 0x7d, 0x0a, 0xae, 0x26, 0x85, - 0x4b, 0x97, 0xee, 0x5a, 0xbf, 0x57, 0xbd, 0xda, 0xc8, 0x32, 0x42, 0xd9, 0xfe, 0x59, 0xdd, 0x57, - 0x78, 0x36, 0xdd, 0x57, 0xbf, 0x71, 0x72, 0x5a, 0x99, 0x7a, 0x74, 0x5a, 0x99, 0xfa, 0xf5, 0xb4, - 0x32, 0xf5, 0x55, 0xbf, 0x92, 0x3b, 0xe9, 0x57, 0x72, 0x8f, 0xfa, 0x95, 0xdc, 0x6f, 0xfd, 0x4a, - 0xee, 0x9b, 0xc7, 0x95, 0xa9, 0x4f, 0x66, 0x07, 0x83, 0xf0, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x95, 0xf2, 0xec, 0x8a, 0x16, 0x12, 0x00, 0x00, -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/register.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/register.go deleted file mode 100644 index 2fc437f93..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/register.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v2alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "autoscaling" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2alpha1"} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &HorizontalPodAutoscaler{}, - &HorizontalPodAutoscalerList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.generated.go deleted file mode 100644 index 9eb6919a3..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.generated.go +++ /dev/null @@ -1,4621 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v2alpha1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_resource "k8s.io/apimachinery/pkg/api/resource" - pkg3_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkg4_types "k8s.io/apimachinery/pkg/types" - pkg2_v1 "k8s.io/client-go/pkg/api/v1" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_resource.Quantity - var v1 pkg3_v1.Time - var v2 pkg4_types.UID - var v3 pkg2_v1.ResourceName - var v4 time.Time - _, _, _, _, _ = v0, v1, v2, v3, v4 - } -} - -func (x *CrossVersionObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CrossVersionObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv6 := &x.Name - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv8 := &x.APIVersion - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv11 := &x.Kind - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv13 := &x.Name - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 
:= z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.MinReplicas != nil - yyq2[3] = len(x.Metrics) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.ScaleTargetRef - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scaleTargetRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ScaleTargetRef - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy9 := *x.MinReplicas - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy11 := *x.MinReplicas - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(yy11)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Metrics == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - h.encSliceMetricSpec(([]MetricSpec)(x.Metrics), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metrics")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Metrics == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - h.encSliceMetricSpec(([]MetricSpec)(x.Metrics), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "scaleTargetRef": - if r.TryDecodeAsNil() { - x.ScaleTargetRef = CrossVersionObjectReference{} - } else { - yyv4 := &x.ScaleTargetRef - yyv4.CodecDecodeSelf(d) - } - case "minReplicas": - if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } - } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int32) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) - } - } - case "maxReplicas": - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - yyv7 := &x.MaxReplicas - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*int32)(yyv7)) = int32(r.DecodeInt(32)) - } - } - case "metrics": - if r.TryDecodeAsNil() { - x.Metrics = nil - } else { - yyv9 := &x.Metrics - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceMetricSpec((*[]MetricSpec)(yyv9), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ScaleTargetRef = CrossVersionObjectReference{} - } else { - yyv12 := &x.ScaleTargetRef - yyv12.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } - } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int32) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - yyv15 := &x.MaxReplicas - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int32)(yyv15)) = int32(r.DecodeInt(32)) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
x.Metrics = nil - } else { - yyv17 := &x.Metrics - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - h.decSliceMetricSpec((*[]MetricSpec)(yyv17), d) - } - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x MetricSourceType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *MetricSourceType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *MetricSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Object != nil - yyq2[2] = x.Pods != nil - yyq2[3] = x.Resource != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Object == nil { - r.EncodeNil() - } else { - x.Object.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("object")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Object == nil { - r.EncodeNil() - } else { - x.Object.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Pods == nil { - r.EncodeNil() - } else { - x.Pods.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pods")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Pods == nil { - r.EncodeNil() - } else { - x.Pods.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Resource == nil { - r.EncodeNil() - } else { - x.Resource.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resource")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Resource == nil { - r.EncodeNil() - } else { - x.Resource.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *MetricSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *MetricSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "object": - if r.TryDecodeAsNil() { - if x.Object != nil { - x.Object = nil - } - } else { - if x.Object == nil { - x.Object = new(ObjectMetricSource) - } - x.Object.CodecDecodeSelf(d) - } - case "pods": - if r.TryDecodeAsNil() { - if x.Pods != nil { - x.Pods = nil - } - } else { - if x.Pods == nil { - x.Pods = new(PodsMetricSource) - } - x.Pods.CodecDecodeSelf(d) - } - case "resource": - if r.TryDecodeAsNil() { - if x.Resource != nil { - x.Resource = nil - } - } else { - if x.Resource == nil { - x.Resource = new(ResourceMetricSource) - } - x.Resource.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *MetricSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv9 := &x.Type - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Object != nil { - x.Object = nil - } - } else { - if x.Object == nil { - x.Object = new(ObjectMetricSource) - } - x.Object.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { 
- yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Pods != nil { - x.Pods = nil - } - } else { - if x.Pods == nil { - x.Pods = new(PodsMetricSource) - } - x.Pods.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Resource != nil { - x.Resource = nil - } - } else { - if x.Resource == nil { - x.Resource = new(ResourceMetricSource) - } - x.Resource.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ObjectMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.Target - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("target")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Target - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metricName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy12 := &x.TargetValue - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetValue")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.TargetValue - yym15 := z.EncBinary() - _ = yym15 - if false { - } else if z.HasExtensions() && z.EncExt(yy14) { - } else if !yym15 && z.IsJSONHandle() { - z.EncJSONMarshal(yy14) - } else { - z.EncFallback(yy14) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectMetricSource) CodecDecodeSelf(d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "target": - if r.TryDecodeAsNil() { - x.Target = CrossVersionObjectReference{} - } else { - yyv4 := &x.Target - yyv4.CodecDecodeSelf(d) - } - case "metricName": - if r.TryDecodeAsNil() { - x.MetricName = "" - } else { - yyv5 := &x.MetricName - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*string)(yyv5)) = r.DecodeString() - } - } - case "targetValue": - if r.TryDecodeAsNil() { - x.TargetValue = pkg1_resource.Quantity{} - } else { - yyv7 := &x.TargetValue - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Target = CrossVersionObjectReference{} - } else { - yyv10 := &x.Target - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MetricName = "" - } else { - yyv11 := &x.MetricName - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetValue = pkg1_resource.Quantity{} - } else { - yyv13 := &x.TargetValue - yym14 := z.DecBinary() - _ 
= yym14 - if false { - } else if z.HasExtensions() && z.DecExt(yyv13) { - } else if !yym14 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv13) - } else { - z.DecFallback(yyv13, false) - } - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodsMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metricName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.TargetAverageValue - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetAverageValue")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.TargetAverageValue - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodsMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodsMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default 
slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metricName": - if r.TryDecodeAsNil() { - x.MetricName = "" - } else { - yyv4 := &x.MetricName - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "targetAverageValue": - if r.TryDecodeAsNil() { - x.TargetAverageValue = pkg1_resource.Quantity{} - } else { - yyv6 := &x.TargetAverageValue - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodsMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MetricName = "" - } else { - yyv9 := &x.MetricName - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetAverageValue = pkg1_resource.Quantity{} - } else { - yyv11 := &x.TargetAverageValue - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv11) - } else { - z.DecFallback(yyv11, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.TargetAverageUtilization != nil - yyq2[2] = x.TargetAverageValue != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf4 := &x.Name - yysf4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yysf5 := &x.Name - yysf5.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.TargetAverageUtilization == nil { - r.EncodeNil() - } else { - yy7 := *x.TargetAverageUtilization - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(yy7)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetAverageUtilization")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TargetAverageUtilization == nil { - r.EncodeNil() - } else { - yy9 := *x.TargetAverageUtilization - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.TargetAverageValue == nil { - r.EncodeNil() - } else { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { - } else if !yym12 && z.IsJSONHandle() { - z.EncJSONMarshal(x.TargetAverageValue) - } else { - z.EncFallback(x.TargetAverageValue) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetAverageValue")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TargetAverageValue == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(x.TargetAverageValue) - } else { - z.EncFallback(x.TargetAverageValue) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := 
&x.Name - yyv4.CodecDecodeSelf(d) - } - case "targetAverageUtilization": - if r.TryDecodeAsNil() { - if x.TargetAverageUtilization != nil { - x.TargetAverageUtilization = nil - } - } else { - if x.TargetAverageUtilization == nil { - x.TargetAverageUtilization = new(int32) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) - } - } - case "targetAverageValue": - if r.TryDecodeAsNil() { - if x.TargetAverageValue != nil { - x.TargetAverageValue = nil - } - } else { - if x.TargetAverageValue == nil { - x.TargetAverageValue = new(pkg1_resource.Quantity) - } - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.TargetAverageValue) - } else { - z.DecFallback(x.TargetAverageValue, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv10 := &x.Name - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TargetAverageUtilization != nil { - x.TargetAverageUtilization = nil - } - } else { - if x.TargetAverageUtilization == nil { - x.TargetAverageUtilization = new(int32) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TargetAverageValue != nil { - x.TargetAverageValue = nil - } - } else { - if x.TargetAverageValue == nil { - x.TargetAverageValue = new(pkg1_resource.Quantity) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { - } else if !yym14 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.TargetAverageValue) - } else { - z.DecFallback(x.TargetAverageValue, false) - } - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - 
_, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ObservedGeneration != nil - yyq2[1] = x.LastScaleTime != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy4 := *x.ObservedGeneration - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy6 := *x.ObservedGeneration - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.LastScaleTime == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym9 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym9 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) - } else { - z.EncFallback(x.LastScaleTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LastScaleTime == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym10 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) - } else { - z.EncFallback(x.LastScaleTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(x.DesiredReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.DesiredReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.CurrentMetrics == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - h.encSliceMetricStatus(([]MetricStatus)(x.CurrentMetrics), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentMetrics")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CurrentMetrics == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - h.encSliceMetricStatus(([]MetricStatus)(x.CurrentMetrics), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "observedGeneration": - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - case "lastScaleTime": - if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } - } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg3_v1.Time) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym7 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) - } else { - z.DecFallback(x.LastScaleTime, false) - } - } - case "currentReplicas": - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - yyv8 := &x.CurrentReplicas - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(yyv8)) = int32(r.DecodeInt(32)) - } - } - case "desiredReplicas": - if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 - } else { - yyv10 := &x.DesiredReplicas - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(yyv10)) = int32(r.DecodeInt(32)) - } - } - case "currentMetrics": - if r.TryDecodeAsNil() { - x.CurrentMetrics = nil - } else { - yyv12 := &x.CurrentMetrics - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - h.decSliceMetricStatus((*[]MetricStatus)(yyv12), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x 
*HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } - } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg3_v1.Time) - } - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym18 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) - } else { - z.DecFallback(x.LastScaleTime, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - yyv19 := &x.CurrentReplicas - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int32)(yyv19)) = int32(r.DecodeInt(32)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 - } else { - yyv21 := &x.DesiredReplicas - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int32)(yyv21)) = int32(r.DecodeInt(32)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentMetrics = nil - } else { - yyv23 := &x.CurrentMetrics - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - h.decSliceMetricStatus((*[]MetricStatus)(yyv23), d) - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *MetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - 
yyq2[1] = x.Object != nil - yyq2[2] = x.Pods != nil - yyq2[3] = x.Resource != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Object == nil { - r.EncodeNil() - } else { - x.Object.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("object")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Object == nil { - r.EncodeNil() - } else { - x.Object.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Pods == nil { - r.EncodeNil() - } else { - x.Pods.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pods")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Pods == nil { - r.EncodeNil() - } else { - x.Pods.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Resource == nil { - r.EncodeNil() - } else { - x.Resource.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Resource == nil { - r.EncodeNil() - } else { - x.Resource.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *MetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *MetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = 
r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "object": - if r.TryDecodeAsNil() { - if x.Object != nil { - x.Object = nil - } - } else { - if x.Object == nil { - x.Object = new(ObjectMetricStatus) - } - x.Object.CodecDecodeSelf(d) - } - case "pods": - if r.TryDecodeAsNil() { - if x.Pods != nil { - x.Pods = nil - } - } else { - if x.Pods == nil { - x.Pods = new(PodsMetricStatus) - } - x.Pods.CodecDecodeSelf(d) - } - case "resource": - if r.TryDecodeAsNil() { - if x.Resource != nil { - x.Resource = nil - } - } else { - if x.Resource == nil { - x.Resource = new(ResourceMetricStatus) - } - x.Resource.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *MetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv9 := &x.Type - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Object != nil { - x.Object = nil - } - } else { - if x.Object == nil { - x.Object = new(ObjectMetricStatus) - } - x.Object.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Pods != nil { - x.Pods = nil - } - } else { - if x.Pods == nil { - x.Pods = new(PodsMetricStatus) - } - x.Pods.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Resource != nil { - x.Resource = nil - } - } else { - if x.Resource == nil { - x.Resource = new(ResourceMetricStatus) - } - x.Resource.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ObjectMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - 
r.EncodeArrayStart(3) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.Target - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("target")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Target - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metricName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy12 := &x.CurrentValue - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentValue")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.CurrentValue - yym15 := z.EncBinary() - _ = yym15 - if false { - } else if z.HasExtensions() && z.EncExt(yy14) { - } else if !yym15 && z.IsJSONHandle() { - z.EncJSONMarshal(yy14) - } else { - z.EncFallback(yy14) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "target": - if r.TryDecodeAsNil() { - x.Target = CrossVersionObjectReference{} - } else { - yyv4 := &x.Target - yyv4.CodecDecodeSelf(d) - } - case "metricName": - if r.TryDecodeAsNil() { - x.MetricName = "" - 
} else { - yyv5 := &x.MetricName - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*string)(yyv5)) = r.DecodeString() - } - } - case "currentValue": - if r.TryDecodeAsNil() { - x.CurrentValue = pkg1_resource.Quantity{} - } else { - yyv7 := &x.CurrentValue - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Target = CrossVersionObjectReference{} - } else { - yyv10 := &x.Target - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MetricName = "" - } else { - yyv11 := &x.MetricName - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentValue = pkg1_resource.Quantity{} - } else { - yyv13 := &x.CurrentValue - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(yyv13) { - } else if !yym14 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv13) - } else { - z.DecFallback(yyv13, false) - } - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodsMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metricName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - 
if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.CurrentAverageValue - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.CurrentAverageValue - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodsMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodsMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metricName": - if r.TryDecodeAsNil() { - x.MetricName = "" - } else { - yyv4 := &x.MetricName - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "currentAverageValue": - if r.TryDecodeAsNil() { - x.CurrentAverageValue = pkg1_resource.Quantity{} - } else { - yyv6 := &x.CurrentAverageValue - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodsMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MetricName = "" - } else { - yyv9 := &x.MetricName - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentAverageValue = pkg1_resource.Quantity{} - } else { - yyv11 := &x.CurrentAverageValue - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv11) - } else { - z.DecFallback(yyv11, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.CurrentAverageUtilization != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf4 := &x.Name - yysf4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yysf5 := &x.Name - yysf5.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.CurrentAverageUtilization == nil { - r.EncodeNil() - } else { - yy7 := *x.CurrentAverageUtilization - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(yy7)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentAverageUtilization")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CurrentAverageUtilization == nil { - r.EncodeNil() - } else { - yy9 := *x.CurrentAverageUtilization - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy12 := &x.CurrentAverageValue - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.CurrentAverageValue - yym15 := z.EncBinary() - _ = yym15 - if false { - } else if z.HasExtensions() 
&& z.EncExt(yy14) { - } else if !yym15 && z.IsJSONHandle() { - z.EncJSONMarshal(yy14) - } else { - z.EncFallback(yy14) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yyv4.CodecDecodeSelf(d) - } - case "currentAverageUtilization": - if r.TryDecodeAsNil() { - if x.CurrentAverageUtilization != nil { - x.CurrentAverageUtilization = nil - } - } else { - if x.CurrentAverageUtilization == nil { - x.CurrentAverageUtilization = new(int32) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) - } - } - case "currentAverageValue": - if r.TryDecodeAsNil() { - x.CurrentAverageValue = pkg1_resource.Quantity{} - } else { - yyv7 := &x.CurrentAverageValue - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv10 := &x.Name - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CurrentAverageUtilization != nil { - 
x.CurrentAverageUtilization = nil - } - } else { - if x.CurrentAverageUtilization == nil { - x.CurrentAverageUtilization = new(int32) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentAverageValue = pkg1_resource.Quantity{} - } else { - yyv13 := &x.CurrentAverageValue - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(yyv13) { - } else if !yym14 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv13) - } else { - z.DecFallback(yyv13, false) - } - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = HorizontalPodAutoscalerSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} - 
} else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = HorizontalPodAutoscalerSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerList) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg3_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg3_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - 
h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceMetricSpec(v []MetricSpec, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceMetricSpec(v *[]MetricSpec, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []MetricSpec{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]MetricSpec, yyrl1) - } - } else { - yyv1 = make([]MetricSpec, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = MetricSpec{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, MetricSpec{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = MetricSpec{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, MetricSpec{}) // var yyz1 MetricSpec - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = MetricSpec{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []MetricSpec{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceMetricStatus(v []MetricStatus, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceMetricStatus(v *[]MetricStatus, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []MetricStatus{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, 
yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]MetricStatus, yyrl1) - } - } else { - yyv1 = make([]MetricStatus, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = MetricStatus{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, MetricStatus{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = MetricStatus{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, MetricStatus{}) // var yyz1 MetricStatus - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = MetricStatus{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []MetricStatus{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []HorizontalPodAutoscaler{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 392) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]HorizontalPodAutoscaler, yyrl1) - } - } else { - yyv1 = make([]HorizontalPodAutoscaler, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HorizontalPodAutoscaler{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = 
append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HorizontalPodAutoscaler{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.go deleted file mode 100644 index 4df8ae414..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.go +++ /dev/null @@ -1,269 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2alpha1 - -import ( - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api/v1" -) - -// CrossVersionObjectReference contains enough information to let you identify the referred resource. -type CrossVersionObjectReference struct { - // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" - Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent - // +optional - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` -} - -// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. -type HorizontalPodAutoscalerSpec struct { - // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics - // should be collected, as well as to actually change the replica count. - ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. - // +optional - MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` - // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. - // It cannot be less that minReplicas. - MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` - // metrics contains the specifications for which to use to calculate the - // desired replica count (the maximum replica count across all metrics will - // be used). The desired replica count is calculated multiplying the - // ratio between the target value and the current value by the current - // number of pods. Ergo, metrics used must decrease as the pod count is - // increased, and vice-versa. 
See the individual metric source types for - // more information about how each type of metric must respond. - // +optional - Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` -} - -// MetricSourceType indicates the type of metric. -type MetricSourceType string - -var ( - // ObjectMetricSourceType is a metric describing a kubernetes object - // (for example, hits-per-second on an Ingress object). - ObjectMetricSourceType MetricSourceType = "Object" - // PodsMetricSourceType is a metric describing each pod in the current scale - // target (for example, transactions-processed-per-second). The values - // will be averaged together before being compared to the target value. - PodsMetricSourceType MetricSourceType = "Pods" - // ResourceMetricSourceType is a resource metric known to Kubernetes, as - // specified in requests and limits, describing each pod in the current - // scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics (the "pods" source). - ResourceMetricSourceType MetricSourceType = "Resource" -) - -// MetricSpec specifies how to scale based on a single metric -// (only `type` and one other matching field should be set at once). -type MetricSpec struct { - // type is the type of metric source. It should match one of the fields below. - Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` - - // object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). - // +optional - Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` - // pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` - // resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` -} - -// ObjectMetricSource indicates how to scale on a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). -type ObjectMetricSource struct { - // target is the described Kubernetes object. - Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` - - // metricName is the name of the metric in question. - MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` - // targetValue is the target value of the metric (as a quantity). - TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` -} - -// PodsMetricSource indicates how to scale on a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). -// The values will be averaged together before being compared to the target -// value. 
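The removed doc comments above describe the scaling rule only informally: the current replica count is scaled by the ratio of the observed average metric value to its target, so that per-pod metrics fall back toward the target as pods are added. Below is a minimal Go sketch of that arithmetic; the function name, numeric types, and example values are invented for illustration and are not part of the vendored package being deleted.

package main

import (
	"fmt"
	"math"
)

// desiredReplicas sketches the rule described in the comment above: scale the
// current replica count by the ratio of the observed average metric value to
// its target, rounding up. Name and types are invented for this illustration.
func desiredReplicas(currentReplicas int32, currentValue, targetValue float64) int32 {
	if currentReplicas == 0 || targetValue == 0 {
		return currentReplicas
	}
	return int32(math.Ceil(float64(currentReplicas) * currentValue / targetValue))
}

func main() {
	// 4 pods averaging 200m CPU against a 100m target suggests 8 pods.
	fmt.Println(desiredReplicas(4, 200, 100))
}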
-type PodsMetricSource struct { - // metricName is the name of the metric in question - MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` - // targetAverageValue is the target value of the average of the - // metric across all relevant pods (as a quantity) - TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` -} - -// ResourceMetricSource indicates how to scale on a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). The values will be averaged -// together before being compared to the target. Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. Only one "target" type -// should be set. -type ResourceMetricSource struct { - // name is the name of the resource in question. - Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` - // targetAverageUtilization is the target value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. - // +optional - TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` - // targetAverageValue is the the target value of the average of the - // resource metric across all relevant pods, as a raw value (instead of as - // a percentage of the request), similar to the "pods" metric source type. - // +optional - TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` -} - -// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. -type HorizontalPodAutoscalerStatus struct { - // observedGeneration is the most recent generation observed by this autoscaler. - // +optional - ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, - // used by the autoscaler to control how often the number of pods is changed. - // +optional - LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` - - // currentReplicas is current number of replicas of pods managed by this autoscaler, - // as last seen by the autoscaler. - CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` - - // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, - // as last calculated by the autoscaler. - DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` - - // currentMetrics is the last read state of the metrics used by this autoscaler. - CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"` -} - -// MetricStatus describes the last-read state of a single metric. -type MetricStatus struct { - // type is the type of metric source. It will match one of the fields below. - Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` - - // object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). 
- // +optional - Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` - // pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` - // resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` -} - -// ObjectMetricStatus indicates the current value of a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). -type ObjectMetricStatus struct { - // target is the described Kubernetes object. - Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` - - // metricName is the name of the metric in question. - MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` - // currentValue is the current value of the metric (as a quantity). - CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` -} - -// PodsMetricStatus indicates the current value of a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). -type PodsMetricStatus struct { - // metricName is the name of the metric in question - MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` - // currentAverageValue is the current value of the average of the - // metric across all relevant pods (as a quantity) - CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` -} - -// ResourceMetricStatus indicates the current value of a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. -type ResourceMetricStatus struct { - // name is the name of the resource in question. - Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` - // currentAverageUtilization is the current value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. It will only be - // present if `targetAverageValue` was set in the corresponding metric - // specification. - // +optional - CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` - // currentAverageValue is the the current value of the average of the - // resource metric across all relevant pods, as a raw value (instead of as - // a percentage of the request), similar to the "pods" metric source type. - // It will always be set, regardless of the corresponding metric specification. 
- CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` -} - -// +genclient=true - -// HorizontalPodAutoscaler is the configuration for a horizontal pod -// autoscaler, which automatically manages the replica count of any resource -// implementing the scale subresource based on the metrics specified. -type HorizontalPodAutoscaler struct { - metav1.TypeMeta `json:",inline"` - // metadata is the standard object metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec is the specification for the behaviour of the autoscaler. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. - // +optional - Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // status is the current information about the autoscaler. - // +optional - Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects. -type HorizontalPodAutoscalerList struct { - metav1.TypeMeta `json:",inline"` - // metadata is the standard list metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of horizontal pod autoscaler objects. - Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 6030e2dc3..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,175 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. 
-// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_CrossVersionObjectReference = map[string]string{ - "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", -} - -func (CrossVersionObjectReference) SwaggerDoc() map[string]string { - return map_CrossVersionObjectReference -} - -var map_HorizontalPodAutoscaler = map[string]string{ - "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", - "metadata": "metadata is the standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "spec is the specification for the behaviour of the autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", - "status": "status is the current information about the autoscaler.", -} - -func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscaler -} - -var map_HorizontalPodAutoscalerList = map[string]string{ - "": "HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects.", - "metadata": "metadata is the standard list metadata.", - "items": "items is the list of horizontal pod autoscaler objects.", -} - -func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerList -} - -var map_HorizontalPodAutoscalerSpec = map[string]string{ - "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", - "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", - "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", - "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", - "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. 
See the individual metric source types for more information about how each type of metric must respond.", -} - -func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerSpec -} - -var map_HorizontalPodAutoscalerStatus = map[string]string{ - "": "HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.", - "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.", - "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.", - "currentReplicas": "currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.", - "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.", - "currentMetrics": "currentMetrics is the last read state of the metrics used by this autoscaler.", -} - -func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerStatus -} - -var map_MetricSpec = map[string]string{ - "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should match one of the fields below.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", -} - -func (MetricSpec) SwaggerDoc() map[string]string { - return map_MetricSpec -} - -var map_MetricStatus = map[string]string{ - "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will match one of the fields below.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", -} - -func (MetricStatus) SwaggerDoc() map[string]string { - return map_MetricStatus -} - -var map_ObjectMetricSource = map[string]string{ - "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", - "target": "target is the described Kubernetes object.", - "metricName": "metricName is the name of the metric in question.", - "targetValue": "targetValue is the target value of the metric (as a quantity).", -} - -func (ObjectMetricSource) SwaggerDoc() map[string]string { - return map_ObjectMetricSource -} - -var map_ObjectMetricStatus = map[string]string{ - "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", - "target": "target is the described Kubernetes object.", - "metricName": "metricName is the name of the metric in question.", - "currentValue": "currentValue is the current value of the metric (as a quantity).", -} - -func (ObjectMetricStatus) SwaggerDoc() map[string]string { - return map_ObjectMetricStatus -} - -var map_PodsMetricSource = map[string]string{ - "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "metricName": "metricName is the name of the metric in question", - "targetAverageValue": "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)", -} - -func (PodsMetricSource) SwaggerDoc() map[string]string { - return map_PodsMetricSource -} - -var map_PodsMetricStatus = map[string]string{ - "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", - "metricName": "metricName is the name of the metric in question", - "currentAverageValue": "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)", -} - -func (PodsMetricStatus) SwaggerDoc() map[string]string { - return map_PodsMetricStatus -} - -var map_ResourceMetricSource = map[string]string{ - "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
Only one \"target\" type should be set.", - "name": "name is the name of the resource in question.", - "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", - "targetAverageValue": "targetAverageValue is the the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", -} - -func (ResourceMetricSource) SwaggerDoc() map[string]string { - return map_ResourceMetricSource -} - -var map_ResourceMetricStatus = map[string]string{ - "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "name is the name of the resource in question.", - "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", - "currentAverageValue": "currentAverageValue is the the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", -} - -func (ResourceMetricStatus) SwaggerDoc() map[string]string { - return map_ResourceMetricStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.conversion.go deleted file mode 100644 index 70d11d5e8..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,387 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v2alpha1 - -import ( - resource "k8s.io/apimachinery/pkg/api/resource" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/client-go/pkg/api" - api_v1 "k8s.io/client-go/pkg/api/v1" - autoscaling "k8s.io/client-go/pkg/apis/autoscaling" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference, - Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference, - Convert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, - Convert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler, - Convert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList, - Convert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList, - Convert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, - Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec, - Convert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, - Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus, - Convert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec, - Convert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec, - Convert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus, - Convert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus, - Convert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource, - Convert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource, - Convert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus, - Convert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus, - Convert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource, - Convert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource, - Convert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus, - Convert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus, - Convert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource, - Convert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource, - Convert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus, - Convert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus, - ) -} - -func autoConvert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - return nil -} - -func Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { - return autoConvert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s) -} - -func autoConvert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - return nil -} - -func Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { - return autoConvert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(in, out, s) -} - -func autoConvert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { - 
out.ObjectMeta = in.ObjectMeta - if err := Convert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { - return autoConvert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s) -} - -func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler(in, out, s) -} - -func autoConvert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]autoscaling.HorizontalPodAutoscaler)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { - return autoConvert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s) -} - -func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]HorizontalPodAutoscaler, 0) - } else { - out.Items = *(*[]HorizontalPodAutoscaler)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList(in, out, s) -} - -func autoConvert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if err := Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { - return err - } - out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) - out.MaxReplicas = in.MaxReplicas - out.Metrics = *(*[]autoscaling.MetricSpec)(unsafe.Pointer(&in.Metrics)) - return nil -} - -func 
Convert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { - return autoConvert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in, out, s) -} - -func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if err := Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { - return err - } - out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) - out.MaxReplicas = in.MaxReplicas - out.Metrics = *(*[]MetricSpec)(unsafe.Pointer(&in.Metrics)) - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(in, out, s) -} - -func autoConvert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { - out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) - out.LastScaleTime = (*v1.Time)(unsafe.Pointer(in.LastScaleTime)) - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - out.CurrentMetrics = *(*[]autoscaling.MetricStatus)(unsafe.Pointer(&in.CurrentMetrics)) - return nil -} - -func Convert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { - return autoConvert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s) -} - -func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { - out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) - out.LastScaleTime = (*v1.Time)(unsafe.Pointer(in.LastScaleTime)) - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - if in.CurrentMetrics == nil { - out.CurrentMetrics = make([]MetricStatus, 0) - } else { - out.CurrentMetrics = *(*[]MetricStatus)(unsafe.Pointer(&in.CurrentMetrics)) - } - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(in, out, s) -} - -func autoConvert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error { - out.Type = autoscaling.MetricSourceType(in.Type) - out.Object = (*autoscaling.ObjectMetricSource)(unsafe.Pointer(in.Object)) - out.Pods = (*autoscaling.PodsMetricSource)(unsafe.Pointer(in.Pods)) - out.Resource = (*autoscaling.ResourceMetricSource)(unsafe.Pointer(in.Resource)) - return nil -} - -func Convert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out 
*autoscaling.MetricSpec, s conversion.Scope) error { - return autoConvert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec(in, out, s) -} - -func autoConvert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { - out.Type = MetricSourceType(in.Type) - out.Object = (*ObjectMetricSource)(unsafe.Pointer(in.Object)) - out.Pods = (*PodsMetricSource)(unsafe.Pointer(in.Pods)) - out.Resource = (*ResourceMetricSource)(unsafe.Pointer(in.Resource)) - return nil -} - -func Convert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { - return autoConvert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec(in, out, s) -} - -func autoConvert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { - out.Type = autoscaling.MetricSourceType(in.Type) - out.Object = (*autoscaling.ObjectMetricStatus)(unsafe.Pointer(in.Object)) - out.Pods = (*autoscaling.PodsMetricStatus)(unsafe.Pointer(in.Pods)) - out.Resource = (*autoscaling.ResourceMetricStatus)(unsafe.Pointer(in.Resource)) - return nil -} - -func Convert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { - return autoConvert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus(in, out, s) -} - -func autoConvert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { - out.Type = MetricSourceType(in.Type) - out.Object = (*ObjectMetricStatus)(unsafe.Pointer(in.Object)) - out.Pods = (*PodsMetricStatus)(unsafe.Pointer(in.Pods)) - out.Resource = (*ResourceMetricStatus)(unsafe.Pointer(in.Resource)) - return nil -} - -func Convert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { - return autoConvert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus(in, out, s) -} - -func autoConvert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { - if err := Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - out.MetricName = in.MetricName - out.TargetValue = in.TargetValue - return nil -} - -func Convert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { - return autoConvert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in, out, s) -} - -func autoConvert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, s conversion.Scope) error { - if err := Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - out.MetricName = in.MetricName - out.TargetValue = in.TargetValue - return nil -} - -func Convert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, s conversion.Scope) error { - return autoConvert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource(in, out, s) -} - -func autoConvert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) 
error { - if err := Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - out.MetricName = in.MetricName - out.CurrentValue = in.CurrentValue - return nil -} - -func Convert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error { - return autoConvert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in, out, s) -} - -func autoConvert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { - if err := Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - out.MetricName = in.MetricName - out.CurrentValue = in.CurrentValue - return nil -} - -func Convert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { - return autoConvert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus(in, out, s) -} - -func autoConvert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { - out.MetricName = in.MetricName - out.TargetAverageValue = in.TargetAverageValue - return nil -} - -func Convert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { - return autoConvert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource(in, out, s) -} - -func autoConvert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { - out.MetricName = in.MetricName - out.TargetAverageValue = in.TargetAverageValue - return nil -} - -func Convert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { - return autoConvert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource(in, out, s) -} - -func autoConvert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { - out.MetricName = in.MetricName - out.CurrentAverageValue = in.CurrentAverageValue - return nil -} - -func Convert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { - return autoConvert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in, out, s) -} - -func autoConvert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { - out.MetricName = in.MetricName - out.CurrentAverageValue = in.CurrentAverageValue - return nil -} - -func Convert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { - return autoConvert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus(in, out, s) -} - -func autoConvert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { - out.Name = api.ResourceName(in.Name) - out.TargetAverageUtilization = 
(*int32)(unsafe.Pointer(in.TargetAverageUtilization)) - out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) - return nil -} - -func Convert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { - return autoConvert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in, out, s) -} - -func autoConvert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { - out.Name = api_v1.ResourceName(in.Name) - out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization)) - out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) - return nil -} - -func Convert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { - return autoConvert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource(in, out, s) -} - -func autoConvert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { - out.Name = api.ResourceName(in.Name) - out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) - out.CurrentAverageValue = in.CurrentAverageValue - return nil -} - -func Convert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { - return autoConvert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in, out, s) -} - -func autoConvert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { - out.Name = api_v1.ResourceName(in.Name) - out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) - out.CurrentAverageValue = in.CurrentAverageValue - return nil -} - -func Convert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { - return autoConvert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 1953eaea6..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,285 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
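The hunk below removes the generated deepcopy helpers for these types. As a hedged usage sketch against the pre-upgrade vendored package: DeepCopy_v2alpha1_HorizontalPodAutoscalerSpec (defined in the removed file) gives the copy its own MinReplicas pointer, and for a spec without metrics it never reads its Cloner argument, so nil is passed here; the field values are arbitrary.

package main

import (
	"fmt"

	v2alpha1 "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1"
)

func main() {
	min := int32(2)
	in := &v2alpha1.HorizontalPodAutoscalerSpec{MinReplicas: &min, MaxReplicas: 10}
	out := &v2alpha1.HorizontalPodAutoscalerSpec{}
	// For a spec without metrics the generated helper copies plain fields and
	// allocates fresh storage for MinReplicas; the Cloner argument is unused
	// on this path, so nil is acceptable in this sketch.
	if err := v2alpha1.DeepCopy_v2alpha1_HorizontalPodAutoscalerSpec(in, out, nil); err != nil {
		panic(err)
	}
	*out.MinReplicas = 5 // does not affect the original
	fmt.Println(*in.MinReplicas, *out.MinReplicas) // 2 5
}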
- -package v2alpha1 - -import ( - resource "k8s.io/apimachinery/pkg/api/resource" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CrossVersionObjectReference, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscaler, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscalerList, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscalerSpec, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscalerStatus, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_MetricSpec, InType: reflect.TypeOf(&MetricSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_MetricStatus, InType: reflect.TypeOf(&MetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ObjectMetricSource, InType: reflect.TypeOf(&ObjectMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ObjectMetricStatus, InType: reflect.TypeOf(&ObjectMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_PodsMetricSource, InType: reflect.TypeOf(&PodsMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_PodsMetricStatus, InType: reflect.TypeOf(&PodsMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ResourceMetricSource, InType: reflect.TypeOf(&ResourceMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ResourceMetricStatus, InType: reflect.TypeOf(&ResourceMetricStatus{})}, - ) -} - -func DeepCopy_v2alpha1_CrossVersionObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CrossVersionObjectReference) - out := out.(*CrossVersionObjectReference) - *out = *in - return nil - } -} - -func DeepCopy_v2alpha1_HorizontalPodAutoscaler(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscaler) - out := out.(*HorizontalPodAutoscaler) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v2alpha1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v2alpha1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v2alpha1_HorizontalPodAutoscalerList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscalerList) - out := out.(*HorizontalPodAutoscalerList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HorizontalPodAutoscaler, len(*in)) - for i := range *in { - if err := DeepCopy_v2alpha1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func 
DeepCopy_v2alpha1_HorizontalPodAutoscalerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscalerSpec) - out := out.(*HorizontalPodAutoscalerSpec) - *out = *in - if in.MinReplicas != nil { - in, out := &in.MinReplicas, &out.MinReplicas - *out = new(int32) - **out = **in - } - if in.Metrics != nil { - in, out := &in.Metrics, &out.Metrics - *out = make([]MetricSpec, len(*in)) - for i := range *in { - if err := DeepCopy_v2alpha1_MetricSpec(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v2alpha1_HorizontalPodAutoscalerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscalerStatus) - out := out.(*HorizontalPodAutoscalerStatus) - *out = *in - if in.ObservedGeneration != nil { - in, out := &in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = **in - } - if in.LastScaleTime != nil { - in, out := &in.LastScaleTime, &out.LastScaleTime - *out = new(v1.Time) - **out = (*in).DeepCopy() - } - if in.CurrentMetrics != nil { - in, out := &in.CurrentMetrics, &out.CurrentMetrics - *out = make([]MetricStatus, len(*in)) - for i := range *in { - if err := DeepCopy_v2alpha1_MetricStatus(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v2alpha1_MetricSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*MetricSpec) - out := out.(*MetricSpec) - *out = *in - if in.Object != nil { - in, out := &in.Object, &out.Object - *out = new(ObjectMetricSource) - if err := DeepCopy_v2alpha1_ObjectMetricSource(*in, *out, c); err != nil { - return err - } - } - if in.Pods != nil { - in, out := &in.Pods, &out.Pods - *out = new(PodsMetricSource) - if err := DeepCopy_v2alpha1_PodsMetricSource(*in, *out, c); err != nil { - return err - } - } - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = new(ResourceMetricSource) - if err := DeepCopy_v2alpha1_ResourceMetricSource(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v2alpha1_MetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*MetricStatus) - out := out.(*MetricStatus) - *out = *in - if in.Object != nil { - in, out := &in.Object, &out.Object - *out = new(ObjectMetricStatus) - if err := DeepCopy_v2alpha1_ObjectMetricStatus(*in, *out, c); err != nil { - return err - } - } - if in.Pods != nil { - in, out := &in.Pods, &out.Pods - *out = new(PodsMetricStatus) - if err := DeepCopy_v2alpha1_PodsMetricStatus(*in, *out, c); err != nil { - return err - } - } - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = new(ResourceMetricStatus) - if err := DeepCopy_v2alpha1_ResourceMetricStatus(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v2alpha1_ObjectMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ObjectMetricSource) - out := out.(*ObjectMetricSource) - *out = *in - out.TargetValue = in.TargetValue.DeepCopy() - return nil - } -} - -func DeepCopy_v2alpha1_ObjectMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ObjectMetricStatus) - out := out.(*ObjectMetricStatus) - *out = *in - out.CurrentValue = in.CurrentValue.DeepCopy() - return nil - } -} - -func DeepCopy_v2alpha1_PodsMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodsMetricSource) - out 
:= out.(*PodsMetricSource) - *out = *in - out.TargetAverageValue = in.TargetAverageValue.DeepCopy() - return nil - } -} - -func DeepCopy_v2alpha1_PodsMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodsMetricStatus) - out := out.(*PodsMetricStatus) - *out = *in - out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() - return nil - } -} - -func DeepCopy_v2alpha1_ResourceMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceMetricSource) - out := out.(*ResourceMetricSource) - *out = *in - if in.TargetAverageUtilization != nil { - in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization - *out = new(int32) - **out = **in - } - if in.TargetAverageValue != nil { - in, out := &in.TargetAverageValue, &out.TargetAverageValue - *out = new(resource.Quantity) - **out = (*in).DeepCopy() - } - return nil - } -} - -func DeepCopy_v2alpha1_ResourceMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceMetricStatus) - out := out.(*ResourceMetricStatus) - *out = *in - if in.CurrentAverageUtilization != nil { - in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization - *out = new(int32) - **out = **in - } - out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/zz_generated.deepcopy.go deleted file mode 100644 index 8639502a9..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/autoscaling/zz_generated.deepcopy.go +++ /dev/null @@ -1,320 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package autoscaling - -import ( - resource "k8s.io/apimachinery/pkg/api/resource" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_CrossVersionObjectReference, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_HorizontalPodAutoscaler, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_HorizontalPodAutoscalerList, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_HorizontalPodAutoscalerSpec, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_HorizontalPodAutoscalerStatus, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_MetricSpec, InType: reflect.TypeOf(&MetricSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_MetricStatus, InType: reflect.TypeOf(&MetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ObjectMetricSource, InType: reflect.TypeOf(&ObjectMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ObjectMetricStatus, InType: reflect.TypeOf(&ObjectMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_PodsMetricSource, InType: reflect.TypeOf(&PodsMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_PodsMetricStatus, InType: reflect.TypeOf(&PodsMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ResourceMetricSource, InType: reflect.TypeOf(&ResourceMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ResourceMetricStatus, InType: reflect.TypeOf(&ResourceMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_Scale, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, - ) -} - -func DeepCopy_autoscaling_CrossVersionObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CrossVersionObjectReference) - out := out.(*CrossVersionObjectReference) - *out = *in - return nil - } -} - -func DeepCopy_autoscaling_HorizontalPodAutoscaler(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscaler) - out := out.(*HorizontalPodAutoscaler) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_autoscaling_HorizontalPodAutoscalerList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscalerList) - out := out.(*HorizontalPodAutoscalerList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HorizontalPodAutoscaler, len(*in)) - for i := range *in { - if err := DeepCopy_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func 
DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscalerSpec) - out := out.(*HorizontalPodAutoscalerSpec) - *out = *in - if in.MinReplicas != nil { - in, out := &in.MinReplicas, &out.MinReplicas - *out = new(int32) - **out = **in - } - if in.Metrics != nil { - in, out := &in.Metrics, &out.Metrics - *out = make([]MetricSpec, len(*in)) - for i := range *in { - if err := DeepCopy_autoscaling_MetricSpec(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_autoscaling_HorizontalPodAutoscalerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HorizontalPodAutoscalerStatus) - out := out.(*HorizontalPodAutoscalerStatus) - *out = *in - if in.ObservedGeneration != nil { - in, out := &in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = **in - } - if in.LastScaleTime != nil { - in, out := &in.LastScaleTime, &out.LastScaleTime - *out = new(v1.Time) - **out = (*in).DeepCopy() - } - if in.CurrentMetrics != nil { - in, out := &in.CurrentMetrics, &out.CurrentMetrics - *out = make([]MetricStatus, len(*in)) - for i := range *in { - if err := DeepCopy_autoscaling_MetricStatus(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_autoscaling_MetricSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*MetricSpec) - out := out.(*MetricSpec) - *out = *in - if in.Object != nil { - in, out := &in.Object, &out.Object - *out = new(ObjectMetricSource) - if err := DeepCopy_autoscaling_ObjectMetricSource(*in, *out, c); err != nil { - return err - } - } - if in.Pods != nil { - in, out := &in.Pods, &out.Pods - *out = new(PodsMetricSource) - if err := DeepCopy_autoscaling_PodsMetricSource(*in, *out, c); err != nil { - return err - } - } - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = new(ResourceMetricSource) - if err := DeepCopy_autoscaling_ResourceMetricSource(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_autoscaling_MetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*MetricStatus) - out := out.(*MetricStatus) - *out = *in - if in.Object != nil { - in, out := &in.Object, &out.Object - *out = new(ObjectMetricStatus) - if err := DeepCopy_autoscaling_ObjectMetricStatus(*in, *out, c); err != nil { - return err - } - } - if in.Pods != nil { - in, out := &in.Pods, &out.Pods - *out = new(PodsMetricStatus) - if err := DeepCopy_autoscaling_PodsMetricStatus(*in, *out, c); err != nil { - return err - } - } - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = new(ResourceMetricStatus) - if err := DeepCopy_autoscaling_ResourceMetricStatus(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_autoscaling_ObjectMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ObjectMetricSource) - out := out.(*ObjectMetricSource) - *out = *in - out.TargetValue = in.TargetValue.DeepCopy() - return nil - } -} - -func DeepCopy_autoscaling_ObjectMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ObjectMetricStatus) - out := out.(*ObjectMetricStatus) - *out = *in - out.CurrentValue = in.CurrentValue.DeepCopy() - return nil - } -} - -func DeepCopy_autoscaling_PodsMetricSource(in interface{}, out interface{}, c *conversion.Cloner) 
error { - { - in := in.(*PodsMetricSource) - out := out.(*PodsMetricSource) - *out = *in - out.TargetAverageValue = in.TargetAverageValue.DeepCopy() - return nil - } -} - -func DeepCopy_autoscaling_PodsMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodsMetricStatus) - out := out.(*PodsMetricStatus) - *out = *in - out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() - return nil - } -} - -func DeepCopy_autoscaling_ResourceMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceMetricSource) - out := out.(*ResourceMetricSource) - *out = *in - if in.TargetAverageUtilization != nil { - in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization - *out = new(int32) - **out = **in - } - if in.TargetAverageValue != nil { - in, out := &in.TargetAverageValue, &out.TargetAverageValue - *out = new(resource.Quantity) - **out = (*in).DeepCopy() - } - return nil - } -} - -func DeepCopy_autoscaling_ResourceMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ResourceMetricStatus) - out := out.(*ResourceMetricStatus) - *out = *in - if in.CurrentAverageUtilization != nil { - in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization - *out = new(int32) - **out = **in - } - out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() - return nil - } -} - -func DeepCopy_autoscaling_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Scale) - out := out.(*Scale) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - return nil - } -} - -func DeepCopy_autoscaling_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ScaleSpec) - out := out.(*ScaleSpec) - *out = *in - return nil - } -} - -func DeepCopy_autoscaling_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ScaleStatus) - out := out.(*ScaleStatus) - *out = *in - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/OWNERS b/vendor/k8s.io/client-go/pkg/apis/batch/OWNERS deleted file mode 100755 index 502f90771..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/OWNERS +++ /dev/null @@ -1,19 +0,0 @@ -reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- caesarxuchao -- erictune -- sttts -- saad-ali -- ncdc -- timothysc -- soltysh -- dims -- errordeveloper -- mml -- mbohlool -- david-mcmahon -- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/doc.go b/vendor/k8s.io/client-go/pkg/apis/batch/doc.go deleted file mode 100644 index 6fe952226..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package batch diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/install/install.go b/vendor/k8s.io/client-go/pkg/apis/batch/install/install.go deleted file mode 100644 index d37f31820..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/install/install.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the batch API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/apis/batch" - "k8s.io/client-go/pkg/apis/batch/v1" - "k8s.io/client-go/pkg/apis/batch/v2alpha1" -) - -func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: batch.GroupName, - VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v2alpha1.SchemeGroupVersion.Version}, - ImportPrefix: "k8s.io/client-go/pkg/apis/batch", - AddInternalObjectsToScheme: batch.AddToScheme, - }, - announced.VersionToSchemeFunc{ - v1.SchemeGroupVersion.Version: v1.AddToScheme, - v2alpha1.SchemeGroupVersion.Version: v2alpha1.AddToScheme, - }, - ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/register.go b/vendor/k8s.io/client-go/pkg/apis/batch/register.go deleted file mode 100644 index 4601ca4ec..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/register.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package batch - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "batch" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Job{}, - &JobList{}, - &JobTemplate{}, - &CronJob{}, - &CronJobList{}, - ) - scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJob"), &CronJob{}) - scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJobList"), &CronJobList{}) - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/types.go b/vendor/k8s.io/client-go/pkg/apis/batch/types.go deleted file mode 100644 index 7a2ad011f..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/types.go +++ /dev/null @@ -1,286 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package batch - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api" -) - -// +genclient=true - -// Job represents the configuration of a single job. -type Job struct { - metav1.TypeMeta - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta - - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Spec JobSpec - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Status JobStatus -} - -// JobList is a collection of jobs. -type JobList struct { - metav1.TypeMeta - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta - - // Items is the list of Job. - Items []Job -} - -// JobTemplate describes a template for creating copies of a predefined pod. -type JobTemplate struct { - metav1.TypeMeta - // Standard object's metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta - - // Template defines jobs that will be created from this template - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Template JobTemplateSpec -} - -// JobTemplateSpec describes the data a Job should have when created from a template -type JobTemplateSpec struct { - // Standard object's metadata of the jobs created from this template. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta - - // Specification of the desired behavior of the job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Spec JobSpec -} - -// JobSpec describes how the job execution will look like. -type JobSpec struct { - - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - // +optional - Parallelism *int32 - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - // +optional - Completions *int32 - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - // +optional - ActiveDeadlineSeconds *int64 - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // +optional - Selector *metav1.LabelSelector - - // ManualSelector controls generation of pod labels and pod selectors. - // Leave `manualSelector` unset unless you are certain what you are doing. - // When false or unset, the system pick labels unique to this job - // and appends those labels to the pod template. When true, - // the user is responsible for picking unique labels and specifying - // the selector. Failure to pick a unique label may cause this - // and other jobs to not function correctly. However, You may see - // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` - // API. - // +optional - ManualSelector *bool - - // Template is the object that describes the pod that will be created when - // executing a job. - Template api.PodTemplateSpec -} - -// JobStatus represents the current state of a Job. -type JobStatus struct { - - // Conditions represent the latest available observations of an object's current state. - // +optional - Conditions []JobCondition - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - // +optional - StartTime *metav1.Time - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. 
- // +optional - CompletionTime *metav1.Time - - // Active is the number of actively running pods. - // +optional - Active int32 - - // Succeeded is the number of pods which reached Phase Succeeded. - // +optional - Succeeded int32 - - // Failed is the number of pods which reached Phase Failed. - // +optional - Failed int32 -} - -type JobConditionType string - -// These are valid conditions of a job. -const ( - // JobComplete means the job has completed its execution. - JobComplete JobConditionType = "Complete" - // JobFailed means the job has failed its execution. - JobFailed JobConditionType = "Failed" -) - -// JobCondition describes current state of a job. -type JobCondition struct { - // Type of job condition, Complete or Failed. - Type JobConditionType - // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus - // Last time the condition was checked. - // +optional - LastProbeTime metav1.Time - // Last time the condition transit from one status to another. - // +optional - LastTransitionTime metav1.Time - // (brief) reason for the condition's last transition. - // +optional - Reason string - // Human readable message indicating details about last transition. - // +optional - Message string -} - -// +genclient=true - -// CronJob represents the configuration of a single cron job. -type CronJob struct { - metav1.TypeMeta - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta - - // Spec is a structure defining the expected behavior of a job, including the schedule. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Spec CronJobSpec - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Status CronJobStatus -} - -// CronJobList is a collection of cron jobs. -type CronJobList struct { - metav1.TypeMeta - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta - - // Items is the list of CronJob. - Items []CronJob -} - -// CronJobSpec describes how the job execution will look like and when it will actually run. -type CronJobSpec struct { - - // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - Schedule string - - // Optional deadline in seconds for starting the job if it misses scheduled - // time for any reason. Missed jobs executions will be counted as failed ones. - // +optional - StartingDeadlineSeconds *int64 - - // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. - // +optional - ConcurrencyPolicy ConcurrencyPolicy - - // Suspend flag tells the controller to suspend subsequent executions, it does - // not apply to already started executions. Defaults to false. - // +optional - Suspend *bool - - // JobTemplate is the object that describes the job that will be created when - // executing a CronJob. - JobTemplate JobTemplateSpec - - // The number of successful finished jobs to retain. - // This is a pointer to distinguish between explicit zero and not specified. - // +optional - SuccessfulJobsHistoryLimit *int32 - - // The number of failed finished jobs to retain. - // This is a pointer to distinguish between explicit zero and not specified. 
- // +optional - FailedJobsHistoryLimit *int32 -} - -// ConcurrencyPolicy describes how the job will be handled. -// Only one of the following concurrent policies may be specified. -// If none of the following policies is specified, the default one -// is AllowConcurrent. -type ConcurrencyPolicy string - -const ( - // AllowConcurrent allows CronJobs to run concurrently. - AllowConcurrent ConcurrencyPolicy = "Allow" - - // ForbidConcurrent forbids concurrent runs, skipping next run if previous - // hasn't finished yet. - ForbidConcurrent ConcurrencyPolicy = "Forbid" - - // ReplaceConcurrent cancels currently running job and replaces it with a new one. - ReplaceConcurrent ConcurrencyPolicy = "Replace" -) - -// CronJobStatus represents the current state of a cron job. -type CronJobStatus struct { - // Active holds pointers to currently running jobs. - // +optional - Active []api.ObjectReference - - // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. - // +optional - LastScheduleTime *metav1.Time -} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/conversion.go deleted file mode 100644 index 7f2bc9d15..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v1/conversion.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/client-go/pkg/api/v1" - "k8s.io/client-go/pkg/apis/batch" -) - -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_batch_JobSpec_To_v1_JobSpec, - Convert_v1_JobSpec_To_batch_JobSpec, - ) - if err != nil { - return err - } - - return scheme.AddFieldLabelConversionFunc("batch/v1", "Job", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name", "metadata.namespace", "status.successful": - return label, value, nil - default: - return "", "", fmt.Errorf("field label %q not supported for Job", label) - } - }, - ) -} - -func Convert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - out.Selector = in.Selector - if in.ManualSelector != nil { - out.ManualSelector = new(bool) - *out.ManualSelector = *in.ManualSelector - } else { - out.ManualSelector = nil - } - - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - out.Selector = in.Selector - if in.ManualSelector != nil { - out.ManualSelector = new(bool) - *out.ManualSelector = *in.ManualSelector - } else { - out.ManualSelector = nil - } - - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/defaults.go deleted file mode 100644 index 3603247f2..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v1/defaults.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - RegisterDefaults(scheme) - return scheme.AddDefaultingFuncs( - SetDefaults_Job, - ) -} - -func SetDefaults_Job(obj *Job) { - // For a non-parallel job, you can leave both `.spec.completions` and - // `.spec.parallelism` unset. When both are unset, both are defaulted to 1. 
- if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil { - obj.Spec.Completions = new(int32) - *obj.Spec.Completions = 1 - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } - if obj.Spec.Parallelism == nil { - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } - labels := obj.Spec.Template.Labels - if labels != nil && len(obj.Labels) == 0 { - obj.Labels = labels - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/doc.go deleted file mode 100644 index c7be42d5a..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v1/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.generated.go deleted file mode 100644 index 49dab1f88..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.generated.go +++ /dev/null @@ -1,2681 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg4_resource "k8s.io/apimachinery/pkg/api/resource" - pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkg2_types "k8s.io/apimachinery/pkg/types" - pkg5_intstr "k8s.io/apimachinery/pkg/util/intstr" - pkg3_v1 "k8s.io/client-go/pkg/api/v1" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg4_resource.Quantity - var v1 pkg1_v1.TypeMeta - var v2 pkg2_types.UID - var v3 pkg5_intstr.IntOrString - var v4 pkg3_v1.PodTemplateSpec - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 - } -} - -func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - 
yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if 
yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - 
yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceJob((*[]Job)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceJob((*[]Job)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Parallelism != nil - yyq2[1] = x.Completions != nil - yyq2[2] = x.ActiveDeadlineSeconds != nil - yyq2[3] = x.Selector != nil - yyq2[4] = x.ManualSelector != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Parallelism == nil { - r.EncodeNil() - } else { - yy4 := *x.Parallelism - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("parallelism")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Parallelism == nil { - r.EncodeNil() - } else { - yy6 := *x.Parallelism - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Completions == nil { - r.EncodeNil() - } else { - yy9 := *x.Completions - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Completions == nil { - r.EncodeNil() - } else { - yy11 := *x.Completions - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(yy11)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy14 := *x.ActiveDeadlineSeconds - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(yy14)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy16 := *x.ActiveDeadlineSeconds - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(yy16)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.ManualSelector == nil { - r.EncodeNil() - } else { - yy22 := *x.ManualSelector - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeBool(bool(yy22)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("manualSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ManualSelector == nil { - r.EncodeNil() - } else { - yy24 := *x.ManualSelector - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - 
r.EncodeBool(bool(yy24)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy27 := &x.Template - yy27.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy29 := &x.Template - yy29.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "parallelism": - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - case "completions": - if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) - } - } - case "activeDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_v1.LabelSelector) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "manualSelector": - if r.TryDecodeAsNil() { - if x.ManualSelector != nil { - x.ManualSelector = nil - } - } else { - if x.ManualSelector == nil { - x.ManualSelector = new(bool) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*bool)(x.ManualSelector)) = r.DecodeBool() - } - } - case "template": 
- if r.TryDecodeAsNil() { - x.Template = pkg3_v1.PodTemplateSpec{} - } else { - yyv14 := &x.Template - yyv14.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj15 int - var yyb15 bool - var yyhl15 bool = l >= 0 - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_v1.LabelSelector) - } - yym23 := z.DecBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ManualSelector != nil { - x.ManualSelector = nil - } - } else { - if x.ManualSelector == nil { - x.ManualSelector = new(bool) - } - yym25 := z.DecBinary() - _ = yym25 - if false { - } else { - *((*bool)(x.ManualSelector)) = r.DecodeBool() - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg3_v1.PodTemplateSpec{} - } else { - yyv26 := &x.Template - yyv26.CodecDecodeSelf(d) - } - for { - yyj15++ - if yyhl15 { - yyb15 = yyj15 
> l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj15-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Conditions) != 0 - yyq2[1] = x.StartTime != nil - yyq2[2] = x.CompletionTime != nil - yyq2[3] = x.Active != 0 - yyq2[4] = x.Succeeded != 0 - yyq2[5] = x.Failed != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.StartTime == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym7 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartTime == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym8 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym10 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() 
- _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym11 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("active")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("succeeded")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv4 := &x.Conditions - yym5 := 
z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv4), d) - } - } - case "startTime": - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_v1.Time) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym7 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - case "completionTime": - if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } - } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_v1.Time) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym9 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) - } else { - z.DecFallback(x.CompletionTime, false) - } - } - case "active": - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - yyv10 := &x.Active - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(yyv10)) = int32(r.DecodeInt(32)) - } - } - case "succeeded": - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - yyv12 := &x.Succeeded - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int32)(yyv12)) = int32(r.DecodeInt(32)) - } - } - case "failed": - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - yyv14 := &x.Failed - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*int32)(yyv14)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv17 := &x.Conditions - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv17), d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_v1.Time) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym20 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym20 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } - } else { - if x.CompletionTime == nil { - 
x.CompletionTime = new(pkg1_v1.Time) - } - yym22 := z.DecBinary() - _ = yym22 - if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym22 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym22 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) - } else { - z.DecFallback(x.CompletionTime, false) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - yyv23 := &x.Active - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*int32)(yyv23)) = int32(r.DecodeInt(32)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - yyv25 := &x.Succeeded - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*int32)(yyv25)) = int32(r.DecodeInt(32)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - yyv27 := &x.Failed - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*int32)(yyv27)) = int32(r.DecodeInt(32)) - } - } - for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj16-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - 
x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf7 := &x.Status - yysf7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yysf8 := &x.Status - yysf8.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastProbeTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastProbeTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } 
else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_v1.Time{} - } else { - yyv6 := &x.LastProbeTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_v1.Time{} - } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv10 := &x.Reason - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv12 := &x.Message - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv15 := 
&x.Type - yyv15.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv16 := &x.Status - yyv16.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_v1.Time{} - } else { - yyv17 := &x.LastProbeTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_v1.Time{} - } else { - yyv19 := &x.LastTransitionTime - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(yyv19) { - } else if yym20 { - z.DecBinaryUnmarshal(yyv19) - } else if !yym20 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv19) - } else { - z.DecFallback(yyv19, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv21 := &x.Reason - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv23 := &x.Message - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*string)(yyv23)) = r.DecodeString() - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Job{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 
- yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 880) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Job, yyrl1) - } - } else { - yyv1 = make([]Job, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Job{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Job{}) // var yyz1 Job - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Job{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []JobCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]JobCondition, yyrl1) - } - } else { - yyv1 = make([]JobCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, JobCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - 
- } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []JobCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/types_swagger_doc_generated.go deleted file mode 100644 index 3d224e703..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_Job = map[string]string{ - "": "Job represents the configuration of a single job.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", -} - -func (Job) SwaggerDoc() map[string]string { - return map_Job -} - -var map_JobCondition = map[string]string{ - "": "JobCondition describes current state of a job.", - "type": "Type of job condition, Complete or Failed.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastProbeTime": "Last time the condition was checked.", - "lastTransitionTime": "Last time the condition transit from one status to another.", - "reason": "(brief) reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (JobCondition) SwaggerDoc() map[string]string { - return map_JobCondition -} - -var map_JobList = map[string]string{ - "": "JobList is a collection of jobs.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of Job.", -} - -func (JobList) SwaggerDoc() map[string]string { - return map_JobList -} - -var map_JobSpec = map[string]string{ - "": "JobSpec describes how the job execution will look like.", - "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. 
The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://kubernetes.io/docs/user-guide/jobs", - "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://kubernetes.io/docs/user-guide/jobs", - "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", - "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md", - "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://kubernetes.io/docs/user-guide/jobs", -} - -func (JobSpec) SwaggerDoc() map[string]string { - return map_JobSpec -} - -var map_JobStatus = map[string]string{ - "": "JobStatus represents the current state of a Job.", - "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://kubernetes.io/docs/user-guide/jobs", - "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", - "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", - "active": "Active is the number of actively running pods.", - "succeeded": "Succeeded is the number of pods which reached Phase Succeeded.", - "failed": "Failed is the number of pods which reached Phase Failed.", -} - -func (JobStatus) SwaggerDoc() map[string]string { - return map_JobStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.conversion.go deleted file mode 100644 index c7ee0a35b..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.conversion.go +++ /dev/null @@ -1,202 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1 - -import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/client-go/pkg/api" - api_v1 "k8s.io/client-go/pkg/api/v1" - batch "k8s.io/client-go/pkg/apis/batch" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1_Job_To_batch_Job, - Convert_batch_Job_To_v1_Job, - Convert_v1_JobCondition_To_batch_JobCondition, - Convert_batch_JobCondition_To_v1_JobCondition, - Convert_v1_JobList_To_batch_JobList, - Convert_batch_JobList_To_v1_JobList, - Convert_v1_JobSpec_To_batch_JobSpec, - Convert_batch_JobSpec_To_v1_JobSpec, - Convert_v1_JobStatus_To_batch_JobStatus, - Convert_batch_JobStatus_To_v1_JobStatus, - ) -} - -func autoConvert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { - return autoConvert_v1_Job_To_batch_Job(in, out, s) -} - -func autoConvert_batch_Job_To_v1_Job(in *batch.Job, out *Job, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_batch_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_batch_JobStatus_To_v1_JobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_batch_Job_To_v1_Job(in *batch.Job, out *Job, s conversion.Scope) error { - return autoConvert_batch_Job_To_v1_Job(in, out, s) -} - -func autoConvert_v1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { - out.Type = batch.JobConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { - return autoConvert_v1_JobCondition_To_batch_JobCondition(in, out, s) -} - -func autoConvert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { - out.Type = JobConditionType(in.Type) - out.Status = api_v1.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) 
error { - return autoConvert_batch_JobCondition_To_v1_JobCondition(in, out, s) -} - -func autoConvert_v1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]batch.Job, len(*in)) - for i := range *in { - if err := Convert_v1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { - return autoConvert_v1_JobList_To_batch_JobList(in, out, s) -} - -func autoConvert_batch_JobList_To_v1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Job, len(*in)) - for i := range *in { - if err := Convert_batch_Job_To_v1_Job(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]Job, 0) - } - return nil -} - -func Convert_batch_JobList_To_v1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { - return autoConvert_batch_JobList_To_v1_JobList(in, out, s) -} - -func autoConvert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { - out.Parallelism = (*int32)(unsafe.Pointer(in.Parallelism)) - out.Completions = (*int32)(unsafe.Pointer(in.Completions)) - out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) - out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) - out.ManualSelector = (*bool)(unsafe.Pointer(in.ManualSelector)) - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { - out.Parallelism = (*int32)(unsafe.Pointer(in.Parallelism)) - out.Completions = (*int32)(unsafe.Pointer(in.Completions)) - out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) - out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) - out.ManualSelector = (*bool)(unsafe.Pointer(in.ManualSelector)) - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { - out.Conditions = *(*[]batch.JobCondition)(unsafe.Pointer(&in.Conditions)) - out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) - out.CompletionTime = (*meta_v1.Time)(unsafe.Pointer(in.CompletionTime)) - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed - return nil -} - -func Convert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { - return autoConvert_v1_JobStatus_To_batch_JobStatus(in, out, s) -} - -func autoConvert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { - out.Conditions = *(*[]JobCondition)(unsafe.Pointer(&in.Conditions)) - out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) - out.CompletionTime = (*meta_v1.Time)(unsafe.Pointer(in.CompletionTime)) - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed - return nil -} - -func Convert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *JobStatus, s 
conversion.Scope) error { - return autoConvert_batch_JobStatus_To_v1_JobStatus(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.deepcopy.go deleted file mode 100644 index 5e31d5964..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,162 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1 - -import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - api_v1 "k8s.io/client-go/pkg/api/v1" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Job, InType: reflect.TypeOf(&Job{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_JobCondition, InType: reflect.TypeOf(&JobCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_JobList, InType: reflect.TypeOf(&JobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_JobSpec, InType: reflect.TypeOf(&JobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_JobStatus, InType: reflect.TypeOf(&JobStatus{})}, - ) -} - -func DeepCopy_v1_Job(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Job) - out := out.(*Job) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_JobSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_JobStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_JobCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobCondition) - out := out.(*JobCondition) - *out = *in - out.LastProbeTime = in.LastProbeTime.DeepCopy() - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - return nil - } -} - -func DeepCopy_v1_JobList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobList) - out := out.(*JobList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Job, len(*in)) - for i := range *in { - if err := DeepCopy_v1_Job(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1_JobSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobSpec) - out := out.(*JobSpec) - *out = *in - if in.Parallelism != nil { - in, out := &in.Parallelism, &out.Parallelism - *out = new(int32) - **out = **in - } - if in.Completions 
!= nil { - in, out := &in.Completions, &out.Completions - *out = new(int32) - **out = **in - } - if in.ActiveDeadlineSeconds != nil { - in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*meta_v1.LabelSelector) - } - } - if in.ManualSelector != nil { - in, out := &in.ManualSelector, &out.ManualSelector - *out = new(bool) - **out = **in - } - if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1_JobStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobStatus) - out := out.(*JobStatus) - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]JobCondition, len(*in)) - for i := range *in { - if err := DeepCopy_v1_JobCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = new(meta_v1.Time) - **out = (*in).DeepCopy() - } - if in.CompletionTime != nil { - in, out := &in.CompletionTime, &out.CompletionTime - *out = new(meta_v1.Time) - **out = (*in).DeepCopy() - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.defaults.go deleted file mode 100644 index 417024f62..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.defaults.go +++ /dev/null @@ -1,176 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by defaulter-gen. Do not edit it manually! - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" - api_v1 "k8s.io/client-go/pkg/api/v1" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. 
-func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&Job{}, func(obj interface{}) { SetObjectDefaults_Job(obj.(*Job)) }) - scheme.AddTypeDefaultingFunc(&JobList{}, func(obj interface{}) { SetObjectDefaults_JobList(obj.(*JobList)) }) - return nil -} - -func SetObjectDefaults_Job(in *Job) { - SetDefaults_Job(in) - api_v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) - for i := range in.Spec.Template.Spec.Volumes { - a := &in.Spec.Template.Spec.Volumes[i] - api_v1.SetDefaults_Volume(a) - if a.VolumeSource.Secret != nil { - api_v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - api_v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - api_v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - api_v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - api_v1.SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - api_v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - api_v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - if a.VolumeSource.Projected != nil { - api_v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) - for j := range a.VolumeSource.Projected.Sources { - b := &a.VolumeSource.Projected.Sources[j] - if b.DownwardAPI != nil { - for k := range b.DownwardAPI.Items { - c := &b.DownwardAPI.Items[k] - if c.FieldRef != nil { - api_v1.SetDefaults_ObjectFieldSelector(c.FieldRef) - } - } - } - } - } - if a.VolumeSource.ScaleIO != nil { - api_v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) - } - } - for i := range in.Spec.Template.Spec.InitContainers { - a := &in.Spec.Template.Spec.InitContainers[i] - api_v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - api_v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - api_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - api_v1.SetDefaults_ResourceList(&a.Resources.Limits) - api_v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - api_v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - api_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - api_v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - api_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Spec.Template.Spec.Containers { - a := &in.Spec.Template.Spec.Containers[i] - api_v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - api_v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - api_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - api_v1.SetDefaults_ResourceList(&a.Resources.Limits) - 
api_v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - api_v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - api_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - api_v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - api_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } -} - -func SetObjectDefaults_JobList(in *JobList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Job(a) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/conversion.go deleted file mode 100644 index 2393fdca9..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/conversion.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2alpha1 - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) error { - var err error - // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. - for _, k := range []string{"Job", "JobTemplate", "CronJob"} { - kind := k // don't close over range variables - err = scheme.AddFieldLabelConversionFunc("batch/v2alpha1", kind, - func(label, value string) (string, string, error) { - switch label { - case "metadata.name", "metadata.namespace", "status.successful": - return label, value, nil - default: - return "", "", fmt.Errorf("field label %q not supported for %q", label, kind) - } - }) - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/doc.go deleted file mode 100644 index a9fe60b1c..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v2alpha1 diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.generated.go deleted file mode 100644 index 8dd93175f..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.generated.go +++ /dev/null @@ -1,2525 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v2alpha1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg5_resource "k8s.io/apimachinery/pkg/api/resource" - pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkg2_types "k8s.io/apimachinery/pkg/types" - pkg6_intstr "k8s.io/apimachinery/pkg/util/intstr" - pkg4_v1 "k8s.io/client-go/pkg/api/v1" - pkg3_v1 "k8s.io/client-go/pkg/apis/batch/v1" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg5_resource.Quantity - var v1 pkg1_v1.TypeMeta - var v2 pkg2_types.UID - var v3 pkg6_intstr.IntOrString - var v4 pkg4_v1.PodTemplateSpec - var v5 pkg3_v1.JobSpec - var v6 time.Time - _, _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5, v6 - } -} - -func (x *JobTemplate) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Template - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Template - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else 
{ - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobTemplate) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = JobTemplateSpec{} - } else { - yyv10 := &x.Template - yyv10.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv12 := &x.Kind - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv14 := &x.APIVersion - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - 
} - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv16 := &x.ObjectMeta - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(yyv16) { - } else { - z.DecFallback(yyv16, false) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = JobTemplateSpec{} - } else { - yyv18 := &x.Template - yyv18.CodecDecodeSelf(d) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = pkg3_v1.JobSpec{} - } else { - yyv6 := &x.Spec - yyv6.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = pkg3_v1.JobSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CronJob) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { 
- r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CronJob) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x 
*CronJob) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = CronJobSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = CronJobStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CronJob) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = CronJobSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - 
yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = CronJobStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CronJobList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceCronJob(([]CronJob)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) 
- r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceCronJob(([]CronJob)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CronJobList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CronJobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceCronJob((*[]CronJob)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CronJobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = 
r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceCronJob((*[]CronJob)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CronJobSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.StartingDeadlineSeconds != nil - yyq2[2] = x.ConcurrencyPolicy != "" - yyq2[3] = x.Suspend != nil - yyq2[5] = x.SuccessfulJobsHistoryLimit != nil - yyq2[6] = x.FailedJobsHistoryLimit != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("schedule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.StartingDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy7 := *x.StartingDeadlineSeconds - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(yy7)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startingDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartingDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy9 := 
*x.StartingDeadlineSeconds - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - x.ConcurrencyPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrencyPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.ConcurrencyPolicy.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Suspend == nil { - r.EncodeNil() - } else { - yy15 := *x.Suspend - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeBool(bool(yy15)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("suspend")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Suspend == nil { - r.EncodeNil() - } else { - yy17 := *x.Suspend - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeBool(bool(yy17)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy20 := &x.JobTemplate - yy20.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("jobTemplate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.JobTemplate - yy22.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.SuccessfulJobsHistoryLimit == nil { - r.EncodeNil() - } else { - yy25 := *x.SuccessfulJobsHistoryLimit - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeInt(int64(yy25)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("successfulJobsHistoryLimit")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SuccessfulJobsHistoryLimit == nil { - r.EncodeNil() - } else { - yy27 := *x.SuccessfulJobsHistoryLimit - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeInt(int64(yy27)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.FailedJobsHistoryLimit == nil { - r.EncodeNil() - } else { - yy30 := *x.FailedJobsHistoryLimit - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - r.EncodeInt(int64(yy30)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failedJobsHistoryLimit")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FailedJobsHistoryLimit == nil { - r.EncodeNil() - } else { - yy32 := *x.FailedJobsHistoryLimit - yym33 := z.EncBinary() - _ = yym33 - if false { - } else { - r.EncodeInt(int64(yy32)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CronJobSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { 
- } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CronJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "schedule": - if r.TryDecodeAsNil() { - x.Schedule = "" - } else { - yyv4 := &x.Schedule - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "startingDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.StartingDeadlineSeconds != nil { - x.StartingDeadlineSeconds = nil - } - } else { - if x.StartingDeadlineSeconds == nil { - x.StartingDeadlineSeconds = new(int64) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "concurrencyPolicy": - if r.TryDecodeAsNil() { - x.ConcurrencyPolicy = "" - } else { - yyv8 := &x.ConcurrencyPolicy - yyv8.CodecDecodeSelf(d) - } - case "suspend": - if r.TryDecodeAsNil() { - if x.Suspend != nil { - x.Suspend = nil - } - } else { - if x.Suspend == nil { - x.Suspend = new(bool) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*bool)(x.Suspend)) = r.DecodeBool() - } - } - case "jobTemplate": - if r.TryDecodeAsNil() { - x.JobTemplate = JobTemplateSpec{} - } else { - yyv11 := &x.JobTemplate - yyv11.CodecDecodeSelf(d) - } - case "successfulJobsHistoryLimit": - if r.TryDecodeAsNil() { - if x.SuccessfulJobsHistoryLimit != nil { - x.SuccessfulJobsHistoryLimit = nil - } - } else { - if x.SuccessfulJobsHistoryLimit == nil { - x.SuccessfulJobsHistoryLimit = new(int32) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int32)(x.SuccessfulJobsHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - case "failedJobsHistoryLimit": - if r.TryDecodeAsNil() { - if x.FailedJobsHistoryLimit != nil { - x.FailedJobsHistoryLimit = nil - } - } else { - if x.FailedJobsHistoryLimit == nil { - x.FailedJobsHistoryLimit = new(int32) - } - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*int32)(x.FailedJobsHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Schedule = "" - } else { - yyv17 := &x.Schedule - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartingDeadlineSeconds != nil { - x.StartingDeadlineSeconds = nil - } - } else { - if x.StartingDeadlineSeconds == nil { - x.StartingDeadlineSeconds = new(int64) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrencyPolicy = "" - } else { - yyv21 := &x.ConcurrencyPolicy - yyv21.CodecDecodeSelf(d) - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Suspend != nil { - x.Suspend = nil - } - } else { - if x.Suspend == nil { - x.Suspend = new(bool) - } - yym23 := z.DecBinary() - _ = yym23 - if false { - } else { - *((*bool)(x.Suspend)) = r.DecodeBool() - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.JobTemplate = JobTemplateSpec{} - } else { - yyv24 := &x.JobTemplate - yyv24.CodecDecodeSelf(d) - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SuccessfulJobsHistoryLimit != nil { - x.SuccessfulJobsHistoryLimit = nil - } - } else { - if x.SuccessfulJobsHistoryLimit == nil { - x.SuccessfulJobsHistoryLimit = new(int32) - } - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*int32)(x.SuccessfulJobsHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FailedJobsHistoryLimit != nil { - x.FailedJobsHistoryLimit = nil - } - } else { - if x.FailedJobsHistoryLimit == nil { - x.FailedJobsHistoryLimit = new(int32) - } - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*int32)(x.FailedJobsHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj16-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ConcurrencyPolicy) 
CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ConcurrencyPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *CronJobStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Active) != 0 - yyq2[1] = x.LastScheduleTime != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Active == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSlicev1_ObjectReference(([]pkg4_v1.ObjectReference)(x.Active), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("active")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Active == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSlicev1_ObjectReference(([]pkg4_v1.ObjectReference)(x.Active), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.LastScheduleTime == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { - } else if yym7 { - z.EncBinaryMarshal(x.LastScheduleTime) - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScheduleTime) - } else { - z.EncFallback(x.LastScheduleTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastScheduleTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LastScheduleTime == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { - } else if yym8 { - z.EncBinaryMarshal(x.LastScheduleTime) - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScheduleTime) - } else { - z.EncFallback(x.LastScheduleTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CronJobStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := 
r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CronJobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "active": - if r.TryDecodeAsNil() { - x.Active = nil - } else { - yyv4 := &x.Active - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSlicev1_ObjectReference((*[]pkg4_v1.ObjectReference)(yyv4), d) - } - } - case "lastScheduleTime": - if r.TryDecodeAsNil() { - if x.LastScheduleTime != nil { - x.LastScheduleTime = nil - } - } else { - if x.LastScheduleTime == nil { - x.LastScheduleTime = new(pkg1_v1.Time) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { - } else if yym7 { - z.DecBinaryUnmarshal(x.LastScheduleTime) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScheduleTime) - } else { - z.DecFallback(x.LastScheduleTime, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CronJobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Active = nil - } else { - yyv9 := &x.Active - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSlicev1_ObjectReference((*[]pkg4_v1.ObjectReference)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LastScheduleTime != nil { - x.LastScheduleTime = nil - } - } else { - if x.LastScheduleTime == nil { - x.LastScheduleTime = new(pkg1_v1.Time) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { - } else if yym12 { - z.DecBinaryUnmarshal(x.LastScheduleTime) - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScheduleTime) - } else { - z.DecFallback(x.LastScheduleTime, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceCronJob(v []CronJob, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCronJob(v *[]CronJob, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []CronJob{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1144) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]CronJob, yyrl1) - } - } else { - yyv1 = make([]CronJob, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CronJob{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, CronJob{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CronJob{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, CronJob{}) // var yyz1 CronJob - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = CronJob{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []CronJob{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicev1_ObjectReference(v []pkg4_v1.ObjectReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicev1_ObjectReference(v *[]pkg4_v1.ObjectReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []pkg4_v1.ObjectReference{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, 
z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]pkg4_v1.ObjectReference, yyrl1) - } - } else { - yyv1 = make([]pkg4_v1.ObjectReference, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg4_v1.ObjectReference{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, pkg4_v1.ObjectReference{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg4_v1.ObjectReference{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, pkg4_v1.ObjectReference{}) // var yyz1 pkg4_v1.ObjectReference - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg4_v1.ObjectReference{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg4_v1.ObjectReference{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.conversion.go deleted file mode 100644 index b268575cd..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,227 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v2alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/client-go/pkg/api" - api_v1 "k8s.io/client-go/pkg/api/v1" - batch "k8s.io/client-go/pkg/apis/batch" - batch_v1 "k8s.io/client-go/pkg/apis/batch/v1" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v2alpha1_CronJob_To_batch_CronJob, - Convert_batch_CronJob_To_v2alpha1_CronJob, - Convert_v2alpha1_CronJobList_To_batch_CronJobList, - Convert_batch_CronJobList_To_v2alpha1_CronJobList, - Convert_v2alpha1_CronJobSpec_To_batch_CronJobSpec, - Convert_batch_CronJobSpec_To_v2alpha1_CronJobSpec, - Convert_v2alpha1_CronJobStatus_To_batch_CronJobStatus, - Convert_batch_CronJobStatus_To_v2alpha1_CronJobStatus, - Convert_v2alpha1_JobTemplate_To_batch_JobTemplate, - Convert_batch_JobTemplate_To_v2alpha1_JobTemplate, - Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec, - Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec, - ) -} - -func autoConvert_v2alpha1_CronJob_To_batch_CronJob(in *CronJob, out *batch.CronJob, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v2alpha1_CronJobSpec_To_batch_CronJobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v2alpha1_CronJobStatus_To_batch_CronJobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v2alpha1_CronJob_To_batch_CronJob(in *CronJob, out *batch.CronJob, s conversion.Scope) error { - return autoConvert_v2alpha1_CronJob_To_batch_CronJob(in, out, s) -} - -func autoConvert_batch_CronJob_To_v2alpha1_CronJob(in *batch.CronJob, out *CronJob, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_batch_CronJobStatus_To_v2alpha1_CronJobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_batch_CronJob_To_v2alpha1_CronJob(in *batch.CronJob, out *CronJob, s conversion.Scope) error { - return autoConvert_batch_CronJob_To_v2alpha1_CronJob(in, out, s) -} - -func autoConvert_v2alpha1_CronJobList_To_batch_CronJobList(in *CronJobList, out *batch.CronJobList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]batch.CronJob, len(*in)) - for i := range *in { - if err := Convert_v2alpha1_CronJob_To_batch_CronJob(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v2alpha1_CronJobList_To_batch_CronJobList(in *CronJobList, out *batch.CronJobList, s conversion.Scope) error { - return autoConvert_v2alpha1_CronJobList_To_batch_CronJobList(in, out, s) -} - -func autoConvert_batch_CronJobList_To_v2alpha1_CronJobList(in *batch.CronJobList, out *CronJobList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CronJob, len(*in)) - for i := range *in { - if err := Convert_batch_CronJob_To_v2alpha1_CronJob(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]CronJob, 0) - } - return nil -} - -func Convert_batch_CronJobList_To_v2alpha1_CronJobList(in *batch.CronJobList, out *CronJobList, s conversion.Scope) error { - return autoConvert_batch_CronJobList_To_v2alpha1_CronJobList(in, out, s) -} - -func autoConvert_v2alpha1_CronJobSpec_To_batch_CronJobSpec(in *CronJobSpec, out *batch.CronJobSpec, s conversion.Scope) error { - out.Schedule = in.Schedule - out.StartingDeadlineSeconds = (*int64)(unsafe.Pointer(in.StartingDeadlineSeconds)) - out.ConcurrencyPolicy = 
batch.ConcurrencyPolicy(in.ConcurrencyPolicy) - out.Suspend = (*bool)(unsafe.Pointer(in.Suspend)) - if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil { - return err - } - out.SuccessfulJobsHistoryLimit = (*int32)(unsafe.Pointer(in.SuccessfulJobsHistoryLimit)) - out.FailedJobsHistoryLimit = (*int32)(unsafe.Pointer(in.FailedJobsHistoryLimit)) - return nil -} - -func Convert_v2alpha1_CronJobSpec_To_batch_CronJobSpec(in *CronJobSpec, out *batch.CronJobSpec, s conversion.Scope) error { - return autoConvert_v2alpha1_CronJobSpec_To_batch_CronJobSpec(in, out, s) -} - -func autoConvert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(in *batch.CronJobSpec, out *CronJobSpec, s conversion.Scope) error { - out.Schedule = in.Schedule - out.StartingDeadlineSeconds = (*int64)(unsafe.Pointer(in.StartingDeadlineSeconds)) - out.ConcurrencyPolicy = ConcurrencyPolicy(in.ConcurrencyPolicy) - out.Suspend = (*bool)(unsafe.Pointer(in.Suspend)) - if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil { - return err - } - out.SuccessfulJobsHistoryLimit = (*int32)(unsafe.Pointer(in.SuccessfulJobsHistoryLimit)) - out.FailedJobsHistoryLimit = (*int32)(unsafe.Pointer(in.FailedJobsHistoryLimit)) - return nil -} - -func Convert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(in *batch.CronJobSpec, out *CronJobSpec, s conversion.Scope) error { - return autoConvert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(in, out, s) -} - -func autoConvert_v2alpha1_CronJobStatus_To_batch_CronJobStatus(in *CronJobStatus, out *batch.CronJobStatus, s conversion.Scope) error { - out.Active = *(*[]api.ObjectReference)(unsafe.Pointer(&in.Active)) - out.LastScheduleTime = (*v1.Time)(unsafe.Pointer(in.LastScheduleTime)) - return nil -} - -func Convert_v2alpha1_CronJobStatus_To_batch_CronJobStatus(in *CronJobStatus, out *batch.CronJobStatus, s conversion.Scope) error { - return autoConvert_v2alpha1_CronJobStatus_To_batch_CronJobStatus(in, out, s) -} - -func autoConvert_batch_CronJobStatus_To_v2alpha1_CronJobStatus(in *batch.CronJobStatus, out *CronJobStatus, s conversion.Scope) error { - out.Active = *(*[]api_v1.ObjectReference)(unsafe.Pointer(&in.Active)) - out.LastScheduleTime = (*v1.Time)(unsafe.Pointer(in.LastScheduleTime)) - return nil -} - -func Convert_batch_CronJobStatus_To_v2alpha1_CronJobStatus(in *batch.CronJobStatus, out *CronJobStatus, s conversion.Scope) error { - return autoConvert_batch_CronJobStatus_To_v2alpha1_CronJobStatus(in, out, s) -} - -func autoConvert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *batch.JobTemplate, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *batch.JobTemplate, s conversion.Scope) error { - return autoConvert_v2alpha1_JobTemplate_To_batch_JobTemplate(in, out, s) -} - -func autoConvert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, out *JobTemplate, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, out *JobTemplate, s conversion.Scope) error { - return 
autoConvert_batch_JobTemplate_To_v2alpha1_JobTemplate(in, out, s) -} - -func autoConvert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := batch_v1.Convert_v1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error { - return autoConvert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in, out, s) -} - -func autoConvert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in *batch.JobTemplateSpec, out *JobTemplateSpec, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := batch_v1.Convert_batch_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in *batch.JobTemplateSpec, out *JobTemplateSpec, s conversion.Scope) error { - return autoConvert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 8c9010d9f..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,170 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v2alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - api_v1 "k8s.io/client-go/pkg/api/v1" - batch_v1 "k8s.io/client-go/pkg/apis/batch/v1" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CronJob, InType: reflect.TypeOf(&CronJob{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CronJobList, InType: reflect.TypeOf(&CronJobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CronJobSpec, InType: reflect.TypeOf(&CronJobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CronJobStatus, InType: reflect.TypeOf(&CronJobStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_JobTemplate, InType: reflect.TypeOf(&JobTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_JobTemplateSpec, InType: reflect.TypeOf(&JobTemplateSpec{})}, - ) -} - -func DeepCopy_v2alpha1_CronJob(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CronJob) - out := out.(*CronJob) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v2alpha1_CronJobSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v2alpha1_CronJobStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v2alpha1_CronJobList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CronJobList) - out := out.(*CronJobList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CronJob, len(*in)) - for i := range *in { - if err := DeepCopy_v2alpha1_CronJob(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v2alpha1_CronJobSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CronJobSpec) - out := out.(*CronJobSpec) - *out = *in - if in.StartingDeadlineSeconds != nil { - in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds - *out = new(int64) - **out = **in - } - if in.Suspend != nil { - in, out := &in.Suspend, &out.Suspend - *out = new(bool) - **out = **in - } - if err := DeepCopy_v2alpha1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, c); err != nil { - return err - } - if in.SuccessfulJobsHistoryLimit != nil { - in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit - *out = new(int32) - **out = **in - } - if in.FailedJobsHistoryLimit != nil { - in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_v2alpha1_CronJobStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CronJobStatus) - out := out.(*CronJobStatus) - *out = *in - if in.Active != nil { - in, out := &in.Active, &out.Active - *out = make([]api_v1.ObjectReference, len(*in)) - copy(*out, *in) - } - if in.LastScheduleTime != nil { - in, out := &in.LastScheduleTime, &out.LastScheduleTime - *out = new(v1.Time) - **out = (*in).DeepCopy() - } - return nil - } -} - -func DeepCopy_v2alpha1_JobTemplate(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobTemplate) - out := out.(*JobTemplate) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v2alpha1_JobTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v2alpha1_JobTemplateSpec(in interface{}, out interface{}, c 
*conversion.Cloner) error { - { - in := in.(*JobTemplateSpec) - out := out.(*JobTemplateSpec) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := batch_v1.DeepCopy_v1_JobSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.defaults.go deleted file mode 100644 index 1c0bd0ee0..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.defaults.go +++ /dev/null @@ -1,310 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by defaulter-gen. Do not edit it manually! - -package v2alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/client-go/pkg/api/v1" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&CronJob{}, func(obj interface{}) { SetObjectDefaults_CronJob(obj.(*CronJob)) }) - scheme.AddTypeDefaultingFunc(&CronJobList{}, func(obj interface{}) { SetObjectDefaults_CronJobList(obj.(*CronJobList)) }) - scheme.AddTypeDefaultingFunc(&JobTemplate{}, func(obj interface{}) { SetObjectDefaults_JobTemplate(obj.(*JobTemplate)) }) - return nil -} - -func SetObjectDefaults_CronJob(in *CronJob) { - SetDefaults_CronJob(in) - v1.SetDefaults_PodSpec(&in.Spec.JobTemplate.Spec.Template.Spec) - for i := range in.Spec.JobTemplate.Spec.Template.Spec.Volumes { - a := &in.Spec.JobTemplate.Spec.Template.Spec.Volumes[i] - v1.SetDefaults_Volume(a) - if a.VolumeSource.Secret != nil { - v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - if a.VolumeSource.Projected != nil { - v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) - for j := range a.VolumeSource.Projected.Sources { - b := &a.VolumeSource.Projected.Sources[j] - if b.DownwardAPI != nil { - for k := range b.DownwardAPI.Items { - c := &b.DownwardAPI.Items[k] - if c.FieldRef != nil { - 
v1.SetDefaults_ObjectFieldSelector(c.FieldRef) - } - } - } - } - } - if a.VolumeSource.ScaleIO != nil { - v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) - } - } - for i := range in.Spec.JobTemplate.Spec.Template.Spec.InitContainers { - a := &in.Spec.JobTemplate.Spec.Template.Spec.InitContainers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Spec.JobTemplate.Spec.Template.Spec.Containers { - a := &in.Spec.JobTemplate.Spec.Template.Spec.Containers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } -} - -func SetObjectDefaults_CronJobList(in *CronJobList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_CronJob(a) - } -} - -func SetObjectDefaults_JobTemplate(in *JobTemplate) { - v1.SetDefaults_PodSpec(&in.Template.Spec.Template.Spec) - for i := range in.Template.Spec.Template.Spec.Volumes { - a := &in.Template.Spec.Template.Spec.Volumes[i] - v1.SetDefaults_Volume(a) - if a.VolumeSource.Secret != nil { - v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - 
v1.SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - if a.VolumeSource.Projected != nil { - v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) - for j := range a.VolumeSource.Projected.Sources { - b := &a.VolumeSource.Projected.Sources[j] - if b.DownwardAPI != nil { - for k := range b.DownwardAPI.Items { - c := &b.DownwardAPI.Items[k] - if c.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(c.FieldRef) - } - } - } - } - } - if a.VolumeSource.ScaleIO != nil { - v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) - } - } - for i := range in.Template.Spec.Template.Spec.InitContainers { - a := &in.Template.Spec.Template.Spec.InitContainers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Template.Spec.Template.Spec.Containers { - a := &in.Template.Spec.Template.Spec.Containers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/batch/zz_generated.deepcopy.go deleted file mode 100644 index 6e75ecdc0..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/batch/zz_generated.deepcopy.go +++ /dev/null @@ -1,291 +0,0 @@ -// +build !ignore_autogenerated - -/* 
-Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package batch - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/client-go/pkg/api" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_CronJob, InType: reflect.TypeOf(&CronJob{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_CronJobList, InType: reflect.TypeOf(&CronJobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_CronJobSpec, InType: reflect.TypeOf(&CronJobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_CronJobStatus, InType: reflect.TypeOf(&CronJobStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_Job, InType: reflect.TypeOf(&Job{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobCondition, InType: reflect.TypeOf(&JobCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobList, InType: reflect.TypeOf(&JobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobSpec, InType: reflect.TypeOf(&JobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobStatus, InType: reflect.TypeOf(&JobStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobTemplate, InType: reflect.TypeOf(&JobTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobTemplateSpec, InType: reflect.TypeOf(&JobTemplateSpec{})}, - ) -} - -func DeepCopy_batch_CronJob(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CronJob) - out := out.(*CronJob) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_batch_CronJobSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_batch_CronJobStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_batch_CronJobList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CronJobList) - out := out.(*CronJobList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CronJob, len(*in)) - for i := range *in { - if err := DeepCopy_batch_CronJob(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_batch_CronJobSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CronJobSpec) - out := out.(*CronJobSpec) - *out = *in - if in.StartingDeadlineSeconds != nil { - in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds - *out = new(int64) - **out = **in - } 
- if in.Suspend != nil { - in, out := &in.Suspend, &out.Suspend - *out = new(bool) - **out = **in - } - if err := DeepCopy_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, c); err != nil { - return err - } - if in.SuccessfulJobsHistoryLimit != nil { - in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit - *out = new(int32) - **out = **in - } - if in.FailedJobsHistoryLimit != nil { - in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_batch_CronJobStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CronJobStatus) - out := out.(*CronJobStatus) - *out = *in - if in.Active != nil { - in, out := &in.Active, &out.Active - *out = make([]api.ObjectReference, len(*in)) - copy(*out, *in) - } - if in.LastScheduleTime != nil { - in, out := &in.LastScheduleTime, &out.LastScheduleTime - *out = new(v1.Time) - **out = (*in).DeepCopy() - } - return nil - } -} - -func DeepCopy_batch_Job(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Job) - out := out.(*Job) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_batch_JobSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_batch_JobStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_batch_JobCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobCondition) - out := out.(*JobCondition) - *out = *in - out.LastProbeTime = in.LastProbeTime.DeepCopy() - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - return nil - } -} - -func DeepCopy_batch_JobList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobList) - out := out.(*JobList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Job, len(*in)) - for i := range *in { - if err := DeepCopy_batch_Job(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_batch_JobSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobSpec) - out := out.(*JobSpec) - *out = *in - if in.Parallelism != nil { - in, out := &in.Parallelism, &out.Parallelism - *out = new(int32) - **out = **in - } - if in.Completions != nil { - in, out := &in.Completions, &out.Completions - *out = new(int32) - **out = **in - } - if in.ActiveDeadlineSeconds != nil { - in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - if in.ManualSelector != nil { - in, out := &in.ManualSelector, &out.ManualSelector - *out = new(bool) - **out = **in - } - if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_batch_JobStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobStatus) - out := out.(*JobStatus) - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]JobCondition, len(*in)) - for i := range *in { - if err := DeepCopy_batch_JobCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } 
- if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = new(v1.Time) - **out = (*in).DeepCopy() - } - if in.CompletionTime != nil { - in, out := &in.CompletionTime, &out.CompletionTime - *out = new(v1.Time) - **out = (*in).DeepCopy() - } - return nil - } -} - -func DeepCopy_batch_JobTemplate(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobTemplate) - out := out.(*JobTemplate) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_batch_JobTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_batch_JobTemplateSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*JobTemplateSpec) - out := out.(*JobTemplateSpec) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_batch_JobSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/OWNERS b/vendor/k8s.io/client-go/pkg/apis/certificates/OWNERS deleted file mode 100755 index c67bd1172..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/OWNERS +++ /dev/null @@ -1,14 +0,0 @@ -reviewers: -- thockin -- lavalamp -- smarterclayton -- deads2k -- caesarxuchao -- liggitt -- sttts -- timothysc -- dims -- errordeveloper -- mbohlool -- david-mcmahon -- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/helpers.go b/vendor/k8s.io/client-go/pkg/apis/certificates/helpers.go deleted file mode 100644 index 2608e4076..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/helpers.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package certificates - -import ( - "crypto/x509" - "encoding/pem" - "errors" -) - -// ParseCSR extracts the CSR from the API object and decodes it. -func ParseCSR(obj *CertificateSigningRequest) (*x509.CertificateRequest, error) { - // extract PEM from request object - pemBytes := obj.Spec.Request - block, _ := pem.Decode(pemBytes) - if block == nil || block.Type != "CERTIFICATE REQUEST" { - return nil, errors.New("PEM block type must be CERTIFICATE REQUEST") - } - csr, err := x509.ParseCertificateRequest(block.Bytes) - if err != nil { - return nil, err - } - return csr, nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/install/install.go b/vendor/k8s.io/client-go/pkg/apis/certificates/install/install.go deleted file mode 100644 index 8850e07aa..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/install/install.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the certificates API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/apis/certificates" - "k8s.io/client-go/pkg/apis/certificates/v1beta1" -) - -func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: certificates.GroupName, - VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, - ImportPrefix: "k8s.io/client-go/pkg/apis/certificates", - RootScopedKinds: sets.NewString("CertificateSigningRequest"), - AddInternalObjectsToScheme: certificates.AddToScheme, - }, - announced.VersionToSchemeFunc{ - v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, - }, - ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/register.go b/vendor/k8s.io/client-go/pkg/apis/certificates/register.go deleted file mode 100644 index f9d228d00..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/register.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package certificates - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// GroupName is the group name use in this package -const GroupName = "certificates.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// Adds the list of known types to api.Scheme. 
-func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &CertificateSigningRequest{}, - &CertificateSigningRequestList{}, - ) - return nil -} - -func (obj *CertificateSigningRequest) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } -func (obj *CertificateSigningRequestList) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/types.go b/vendor/k8s.io/client-go/pkg/apis/certificates/types.go deleted file mode 100644 index 4a7884a1e..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/types.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package certificates - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient=true -// +nonNamespaced=true - -// Describes a certificate signing request -type CertificateSigningRequest struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // The certificate request itself and any additional information. - // +optional - Spec CertificateSigningRequestSpec - - // Derived information about the request. - // +optional - Status CertificateSigningRequestStatus -} - -// This information is immutable after the request is created. Only the Request -// and Usages fields can be set on creation, other fields are derived by -// Kubernetes and cannot be modified by users. -type CertificateSigningRequestSpec struct { - // Base64-encoded PKCS#10 CSR data - Request []byte - - // usages specifies a set of usage contexts the key will be - // valid for. - // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 - Usages []KeyUsage - - // Information about the requesting user. - // See user.Info interface for details. - // +optional - Username string - // UID information about the requesting user. - // See user.Info interface for details. - // +optional - UID string - // Group information about the requesting user. - // See user.Info interface for details. - // +optional - Groups []string - // Extra information about the requesting user. - // See user.Info interface for details. - // +optional - Extra map[string]ExtraValue -} - -// ExtraValue masks the value so protobuf can generate -type ExtraValue []string - -type CertificateSigningRequestStatus struct { - // Conditions applied to the request, such as approval or denial. - // +optional - Conditions []CertificateSigningRequestCondition - - // If request was approved, the controller will place the issued certificate here. - // +optional - Certificate []byte -} - -type RequestConditionType string - -// These are the possible conditions for a certificate request. -const ( - CertificateApproved RequestConditionType = "Approved" - CertificateDenied RequestConditionType = "Denied" -) - -type CertificateSigningRequestCondition struct { - // request approval state, currently Approved or Denied. 
- Type RequestConditionType - // brief reason for the request state - // +optional - Reason string - // human readable message with details about the request state - // +optional - Message string - // timestamp for the last update to this condition - // +optional - LastUpdateTime metav1.Time -} - -type CertificateSigningRequestList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // +optional - Items []CertificateSigningRequest -} - -// KeyUsages specifies valid usage contexts for keys. -// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 -// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 -type KeyUsage string - -const ( - UsageSigning KeyUsage = "signing" - UsageDigitalSignature KeyUsage = "digital signature" - UsageContentCommittment KeyUsage = "content committment" - UsageKeyEncipherment KeyUsage = "key encipherment" - UsageKeyAgreement KeyUsage = "key agreement" - UsageDataEncipherment KeyUsage = "data encipherment" - UsageCertSign KeyUsage = "cert sign" - UsageCRLSign KeyUsage = "crl sign" - UsageEncipherOnly KeyUsage = "encipher only" - UsageDecipherOnly KeyUsage = "decipher only" - UsageAny KeyUsage = "any" - UsageServerAuth KeyUsage = "server auth" - UsageClientAuth KeyUsage = "client auth" - UsageCodeSigning KeyUsage = "code signing" - UsageEmailProtection KeyUsage = "email protection" - UsageSMIME KeyUsage = "s/mime" - UsageIPsecEndSystem KeyUsage = "ipsec end system" - UsageIPsecTunnel KeyUsage = "ipsec tunnel" - UsageIPsecUser KeyUsage = "ipsec user" - UsageTimestamping KeyUsage = "timestamping" - UsageOCSPSigning KeyUsage = "ocsp signing" - UsageMicrosoftSGC KeyUsage = "microsoft sgc" - UsageNetscapSGC KeyUsage = "netscape sgc" -) diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/conversion.go deleted file mode 100644 index b9cf0b016..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/conversion.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions here. Currently there are none. - - return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.String(), "CertificateSigningRequest", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }, - ) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/helpers.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/helpers.go deleted file mode 100644 index 1375063c1..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/helpers.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "crypto/x509" - "encoding/pem" - "errors" -) - -// ParseCSR extracts the CSR from the API object and decodes it. -func ParseCSR(obj *CertificateSigningRequest) (*x509.CertificateRequest, error) { - // extract PEM from request object - pemBytes := obj.Spec.Request - block, _ := pem.Decode(pemBytes) - if block == nil || block.Type != "CERTIFICATE REQUEST" { - return nil, errors.New("PEM block type must be CERTIFICATE REQUEST") - } - csr, err := x509.ParseCertificateRequest(block.Bytes) - if err != nil { - return nil, err - } - return csr, nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.generated.go deleted file mode 100644 index 50d908bd7..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.generated.go +++ /dev/null @@ -1,2624 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1beta1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkg2_types "k8s.io/apimachinery/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_v1.TypeMeta - var v1 pkg2_types.UID - var v2 time.Time - _, _, _ = v0, v1, v2 - } -} - -func (x *CertificateSigningRequest) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CertificateSigningRequest) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CertificateSigningRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = CertificateSigningRequestSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = CertificateSigningRequestStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CertificateSigningRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = 
r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = CertificateSigningRequestSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = CertificateSigningRequestStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CertificateSigningRequestSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.Usages) != 0 - yyq2[2] = x.Username != "" - yyq2[3] = x.UID != "" - yyq2[4] = len(x.Groups) != 0 - yyq2[5] = len(x.Extra) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Request == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("request")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Request == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Usages == nil { - r.EncodeNil() - } else { - yym7 := 
z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceKeyUsage(([]KeyUsage)(x.Usages), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("usages")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Usages == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceKeyUsage(([]KeyUsage)(x.Usages), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Username)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("username")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Username)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Groups == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("groups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.Extra == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("extra")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Extra == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CertificateSigningRequestSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else 
{ - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CertificateSigningRequestSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "request": - if r.TryDecodeAsNil() { - x.Request = nil - } else { - yyv4 := &x.Request - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *yyv4 = r.DecodeBytes(*(*[]byte)(yyv4), false, false) - } - } - case "usages": - if r.TryDecodeAsNil() { - x.Usages = nil - } else { - yyv6 := &x.Usages - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceKeyUsage((*[]KeyUsage)(yyv6), d) - } - } - case "username": - if r.TryDecodeAsNil() { - x.Username = "" - } else { - yyv8 := &x.Username - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - yyv10 := &x.UID - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "groups": - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv12 := &x.Groups - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecSliceStringX(yyv12, false, d) - } - } - case "extra": - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv14 := &x.Extra - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv14), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CertificateSigningRequestSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Request = nil - } else { - yyv17 := &x.Request - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *yyv17 = r.DecodeBytes(*(*[]byte)(yyv17), false, false) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Usages = nil - } else { - yyv19 := &x.Usages - yym20 := z.DecBinary() - _ = yym20 - 
if false { - } else { - h.decSliceKeyUsage((*[]KeyUsage)(yyv19), d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Username = "" - } else { - yyv21 := &x.Username - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - yyv23 := &x.UID - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*string)(yyv23)) = r.DecodeString() - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv25 := &x.Groups - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - z.F.DecSliceStringX(yyv25, false, d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv27 := &x.Extra - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - h.decMapstringExtraValue((*map[string]ExtraValue)(yyv27), d) - } - } - for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj16-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encExtraValue((ExtraValue)(x), e) - } - } -} - -func (x *ExtraValue) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decExtraValue((*ExtraValue)(x), d) - } -} - -func (x *CertificateSigningRequestStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Conditions) != 0 - yyq2[1] = len(x.Certificate) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Conditions == nil { - 
r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Certificate == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("certificate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Certificate == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CertificateSigningRequestStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CertificateSigningRequestStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv4 := &x.Conditions - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv4), d) - } - } - case "certificate": - if r.TryDecodeAsNil() { - x.Certificate = nil - } else { - yyv6 := &x.Certificate - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *yyv6 = r.DecodeBytes(*(*[]byte)(yyv6), false, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end 
for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CertificateSigningRequestStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv9 := &x.Conditions - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Certificate = nil - } else { - yyv11 := &x.Certificate - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *yyv11 = r.DecodeBytes(*(*[]byte)(yyv11), false, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x RequestConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *RequestConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *CertificateSigningRequestCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Reason != "" - yyq2[2] = x.Message != "" - yyq2[3] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy13 := &x.LastUpdateTime - yym14 := z.EncBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.EncExt(yy13) { - } else if yym14 { - z.EncBinaryMarshal(yy13) - } else if !yym14 && z.IsJSONHandle() { - z.EncJSONMarshal(yy13) - } else { - z.EncFallback(yy13) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy15 := &x.LastUpdateTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CertificateSigningRequestCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CertificateSigningRequestCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "reason": - 
if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv5 := &x.Reason - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*string)(yyv5)) = r.DecodeString() - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv7 := &x.Message - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*string)(yyv7)) = r.DecodeString() - } - } - case "lastUpdateTime": - if r.TryDecodeAsNil() { - x.LastUpdateTime = pkg1_v1.Time{} - } else { - yyv9 := &x.LastUpdateTime - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if yym10 { - z.DecBinaryUnmarshal(yyv9) - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CertificateSigningRequestCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv12 := &x.Type - yyv12.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv13 := &x.Reason - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv15 := &x.Message - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastUpdateTime = pkg1_v1.Time{} - } else { - yyv17 := &x.LastUpdateTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CertificateSigningRequestList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - 
yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CertificateSigningRequestList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CertificateSigningRequestList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CertificateSigningRequestList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() 
&& z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x KeyUsage) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *KeyUsage) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x codecSelfer1234) encSliceKeyUsage(v []KeyUsage, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceKeyUsage(v *[]KeyUsage, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []KeyUsage{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]KeyUsage, yyrl1) - } - } else { - yyv1 = make([]KeyUsage, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 KeyUsage - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []KeyUsage{} - yyc1 = true - } 
- } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk1)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv1 == nil { - r.EncodeNil() - } else { - yyv1.CodecEncodeSelf(e) - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyl1 := r.ReadMapStart() - yybh1 := z.DecBasicHandle() - if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) - yyv1 = make(map[string]ExtraValue, yyrl1) - *v = yyv1 - } - var yymk1 string - var yymv1 ExtraValue - var yymg1 bool - if yybh1.MapValueReset { - yymg1 = true - } - if yyl1 > 0 { - for yyj1 := 0; yyj1 < yyl1; yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yyv2 := &yymk1 - yym3 := z.DecBinary() - _ = yym3 - if false { - } else { - *((*string)(yyv2)) = r.DecodeString() - } - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv4 := &yymv1 - yyv4.CodecDecodeSelf(d) - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } else if yyl1 < 0 { - for yyj1 := 0; !r.CheckBreak(); yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yyv5 := &yymk1 - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*string)(yyv5)) = r.DecodeString() - } - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv7 := &yymv1 - yyv7.CodecDecodeSelf(d) - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } // else len==0: TODO: Should we clear map entries? 
- z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv1)) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]string, yyrl1) - } - } else { - yyv1 = make([]string, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv2 := &yyv1[yyj1] - yym3 := z.DecBinary() - _ = yym3 - if false { - } else { - *((*string)(yyv2)) = r.DecodeString() - } - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv4 := &yyv1[yyj1] - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 string - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv6 := &yyv1[yyj1] - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceCertificateSigningRequestCondition(v []CertificateSigningRequestCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCertificateSigningRequestCondition(v *[]CertificateSigningRequestCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []CertificateSigningRequestCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - 
yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]CertificateSigningRequestCondition, yyrl1) - } - } else { - yyv1 = make([]CertificateSigningRequestCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CertificateSigningRequestCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, CertificateSigningRequestCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CertificateSigningRequestCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, CertificateSigningRequestCondition{}) // var yyz1 CertificateSigningRequestCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = CertificateSigningRequestCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []CertificateSigningRequestCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceCertificateSigningRequest(v []CertificateSigningRequest, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCertificateSigningRequest(v *[]CertificateSigningRequest, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []CertificateSigningRequest{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 416) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]CertificateSigningRequest, yyrl1) - } - } else { - yyv1 = make([]CertificateSigningRequest, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CertificateSigningRequest{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, CertificateSigningRequest{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CertificateSigningRequest{} - } else { 
- yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, CertificateSigningRequest{}) // var yyz1 CertificateSigningRequest - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = CertificateSigningRequest{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []CertificateSigningRequest{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.conversion.go deleted file mode 100644 index 4c0e07380..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,179 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1beta1 - -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - certificates "k8s.io/client-go/pkg/apis/certificates" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest, - Convert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest, - Convert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition, - Convert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition, - Convert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList, - Convert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList, - Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec, - Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec, - Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus, - Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus, - ) -} - -func autoConvert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error { - return autoConvert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in, out, s) -} - -func autoConvert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *CertificateSigningRequest, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *CertificateSigningRequest, s conversion.Scope) error { - return autoConvert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(in, out, s) -} - -func autoConvert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error { - out.Type = certificates.RequestConditionType(in.Type) - out.Reason = in.Reason - out.Message = in.Message - out.LastUpdateTime = in.LastUpdateTime - return nil -} - -func Convert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error { - return 
autoConvert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in, out, s) -} - -func autoConvert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *CertificateSigningRequestCondition, s conversion.Scope) error { - out.Type = RequestConditionType(in.Type) - out.Reason = in.Reason - out.Message = in.Message - out.LastUpdateTime = in.LastUpdateTime - return nil -} - -func Convert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *CertificateSigningRequestCondition, s conversion.Scope) error { - return autoConvert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(in, out, s) -} - -func autoConvert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]certificates.CertificateSigningRequest)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error { - return autoConvert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in, out, s) -} - -func autoConvert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *CertificateSigningRequestList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]CertificateSigningRequest, 0) - } else { - out.Items = *(*[]CertificateSigningRequest)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *CertificateSigningRequestList, s conversion.Scope) error { - return autoConvert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(in, out, s) -} - -func autoConvert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error { - out.Request = *(*[]byte)(unsafe.Pointer(&in.Request)) - out.Usages = *(*[]certificates.KeyUsage)(unsafe.Pointer(&in.Usages)) - out.Username = in.Username - out.UID = in.UID - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - out.Extra = *(*map[string]certificates.ExtraValue)(unsafe.Pointer(&in.Extra)) - return nil -} - -func Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error { - return autoConvert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in, out, s) -} - -func autoConvert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(in *certificates.CertificateSigningRequestSpec, out *CertificateSigningRequestSpec, s conversion.Scope) error { - if in.Request == nil { - out.Request = make([]byte, 0) - } else { - out.Request = *(*[]byte)(unsafe.Pointer(&in.Request)) - } - out.Usages = 
*(*[]KeyUsage)(unsafe.Pointer(&in.Usages)) - out.Username = in.Username - out.UID = in.UID - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) - return nil -} - -func Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(in *certificates.CertificateSigningRequestSpec, out *CertificateSigningRequestSpec, s conversion.Scope) error { - return autoConvert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(in, out, s) -} - -func autoConvert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error { - out.Conditions = *(*[]certificates.CertificateSigningRequestCondition)(unsafe.Pointer(&in.Conditions)) - out.Certificate = *(*[]byte)(unsafe.Pointer(&in.Certificate)) - return nil -} - -func Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error { - return autoConvert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in, out, s) -} - -func autoConvert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *CertificateSigningRequestStatus, s conversion.Scope) error { - out.Conditions = *(*[]CertificateSigningRequestCondition)(unsafe.Pointer(&in.Conditions)) - out.Certificate = *(*[]byte)(unsafe.Pointer(&in.Certificate)) - return nil -} - -func Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *CertificateSigningRequestStatus, s conversion.Scope) error { - return autoConvert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 800cdee47..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,150 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequest, InType: reflect.TypeOf(&CertificateSigningRequest{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestCondition, InType: reflect.TypeOf(&CertificateSigningRequestCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestList, InType: reflect.TypeOf(&CertificateSigningRequestList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestSpec, InType: reflect.TypeOf(&CertificateSigningRequestSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestStatus, InType: reflect.TypeOf(&CertificateSigningRequestStatus{})}, - ) -} - -func DeepCopy_v1beta1_CertificateSigningRequest(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CertificateSigningRequest) - out := out.(*CertificateSigningRequest) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_CertificateSigningRequestSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_CertificateSigningRequestStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_CertificateSigningRequestCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CertificateSigningRequestCondition) - out := out.(*CertificateSigningRequestCondition) - *out = *in - out.LastUpdateTime = in.LastUpdateTime.DeepCopy() - return nil - } -} - -func DeepCopy_v1beta1_CertificateSigningRequestList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CertificateSigningRequestList) - out := out.(*CertificateSigningRequestList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CertificateSigningRequest, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_CertificateSigningRequest(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_CertificateSigningRequestSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CertificateSigningRequestSpec) - out := out.(*CertificateSigningRequestSpec) - *out = *in - if in.Request != nil { - in, out := &in.Request, &out.Request - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.Usages != nil { - in, out := &in.Usages, &out.Usages - *out = make([]KeyUsage, len(*in)) - copy(*out, *in) - } - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue) - for key, val := range *in { - if newVal, err := c.DeepCopy(&val); err != nil { - return err - } else { - (*out)[key] = *newVal.(*ExtraValue) - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_CertificateSigningRequestStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CertificateSigningRequestStatus) - out := out.(*CertificateSigningRequestStatus) - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]CertificateSigningRequestCondition, len(*in)) - for i := range *in { - if err := 
DeepCopy_v1beta1_CertificateSigningRequestCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Certificate != nil { - in, out := &in.Certificate, &out.Certificate - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.defaults.go deleted file mode 100644 index 3c5ae0362..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.defaults.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by defaulter-gen. Do not edit it manually! - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&CertificateSigningRequest{}, func(obj interface{}) { SetObjectDefaults_CertificateSigningRequest(obj.(*CertificateSigningRequest)) }) - scheme.AddTypeDefaultingFunc(&CertificateSigningRequestList{}, func(obj interface{}) { - SetObjectDefaults_CertificateSigningRequestList(obj.(*CertificateSigningRequestList)) - }) - return nil -} - -func SetObjectDefaults_CertificateSigningRequest(in *CertificateSigningRequest) { - SetDefaults_CertificateSigningRequestSpec(&in.Spec) -} - -func SetObjectDefaults_CertificateSigningRequestList(in *CertificateSigningRequestList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_CertificateSigningRequest(a) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/certificates/zz_generated.deepcopy.go deleted file mode 100644 index 876902891..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/certificates/zz_generated.deepcopy.go +++ /dev/null @@ -1,150 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
- -package certificates - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequest, InType: reflect.TypeOf(&CertificateSigningRequest{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestCondition, InType: reflect.TypeOf(&CertificateSigningRequestCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestList, InType: reflect.TypeOf(&CertificateSigningRequestList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestSpec, InType: reflect.TypeOf(&CertificateSigningRequestSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestStatus, InType: reflect.TypeOf(&CertificateSigningRequestStatus{})}, - ) -} - -func DeepCopy_certificates_CertificateSigningRequest(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CertificateSigningRequest) - out := out.(*CertificateSigningRequest) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_certificates_CertificateSigningRequestSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_certificates_CertificateSigningRequestStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_certificates_CertificateSigningRequestCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CertificateSigningRequestCondition) - out := out.(*CertificateSigningRequestCondition) - *out = *in - out.LastUpdateTime = in.LastUpdateTime.DeepCopy() - return nil - } -} - -func DeepCopy_certificates_CertificateSigningRequestList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CertificateSigningRequestList) - out := out.(*CertificateSigningRequestList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CertificateSigningRequest, len(*in)) - for i := range *in { - if err := DeepCopy_certificates_CertificateSigningRequest(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_certificates_CertificateSigningRequestSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CertificateSigningRequestSpec) - out := out.(*CertificateSigningRequestSpec) - *out = *in - if in.Request != nil { - in, out := &in.Request, &out.Request - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.Usages != nil { - in, out := &in.Usages, &out.Usages - *out = make([]KeyUsage, len(*in)) - copy(*out, *in) - } - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue) - for key, val := range *in { - if newVal, err := c.DeepCopy(&val); err != nil { - return err - } else { - (*out)[key] = *newVal.(*ExtraValue) - } - } - } - return nil - } -} - -func 
DeepCopy_certificates_CertificateSigningRequestStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CertificateSigningRequestStatus) - out := out.(*CertificateSigningRequestStatus) - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]CertificateSigningRequestCondition, len(*in)) - for i := range *in { - if err := DeepCopy_certificates_CertificateSigningRequestCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Certificate != nil { - in, out := &in.Certificate, &out.Certificate - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go new file mode 100644 index 000000000..d06482d55 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=client.authentication.k8s.io +package clientauthentication // import "k8s.io/client-go/pkg/apis/clientauthentication" diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go similarity index 91% rename from vendor/k8s.io/client-go/pkg/apis/authentication/register.go rename to vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go index b0ac3c28b..e4fbc3ea9 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/register.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package authentication +package clientauthentication import ( "k8s.io/apimachinery/pkg/runtime" @@ -22,7 +22,7 @@ import ( ) // GroupName is the group name use in this package -const GroupName = "authentication.k8s.io" +const GroupName = "client.authentication.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} @@ -44,7 +44,7 @@ var ( func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &TokenReview{}, + &ExecCredential{}, ) return nil } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go new file mode 100644 index 000000000..6fb53cecf --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go @@ -0,0 +1,77 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientauthentication + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ExecCredentials is used by exec-based plugins to communicate credentials to +// HTTP transports. +type ExecCredential struct { + metav1.TypeMeta + + // Spec holds information passed to the plugin by the transport. This contains + // request and runtime specific information, such as if the session is interactive. + Spec ExecCredentialSpec + + // Status is filled in by the plugin and holds the credentials that the transport + // should use to contact the API. + // +optional + Status *ExecCredentialStatus +} + +// ExecCredenitalSpec holds request and runtime specific information provided by +// the transport. +type ExecCredentialSpec struct { + // Response is populated when the transport encounters HTTP status codes, such as 401, + // suggesting previous credentials were invalid. + // +optional + Response *Response + + // Interactive is true when the transport detects the command is being called from an + // interactive prompt. + // +optional + Interactive bool +} + +// ExecCredentialStatus holds credentials for the transport to use. +type ExecCredentialStatus struct { + // ExpirationTimestamp indicates a time when the provided credentials expire. + // +optional + ExpirationTimestamp *metav1.Time + // Token is a bearer token used by the client for request authentication. + // +optional + Token string + // PEM-encoded client TLS certificate. + // +optional + ClientCertificateData string + // PEM-encoded client TLS private key. + // +optional + ClientKeyData string +} + +// Response defines metadata about a failed request, including HTTP status code and +// response headers. +type Response struct { + // Headers holds HTTP headers returned by the server. + Header map[string][]string + // Code is the HTTP status code returned by the server. + Code int32 +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go new file mode 100644 index 000000000..016adb28a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=client.authentication.k8s.io +package v1alpha1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go new file mode 100644 index 000000000..2acd13dea --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "client.authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ExecCredential{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go new file mode 100644 index 000000000..921f3a2b9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ExecCredentials is used by exec-based plugins to communicate credentials to +// HTTP transports. +type ExecCredential struct { + metav1.TypeMeta `json:",inline"` + + // Spec holds information passed to the plugin by the transport. This contains + // request and runtime specific information, such as if the session is interactive. + Spec ExecCredentialSpec `json:"spec,omitempty"` + + // Status is filled in by the plugin and holds the credentials that the transport + // should use to contact the API. + // +optional + Status *ExecCredentialStatus `json:"status,omitempty"` +} + +// ExecCredenitalSpec holds request and runtime specific information provided by +// the transport. +type ExecCredentialSpec struct { + // Response is populated when the transport encounters HTTP status codes, such as 401, + // suggesting previous credentials were invalid. + // +optional + Response *Response `json:"response,omitempty"` + + // Interactive is true when the transport detects the command is being called from an + // interactive prompt. + // +optional + Interactive bool `json:"interactive,omitempty"` +} + +// ExecCredentialStatus holds credentials for the transport to use. +// +// Token and ClientKeyData are sensitive fields. This data should only be +// transmitted in-memory between client and exec plugin process. Exec plugin +// itself should at least be protected via file permissions. +type ExecCredentialStatus struct { + // ExpirationTimestamp indicates a time when the provided credentials expire. + // +optional + ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"` + // Token is a bearer token used by the client for request authentication. + Token string `json:"token,omitempty"` + // PEM-encoded client TLS certificates (including intermediates, if any). + ClientCertificateData string `json:"clientCertificateData,omitempty"` + // PEM-encoded private key for the above certificate. + ClientKeyData string `json:"clientKeyData,omitempty"` +} + +// Response defines metadata about a failed request, including HTTP status code and +// response headers. +type Response struct { + // Header holds HTTP headers returned by the server. + Header map[string][]string `json:"header,omitempty"` + // Code is the HTTP status code returned by the server. + Code int32 `json:"code,omitempty"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go new file mode 100644 index 000000000..9921c7ee5 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,145 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential, + Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential, + Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec, + Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec, + Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus, + Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus, + Convert_v1alpha1_Response_To_clientauthentication_Response, + Convert_clientauthentication_Response_To_v1alpha1_Response, + ) +} + +func autoConvert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { + if err := Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status)) + return nil +} + +// Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function. +func Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { + return autoConvert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { + if err := Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status)) + return nil +} + +// Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential is an autogenerated conversion function. +func Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in, out, s) +} + +func autoConvert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { + out.Response = (*clientauthentication.Response)(unsafe.Pointer(in.Response)) + out.Interactive = in.Interactive + return nil +} + +// Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + out.Response = (*Response)(unsafe.Pointer(in.Response)) + out.Interactive = in.Interactive + return nil +} + +// Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec is an autogenerated conversion function. +func Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in, out, s) +} + +func autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { + out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) + out.Token = in.Token + out.ClientCertificateData = in.ClientCertificateData + out.ClientKeyData = in.ClientKeyData + return nil +} + +// Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function. +func Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { + out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) + out.Token = in.Token + out.ClientCertificateData = in.ClientCertificateData + out.ClientKeyData = in.ClientKeyData + return nil +} + +// Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus is an autogenerated conversion function. +func Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in, out, s) +} + +func autoConvert_v1alpha1_Response_To_clientauthentication_Response(in *Response, out *clientauthentication.Response, s conversion.Scope) error { + out.Header = *(*map[string][]string)(unsafe.Pointer(&in.Header)) + out.Code = in.Code + return nil +} + +// Convert_v1alpha1_Response_To_clientauthentication_Response is an autogenerated conversion function. 
+func Convert_v1alpha1_Response_To_clientauthentication_Response(in *Response, out *clientauthentication.Response, s conversion.Scope) error { + return autoConvert_v1alpha1_Response_To_clientauthentication_Response(in, out, s) +} + +func autoConvert_clientauthentication_Response_To_v1alpha1_Response(in *clientauthentication.Response, out *Response, s conversion.Scope) error { + out.Header = *(*map[string][]string)(unsafe.Pointer(&in.Header)) + out.Code = in.Code + return nil +} + +// Convert_clientauthentication_Response_To_v1alpha1_Response is an autogenerated conversion function. +func Convert_clientauthentication_Response_To_v1alpha1_Response(in *clientauthentication.Response, out *Response, s conversion.Scope) error { + return autoConvert_clientauthentication_Response_To_v1alpha1_Response(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..a73d31b3f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,128 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(ExecCredentialStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential. +func (in *ExecCredential) DeepCopy() *ExecCredential { + if in == nil { + return nil + } + out := new(ExecCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExecCredential) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { + *out = *in + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(Response) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec. +func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec { + if in == nil { + return nil + } + out := new(ExecCredentialSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) { + *out = *in + if in.ExpirationTimestamp != nil { + in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus. +func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus { + if in == nil { + return nil + } + out := new(ExecCredentialStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Response) DeepCopyInto(out *Response) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Response. +func (in *Response) DeepCopy() *Response { + if in == nil { + return nil + } + out := new(Response) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go similarity index 87% rename from vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.defaults.go rename to vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go index 6df448eb9..dd621a3ac 100644 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,9 +16,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by defaulter-gen. Do not edit it manually! +// Code generated by defaulter-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( runtime "k8s.io/apimachinery/pkg/runtime" diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go similarity index 61% rename from vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/conversion.go rename to vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go index c40138365..f543806ac 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,10 +17,10 @@ limitations under the License. 
package v1beta1 import ( - "k8s.io/apimachinery/pkg/runtime" + conversion "k8s.io/apimachinery/pkg/conversion" + clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication" ) -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - return scheme.AddConversionFuncs() +func Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/types/unix_user_id.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go similarity index 62% rename from vendor/k8s.io/apimachinery/pkg/types/unix_user_id.go rename to vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go index dc770c11e..fbcd9b7fe 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/unix_user_id.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,10 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -package types +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta -// int64 is used as a safe bet against wrap-around (uid's are general -// int32) and to support uid_t -1, and -2. - -type UnixUserID int64 -type UnixGroupID int64 +// +groupName=client.authentication.k8s.io +package v1beta1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go new file mode 100644 index 000000000..0bb92f16a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "client.authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. 
The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ExecCredential{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go new file mode 100644 index 000000000..d6e267452 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ExecCredentials is used by exec-based plugins to communicate credentials to +// HTTP transports. +type ExecCredential struct { + metav1.TypeMeta `json:",inline"` + + // Spec holds information passed to the plugin by the transport. This contains + // request and runtime specific information, such as if the session is interactive. + Spec ExecCredentialSpec `json:"spec,omitempty"` + + // Status is filled in by the plugin and holds the credentials that the transport + // should use to contact the API. + // +optional + Status *ExecCredentialStatus `json:"status,omitempty"` +} + +// ExecCredenitalSpec holds request and runtime specific information provided by +// the transport. +type ExecCredentialSpec struct{} + +// ExecCredentialStatus holds credentials for the transport to use. +// +// Token and ClientKeyData are sensitive fields. This data should only be +// transmitted in-memory between client and exec plugin process. Exec plugin +// itself should at least be protected via file permissions. +type ExecCredentialStatus struct { + // ExpirationTimestamp indicates a time when the provided credentials expire. + // +optional + ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"` + // Token is a bearer token used by the client for request authentication. + Token string `json:"token,omitempty"` + // PEM-encoded client TLS certificates (including intermediates, if any). + ClientCertificateData string `json:"clientCertificateData,omitempty"` + // PEM-encoded private key for the above certificate. + ClientKeyData string `json:"clientKeyData,omitempty"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..80e9b3159 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go @@ -0,0 +1,114 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1beta1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential, + Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential, + Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec, + Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec, + Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus, + Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus, + ) +} + +func autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { + if err := Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status)) + return nil +} + +// Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function. +func Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { + return autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { + if err := Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status)) + return nil +} + +// Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential is an autogenerated conversion function. +func Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in, out, s) +} + +func autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { + return nil +} + +// Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function. 
+func Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { + return autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + // WARNING: in.Response requires manual conversion: does not exist in peer-type + // WARNING: in.Interactive requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { + out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) + out.Token = in.Token + out.ClientCertificateData = in.ClientCertificateData + out.ClientKeyData = in.ClientKeyData + return nil +} + +// Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function. +func Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { + return autoConvert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { + out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) + out.Token = in.Token + out.ClientCertificateData = in.ClientCertificateData + out.ClientKeyData = in.ClientKeyData + return nil +} + +// Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus is an autogenerated conversion function. +func Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..736b8cf00 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,92 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { + *out = *in + out.TypeMeta = in.TypeMeta + out.Spec = in.Spec + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(ExecCredentialStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential. +func (in *ExecCredential) DeepCopy() *ExecCredential { + if in == nil { + return nil + } + out := new(ExecCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExecCredential) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec. +func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec { + if in == nil { + return nil + } + out := new(ExecCredentialSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) { + *out = *in + if in.ExpirationTimestamp != nil { + in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus. +func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus { + if in == nil { + return nil + } + out := new(ExecCredentialStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go similarity index 88% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.defaults.go rename to vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go index e24e70be3..73e63fc11 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by defaulter-gen. Do not edit it manually! +// Code generated by defaulter-gen. DO NOT EDIT. package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go new file mode 100644 index 000000000..c568a6fc8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go @@ -0,0 +1,128 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package clientauthentication + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(ExecCredentialStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential. +func (in *ExecCredential) DeepCopy() *ExecCredential { + if in == nil { + return nil + } + out := new(ExecCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExecCredential) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { + *out = *in + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(Response) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec. +func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec { + if in == nil { + return nil + } + out := new(ExecCredentialSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) { + *out = *in + if in.ExpirationTimestamp != nil { + in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus. +func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus { + if in == nil { + return nil + } + out := new(ExecCredentialStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Response) DeepCopyInto(out *Response) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Response. 
+func (in *Response) DeepCopy() *Response { + if in == nil { + return nil + } + out := new(Response) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/OWNERS b/vendor/k8s.io/client-go/pkg/apis/extensions/OWNERS deleted file mode 100755 index 494763a69..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/OWNERS +++ /dev/null @@ -1,41 +0,0 @@ -reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- brendandburns -- derekwaynecarr -- caesarxuchao -- mikedanese -- liggitt -- nikhiljindal -- bprashanth -- erictune -- pmorie -- sttts -- kargakis -- saad-ali -- janetkuo -- justinsb -- ncdc -- timstclair -- mwielgus -- timothysc -- soltysh -- piosz -- dims -- errordeveloper -- madhusudancs -- rootfs -- jszczepkowski -- mml -- resouer -- mbohlool -- david-mcmahon -- therc -- pweil- -- tmrts -- mqliang -- lukaszo -- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/doc.go b/vendor/k8s.io/client-go/pkg/apis/extensions/doc.go deleted file mode 100644 index 87edee41c..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package extensions diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/helpers.go b/vendor/k8s.io/client-go/pkg/apis/extensions/helpers.go deleted file mode 100644 index 27d3e23ad..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/helpers.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package extensions - -import ( - "strings" -) - -// SysctlsFromPodSecurityPolicyAnnotation parses an annotation value of the key -// SysctlsSecurityPolocyAnnotationKey into a slice of sysctls. An empty slice -// is returned if annotation is the empty string. -func SysctlsFromPodSecurityPolicyAnnotation(annotation string) ([]string, error) { - if len(annotation) == 0 { - return []string{}, nil - } - - return strings.Split(annotation, ","), nil -} - -// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. 
-func PodAnnotationsFromSysctls(sysctls []string) string { - return strings.Join(sysctls, ",") -} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/install/install.go b/vendor/k8s.io/client-go/pkg/apis/extensions/install/install.go deleted file mode 100644 index 1f968e861..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/install/install.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the experimental API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/apis/extensions" - "k8s.io/client-go/pkg/apis/extensions/v1beta1" -) - -func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: extensions.GroupName, - VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, - ImportPrefix: "k8s.io/client-go/pkg/apis/extensions", - RootScopedKinds: sets.NewString("PodSecurityPolicy", "ThirdPartyResource"), - AddInternalObjectsToScheme: extensions.AddToScheme, - }, - announced.VersionToSchemeFunc{ - v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, - }, - ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/register.go b/vendor/k8s.io/client-go/pkg/apis/extensions/register.go deleted file mode 100644 index 5983636c2..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/register.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package extensions - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "extensions" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - // TODO this gets cleaned up when the types are fixed - scheme.AddKnownTypes(SchemeGroupVersion, - &Deployment{}, - &DeploymentList{}, - &DeploymentRollback{}, - &ReplicationControllerDummy{}, - &Scale{}, - &ThirdPartyResource{}, - &ThirdPartyResourceList{}, - &DaemonSetList{}, - &DaemonSet{}, - &ThirdPartyResourceData{}, - &ThirdPartyResourceDataList{}, - &Ingress{}, - &IngressList{}, - &ReplicaSet{}, - &ReplicaSetList{}, - &PodSecurityPolicy{}, - &PodSecurityPolicyList{}, - &NetworkPolicy{}, - &NetworkPolicyList{}, - ) - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/types.go b/vendor/k8s.io/client-go/pkg/apis/extensions/types.go deleted file mode 100644 index 945c5fa2b..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/types.go +++ /dev/null @@ -1,1124 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -This file (together with pkg/apis/extensions/v1beta1/types.go) contain the experimental -types in kubernetes. These API objects are experimental, meaning that the -APIs may be broken at any time by the kubernetes team. - -DISCLAIMER: The implementation of the experimental API group itself is -a temporary one meant as a stopgap solution until kubernetes has proper -support for multiple API groups. The transition may require changes -beyond registration differences. In other words, experimental API group -support is experimental. -*/ - -package extensions - -import ( - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/pkg/api" -) - -const ( - // SysctlsPodSecurityPolicyAnnotationKey represents the key of a whitelist of - // allowed safe and unsafe sysctls in a pod spec. It's a comma-separated list of plain sysctl - // names or sysctl patterns (which end in *). The string "*" matches all sysctls. 
- SysctlsPodSecurityPolicyAnnotationKey string = "security.alpha.kubernetes.io/sysctls" -) - -// describes the attributes of a scale subresource -type ScaleSpec struct { - // desired number of instances for the scaled object. - // +optional - Replicas int32 -} - -// represents the current status of a scale subresource. -type ScaleStatus struct { - // actual number of observed instances of the scaled object. - Replicas int32 - - // label query over pods that should match the replicas count. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - Selector *metav1.LabelSelector -} - -// +genclient=true -// +noMethods=true - -// represents a scaling request for a resource. -type Scale struct { - metav1.TypeMeta - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta - - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. - // +optional - Spec ScaleSpec - - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. - // +optional - Status ScaleStatus -} - -// Dummy definition -type ReplicationControllerDummy struct { - metav1.TypeMeta -} - -// Alpha-level support for Custom Metrics in HPA (as annotations). -type CustomMetricTarget struct { - // Custom Metric name. - Name string - // Custom Metric value (average). - TargetValue resource.Quantity -} - -type CustomMetricTargetList struct { - Items []CustomMetricTarget -} - -type CustomMetricCurrentStatus struct { - // Custom Metric name. - Name string - // Custom Metric value (average). - CurrentValue resource.Quantity -} - -type CustomMetricCurrentStatusList struct { - Items []CustomMetricCurrentStatus -} - -// +genclient=true -// +nonNamespaced=true - -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. -type ThirdPartyResource struct { - metav1.TypeMeta - - // Standard object metadata - // +optional - metav1.ObjectMeta - - // Description is the description of this object. - // +optional - Description string - - // Versions are versions for this third party object - Versions []APIVersion -} - -type ThirdPartyResourceList struct { - metav1.TypeMeta - - // Standard list metadata. - // +optional - metav1.ListMeta - - // Items is the list of horizontal pod autoscalers. - Items []ThirdPartyResource -} - -// An APIVersion represents a single concrete version of an object model. -// TODO: we should consider merge this struct with GroupVersion in metav1.go -type APIVersion struct { - // Name of this version (e.g. 'v1'). - Name string -} - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -type ThirdPartyResourceData struct { - metav1.TypeMeta - // Standard object metadata. - // +optional - metav1.ObjectMeta - - // Data is the raw JSON data for this data. - // +optional - Data []byte -} - -// +genclient=true - -type Deployment struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Specification of the desired behavior of the Deployment. - // +optional - Spec DeploymentSpec - - // Most recently observed status of the Deployment. - // +optional - Status DeploymentStatus -} - -type DeploymentSpec struct { - // Number of desired pods. 
This is a pointer to distinguish between explicit - // zero and not specified. Defaults to 1. - // +optional - Replicas int32 - - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - // +optional - Selector *metav1.LabelSelector - - // Template describes the pods that will be created. - Template api.PodTemplateSpec - - // The deployment strategy to use to replace existing pods with new ones. - // +optional - Strategy DeploymentStrategy - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 - - // The number of old ReplicaSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // +optional - RevisionHistoryLimit *int32 - - // Indicates that the deployment is paused and will not be processed by the - // deployment controller. - // +optional - Paused bool - - // The config this deployment is rolling back to. Will be cleared after rollback is done. - // +optional - RollbackTo *RollbackConfig - - // The maximum time in seconds for a deployment to make progress before it - // is considered to be failed. The deployment controller will continue to - // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Once autoRollback is - // implemented, the deployment controller will automatically rollback failed - // deployments. Note that progress will not be estimated during the time a - // deployment is paused. This is not set by default. - ProgressDeadlineSeconds *int32 -} - -// DeploymentRollback stores the information required to rollback a deployment. -type DeploymentRollback struct { - metav1.TypeMeta - // Required: This must match the Name of a deployment. - Name string - // The annotations to be updated to a deployment - // +optional - UpdatedAnnotations map[string]string - // The config of this deployment rollback. - RollbackTo RollbackConfig -} - -type RollbackConfig struct { - // The revision to rollback to. If set to 0, rollbck to the last revision. - // +optional - Revision int64 -} - -const ( - // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added - // to existing RCs (and label key that is added to its pods) to prevent the existing RCs - // to select new pods (and old pods being select by new RC). - DefaultDeploymentUniqueLabelKey string = "pod-template-hash" -) - -type DeploymentStrategy struct { - // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - // +optional - Type DeploymentStrategyType - - // Rolling update config params. Present only if DeploymentStrategyType = - // RollingUpdate. - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. - // +optional - RollingUpdate *RollingUpdateDeployment -} - -type DeploymentStrategyType string - -const ( - // Kill all existing pods before creating new ones. - RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" - - // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. - RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" -) - -// Spec to control the desired behavior of rolling update. 
-type RollingUpdateDeployment struct { - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). - // Absolute number is calculated from percentage by rounding down. - // This can not be 0 if MaxSurge is 0. - // By default, a fixed value of 1 is used. - // Example: when this is set to 30%, the old RC can be scaled down by 30% - // immediately when the rolling update starts. Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring - // that at least 70% of original number of pods are available at all times - // during the update. - // +optional - MaxUnavailable intstr.IntOrString - - // The maximum number of pods that can be scheduled above the original number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of total pods at - // the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // By default, a value of 1 is used. - // Example: when this is set to 30%, the new RC can be scaled up by 30% - // immediately when the rolling update starts. Once old pods have been killed, - // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of original pods. - // +optional - MaxSurge intstr.IntOrString -} - -type DeploymentStatus struct { - // The generation observed by the deployment controller. - // +optional - ObservedGeneration int64 - - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - // +optional - Replicas int32 - - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. - // +optional - UpdatedReplicas int32 - - // Total number of ready pods targeted by this deployment. - // +optional - ReadyReplicas int32 - - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - // +optional - AvailableReplicas int32 - - // Total number of unavailable pods targeted by this deployment. - // +optional - UnavailableReplicas int32 - - // Represents the latest available observations of a deployment's current state. - Conditions []DeploymentCondition -} - -type DeploymentConditionType string - -// These are valid conditions of a deployment. -const ( - // Available means the deployment is available, ie. at least the minimum available - // replicas required are up and running for at least minReadySeconds. - DeploymentAvailable DeploymentConditionType = "Available" - // Progressing means the deployment is progressing. Progress for a deployment is - // considered when a new replica set is created or adopted, and when new pods scale - // up or old pods scale down. Progress is not estimated for paused deployments or - // when progressDeadlineSeconds is not specified. - DeploymentProgressing DeploymentConditionType = "Progressing" - // ReplicaFailure is added in a deployment when one of its pods fails to be created - // or deleted. - DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" -) - -// DeploymentCondition describes the state of a deployment at a certain point. -type DeploymentCondition struct { - // Type of deployment condition. - Type DeploymentConditionType - // Status of the condition, one of True, False, Unknown. 
- Status api.ConditionStatus - // The last time this condition was updated. - LastUpdateTime metav1.Time - // Last time the condition transitioned from one status to another. - LastTransitionTime metav1.Time - // The reason for the condition's last transition. - Reason string - // A human readable message indicating details about the transition. - Message string -} - -type DeploymentList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is the list of deployments. - Items []Deployment -} - -type DaemonSetUpdateStrategy struct { - // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". - // Default is OnDelete. - // +optional - Type DaemonSetUpdateStrategyType - - // Rolling update config params. Present only if type = "RollingUpdate". - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. Same as DeploymentStrategy.RollingUpdate. - // See https://github.com/kubernetes/kubernetes/issues/35345 - // +optional - RollingUpdate *RollingUpdateDaemonSet -} - -type DaemonSetUpdateStrategyType string - -const ( - // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. - RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" - - // Replace the old daemons only when it's killed - OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" -) - -// Spec to control the desired behavior of daemon set rolling update. -type RollingUpdateDaemonSet struct { - // The maximum number of DaemonSet pods that can be unavailable during the - // update. Value can be an absolute number (ex: 5) or a percentage of total - // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding up. - // This cannot be 0. - // Default value is 1. - // Example: when this is set to 30%, at most 30% of the total number of nodes - // that should be running the daemon pod (i.e. status.desiredNumberScheduled) - // can have their pods stopped for an update at any given - // time. The update starts by stopping at most 30% of those DaemonSet pods - // and then brings up new DaemonSet pods in their place. Once the new pods - // are available, it then proceeds onto other DaemonSet pods, thus ensuring - // that at least 70% of original number of DaemonSet pods are available at - // all times during the update. - // +optional - MaxUnavailable intstr.IntOrString -} - -// DaemonSetSpec is the specification of a daemon set. -type DaemonSetSpec struct { - // A label query over pods that are managed by the daemon set. - // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - Selector *metav1.LabelSelector - - // An object that describes the pod that will be created. - // The DaemonSet will create exactly one copy of this pod on every node - // that matches the template's node selector (or on every node if no node - // selector is specified). - // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template - Template api.PodTemplateSpec - - // An update strategy to replace existing DaemonSet pods with new pods. - // +optional - UpdateStrategy DaemonSetUpdateStrategy - - // The minimum number of seconds for which a newly created DaemonSet pod should - // be ready without any of its container crashing, for it to be considered - // available. 
Defaults to 0 (pod will be considered available as soon as it - // is ready). - // +optional - MinReadySeconds int32 - - // A sequence number representing a specific generation of the template. - // Populated by the system. It can be set only during the creation. - // +optional - TemplateGeneration int64 -} - -// DaemonSetStatus represents the current status of a daemon set. -type DaemonSetStatus struct { - // The number of nodes that are running at least 1 - // daemon pod and are supposed to run the daemon pod. - CurrentNumberScheduled int32 - - // The number of nodes that are running the daemon pod, but are - // not supposed to run the daemon pod. - NumberMisscheduled int32 - - // The total number of nodes that should be running the daemon - // pod (including nodes correctly running the daemon pod). - DesiredNumberScheduled int32 - - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. - NumberReady int32 - - // The most recent generation observed by the daemon set controller. - // +optional - ObservedGeneration int64 - - // The total number of nodes that are running updated daemon pod - // +optional - UpdatedNumberScheduled int32 - - // The number of nodes that should be running the - // daemon pod and have one or more of the daemon pod running and - // available (ready for at least spec.minReadySeconds) - // +optional - NumberAvailable int32 - - // The number of nodes that should be running the - // daemon pod and have none of the daemon pod running and available - // (ready for at least spec.minReadySeconds) - // +optional - NumberUnavailable int32 -} - -// +genclient=true - -// DaemonSet represents the configuration of a daemon set. -type DaemonSet struct { - metav1.TypeMeta - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta - - // The desired behavior of this daemon set. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Spec DaemonSetSpec - - // The current status of this daemon set. This data may be - // out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Status DaemonSetStatus -} - -const ( - // DaemonSetTemplateGenerationKey is the key of the labels that is added - // to daemon set pods to distinguish between old and new pod templates - // during DaemonSet template update. - DaemonSetTemplateGenerationKey string = "pod-template-generation" -) - -// DaemonSetList is a collection of daemon sets. -type DaemonSetList struct { - metav1.TypeMeta - // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta - - // A list of daemon sets. - Items []DaemonSet -} - -type ThirdPartyResourceDataList struct { - metav1.TypeMeta - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta - // Items is a list of third party objects - Items []ThirdPartyResourceData -} - -// +genclient=true - -// Ingress is a collection of rules that allow inbound connections to reach the -// endpoints defined by a backend. An Ingress can be configured to give services -// externally-reachable urls, load balance traffic, terminate SSL, offer name -// based virtual hosting etc. 
-type Ingress struct { - metav1.TypeMeta - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta - - // Spec is the desired state of the Ingress. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Spec IngressSpec - - // Status is the current state of the Ingress. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Status IngressStatus -} - -// IngressList is a collection of Ingress. -type IngressList struct { - metav1.TypeMeta - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta - - // Items is the list of Ingress. - Items []Ingress -} - -// IngressSpec describes the Ingress the user wishes to exist. -type IngressSpec struct { - // A default backend capable of servicing requests that don't match any - // rule. At least one of 'backend' or 'rules' must be specified. This field - // is optional to allow the loadbalancer controller or defaulting logic to - // specify a global default. - // +optional - Backend *IngressBackend - - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified - // through the SNI TLS extension, if the ingress controller fulfilling the - // ingress supports SNI. - // +optional - TLS []IngressTLS - - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. - // +optional - Rules []IngressRule - // TODO: Add the ability to specify load-balancer IP through claims -} - -// IngressTLS describes the transport layer security associated with an Ingress. -type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in - // this list must match the name/s used in the tlsSecret. Defaults to the - // wildcard host setting for the loadbalancer controller fulfilling this - // Ingress, if left unspecified. - // +optional - Hosts []string - // SecretName is the name of the secret used to terminate SSL traffic on 443. - // Field is left optional to allow SSL routing based on SNI hostname alone. - // If the SNI host in a listener conflicts with the "Host" header field used - // by an IngressRule, the SNI host is used for termination and value of the - // Host header is used for routing. - // +optional - SecretName string - // TODO: Consider specifying different modes of termination, protocols etc. -} - -// IngressStatus describe the current state of the Ingress. -type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. - // +optional - LoadBalancer api.LoadBalancerStatus -} - -// IngressRule represents the rules mapping the paths under a specified host to -// the related backend services. Incoming requests are first evaluated for a host -// match, then routed to the backend associated with the matching IngressRuleValue. -type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined - // by RFC 3986. Note the following deviations from the "host" part of the - // URI as defined in the RFC: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the - // IP in the Spec of the parent Ingress. - // 2. 
The `:` delimiter is not respected because ports are not allowed. - // Currently the port of an Ingress is implicitly :80 for http and - // :443 for https. - // Both these may change in the future. - // Incoming requests are matched against the host before the IngressRuleValue. - // If the host is unspecified, the Ingress routes all traffic based on the - // specified IngressRuleValue. - // +optional - Host string - // IngressRuleValue represents a rule to route requests for this IngressRule. - // If unspecified, the rule defaults to a http catch-all. Whether that sends - // just traffic matching the host to the default backend or all traffic to the - // default backend, is left to the controller fulfilling the Ingress. Http is - // currently the only supported IngressRuleValue. - // +optional - IngressRuleValue -} - -// IngressRuleValue represents a rule to apply against incoming requests. If the -// rule is satisfied, the request is routed to the specified backend. Currently -// mixing different types of rules in a single Ingress is disallowed, so exactly -// one of the following must be set. -type IngressRuleValue struct { - //TODO: - // 1. Consider renaming this resource and the associated rules so they - // aren't tied to Ingress. They can be used to route intra-cluster traffic. - // 2. Consider adding fields for ingress-type specific global options - // usable by a loadbalancer, like http keep-alive. - - // +optional - HTTP *HTTPIngressRuleValue -} - -// HTTPIngressRuleValue is a list of http selectors pointing to backends. -// In the example: http:///? -> backend where -// where parts of the url correspond to RFC 3986, this resource will be used -// to match against everything after the last '/' and before the first '?' -// or '#'. -type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. - Paths []HTTPIngressPath - // TODO: Consider adding fields for ingress-type specific global - // options usable by a loadbalancer, like http keep-alive. -} - -// HTTPIngressPath associates a path regex with a backend. Incoming urls matching -// the path are forwarded to the backend. -type HTTPIngressPath struct { - // Path is an extended POSIX regex as defined by IEEE Std 1003.1, - // (i.e this follows the egrep/unix syntax, not the perl syntax) - // matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" - // part of a URL as defined by RFC 3986. Paths must begin with - // a '/'. If unspecified, the path defaults to a catch all sending - // traffic to the backend. - // +optional - Path string - - // Backend defines the referenced service endpoint to which the traffic - // will be forwarded to. - Backend IngressBackend -} - -// IngressBackend describes all endpoints for a given service and port. -type IngressBackend struct { - // Specifies the name of the referenced service. - ServiceName string - - // Specifies the port of the referenced service. - ServicePort intstr.IntOrString -} - -// +genclient=true - -// ReplicaSet represents the configuration of a replica set. -type ReplicaSet struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired behavior of this ReplicaSet. - // +optional - Spec ReplicaSetSpec - - // Status is the current status of this ReplicaSet. This data may be - // out of date by some window of time. - // +optional - Status ReplicaSetStatus -} - -// ReplicaSetList is a collection of ReplicaSets. 
-type ReplicaSetList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ReplicaSet -} - -// ReplicaSetSpec is the specification of a ReplicaSet. -// As the internal representation of a ReplicaSet, it must have -// a Template set. -type ReplicaSetSpec struct { - // Replicas is the number of desired replicas. - Replicas int32 - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 - - // Selector is a label query over pods that should match the replica count. - // Must match in order to be controlled. - // If empty, defaulted to labels on pod template. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - Selector *metav1.LabelSelector - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. - // +optional - Template api.PodTemplateSpec -} - -// ReplicaSetStatus represents the current status of a ReplicaSet. -type ReplicaSetStatus struct { - // Replicas is the number of actual replicas. - Replicas int32 - - // The number of pods that have labels matching the labels of the pod template of the replicaset. - // +optional - FullyLabeledReplicas int32 - - // The number of ready replicas for this replica set. - // +optional - ReadyReplicas int32 - - // The number of available replicas (ready for at least minReadySeconds) for this replica set. - // +optional - AvailableReplicas int32 - - // ObservedGeneration is the most recent generation observed by the controller. - // +optional - ObservedGeneration int64 - - // Represents the latest available observations of a replica set's current state. - // +optional - Conditions []ReplicaSetCondition -} - -type ReplicaSetConditionType string - -// These are valid conditions of a replica set. -const ( - // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created - // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted - // due to kubelet being down or finalizers are failing. - ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" -) - -// ReplicaSetCondition describes the state of a replica set at a certain point. -type ReplicaSetCondition struct { - // Type of replica set condition. - Type ReplicaSetConditionType - // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus - // The last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time - // The reason for the condition's last transition. - // +optional - Reason string - // A human readable message indicating details about the transition. - // +optional - Message string -} - -// +genclient=true -// +nonNamespaced=true - -// PodSecurityPolicy governs the ability to make requests that affect the SecurityContext -// that will be applied to a pod and container. -type PodSecurityPolicy struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the policy enforced. - // +optional - Spec PodSecurityPolicySpec -} - -// PodSecurityPolicySpec defines the policy enforced. -type PodSecurityPolicySpec struct { - // Privileged determines if a pod can request to be run as privileged. 
- // +optional - Privileged bool - // DefaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // DefaultAddCapabilities and RequiredDropCapabilities. - // +optional - DefaultAddCapabilities []api.Capability - // RequiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - RequiredDropCapabilities []api.Capability - // AllowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. - // +optional - AllowedCapabilities []api.Capability - // Volumes is a white list of allowed volume plugins. Empty indicates that all plugins - // may be used. - // +optional - Volumes []FSType - // HostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - HostNetwork bool - // HostPorts determines which host port ranges are allowed to be exposed. - // +optional - HostPorts []HostPortRange - // HostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - HostPID bool - // HostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - HostIPC bool - // SELinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions - // RunAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions - // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - SupplementalGroups SupplementalGroupsStrategyOptions - // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. - FSGroup FSGroupStrategyOptions - // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - ReadOnlyRootFilesystem bool -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -type HostPortRange struct { - // Min is the start of the range, inclusive. - Min int - // Max is the end of the range, inclusive. - Max int -} - -// FSType gives strong typing to different file systems that are used by volumes. 
-type FSType string - -var ( - AzureFile FSType = "azureFile" - Flocker FSType = "flocker" - FlexVolume FSType = "flexVolume" - HostPath FSType = "hostPath" - EmptyDir FSType = "emptyDir" - GCEPersistentDisk FSType = "gcePersistentDisk" - AWSElasticBlockStore FSType = "awsElasticBlockStore" - GitRepo FSType = "gitRepo" - Secret FSType = "secret" - NFS FSType = "nfs" - ISCSI FSType = "iscsi" - Glusterfs FSType = "glusterfs" - PersistentVolumeClaim FSType = "persistentVolumeClaim" - RBD FSType = "rbd" - Cinder FSType = "cinder" - CephFS FSType = "cephFS" - DownwardAPI FSType = "downwardAPI" - FC FSType = "fc" - ConfigMap FSType = "configMap" - VsphereVolume FSType = "vsphereVolume" - Quobyte FSType = "quobyte" - AzureDisk FSType = "azureDisk" - PhotonPersistentDisk FSType = "photonPersistentDisk" - Projected FSType = "projected" - PortworxVolume FSType = "portworxVolume" - ScaleIO FSType = "scaleIO" - All FSType = "*" -) - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -type SELinuxStrategyOptions struct { - // Rule is the strategy that will dictate the allowable labels that may be set. - Rule SELinuxStrategy - // seLinuxOptions required to run as; required for MustRunAs - // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context - // +optional - SELinuxOptions *api.SELinuxOptions -} - -// SELinuxStrategy denotes strategy types for generating SELinux options for a -// Security. -type SELinuxStrategy string - -const ( - // container must have SELinux labels of X applied. - SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" - // container may make requests for any SELinux context labels. - SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" -) - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -type RunAsUserStrategyOptions struct { - // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy - // Ranges are the allowed ranges of uids that may be used. - // +optional - Ranges []IDRange -} - -// IDRange provides a min/max of an allowed range of IDs. -type IDRange struct { - // Min is the start of the range, inclusive. - Min int64 - // Max is the end of the range, inclusive. - Max int64 -} - -// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a -// SecurityContext. -type RunAsUserStrategy string - -const ( - // container must run as a particular uid. - RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" - // container must run as a non-root uid - RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" - // container may make requests for any uid. - RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" -) - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -type FSGroupStrategyOptions struct { - // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - Rule FSGroupStrategyType - // Ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. - // +optional - Ranges []IDRange -} - -// FSGroupStrategyType denotes strategy types for generating FSGroup values for a -// SecurityContext -type FSGroupStrategyType string - -const ( - // container must have FSGroup of X applied. 
- FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" - // container may make requests for any FSGroup labels. - FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" -) - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -type SupplementalGroupsStrategyOptions struct { - // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - Rule SupplementalGroupsStrategyType - // Ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. - // +optional - Ranges []IDRange -} - -// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental -// groups for a SecurityContext. -type SupplementalGroupsStrategyType string - -const ( - // container must run as a particular gid. - SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" - // container may make requests for any gid. - SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" -) - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -type PodSecurityPolicyList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []PodSecurityPolicy -} - -// +genclient=true - -type NetworkPolicy struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Specification of the desired behavior for this NetworkPolicy. - // +optional - Spec NetworkPolicySpec -} - -type NetworkPolicySpec struct { - // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules - // is applied to any pods selected by this field. Multiple network policies can select the - // same set of pods. In this case, the ingress rules for each are combined additively. - // This field is NOT optional and follows standard label selector semantics. - // An empty podSelector matches all pods in this namespace. - PodSelector metav1.LabelSelector - - // List of ingress rules to be applied to the selected pods. - // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, - // OR if the traffic source is the pod's local node, - // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy - // objects whose podSelector matches the pod. - // If this field is empty then this NetworkPolicy does not affect ingress isolation. - // If this field is present and contains at least one rule, this policy allows any traffic - // which matches at least one of the ingress rules in this list. - // +optional - Ingress []NetworkPolicyIngressRule -} - -// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. -type NetworkPolicyIngressRule struct { - // List of ports which should be made accessible on the pods selected for this rule. - // Each item in this list is combined using a logical OR. - // If this field is not provided, this rule matches all ports (traffic not restricted by port). - // If this field is empty, this rule matches no ports (no traffic matches). - // If this field is present and contains at least one item, then this rule allows traffic - // only if the traffic matches at least one port in the list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. 
- // +optional - Ports []NetworkPolicyPort - - // List of sources which should be able to access the pods selected for this rule. - // Items in this list are combined using a logical OR operation. - // If this field is not provided, this rule matches all sources (traffic not restricted by source). - // If this field is empty, this rule matches no sources (no traffic matches). - // If this field is present and contains at least on item, this rule allows traffic only if the - // traffic matches at least one item in the from list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. - // +optional - From []NetworkPolicyPeer -} - -type NetworkPolicyPort struct { - // Optional. The protocol (TCP or UDP) which traffic must match. - // If not specified, this field defaults to TCP. - // +optional - Protocol *api.Protocol - - // If specified, the port on the given protocol. This can - // either be a numerical or named port on a pod. If this field is not provided, - // this matches all port names and numbers. - // If present, only traffic on the specified protocol AND port - // will be matched. - // +optional - Port *intstr.IntOrString -} - -type NetworkPolicyPeer struct { - // Exactly one of the following must be specified. - - // This is a label selector which selects Pods in this namespace. - // This field follows standard label selector semantics. - // If not provided, this selector selects no pods. - // If present but empty, this selector selects all pods in this namespace. - // +optional - PodSelector *metav1.LabelSelector - - // Selects Namespaces using cluster scoped-labels. This - // matches all pods in all namespaces selected by this label selector. - // This field follows standard label selector semantics. - // If omitted, this selector selects no namespaces. - // If present but empty, this selector selects all namespaces. - // +optional - NamespaceSelector *metav1.LabelSelector -} - -// NetworkPolicyList is a list of NetworkPolicy objects. -type NetworkPolicyList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []NetworkPolicy -} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/conversion.go deleted file mode 100644 index d53dbc47a..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/conversion.go +++ /dev/null @@ -1,262 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" - v1 "k8s.io/client-go/pkg/api/v1" - "k8s.io/client-go/pkg/apis/extensions" -) - -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, - Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, - Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, - Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, - Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, - Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, - Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, - Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, - Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet, - Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, - Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, - Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, - ) - if err != nil { - return err - } - - // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. - for _, k := range []string{"DaemonSet", "Deployment", "Ingress"} { - kind := k // don't close over range variables - err = scheme.AddFieldLabelConversionFunc("extensions/v1beta1", kind, - func(label, value string) (string, string, error) { - switch label { - case "metadata.name", "metadata.namespace": - return label, value, nil - default: - return "", "", fmt.Errorf("field label %q not supported for %q", label, kind) - } - }, - ) - if err != nil { - return err - } - } - - return nil -} - -func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { - out.Replicas = int32(in.Replicas) - - out.Selector = nil - out.TargetSelector = "" - if in.Selector != nil { - if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { - out.Selector = in.Selector.MatchLabels - } - - selector, err := metav1.LabelSelectorAsSelector(in.Selector) - if err != nil { - return fmt.Errorf("invalid label selector: %v", err) - } - out.TargetSelector = selector.String() - } - return nil -} - -func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - - // Normally when 2 fields map to the same internal value we favor the old field, since - // old clients can't be expected to know about new fields but clients that know about the - // new field can be expected to know about the old field (though that's not quite true, due - // to kubectl apply). However, these fields are readonly, so any non-nil value should work. 
- if in.TargetSelector != "" { - labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) - if err != nil { - out.Selector = nil - return fmt.Errorf("failed to parse target selector: %v", err) - } - out.Selector = labelSelector - } else if in.Selector != nil { - out.Selector = new(metav1.LabelSelector) - selector := make(map[string]string) - for key, val := range in.Selector { - selector[key] = val - } - out.Selector.MatchLabels = selector - } else { - out.Selector = nil - } - return nil -} - -func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { - out.Replicas = &in.Replicas - out.Selector = in.Selector - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { - return err - } - if in.RevisionHistoryLimit != nil { - out.RevisionHistoryLimit = new(int32) - *out.RevisionHistoryLimit = int32(*in.RevisionHistoryLimit) - } - out.MinReadySeconds = int32(in.MinReadySeconds) - out.Paused = in.Paused - if in.RollbackTo != nil { - out.RollbackTo = new(RollbackConfig) - out.RollbackTo.Revision = int64(in.RollbackTo.Revision) - } else { - out.RollbackTo = nil - } - if in.ProgressDeadlineSeconds != nil { - out.ProgressDeadlineSeconds = new(int32) - *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds - } - return nil -} - -func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - out.Selector = in.Selector - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { - return err - } - out.RevisionHistoryLimit = in.RevisionHistoryLimit - out.MinReadySeconds = in.MinReadySeconds - out.Paused = in.Paused - if in.RollbackTo != nil { - out.RollbackTo = new(extensions.RollbackConfig) - out.RollbackTo.Revision = in.RollbackTo.Revision - } else { - out.RollbackTo = nil - } - if in.ProgressDeadlineSeconds != nil { - out.ProgressDeadlineSeconds = new(int32) - *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds - } - return nil -} - -func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { - out.Type = DeploymentStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(extensions.RollingUpdateDeployment) - if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func 
Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { - if out.MaxUnavailable == nil { - out.MaxUnavailable = &intstr.IntOrString{} - } - if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { - return err - } - if out.MaxSurge == nil { - out.MaxSurge = &intstr.IntOrString{} - } - if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { - if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { - return err - } - if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil { - return err - } - return nil -} - -func Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *RollingUpdateDaemonSet, s conversion.Scope) error { - if out.MaxUnavailable == nil { - out.MaxUnavailable = &intstr.IntOrString{} - } - if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { - if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { - return err - } - return nil -} - -func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error { - out.Replicas = new(int32) - *out.Replicas = int32(in.Replicas) - out.MinReadySeconds = in.MinReadySeconds - out.Selector = in.Selector - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = in.Selector - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/defaults.go deleted file mode 100644 index 298c568db..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/defaults.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/pkg/api/v1" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - RegisterDefaults(scheme) - return scheme.AddDefaultingFuncs( - SetDefaults_DaemonSet, - SetDefaults_Deployment, - SetDefaults_ReplicaSet, - SetDefaults_NetworkPolicy, - ) -} - -func SetDefaults_DaemonSet(obj *DaemonSet) { - labels := obj.Spec.Template.Labels - - // TODO: support templates defined elsewhere when we support them in the API - if labels != nil { - if obj.Spec.Selector == nil { - obj.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: labels, - } - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - updateStrategy := &obj.Spec.UpdateStrategy - if updateStrategy.Type == "" { - updateStrategy.Type = OnDeleteDaemonSetStrategyType - } - if updateStrategy.Type == RollingUpdateDaemonSetStrategyType { - if updateStrategy.RollingUpdate == nil { - rollingUpdate := RollingUpdateDaemonSet{} - updateStrategy.RollingUpdate = &rollingUpdate - } - if updateStrategy.RollingUpdate.MaxUnavailable == nil { - // Set default MaxUnavailable as 1 by default. - maxUnavailable := intstr.FromInt(1) - updateStrategy.RollingUpdate.MaxUnavailable = &maxUnavailable - } - } -} - -func SetDefaults_Deployment(obj *Deployment) { - // Default labels and selector to labels from pod template spec. - labels := obj.Spec.Template.Labels - - if labels != nil { - if obj.Spec.Selector == nil { - obj.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels} - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - // Set DeploymentSpec.Replicas to 1 if it is not set. - if obj.Spec.Replicas == nil { - obj.Spec.Replicas = new(int32) - *obj.Spec.Replicas = 1 - } - strategy := &obj.Spec.Strategy - // Set default DeploymentStrategyType as RollingUpdate. - if strategy.Type == "" { - strategy.Type = RollingUpdateDeploymentStrategyType - } - if strategy.Type == RollingUpdateDeploymentStrategyType || strategy.RollingUpdate != nil { - if strategy.RollingUpdate == nil { - rollingUpdate := RollingUpdateDeployment{} - strategy.RollingUpdate = &rollingUpdate - } - if strategy.RollingUpdate.MaxUnavailable == nil { - // Set default MaxUnavailable as 1 by default. - maxUnavailable := intstr.FromInt(1) - strategy.RollingUpdate.MaxUnavailable = &maxUnavailable - } - if strategy.RollingUpdate.MaxSurge == nil { - // Set default MaxSurge as 1 by default. - maxSurge := intstr.FromInt(1) - strategy.RollingUpdate.MaxSurge = &maxSurge - } - } -} - -func SetDefaults_ReplicaSet(obj *ReplicaSet) { - labels := obj.Spec.Template.Labels - - // TODO: support templates defined elsewhere when we support them in the API - if labels != nil { - if obj.Spec.Selector == nil { - obj.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: labels, - } - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - if obj.Spec.Replicas == nil { - obj.Spec.Replicas = new(int32) - *obj.Spec.Replicas = 1 - } -} - -func SetDefaults_NetworkPolicy(obj *NetworkPolicy) { - // Default any undefined Protocol fields to TCP. - for _, i := range obj.Spec.Ingress { - // TODO: Update Ports to be a pointer to slice as soon as auto-generation supports it. 
- for _, p := range i.Ports { - if p.Protocol == nil { - proto := v1.ProtocolTCP - p.Protocol = &proto - } - } - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/doc.go deleted file mode 100644 index a397b30e9..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.generated.go deleted file mode 100644 index 4f179e34f..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.generated.go +++ /dev/null @@ -1,21745 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1beta1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg3_resource "k8s.io/apimachinery/pkg/api/resource" - pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkg2_types "k8s.io/apimachinery/pkg/types" - pkg5_intstr "k8s.io/apimachinery/pkg/util/intstr" - pkg4_v1 "k8s.io/client-go/pkg/api/v1" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg3_resource.Quantity - var v1 pkg1_v1.TypeMeta - var v2 pkg2_types.UID - var v3 pkg5_intstr.IntOrString - var v4 pkg4_v1.PodTemplateSpec - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 - } -} - -func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv4 := &x.Replicas - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(yyv4)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv7 := &x.Replicas - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*int32)(yyv7)) = int32(r.DecodeInt(32)) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.Selector) != 0 - yyq2[2] = x.TargetSelector != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { 
- var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv4 := &x.Replicas - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(yyv4)) = int32(r.DecodeInt(32)) - } - } - case "selector": - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv6 := &x.Selector - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecMapStringStringX(yyv6, false, d) - } - } - case "targetSelector": - if r.TryDecodeAsNil() { - x.TargetSelector = "" - } else { - yyv8 := &x.TargetSelector - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv11 := &x.Replicas - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int32)(yyv11)) = int32(r.DecodeInt(32)) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv13 := &x.Selector - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - z.F.DecMapStringStringX(yyv13, false, d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetSelector = "" - } else { - yyv15 := &x.TargetSelector - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - 
*((*string)(yyv15)) = r.DecodeString() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - 
r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > 
l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerDummy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerDummy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerDummy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerDummy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv9 := &x.Kind - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv11 := &x.APIVersion - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.TargetValue - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.TargetValue - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricTarget) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricTarget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "value": - if r.TryDecodeAsNil() { - x.TargetValue = pkg3_resource.Quantity{} - } else { - yyv6 := &x.TargetValue - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricTarget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv9 := &x.Name - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetValue = pkg3_resource.Quantity{} - } else { - yyv11 := &x.TargetValue - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv11) - } else { - z.DecFallback(yyv11, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricTargetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricTargetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4 := &x.Items - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricTargetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv7 := &x.Items - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricCurrentStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.CurrentValue - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.CurrentValue - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricCurrentStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricCurrentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "value": - if r.TryDecodeAsNil() { - x.CurrentValue = pkg3_resource.Quantity{} - } else { - yyv6 := &x.CurrentValue - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricCurrentStatus) codecDecodeSelfFromArray(l int, d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv9 := &x.Name - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentValue = pkg3_resource.Quantity{} - } else { - yyv11 := &x.CurrentValue - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv11) - } else { - z.DecFallback(yyv11, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricCurrentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - 
} else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4 := &x.Items - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv7 := &x.Items - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = x.Description != "" - yyq2[4] = len(x.Versions) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Description)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("description")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Description)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Versions == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("versions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Versions == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x 
*ThirdPartyResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "description": - if r.TryDecodeAsNil() { - x.Description = "" - } else { - yyv10 := &x.Description - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "versions": - if r.TryDecodeAsNil() { - x.Versions = nil - } else { - yyv12 := &x.Versions - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - h.decSliceAPIVersion((*[]APIVersion)(yyv12), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv15 := &x.Kind - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv17 := &x.APIVersion - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv19 := &x.ObjectMeta - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(yyv19) { - } else { - z.DecFallback(yyv19, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Description = "" - } else { - yyv21 := &x.Description - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Versions = nil - } else { - yyv23 := &x.Versions - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - h.decSliceAPIVersion((*[]APIVersion)(yyv23), d) - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if 
false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResourceList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ThirdPartyResourceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResourceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 
bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *APIVersion) CodecDecodeSelf(d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *APIVersion) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv4 := &x.Name - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv7 := &x.Name - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*string)(yyv7)) = r.DecodeString() - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = len(x.Data) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Data == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResourceData) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ThirdPartyResourceData) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode 
into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv10 := &x.Data - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *yyv10 = r.DecodeBytes(*(*[]byte)(yyv10), false, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResourceData) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv19 := &x.Data - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *yyv19 = r.DecodeBytes(*(*[]byte)(yyv19), false, false) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) 
- z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = DeploymentSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = DeploymentStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = DeploymentSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = DeploymentStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [9]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != nil - yyq2[1] = x.Selector != nil - yyq2[3] = true - yyq2[4] = x.MinReadySeconds != 0 - yyq2[5] = x.RevisionHistoryLimit != nil - yyq2[6] = x.Paused != false - yyq2[7] = x.RollbackTo != nil - yyq2[8] = x.ProgressDeadlineSeconds != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(9) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Replicas == nil { - r.EncodeNil() - } else { - yy4 := *x.Replicas - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Replicas == nil { - r.EncodeNil() - } else { - yy6 := *x.Replicas - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - 
} else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy12 := &x.Template - yy12.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Template - yy14.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy17 := &x.Strategy - yy17.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("strategy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy19 := &x.Strategy - yy19.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.RevisionHistoryLimit == nil { - r.EncodeNil() - } else { - yy25 := *x.RevisionHistoryLimit - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeInt(int64(yy25)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RevisionHistoryLimit == nil { - r.EncodeNil() - } else { - yy27 := *x.RevisionHistoryLimit - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeInt(int64(yy27)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - r.EncodeBool(bool(x.Paused)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("paused")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - r.EncodeBool(bool(x.Paused)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.RollbackTo == nil { - r.EncodeNil() - } else { - x.RollbackTo.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RollbackTo == nil { - r.EncodeNil() - } else { - x.RollbackTo.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.ProgressDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy36 := *x.ProgressDeadlineSeconds - yym37 := z.EncBinary() - _ = yym37 - if false { - } else { - r.EncodeInt(int64(yy36)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("progressDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ProgressDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy38 := *x.ProgressDeadlineSeconds - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - r.EncodeInt(int64(yy38)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_v1.LabelSelector) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg4_v1.PodTemplateSpec{} - } else { - yyv8 := &x.Template - yyv8.CodecDecodeSelf(d) - } - case "strategy": - if r.TryDecodeAsNil() { - x.Strategy = DeploymentStrategy{} - } else { - yyv9 := &x.Strategy - yyv9.CodecDecodeSelf(d) - } - case "minReadySeconds": - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - yyv10 := &x.MinReadySeconds - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - 
*((*int32)(yyv10)) = int32(r.DecodeInt(32)) - } - } - case "revisionHistoryLimit": - if r.TryDecodeAsNil() { - if x.RevisionHistoryLimit != nil { - x.RevisionHistoryLimit = nil - } - } else { - if x.RevisionHistoryLimit == nil { - x.RevisionHistoryLimit = new(int32) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - case "paused": - if r.TryDecodeAsNil() { - x.Paused = false - } else { - yyv14 := &x.Paused - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*bool)(yyv14)) = r.DecodeBool() - } - } - case "rollbackTo": - if r.TryDecodeAsNil() { - if x.RollbackTo != nil { - x.RollbackTo = nil - } - } else { - if x.RollbackTo == nil { - x.RollbackTo = new(RollbackConfig) - } - x.RollbackTo.CodecDecodeSelf(d) - } - case "progressDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ProgressDeadlineSeconds != nil { - x.ProgressDeadlineSeconds = nil - } - } else { - if x.ProgressDeadlineSeconds == nil { - x.ProgressDeadlineSeconds = new(int32) - } - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj19 int - var yyb19 bool - var yyhl19 bool = l >= 0 - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_v1.LabelSelector) - } - yym23 := z.DecBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg4_v1.PodTemplateSpec{} - } else { - yyv24 := &x.Template - yyv24.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Strategy = DeploymentStrategy{} - } else { - yyv25 := &x.Strategy - yyv25.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - yyv26 := &x.MinReadySeconds - yym27 := z.DecBinary() - _ = yym27 - if false { - } else { - *((*int32)(yyv26)) = int32(r.DecodeInt(32)) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RevisionHistoryLimit != nil { - x.RevisionHistoryLimit = nil - } - } else { - if x.RevisionHistoryLimit == nil { - x.RevisionHistoryLimit = new(int32) - } - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Paused = false - } else { - yyv30 := &x.Paused - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - *((*bool)(yyv30)) = r.DecodeBool() - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RollbackTo != nil { - x.RollbackTo = nil - } - } else { - if x.RollbackTo == nil { - x.RollbackTo = new(RollbackConfig) - } - x.RollbackTo.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ProgressDeadlineSeconds != nil { - x.ProgressDeadlineSeconds = nil - } - } else { - if x.ProgressDeadlineSeconds == nil { - x.ProgressDeadlineSeconds = new(int32) - } - yym34 := z.DecBinary() - _ = yym34 - if false { - } else { - *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) - } - } - for { - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj19-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[3] = len(x.UpdatedAnnotations) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if 
yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.UpdatedAnnotations == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.UpdatedAnnotations == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy16 := &x.RollbackTo - yy16.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy18 := &x.RollbackTo - yy18.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentRollback) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv8 := &x.Name - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "updatedAnnotations": - if r.TryDecodeAsNil() { - x.UpdatedAnnotations = nil - } else { - yyv10 := &x.UpdatedAnnotations - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - z.F.DecMapStringStringX(yyv10, false, d) - } - } - case "rollbackTo": - if r.TryDecodeAsNil() { - x.RollbackTo = RollbackConfig{} - } else { - yyv12 := &x.RollbackTo - yyv12.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv14 := &x.Kind - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv16 := &x.APIVersion - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv18 := &x.Name - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*string)(yyv18)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UpdatedAnnotations = nil - } else { - yyv20 := &x.UpdatedAnnotations - yym21 := z.DecBinary() - _ = 
yym21 - if false { - } else { - z.F.DecMapStringStringX(yyv20, false, d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RollbackTo = RollbackConfig{} - } else { - yyv22 := &x.RollbackTo - yyv22.CodecDecodeSelf(d) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Revision != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Revision)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revision")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Revision)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "revision": - if r.TryDecodeAsNil() { - x.Revision = 0 - } else { - 
yyv4 := &x.Revision - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(yyv4)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Revision = 0 - } else { - yyv7 := &x.Revision - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*int64)(yyv7)) = int64(r.DecodeInt(64)) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Type != "" - yyq2[1] = x.RollingUpdate != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.RollingUpdate == nil { - r.EncodeNil() - } else { - x.RollingUpdate.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RollingUpdate == nil { - r.EncodeNil() - } else { - x.RollingUpdate.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == 
codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "rollingUpdate": - if r.TryDecodeAsNil() { - if x.RollingUpdate != nil { - x.RollingUpdate = nil - } - } else { - if x.RollingUpdate == nil { - x.RollingUpdate = new(RollingUpdateDeployment) - } - x.RollingUpdate.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv7 := &x.Type - yyv7.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RollingUpdate != nil { - x.RollingUpdate = nil - } - } else { - if x.RollingUpdate == nil { - x.RollingUpdate = new(RollingUpdateDeployment) - } - x.RollingUpdate.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - 
} else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.MaxUnavailable != nil - yyq2[1] = x.MaxSurge != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.MaxUnavailable == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { - } else if !yym4 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxUnavailable) - } else { - z.EncFallback(x.MaxUnavailable) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MaxUnavailable == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { - } else if !yym5 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxUnavailable) - } else { - z.EncFallback(x.MaxUnavailable) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.MaxSurge == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxSurge) - } else { - z.EncFallback(x.MaxSurge) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxSurge")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MaxSurge == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxSurge) - } else { - z.EncFallback(x.MaxSurge) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; 
yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "maxUnavailable": - if r.TryDecodeAsNil() { - if x.MaxUnavailable != nil { - x.MaxUnavailable = nil - } - } else { - if x.MaxUnavailable == nil { - x.MaxUnavailable = new(pkg5_intstr.IntOrString) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxUnavailable) - } else { - z.DecFallback(x.MaxUnavailable, false) - } - } - case "maxSurge": - if r.TryDecodeAsNil() { - if x.MaxSurge != nil { - x.MaxSurge = nil - } - } else { - if x.MaxSurge == nil { - x.MaxSurge = new(pkg5_intstr.IntOrString) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxSurge) - } else { - z.DecFallback(x.MaxSurge, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MaxUnavailable != nil { - x.MaxUnavailable = nil - } - } else { - if x.MaxUnavailable == nil { - x.MaxUnavailable = new(pkg5_intstr.IntOrString) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxUnavailable) - } else { - z.DecFallback(x.MaxUnavailable, false) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MaxSurge != nil { - x.MaxSurge = nil - } - } else { - if x.MaxSurge == nil { - x.MaxSurge = new(pkg5_intstr.IntOrString) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxSurge) - } else { - z.DecFallback(x.MaxSurge, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - 
const yyr2 bool = false - yyq2[0] = x.ObservedGeneration != 0 - yyq2[1] = x.Replicas != 0 - yyq2[2] = x.UpdatedReplicas != 0 - yyq2[3] = x.ReadyReplicas != 0 - yyq2[4] = x.AvailableReplicas != 0 - yyq2[5] = x.UnavailableReplicas != 0 - yyq2[6] = len(x.Conditions) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.UpdatedReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.UpdatedReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.ReadyReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.ReadyReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.UnavailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[5] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("unavailableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.UnavailableReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - yyv4 := &x.ObservedGeneration - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(yyv4)) = int64(r.DecodeInt(64)) - } - } - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv6 := &x.Replicas - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(yyv6)) = int32(r.DecodeInt(32)) - } - } - case "updatedReplicas": - if r.TryDecodeAsNil() { - x.UpdatedReplicas = 0 - } else { - yyv8 := &x.UpdatedReplicas - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(yyv8)) = int32(r.DecodeInt(32)) - } - } - case "readyReplicas": - if r.TryDecodeAsNil() { - x.ReadyReplicas = 0 - } else { - yyv10 := &x.ReadyReplicas - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(yyv10)) = int32(r.DecodeInt(32)) - } - } - case "availableReplicas": - if r.TryDecodeAsNil() { - 
x.AvailableReplicas = 0 - } else { - yyv12 := &x.AvailableReplicas - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int32)(yyv12)) = int32(r.DecodeInt(32)) - } - } - case "unavailableReplicas": - if r.TryDecodeAsNil() { - x.UnavailableReplicas = 0 - } else { - yyv14 := &x.UnavailableReplicas - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*int32)(yyv14)) = int32(r.DecodeInt(32)) - } - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv16 := &x.Conditions - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv16), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - yyv19 := &x.ObservedGeneration - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int64)(yyv19)) = int64(r.DecodeInt(64)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv21 := &x.Replicas - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int32)(yyv21)) = int32(r.DecodeInt(32)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UpdatedReplicas = 0 - } else { - yyv23 := &x.UpdatedReplicas - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*int32)(yyv23)) = int32(r.DecodeInt(32)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadyReplicas = 0 - } else { - yyv25 := &x.ReadyReplicas - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*int32)(yyv25)) = int32(r.DecodeInt(32)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - yyv27 := &x.AvailableReplicas - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*int32)(yyv27)) = int32(r.DecodeInt(32)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UnavailableReplicas = 0 - } else { - yyv29 := 
&x.UnavailableReplicas - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*int32)(yyv29)) = int32(r.DecodeInt(32)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv31 := &x.Conditions - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv31), d) - } - } - for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x DeploymentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DeploymentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *DeploymentCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf7 := &x.Status - yysf7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yysf8 := &x.Status - yysf8.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastUpdateTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastUpdateTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "lastUpdateTime": - if r.TryDecodeAsNil() { - x.LastUpdateTime = pkg1_v1.Time{} - } else { - yyv6 := &x.LastUpdateTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_v1.Time{} - } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv10 := &x.Reason - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv12 := &x.Message - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv15 := &x.Type - yyv15.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv16 := &x.Status - yyv16.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastUpdateTime = pkg1_v1.Time{} - } else { - yyv17 := &x.LastUpdateTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } 
else { - z.DecFallback(yyv17, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_v1.Time{} - } else { - yyv19 := &x.LastTransitionTime - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(yyv19) { - } else if yym20 { - z.DecBinaryUnmarshal(yyv19) - } else if !yym20 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv19) - } else { - z.DecFallback(yyv19, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv21 := &x.Reason - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv23 := &x.Message - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*string)(yyv23)) = r.DecodeString() - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceDeployment(([]Deployment)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceDeployment(([]Deployment)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() 
- _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceDeployment((*[]Deployment)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceDeployment((*[]Deployment)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSetUpdateStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Type != "" - yyq2[1] = x.RollingUpdate != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - 
x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.RollingUpdate == nil { - r.EncodeNil() - } else { - x.RollingUpdate.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RollingUpdate == nil { - r.EncodeNil() - } else { - x.RollingUpdate.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSetUpdateStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSetUpdateStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "rollingUpdate": - if r.TryDecodeAsNil() { - if x.RollingUpdate != nil { - x.RollingUpdate = nil - } - } else { - if x.RollingUpdate == nil { - x.RollingUpdate = new(RollingUpdateDaemonSet) - } - x.RollingUpdate.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSetUpdateStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv7 := &x.Type - yyv7.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { 
- yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RollingUpdate != nil { - x.RollingUpdate = nil - } - } else { - if x.RollingUpdate == nil { - x.RollingUpdate = new(RollingUpdateDaemonSet) - } - x.RollingUpdate.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x DaemonSetUpdateStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DaemonSetUpdateStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *RollingUpdateDaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.MaxUnavailable != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.MaxUnavailable == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { - } else if !yym4 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxUnavailable) - } else { - z.EncFallback(x.MaxUnavailable) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MaxUnavailable == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { - } else if !yym5 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxUnavailable) - } else { - z.EncFallback(x.MaxUnavailable) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RollingUpdateDaemonSet) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) 
- } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RollingUpdateDaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "maxUnavailable": - if r.TryDecodeAsNil() { - if x.MaxUnavailable != nil { - x.MaxUnavailable = nil - } - } else { - if x.MaxUnavailable == nil { - x.MaxUnavailable = new(pkg5_intstr.IntOrString) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxUnavailable) - } else { - z.DecFallback(x.MaxUnavailable, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RollingUpdateDaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MaxUnavailable != nil { - x.MaxUnavailable = nil - } - } else { - if x.MaxUnavailable == nil { - x.MaxUnavailable = new(pkg5_intstr.IntOrString) - } - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxUnavailable) - } else { - z.DecFallback(x.MaxUnavailable, false) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Selector != nil - yyq2[2] = true - yyq2[3] = x.MinReadySeconds != 0 - yyq2[4] = x.TemplateGeneration != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.Template - yy7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.Template - yy9.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy12 := &x.UpdateStrategy - yy12.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("updateStrategy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.UpdateStrategy - yy14.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.TemplateGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("templateGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeInt(int64(x.TemplateGeneration)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) 
- } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_v1.LabelSelector) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg4_v1.PodTemplateSpec{} - } else { - yyv6 := &x.Template - yyv6.CodecDecodeSelf(d) - } - case "updateStrategy": - if r.TryDecodeAsNil() { - x.UpdateStrategy = DaemonSetUpdateStrategy{} - } else { - yyv7 := &x.UpdateStrategy - yyv7.CodecDecodeSelf(d) - } - case "minReadySeconds": - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - yyv8 := &x.MinReadySeconds - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(yyv8)) = int32(r.DecodeInt(32)) - } - } - case "templateGeneration": - if r.TryDecodeAsNil() { - x.TemplateGeneration = 0 - } else { - yyv10 := &x.TemplateGeneration - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int64)(yyv10)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_v1.LabelSelector) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg4_v1.PodTemplateSpec{} - } else { - yyv15 := &x.Template - yyv15.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UpdateStrategy = DaemonSetUpdateStrategy{} - } else { - yyv16 := &x.UpdateStrategy - yyv16.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - 
} - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - yyv17 := &x.MinReadySeconds - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*int32)(yyv17)) = int32(r.DecodeInt(32)) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TemplateGeneration = 0 - } else { - yyv19 := &x.TemplateGeneration - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int64)(yyv19)) = int64(r.DecodeInt(64)) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[4] = x.ObservedGeneration != 0 - yyq2[5] = x.UpdatedNumberScheduled != 0 - yyq2[6] = x.NumberAvailable != 0 - yyq2[7] = x.NumberUnavailable != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 4 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.CurrentNumberScheduled)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentNumberScheduled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.CurrentNumberScheduled)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.NumberMisscheduled)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("numberMisscheduled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.NumberMisscheduled)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.DesiredNumberScheduled)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredNumberScheduled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.DesiredNumberScheduled)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else 
{ - r.EncodeInt(int64(x.NumberReady)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("numberReady")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.NumberReady)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.UpdatedNumberScheduled)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("updatedNumberScheduled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.UpdatedNumberScheduled)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeInt(int64(x.NumberAvailable)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("numberAvailable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeInt(int64(x.NumberAvailable)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeInt(int64(x.NumberUnavailable)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("numberUnavailable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeInt(int64(x.NumberUnavailable)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "currentNumberScheduled": - if r.TryDecodeAsNil() { - x.CurrentNumberScheduled = 0 - } else { - yyv4 := &x.CurrentNumberScheduled - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(yyv4)) = int32(r.DecodeInt(32)) - } - } - case "numberMisscheduled": - if r.TryDecodeAsNil() { - x.NumberMisscheduled = 0 - } else { - yyv6 := &x.NumberMisscheduled - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(yyv6)) = int32(r.DecodeInt(32)) - } - } - case "desiredNumberScheduled": - if r.TryDecodeAsNil() { - x.DesiredNumberScheduled = 0 - } else { - yyv8 := &x.DesiredNumberScheduled - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(yyv8)) = int32(r.DecodeInt(32)) - } - } - case "numberReady": - if r.TryDecodeAsNil() { - x.NumberReady = 0 - } else { - yyv10 := &x.NumberReady - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(yyv10)) = int32(r.DecodeInt(32)) - } - } - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - yyv12 := &x.ObservedGeneration - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int64)(yyv12)) = int64(r.DecodeInt(64)) - } - } - case "updatedNumberScheduled": - if r.TryDecodeAsNil() { - x.UpdatedNumberScheduled = 0 - } else { - yyv14 := &x.UpdatedNumberScheduled - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*int32)(yyv14)) = int32(r.DecodeInt(32)) - } - } - case "numberAvailable": - if r.TryDecodeAsNil() { - x.NumberAvailable = 0 - } else { - yyv16 := &x.NumberAvailable - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*int32)(yyv16)) = int32(r.DecodeInt(32)) - } - } - case "numberUnavailable": - if r.TryDecodeAsNil() { - x.NumberUnavailable = 0 - } else { - yyv18 := &x.NumberUnavailable - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*int32)(yyv18)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj20 int - var yyb20 bool - var yyhl20 bool = l >= 0 - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentNumberScheduled = 0 - } else { - yyv21 := &x.CurrentNumberScheduled - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int32)(yyv21)) = int32(r.DecodeInt(32)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NumberMisscheduled = 0 - } else { - yyv23 := &x.NumberMisscheduled - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*int32)(yyv23)) = int32(r.DecodeInt(32)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DesiredNumberScheduled = 0 - } else { - yyv25 := &x.DesiredNumberScheduled - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*int32)(yyv25)) = int32(r.DecodeInt(32)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NumberReady = 0 - } else { - yyv27 := &x.NumberReady - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*int32)(yyv27)) = int32(r.DecodeInt(32)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - yyv29 := &x.ObservedGeneration - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*int64)(yyv29)) = int64(r.DecodeInt(64)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UpdatedNumberScheduled = 0 - } else { - yyv31 := &x.UpdatedNumberScheduled - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - *((*int32)(yyv31)) = int32(r.DecodeInt(32)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NumberAvailable = 0 - } else { - yyv33 := &x.NumberAvailable - yym34 := z.DecBinary() - _ = yym34 - if false { - } else { - *((*int32)(yyv33)) = int32(r.DecodeInt(32)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NumberUnavailable = 0 - } else { - yyv35 := &x.NumberUnavailable - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - *((*int32)(yyv35)) = int32(r.DecodeInt(32)) - } - } - for { - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj20-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if 
z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSet) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, 
_ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = DaemonSetSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = DaemonSetStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = DaemonSetSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = DaemonSetStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceDaemonSet((*[]DaemonSet)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSetList) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceDaemonSet((*[]DaemonSet)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - 
if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResourceDataList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ThirdPartyResourceDataList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if 
r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResourceDataList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } 
else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Ingress) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - 
_, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Ingress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = IngressSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = IngressStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Ingress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = IngressSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = IngressStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceIngress(([]Ingress)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceIngress(([]Ingress)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceIngress((*[]Ingress)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressList) codecDecodeSelfFromArray(l 
int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceIngress((*[]Ingress)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Backend != nil - yyq2[1] = len(x.TLS) != 0 - yyq2[2] = len(x.Rules) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Backend == nil { - r.EncodeNil() - } else { - x.Backend.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("backend")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Backend == nil { - r.EncodeNil() - } else { - x.Backend.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.TLS == nil { - 
r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tls")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TLS == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Rules == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceIngressRule(([]IngressRule)(x.Rules), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rules")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Rules == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - h.encSliceIngressRule(([]IngressRule)(x.Rules), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "backend": - if r.TryDecodeAsNil() { - if x.Backend != nil { - x.Backend = nil - } - } else { - if x.Backend == nil { - x.Backend = new(IngressBackend) - } - x.Backend.CodecDecodeSelf(d) - } - case "tls": - if r.TryDecodeAsNil() { - x.TLS = nil - } else { - yyv5 := &x.TLS - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceIngressTLS((*[]IngressTLS)(yyv5), d) - } - } - case "rules": - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv7 := &x.Rules - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceIngressRule((*[]IngressRule)(yyv7), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressSpec) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Backend != nil { - x.Backend = nil - } - } else { - if x.Backend == nil { - x.Backend = new(IngressBackend) - } - x.Backend.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TLS = nil - } else { - yyv11 := &x.TLS - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceIngressTLS((*[]IngressTLS)(yyv11), d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv13 := &x.Rules - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceIngressRule((*[]IngressRule)(yyv13), d) - } - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Hosts) != 0 - yyq2[1] = x.SecretName != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Hosts == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Hosts, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hosts")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Hosts == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Hosts, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretName")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressTLS) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressTLS) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "hosts": - if r.TryDecodeAsNil() { - x.Hosts = nil - } else { - yyv4 := &x.Hosts - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "secretName": - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - yyv6 := &x.SecretName - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressTLS) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hosts = nil - } else { - yyv9 := &x.Hosts - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - z.F.DecSliceStringX(yyv9, false, d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - yyv11 := &x.SecretName - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.LoadBalancer - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.LoadBalancer - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "loadBalancer": - if r.TryDecodeAsNil() { - x.LoadBalancer = pkg4_v1.LoadBalancerStatus{} - } else { - yyv4 := &x.LoadBalancer - yyv4.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LoadBalancer = pkg4_v1.LoadBalancerStatus{} - } 
else { - yyv6 := &x.LoadBalancer - yyv6.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressRule) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Host != "" - yyq2[1] = x.IngressRuleValue.HTTP != nil && x.HTTP != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("host")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } - } - var yyn6 bool - if x.IngressRuleValue.HTTP == nil { - yyn6 = true - goto LABEL6 - } - LABEL6: - if yyr2 || yy2arr2 { - if yyn6 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("http")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn6 { - r.EncodeNil() - } else { - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressRule) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool 
= l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "host": - if r.TryDecodeAsNil() { - x.Host = "" - } else { - yyv4 := &x.Host - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "http": - if x.IngressRuleValue.HTTP == nil { - x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) - } - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Host = "" - } else { - yyv8 := &x.Host - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - if x.IngressRuleValue.HTTP == nil { - x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.HTTP != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("http")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "http": - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPIngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Paths == nil { - r.EncodeNil() - } else { - yym4 
:= z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("paths")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Paths == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPIngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPIngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "paths": - if r.TryDecodeAsNil() { - x.Paths = nil - } else { - yyv4 := &x.Paths - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPIngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Paths = nil - } else { - yyv7 := &x.Paths - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPIngressPath) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - 
yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Path != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.Backend - yy7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("backend")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.Backend - yy9.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPIngressPath) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPIngressPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv4 := &x.Path - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "backend": - if r.TryDecodeAsNil() { - x.Backend = IngressBackend{} - } else { - yyv6 := &x.Backend - yyv6.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPIngressPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { 
- var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - yyv8 := &x.Path - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Backend = IngressBackend{} - } else { - yyv10 := &x.Backend - yyv10.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressBackend) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.ServicePort - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("servicePort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.ServicePort - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressBackend) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == 
codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressBackend) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "serviceName": - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - yyv4 := &x.ServiceName - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "servicePort": - if r.TryDecodeAsNil() { - x.ServicePort = pkg5_intstr.IntOrString{} - } else { - yyv6 := &x.ServicePort - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressBackend) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - yyv9 := &x.ServiceName - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServicePort = pkg5_intstr.IntOrString{} - } else { - yyv11 := &x.ServicePort - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv11) - } else { - z.DecFallback(yyv11, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicaSet) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } 
else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSet) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - 
_ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ReplicaSetSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ReplicaSetStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ReplicaSetSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ReplicaSetStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { 
- z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceReplicaSet((*[]ReplicaSet)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x 
*ReplicaSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceReplicaSet((*[]ReplicaSet)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != nil - yyq2[1] = x.MinReadySeconds != 0 - yyq2[2] = x.Selector != nil - yyq2[3] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Replicas == nil { - r.EncodeNil() - } else { - yy4 := *x.Replicas - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Replicas == nil { - r.EncodeNil() - } else { - yy6 := 
*x.Replicas - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Template - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Template - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - 
x.Replicas = new(int32) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - case "minReadySeconds": - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - yyv6 := &x.MinReadySeconds - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(yyv6)) = int32(r.DecodeInt(32)) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_v1.LabelSelector) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg4_v1.PodTemplateSpec{} - } else { - yyv10 := &x.Template - yyv10.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - yyv14 := &x.MinReadySeconds - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*int32)(yyv14)) = int32(r.DecodeInt(32)) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_v1.LabelSelector) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg4_v1.PodTemplateSpec{} - } else { - yyv18 := &x.Template - yyv18.CodecDecodeSelf(d) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - 
r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FullyLabeledReplicas != 0 - yyq2[2] = x.ReadyReplicas != 0 - yyq2[3] = x.AvailableReplicas != 0 - yyq2[4] = x.ObservedGeneration != 0 - yyq2[5] = len(x.Conditions) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.ReadyReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.ReadyReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym19 
:= z.EncBinary() - _ = yym19 - if false { - } else { - h.encSliceReplicaSetCondition(([]ReplicaSetCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - h.encSliceReplicaSetCondition(([]ReplicaSetCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv4 := &x.Replicas - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(yyv4)) = int32(r.DecodeInt(32)) - } - } - case "fullyLabeledReplicas": - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - yyv6 := &x.FullyLabeledReplicas - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(yyv6)) = int32(r.DecodeInt(32)) - } - } - case "readyReplicas": - if r.TryDecodeAsNil() { - x.ReadyReplicas = 0 - } else { - yyv8 := &x.ReadyReplicas - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(yyv8)) = int32(r.DecodeInt(32)) - } - } - case "availableReplicas": - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - yyv10 := &x.AvailableReplicas - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(yyv10)) = int32(r.DecodeInt(32)) - } - } - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - yyv12 := &x.ObservedGeneration - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int64)(yyv12)) = int64(r.DecodeInt(64)) - } - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv14 := &x.Conditions - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - h.decSliceReplicaSetCondition((*[]ReplicaSetCondition)(yyv14), d) - } - } - default: - 
z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - yyv17 := &x.Replicas - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*int32)(yyv17)) = int32(r.DecodeInt(32)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - yyv19 := &x.FullyLabeledReplicas - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int32)(yyv19)) = int32(r.DecodeInt(32)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadyReplicas = 0 - } else { - yyv21 := &x.ReadyReplicas - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int32)(yyv21)) = int32(r.DecodeInt(32)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - yyv23 := &x.AvailableReplicas - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*int32)(yyv23)) = int32(r.DecodeInt(32)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - yyv25 := &x.ObservedGeneration - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*int64)(yyv25)) = int64(r.DecodeInt(64)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv27 := &x.Conditions - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - h.decSliceReplicaSetCondition((*[]ReplicaSetCondition)(yyv27), d) - } - } - for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj16-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ReplicaSetConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && 
z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ReplicaSetConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ReplicaSetCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = x.Reason != "" - yyq2[4] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf7 := &x.Status - yysf7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yysf8 := &x.Status - yysf8.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastTransitionTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastTransitionTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSetCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSetCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv4 := &x.Type - yyv4.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_v1.Time{} - } else { - yyv6 := &x.LastTransitionTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv8 := &x.Reason - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv10 := &x.Message - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSetCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) 
- return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - yyv13 := &x.Type - yyv13.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - yyv14 := &x.Status - yyv14.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_v1.Time{} - } else { - yyv15 := &x.LastTransitionTime - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else if yym16 { - z.DecBinaryUnmarshal(yyv15) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv15) - } else { - z.DecFallback(yyv15, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - yyv17 := &x.Reason - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv19 := &x.Message - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - 
if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PodSecurityPolicySpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv12 := &x.Kind - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv14 := &x.APIVersion - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv16 := &x.ObjectMeta - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(yyv16) { - } else { - z.DecFallback(yyv16, false) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PodSecurityPolicySpec{} - } else { - yyv18 := &x.Spec - yyv18.CodecDecodeSelf(d) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [14]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Privileged != false - yyq2[1] = len(x.DefaultAddCapabilities) != 0 - yyq2[2] = len(x.RequiredDropCapabilities) != 0 - yyq2[3] = 
len(x.AllowedCapabilities) != 0 - yyq2[4] = len(x.Volumes) != 0 - yyq2[5] = x.HostNetwork != false - yyq2[6] = len(x.HostPorts) != 0 - yyq2[7] = x.HostPID != false - yyq2[8] = x.HostIPC != false - yyq2[13] = x.ReadOnlyRootFilesystem != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(14) - } else { - yynn2 = 4 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Privileged)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("privileged")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Privileged)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.DefaultAddCapabilities == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.DefaultAddCapabilities), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("defaultAddCapabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DefaultAddCapabilities == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.DefaultAddCapabilities), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.RequiredDropCapabilities == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.RequiredDropCapabilities), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDropCapabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDropCapabilities == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.RequiredDropCapabilities), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.AllowedCapabilities == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.AllowedCapabilities), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allowedCapabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AllowedCapabilities == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.AllowedCapabilities), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Volumes == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - 
h.encSliceFSType(([]FSType)(x.Volumes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Volumes == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - h.encSliceFSType(([]FSType)(x.Volumes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.HostPorts == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPorts")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostPorts == nil { - r.EncodeNil() - } else { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy31 := &x.SELinux - yy31.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinux")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy33 := &x.SELinux - yy33.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy36 := &x.RunAsUser - yy36.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy38 := &x.RunAsUser - 
yy38.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy41 := &x.SupplementalGroups - yy41.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy43 := &x.SupplementalGroups - yy43.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy46 := &x.FSGroup - yy46.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy48 := &x.FSGroup - yy48.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - yym51 := z.EncBinary() - _ = yym51 - if false { - } else { - r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym52 := z.EncBinary() - _ = yym52 - if false { - } else { - r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "privileged": - if r.TryDecodeAsNil() { - x.Privileged = false - } else { - yyv4 := &x.Privileged - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*bool)(yyv4)) = r.DecodeBool() - } - } - case "defaultAddCapabilities": - if r.TryDecodeAsNil() { - x.DefaultAddCapabilities = nil - } else { - yyv6 := &x.DefaultAddCapabilities - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv6), d) - } - } - case "requiredDropCapabilities": - if r.TryDecodeAsNil() { - x.RequiredDropCapabilities = nil - } else { - yyv8 := 
&x.RequiredDropCapabilities - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv8), d) - } - } - case "allowedCapabilities": - if r.TryDecodeAsNil() { - x.AllowedCapabilities = nil - } else { - yyv10 := &x.AllowedCapabilities - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv10), d) - } - } - case "volumes": - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv12 := &x.Volumes - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - h.decSliceFSType((*[]FSType)(yyv12), d) - } - } - case "hostNetwork": - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - yyv14 := &x.HostNetwork - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*bool)(yyv14)) = r.DecodeBool() - } - } - case "hostPorts": - if r.TryDecodeAsNil() { - x.HostPorts = nil - } else { - yyv16 := &x.HostPorts - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - h.decSliceHostPortRange((*[]HostPortRange)(yyv16), d) - } - } - case "hostPID": - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - yyv18 := &x.HostPID - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*bool)(yyv18)) = r.DecodeBool() - } - } - case "hostIPC": - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - yyv20 := &x.HostIPC - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - *((*bool)(yyv20)) = r.DecodeBool() - } - } - case "seLinux": - if r.TryDecodeAsNil() { - x.SELinux = SELinuxStrategyOptions{} - } else { - yyv22 := &x.SELinux - yyv22.CodecDecodeSelf(d) - } - case "runAsUser": - if r.TryDecodeAsNil() { - x.RunAsUser = RunAsUserStrategyOptions{} - } else { - yyv23 := &x.RunAsUser - yyv23.CodecDecodeSelf(d) - } - case "supplementalGroups": - if r.TryDecodeAsNil() { - x.SupplementalGroups = SupplementalGroupsStrategyOptions{} - } else { - yyv24 := &x.SupplementalGroups - yyv24.CodecDecodeSelf(d) - } - case "fsGroup": - if r.TryDecodeAsNil() { - x.FSGroup = FSGroupStrategyOptions{} - } else { - yyv25 := &x.FSGroup - yyv25.CodecDecodeSelf(d) - } - case "readOnlyRootFilesystem": - if r.TryDecodeAsNil() { - x.ReadOnlyRootFilesystem = false - } else { - yyv26 := &x.ReadOnlyRootFilesystem - yym27 := z.DecBinary() - _ = yym27 - if false { - } else { - *((*bool)(yyv26)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj28 int - var yyb28 bool - var yyhl28 bool = l >= 0 - yyj28++ - if yyhl28 { - yyb28 = yyj28 > l - } else { - yyb28 = r.CheckBreak() - } - if yyb28 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Privileged = false - } else { - yyv29 := &x.Privileged - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*bool)(yyv29)) = r.DecodeBool() - } - } - yyj28++ - if yyhl28 { - yyb28 = yyj28 > l - } else { - yyb28 = r.CheckBreak() - } - if yyb28 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DefaultAddCapabilities = nil - } else { - yyv31 := &x.DefaultAddCapabilities - yym32 := z.DecBinary() - _ = yym32 - if 
false { - } else { - h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv31), d) - } - } - yyj28++ - if yyhl28 { - yyb28 = yyj28 > l - } else { - yyb28 = r.CheckBreak() - } - if yyb28 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RequiredDropCapabilities = nil - } else { - yyv33 := &x.RequiredDropCapabilities - yym34 := z.DecBinary() - _ = yym34 - if false { - } else { - h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv33), d) - } - } - yyj28++ - if yyhl28 { - yyb28 = yyj28 > l - } else { - yyb28 = r.CheckBreak() - } - if yyb28 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AllowedCapabilities = nil - } else { - yyv35 := &x.AllowedCapabilities - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv35), d) - } - } - yyj28++ - if yyhl28 { - yyb28 = yyj28 > l - } else { - yyb28 = r.CheckBreak() - } - if yyb28 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv37 := &x.Volumes - yym38 := z.DecBinary() - _ = yym38 - if false { - } else { - h.decSliceFSType((*[]FSType)(yyv37), d) - } - } - yyj28++ - if yyhl28 { - yyb28 = yyj28 > l - } else { - yyb28 = r.CheckBreak() - } - if yyb28 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - yyv39 := &x.HostNetwork - yym40 := z.DecBinary() - _ = yym40 - if false { - } else { - *((*bool)(yyv39)) = r.DecodeBool() - } - } - yyj28++ - if yyhl28 { - yyb28 = yyj28 > l - } else { - yyb28 = r.CheckBreak() - } - if yyb28 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPorts = nil - } else { - yyv41 := &x.HostPorts - yym42 := z.DecBinary() - _ = yym42 - if false { - } else { - h.decSliceHostPortRange((*[]HostPortRange)(yyv41), d) - } - } - yyj28++ - if yyhl28 { - yyb28 = yyj28 > l - } else { - yyb28 = r.CheckBreak() - } - if yyb28 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - yyv43 := &x.HostPID - yym44 := z.DecBinary() - _ = yym44 - if false { - } else { - *((*bool)(yyv43)) = r.DecodeBool() - } - } - yyj28++ - if yyhl28 { - yyb28 = yyj28 > l - } else { - yyb28 = r.CheckBreak() - } - if yyb28 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - yyv45 := &x.HostIPC - yym46 := z.DecBinary() - _ = yym46 - if false { - } else { - *((*bool)(yyv45)) = r.DecodeBool() - } - } - yyj28++ - if yyhl28 { - yyb28 = yyj28 > l - } else { - yyb28 = r.CheckBreak() - } - if yyb28 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SELinux = SELinuxStrategyOptions{} - } else { - yyv47 := &x.SELinux - yyv47.CodecDecodeSelf(d) - } - yyj28++ - if yyhl28 { - yyb28 = yyj28 > l 
- } else { - yyb28 = r.CheckBreak() - } - if yyb28 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RunAsUser = RunAsUserStrategyOptions{} - } else { - yyv48 := &x.RunAsUser - yyv48.CodecDecodeSelf(d) - } - yyj28++ - if yyhl28 { - yyb28 = yyj28 > l - } else { - yyb28 = r.CheckBreak() - } - if yyb28 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SupplementalGroups = SupplementalGroupsStrategyOptions{} - } else { - yyv49 := &x.SupplementalGroups - yyv49.CodecDecodeSelf(d) - } - yyj28++ - if yyhl28 { - yyb28 = yyj28 > l - } else { - yyb28 = r.CheckBreak() - } - if yyb28 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSGroup = FSGroupStrategyOptions{} - } else { - yyv50 := &x.FSGroup - yyv50.CodecDecodeSelf(d) - } - yyj28++ - if yyhl28 { - yyb28 = yyj28 > l - } else { - yyb28 = r.CheckBreak() - } - if yyb28 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnlyRootFilesystem = false - } else { - yyv51 := &x.ReadOnlyRootFilesystem - yym52 := z.DecBinary() - _ = yym52 - if false { - } else { - *((*bool)(yyv51)) = r.DecodeBool() - } - } - for { - yyj28++ - if yyhl28 { - yyb28 = yyj28 > l - } else { - yyb28 = r.CheckBreak() - } - if yyb28 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj28-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x FSType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *FSType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("min")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("max")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HostPortRange) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HostPortRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "min": - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - yyv4 := &x.Min - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(yyv4)) = int32(r.DecodeInt(32)) - } - } - case "max": - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - yyv6 := &x.Max - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(yyv6)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HostPortRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - yyv9 := &x.Min - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*int32)(yyv9)) = int32(r.DecodeInt(32)) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - yyv11 := &x.Max - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int32)(yyv11)) = int32(r.DecodeInt(32)) - } - } 
- for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.SELinuxOptions != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Rule.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SELinuxStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SELinuxStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - yyv4 := &x.Rule - yyv4.CodecDecodeSelf(d) - } - case "seLinuxOptions": - if 
r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(pkg4_v1.SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SELinuxStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - yyv7 := &x.Rule - yyv7.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(pkg4_v1.SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x SELinuxStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *SELinuxStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.Ranges) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Rule.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Ranges == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } 
else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ranges")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ranges == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RunAsUserStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RunAsUserStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - yyv4 := &x.Rule - yyv4.CodecDecodeSelf(d) - } - case "ranges": - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv5 := &x.Ranges - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - yyv8 := &x.Rule - yyv8.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv9 := &x.Ranges - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("min")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("max")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IDRange) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IDRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "min": - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - yyv4 := &x.Min - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(yyv4)) = int64(r.DecodeInt(64)) - } - } - case "max": - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - yyv6 := &x.Max - yym7 := z.DecBinary() - _ = yym7 - if 
false { - } else { - *((*int64)(yyv6)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IDRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - yyv9 := &x.Min - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*int64)(yyv9)) = int64(r.DecodeInt(64)) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - yyv11 := &x.Max - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int64)(yyv11)) = int64(r.DecodeInt(64)) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x RunAsUserStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *RunAsUserStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *FSGroupStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Rule != "" - yyq2[1] = len(x.Ranges) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Rule.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Ranges == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } 
else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ranges")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ranges == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FSGroupStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FSGroupStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - yyv4 := &x.Rule - yyv4.CodecDecodeSelf(d) - } - case "ranges": - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv5 := &x.Ranges - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FSGroupStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - yyv8 := &x.Rule - yyv8.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv9 := &x.Ranges - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x FSGroupStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *FSGroupStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *SupplementalGroupsStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Rule != "" - yyq2[1] = len(x.Ranges) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Rule.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Ranges == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ranges")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ranges == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SupplementalGroupsStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } 
- } -} - -func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - yyv4 := &x.Rule - yyv4.CodecDecodeSelf(d) - } - case "ranges": - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv5 := &x.Ranges - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - yyv8 := &x.Rule - yyv8.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv9 := &x.Ranges - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x SupplementalGroupsStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *SupplementalGroupsStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 
int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = NetworkPolicySpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv12 := &x.Kind - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*string)(yyv12)) = r.DecodeString() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv14 := &x.APIVersion - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - 
yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv16 := &x.ObjectMeta - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(yyv16) { - } else { - z.DecFallback(yyv16, false) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = NetworkPolicySpec{} - } else { - yyv18 := &x.Spec - yyv18.CodecDecodeSelf(d) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.Ingress) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.PodSelector - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.PodSelector - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Ingress == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ingress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ingress == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } 
else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "podSelector": - if r.TryDecodeAsNil() { - x.PodSelector = pkg1_v1.LabelSelector{} - } else { - yyv4 := &x.PodSelector - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "ingress": - if r.TryDecodeAsNil() { - x.Ingress = nil - } else { - yyv6 := &x.Ingress - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodSelector = pkg1_v1.LabelSelector{} - } else { - yyv9 := &x.PodSelector - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else { - z.DecFallback(yyv9, false) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ingress = nil - } else { - yyv11 := &x.Ingress - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyIngressRule) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := 
!z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Ports) != 0 - yyq2[1] = len(x.From) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Ports == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.From == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("from")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.From == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyIngressRule) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyIngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv4 := &x.Ports - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - 
h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv4), d) - } - } - case "from": - if r.TryDecodeAsNil() { - x.From = nil - } else { - yyv6 := &x.From - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicyIngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv9 := &x.Ports - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.From = nil - } else { - yyv11 := &x.From - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyPort) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Protocol != nil - yyq2[1] = x.Port != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Protocol == nil { - r.EncodeNil() - } else { - yy4 := *x.Protocol - yysf5 := &yy4 - yysf5.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("protocol")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Protocol == nil { - r.EncodeNil() - } else { - yy6 := *x.Protocol - yysf7 := &yy6 - yysf7.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Port == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.EncExt(x.Port) { - } else if !yym9 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Port) - } else { - z.EncFallback(x.Port) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Port == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.Port) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Port) - } else { - z.EncFallback(x.Port) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyPort) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "protocol": - if r.TryDecodeAsNil() { - if x.Protocol != nil { - x.Protocol = nil - } - } else { - if x.Protocol == nil { - x.Protocol = new(pkg4_v1.Protocol) - } - x.Protocol.CodecDecodeSelf(d) - } - case "port": - if r.TryDecodeAsNil() { - if x.Port != nil { - x.Port = nil - } - } else { - if x.Port == nil { - x.Port = new(pkg5_intstr.IntOrString) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(x.Port) { - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Port) - } else { - z.DecFallback(x.Port, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicyPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Protocol != nil { - x.Protocol = nil - } - } else { - if x.Protocol == nil { - x.Protocol = new(pkg4_v1.Protocol) - } - x.Protocol.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Port != nil { - x.Port = nil - } - } else { - if x.Port == nil { - x.Port = new(pkg5_intstr.IntOrString) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(x.Port) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Port) - } else { - z.DecFallback(x.Port, false) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyPeer) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.PodSelector != nil - yyq2[1] = x.NamespaceSelector != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.PodSelector == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(x.PodSelector) { - } else { - z.EncFallback(x.PodSelector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PodSelector == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(x.PodSelector) { - } else { - z.EncFallback(x.PodSelector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.NamespaceSelector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) { - } else { - z.EncFallback(x.NamespaceSelector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespaceSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NamespaceSelector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) { - } else { - z.EncFallback(x.NamespaceSelector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyPeer) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyPeer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "podSelector": - if r.TryDecodeAsNil() { - if x.PodSelector != nil { - x.PodSelector = nil - } - } else { - if x.PodSelector == nil { - x.PodSelector = new(pkg1_v1.LabelSelector) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(x.PodSelector) { - } else { - z.DecFallback(x.PodSelector, false) - } - } - case "namespaceSelector": - if r.TryDecodeAsNil() { - if x.NamespaceSelector != nil { - x.NamespaceSelector = nil - } - } else { - if x.NamespaceSelector == nil { - x.NamespaceSelector = new(pkg1_v1.LabelSelector) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) { - } else { - z.DecFallback(x.NamespaceSelector, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicyPeer) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PodSelector != nil { - x.PodSelector = nil - } - } else { - if x.PodSelector == nil { - x.PodSelector = new(pkg1_v1.LabelSelector) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(x.PodSelector) { - } else { - z.DecFallback(x.PodSelector, false) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NamespaceSelector != nil { - x.NamespaceSelector = nil - } - } else { - if x.NamespaceSelector == nil { - x.NamespaceSelector = new(pkg1_v1.LabelSelector) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) { - } else { - z.DecFallback(x.NamespaceSelector, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []CustomMetricTarget{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]CustomMetricTarget, yyrl1) - } - } else { - yyv1 = make([]CustomMetricTarget, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricTarget{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, CustomMetricTarget{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricTarget{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, CustomMetricTarget{}) // var yyz1 CustomMetricTarget - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricTarget{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []CustomMetricTarget{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - 
-func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurrentStatus, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurrentStatus, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []CustomMetricCurrentStatus{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]CustomMetricCurrentStatus, yyrl1) - } - } else { - yyv1 = make([]CustomMetricCurrentStatus, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricCurrentStatus{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, CustomMetricCurrentStatus{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricCurrentStatus{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, CustomMetricCurrentStatus{}) // var yyz1 CustomMetricCurrentStatus - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricCurrentStatus{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []CustomMetricCurrentStatus{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []APIVersion{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = 
z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]APIVersion, yyrl1) - } - } else { - yyv1 = make([]APIVersion, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = APIVersion{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, APIVersion{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = APIVersion{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, APIVersion{}) // var yyz1 APIVersion - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = APIVersion{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []APIVersion{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ThirdPartyResource{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ThirdPartyResource, yyrl1) - } - } else { - yyv1 = make([]ThirdPartyResource, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResource{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ThirdPartyResource{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResource{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ThirdPartyResource{}) // var yyz1 ThirdPartyResource - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResource{} - } else { - yyv4 := &yyv1[yyj1] - 
yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ThirdPartyResource{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceDeploymentCondition(v []DeploymentCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDeploymentCondition(v *[]DeploymentCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []DeploymentCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]DeploymentCondition, yyrl1) - } - } else { - yyv1 = make([]DeploymentCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DeploymentCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, DeploymentCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DeploymentCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, DeploymentCondition{}) // var yyz1 DeploymentCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = DeploymentCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []DeploymentCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Deployment{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var 
yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 920) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Deployment, yyrl1) - } - } else { - yyv1 = make([]Deployment, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Deployment{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Deployment{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []DaemonSet{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 872) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]DaemonSet, yyrl1) - } - } else { - yyv1 = make([]DaemonSet, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DaemonSet{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, DaemonSet{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DaemonSet{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, DaemonSet{}) // var yyz1 DaemonSet - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] 
= DaemonSet{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []DaemonSet{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceData, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceData, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ThirdPartyResourceData{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ThirdPartyResourceData, yyrl1) - } - } else { - yyv1 = make([]ThirdPartyResourceData, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResourceData{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ThirdPartyResourceData{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResourceData{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ThirdPartyResourceData{}) // var yyz1 ThirdPartyResourceData - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResourceData{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ThirdPartyResourceData{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Ingress{} - yyc1 = true - } else if len(yyv1) != 0 
{ - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 336) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Ingress, yyrl1) - } - } else { - yyv1 = make([]Ingress, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Ingress{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Ingress{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Ingress{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Ingress{}) // var yyz1 Ingress - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Ingress{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Ingress{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []IngressTLS{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]IngressTLS, yyrl1) - } - } else { - yyv1 = make([]IngressTLS, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressTLS{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, IngressTLS{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressTLS{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, IngressTLS{}) // var yyz1 IngressTLS - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < 
len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressTLS{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []IngressTLS{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []IngressRule{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]IngressRule, yyrl1) - } - } else { - yyv1 = make([]IngressRule, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressRule{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, IngressRule{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressRule{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, IngressRule{}) // var yyz1 IngressRule - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressRule{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []IngressRule{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []HTTPIngressPath{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { 
- var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]HTTPIngressPath, yyrl1) - } - } else { - yyv1 = make([]HTTPIngressPath, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPIngressPath{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HTTPIngressPath{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPIngressPath{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HTTPIngressPath{}) // var yyz1 HTTPIngressPath - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPIngressPath{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HTTPIngressPath{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ReplicaSet{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 856) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ReplicaSet, yyrl1) - } - } else { - yyv1 = make([]ReplicaSet, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSet{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ReplicaSet{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSet{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ReplicaSet{}) // var yyz1 ReplicaSet - yyc1 = true - } - yyh1.ElemContainerState(yyj1) 
- if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSet{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ReplicaSet{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceReplicaSetCondition(v []ReplicaSetCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceReplicaSetCondition(v *[]ReplicaSetCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ReplicaSetCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 88) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ReplicaSetCondition, yyrl1) - } - } else { - yyv1 = make([]ReplicaSetCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSetCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ReplicaSetCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSetCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ReplicaSetCondition{}) // var yyz1 ReplicaSetCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSetCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ReplicaSetCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicev1_Capability(v []pkg4_v1.Capability, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf2 := &yyv1 - yysf2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg4_v1.Capability, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - 
yyv1 = []pkg4_v1.Capability{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]pkg4_v1.Capability, yyrl1) - } - } else { - yyv1 = make([]pkg4_v1.Capability, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 pkg4_v1.Capability - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg4_v1.Capability{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []FSType{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]FSType, yyrl1) - } - } else { - yyv1 = make([]FSType, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 FSType - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - 
if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []FSType{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []HostPortRange{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]HostPortRange, yyrl1) - } - } else { - yyv1 = make([]HostPortRange, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HostPortRange{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HostPortRange{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HostPortRange{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HostPortRange{}) // var yyz1 HostPortRange - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = HostPortRange{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HostPortRange{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []IDRange{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = 
z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]IDRange, yyrl1) - } - } else { - yyv1 = make([]IDRange, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IDRange{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, IDRange{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IDRange{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, IDRange{}) // var yyz1 IDRange - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = IDRange{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []IDRange{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PodSecurityPolicy{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 552) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PodSecurityPolicy, yyrl1) - } - } else { - yyv1 = make([]PodSecurityPolicy, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodSecurityPolicy{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodSecurityPolicy{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodSecurityPolicy{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodSecurityPolicy{}) // var yyz1 PodSecurityPolicy - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodSecurityPolicy{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - 
z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodSecurityPolicy{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicyIngressRule(v []NetworkPolicyIngressRule, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngressRule, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NetworkPolicyIngressRule{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NetworkPolicyIngressRule, yyrl1) - } - } else { - yyv1 = make([]NetworkPolicyIngressRule, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyIngressRule{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NetworkPolicyIngressRule{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyIngressRule{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NetworkPolicyIngressRule{}) // var yyz1 NetworkPolicyIngressRule - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyIngressRule{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NetworkPolicyIngressRule{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicyPort(v []NetworkPolicyPort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NetworkPolicyPort{} - yyc1 = true - } else if len(yyv1) != 0 { - 
yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NetworkPolicyPort, yyrl1) - } - } else { - yyv1 = make([]NetworkPolicyPort, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPort{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NetworkPolicyPort{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPort{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NetworkPolicyPort{}) // var yyz1 NetworkPolicyPort - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPort{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NetworkPolicyPort{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicyPeer(v []NetworkPolicyPeer, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NetworkPolicyPeer{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NetworkPolicyPeer, yyrl1) - } - } else { - yyv1 = make([]NetworkPolicyPeer, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPeer{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NetworkPolicyPeer{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPeer{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); 
yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NetworkPolicyPeer{}) // var yyz1 NetworkPolicyPeer - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPeer{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NetworkPolicyPeer{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicy(v []NetworkPolicy, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NetworkPolicy{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 312) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NetworkPolicy, yyrl1) - } - } else { - yyv1 = make([]NetworkPolicy, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicy{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NetworkPolicy{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicy{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NetworkPolicy{}) // var yyz1 NetworkPolicy - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicy{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NetworkPolicy{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.conversion.go deleted file mode 100644 index 2baeaa676..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,1628 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
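Editorial note: the slice decoders deleted above all follow the same generated shape: read the declared length, clamp the initial allocation with DecInferLen against MaxInitLen and a per-element size hint (16 bytes for FSType, 8 for HostPortRange, 552 for PodSecurityPolicy, and so on), decode in place up to that clamp, then append for any elements beyond it. Below is a minimal stand-alone sketch of that capacity-inference idea only; the inferLen helper, its byte budget, and the numbers are assumptions for illustration, not the ugorji codec internals.

package main

import "fmt"

// inferLen mirrors the idea behind the generated z.DecInferLen calls above:
// the decoder knows the declared element count (n) and a per-element size hint
// in bytes, and caps the initial allocation at maxBytes so a hostile stream
// that claims a huge length cannot force a huge up-front make().
// Simplified illustration only, not the real codec implementation.
func inferLen(n, maxBytes, elemSize int) (rl int, truncated bool) {
	if n <= 0 || elemSize <= 0 {
		return n, false
	}
	if maxBytes <= 0 {
		maxBytes = 1 << 20 // assumed default budget for this sketch
	}
	if n*elemSize <= maxBytes {
		return n, false
	}
	return maxBytes / elemSize, true
}

func main() {
	// A stream claiming one million 8-byte elements against a 64 KiB budget is
	// clamped; the generated decoders then append past the clamp only as real
	// elements actually arrive (the yyrt1 branch above).
	rl, truncated := inferLen(1000000, 64*1024, 8)
	fmt.Println(rl, truncated) // 8192 true
}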
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/client-go/pkg/api" - api_v1 "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_APIVersion_To_extensions_APIVersion, - Convert_extensions_APIVersion_To_v1beta1_APIVersion, - Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus, - Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus, - Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList, - Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList, - Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget, - Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget, - Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList, - Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList, - Convert_v1beta1_DaemonSet_To_extensions_DaemonSet, - Convert_extensions_DaemonSet_To_v1beta1_DaemonSet, - Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList, - Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList, - Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec, - Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec, - Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus, - Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus, - Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy, - Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy, - Convert_v1beta1_Deployment_To_extensions_Deployment, - Convert_extensions_Deployment_To_v1beta1_Deployment, - Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition, - Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition, - Convert_v1beta1_DeploymentList_To_extensions_DeploymentList, - Convert_extensions_DeploymentList_To_v1beta1_DeploymentList, - Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback, - Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback, - Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, - Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, - Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus, - Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus, - Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, - Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, - Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions, - 
Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions, - Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath, - Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath, - Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue, - Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue, - Convert_v1beta1_HostPortRange_To_extensions_HostPortRange, - Convert_extensions_HostPortRange_To_v1beta1_HostPortRange, - Convert_v1beta1_IDRange_To_extensions_IDRange, - Convert_extensions_IDRange_To_v1beta1_IDRange, - Convert_v1beta1_Ingress_To_extensions_Ingress, - Convert_extensions_Ingress_To_v1beta1_Ingress, - Convert_v1beta1_IngressBackend_To_extensions_IngressBackend, - Convert_extensions_IngressBackend_To_v1beta1_IngressBackend, - Convert_v1beta1_IngressList_To_extensions_IngressList, - Convert_extensions_IngressList_To_v1beta1_IngressList, - Convert_v1beta1_IngressRule_To_extensions_IngressRule, - Convert_extensions_IngressRule_To_v1beta1_IngressRule, - Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue, - Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue, - Convert_v1beta1_IngressSpec_To_extensions_IngressSpec, - Convert_extensions_IngressSpec_To_v1beta1_IngressSpec, - Convert_v1beta1_IngressStatus_To_extensions_IngressStatus, - Convert_extensions_IngressStatus_To_v1beta1_IngressStatus, - Convert_v1beta1_IngressTLS_To_extensions_IngressTLS, - Convert_extensions_IngressTLS_To_v1beta1_IngressTLS, - Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy, - Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy, - Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule, - Convert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule, - Convert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList, - Convert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList, - Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer, - Convert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer, - Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort, - Convert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort, - Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec, - Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec, - Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy, - Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy, - Convert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList, - Convert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList, - Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec, - Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec, - Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet, - Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet, - Convert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition, - Convert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition, - Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList, - Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList, - Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, - Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, - Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus, - Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus, - Convert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy, - 
Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy, - Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig, - Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig, - Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, - Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet, - Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, - Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, - Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions, - Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions, - Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions, - Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions, - Convert_v1beta1_Scale_To_extensions_Scale, - Convert_extensions_Scale_To_v1beta1_Scale, - Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec, - Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec, - Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, - Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, - Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions, - Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions, - Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource, - Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource, - Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData, - Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData, - Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList, - Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList, - Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList, - Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList, - ) -} - -func autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error { - out.Name = in.Name - return nil -} - -func Convert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error { - return autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in, out, s) -} - -func autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error { - out.Name = in.Name - return nil -} - -func Convert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error { - return autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in, out, s) -} - -func autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { - out.Name = in.Name - out.CurrentValue = in.CurrentValue - return nil -} - -func Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in, out, s) -} - -func autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, s conversion.Scope) error { - out.Name 
= in.Name - out.CurrentValue = in.CurrentValue - return nil -} - -func Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in, out, s) -} - -func autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { - out.Items = *(*[]extensions.CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in, out, s) -} - -func autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error { - if in.Items == nil { - out.Items = make([]CustomMetricCurrentStatus, 0) - } else { - out.Items = *(*[]CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in, out, s) -} - -func autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { - out.Name = in.Name - out.TargetValue = in.TargetValue - return nil -} - -func Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in, out, s) -} - -func autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *CustomMetricTarget, s conversion.Scope) error { - out.Name = in.Name - out.TargetValue = in.TargetValue - return nil -} - -func Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *CustomMetricTarget, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in, out, s) -} - -func autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { - out.Items = *(*[]extensions.CustomMetricTarget)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in, out, s) -} - -func autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error { - if in.Items == nil { - out.Items = make([]CustomMetricTarget, 0) - } else { - out.Items 
= *(*[]CustomMetricTarget)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in, out, s) -} - -func autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in, out, s) -} - -func autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error { - return autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in, out, s) -} - -func autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]extensions.DaemonSet, len(*in)) - for i := range *in { - if err := Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in, out, s) -} - -func autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DaemonSet, len(*in)) - for i := range *in { - if err := Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]DaemonSet, 0) - } - return nil -} - -func Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in, out, s) -} - -func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if err := Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err 
!= nil { - return err - } - out.MinReadySeconds = in.MinReadySeconds - out.TemplateGeneration = in.TemplateGeneration - return nil -} - -func Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in, out, s) -} - -func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error { - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { - return err - } - out.MinReadySeconds = in.MinReadySeconds - out.TemplateGeneration = in.TemplateGeneration - return nil -} - -func Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in, out, s) -} - -func autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { - out.CurrentNumberScheduled = in.CurrentNumberScheduled - out.NumberMisscheduled = in.NumberMisscheduled - out.DesiredNumberScheduled = in.DesiredNumberScheduled - out.NumberReady = in.NumberReady - out.ObservedGeneration = in.ObservedGeneration - out.UpdatedNumberScheduled = in.UpdatedNumberScheduled - out.NumberAvailable = in.NumberAvailable - out.NumberUnavailable = in.NumberUnavailable - return nil -} - -func Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in, out, s) -} - -func autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error { - out.CurrentNumberScheduled = in.CurrentNumberScheduled - out.NumberMisscheduled = in.NumberMisscheduled - out.DesiredNumberScheduled = in.DesiredNumberScheduled - out.NumberReady = in.NumberReady - out.ObservedGeneration = in.ObservedGeneration - out.UpdatedNumberScheduled = in.UpdatedNumberScheduled - out.NumberAvailable = in.NumberAvailable - out.NumberUnavailable = in.NumberUnavailable - return nil -} - -func Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in, out, s) -} - -func autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDaemonSet) - if err := Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(*in, *out, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, 
s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in, out, s) -} - -func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = DaemonSetUpdateStrategyType(in.Type) - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDaemonSet) - if err := Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(*in, *out, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *DaemonSetUpdateStrategy, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in, out, s) -} - -func autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { - return autoConvert_v1beta1_Deployment_To_extensions_Deployment(in, out, s) -} - -func autoConvert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error { - return autoConvert_extensions_Deployment_To_v1beta1_Deployment(in, out, s) -} - -func autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - out.Type = extensions.DeploymentConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastUpdateTime = in.LastUpdateTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in, out, s) -} - -func autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *DeploymentCondition, s conversion.Scope) error { - out.Type = DeploymentConditionType(in.Type) - out.Status = api_v1.ConditionStatus(in.Status) - out.LastUpdateTime = in.LastUpdateTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *DeploymentCondition, s 
conversion.Scope) error { - return autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in, out, s) -} - -func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]extensions.Deployment, len(*in)) - for i := range *in { - if err := Convert_v1beta1_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in, out, s) -} - -func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Deployment, len(*in)) - for i := range *in { - if err := Convert_extensions_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]Deployment, 0) - } - return nil -} - -func Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s conversion.Scope) error { - return autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in, out, s) -} - -func autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { - out.Name = in.Name - out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations)) - if err := Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in, out, s) -} - -func autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error { - out.Name = in.Name - out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations)) - if err := Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error { - return autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s) -} - -func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { - return 
err - } - out.MinReadySeconds = in.MinReadySeconds - out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) - out.Paused = in.Paused - out.RollbackTo = (*extensions.RollbackConfig)(unsafe.Pointer(in.RollbackTo)) - out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds)) - return nil -} - -func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { - return err - } - out.MinReadySeconds = in.MinReadySeconds - out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) - out.Paused = in.Paused - out.RollbackTo = (*RollbackConfig)(unsafe.Pointer(in.RollbackTo)) - out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds)) - return nil -} - -func autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { - out.ObservedGeneration = in.ObservedGeneration - out.Replicas = in.Replicas - out.UpdatedReplicas = in.UpdatedReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.UnavailableReplicas = in.UnavailableReplicas - out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -func Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s) -} - -func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error { - out.ObservedGeneration = in.ObservedGeneration - out.Replicas = in.Replicas - out.UpdatedReplicas = in.UpdatedReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.UnavailableReplicas = in.UnavailableReplicas - out.Conditions = *(*[]DeploymentCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -func Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error { - return autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s) -} - -func autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDeployment) - if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { - out.Type = DeploymentStrategyType(in.Type) - if 
in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func autoConvert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in *FSGroupStrategyOptions, out *extensions.FSGroupStrategyOptions, s conversion.Scope) error { - out.Rule = extensions.FSGroupStrategyType(in.Rule) - out.Ranges = *(*[]extensions.IDRange)(unsafe.Pointer(&in.Ranges)) - return nil -} - -func Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in *FSGroupStrategyOptions, out *extensions.FSGroupStrategyOptions, s conversion.Scope) error { - return autoConvert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in, out, s) -} - -func autoConvert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in *extensions.FSGroupStrategyOptions, out *FSGroupStrategyOptions, s conversion.Scope) error { - out.Rule = FSGroupStrategyType(in.Rule) - out.Ranges = *(*[]IDRange)(unsafe.Pointer(&in.Ranges)) - return nil -} - -func Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in *extensions.FSGroupStrategyOptions, out *FSGroupStrategyOptions, s conversion.Scope) error { - return autoConvert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in, out, s) -} - -func autoConvert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error { - out.Path = in.Path - if err := Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(&in.Backend, &out.Backend, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error { - return autoConvert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in, out, s) -} - -func autoConvert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error { - out.Path = in.Path - if err := Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(&in.Backend, &out.Backend, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error { - return autoConvert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in, out, s) -} - -func autoConvert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in *HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error { - out.Paths = *(*[]extensions.HTTPIngressPath)(unsafe.Pointer(&in.Paths)) - return nil -} - -func Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in *HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error { - return autoConvert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in, out, s) -} - -func autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error { - if in.Paths == nil { - out.Paths = make([]HTTPIngressPath, 0) - } else { - out.Paths = *(*[]HTTPIngressPath)(unsafe.Pointer(&in.Paths)) - } - return nil -} - -func 
Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error { - return autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in, out, s) -} - -func autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { - out.Min = int(in.Min) - out.Max = int(in.Max) - return nil -} - -func Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { - return autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in, out, s) -} - -func autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error { - out.Min = int32(in.Min) - out.Max = int32(in.Max) - return nil -} - -func Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error { - return autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in, out, s) -} - -func autoConvert_v1beta1_IDRange_To_extensions_IDRange(in *IDRange, out *extensions.IDRange, s conversion.Scope) error { - out.Min = in.Min - out.Max = in.Max - return nil -} - -func Convert_v1beta1_IDRange_To_extensions_IDRange(in *IDRange, out *extensions.IDRange, s conversion.Scope) error { - return autoConvert_v1beta1_IDRange_To_extensions_IDRange(in, out, s) -} - -func autoConvert_extensions_IDRange_To_v1beta1_IDRange(in *extensions.IDRange, out *IDRange, s conversion.Scope) error { - out.Min = in.Min - out.Max = in.Max - return nil -} - -func Convert_extensions_IDRange_To_v1beta1_IDRange(in *extensions.IDRange, out *IDRange, s conversion.Scope) error { - return autoConvert_extensions_IDRange_To_v1beta1_IDRange(in, out, s) -} - -func autoConvert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error { - return autoConvert_v1beta1_Ingress_To_extensions_Ingress(in, out, s) -} - -func autoConvert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error { - return autoConvert_extensions_Ingress_To_v1beta1_Ingress(in, out, s) -} - -func autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error { - out.ServiceName = in.ServiceName - out.ServicePort = in.ServicePort - return nil -} - -func Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error { - return 
autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in, out, s) -} - -func autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error { - out.ServiceName = in.ServiceName - out.ServicePort = in.ServicePort - return nil -} - -func Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error { - return autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in, out, s) -} - -func autoConvert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]extensions.Ingress)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error { - return autoConvert_v1beta1_IngressList_To_extensions_IngressList(in, out, s) -} - -func autoConvert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]Ingress, 0) - } else { - out.Items = *(*[]Ingress)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error { - return autoConvert_extensions_IngressList_To_v1beta1_IngressList(in, out, s) -} - -func autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error { - out.Host = in.Host - if err := Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error { - return autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in, out, s) -} - -func autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error { - out.Host = in.Host - if err := Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error { - return autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in, out, s) -} - -func autoConvert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error { - out.HTTP = (*extensions.HTTPIngressRuleValue)(unsafe.Pointer(in.HTTP)) - return nil -} - -func Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error { - return autoConvert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in, out, s) -} - -func autoConvert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error { - out.HTTP = (*HTTPIngressRuleValue)(unsafe.Pointer(in.HTTP)) - return nil -} - -func Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error { - return 
autoConvert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in, out, s) -} - -func autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out *extensions.IngressSpec, s conversion.Scope) error { - out.Backend = (*extensions.IngressBackend)(unsafe.Pointer(in.Backend)) - out.TLS = *(*[]extensions.IngressTLS)(unsafe.Pointer(&in.TLS)) - out.Rules = *(*[]extensions.IngressRule)(unsafe.Pointer(&in.Rules)) - return nil -} - -func Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out *extensions.IngressSpec, s conversion.Scope) error { - return autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in, out, s) -} - -func autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error { - out.Backend = (*IngressBackend)(unsafe.Pointer(in.Backend)) - out.TLS = *(*[]IngressTLS)(unsafe.Pointer(&in.TLS)) - out.Rules = *(*[]IngressRule)(unsafe.Pointer(&in.Rules)) - return nil -} - -func Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error { - return autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in, out, s) -} - -func autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out *extensions.IngressStatus, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.LoadBalancer, &out.LoadBalancer, 0); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out *extensions.IngressStatus, s conversion.Scope) error { - return autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in, out, s) -} - -func autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.LoadBalancer, &out.LoadBalancer, 0); err != nil { - return err - } - return nil -} - -func Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error { - return autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in, out, s) -} - -func autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error { - out.Hosts = *(*[]string)(unsafe.Pointer(&in.Hosts)) - out.SecretName = in.SecretName - return nil -} - -func Convert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error { - return autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in, out, s) -} - -func autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error { - out.Hosts = *(*[]string)(unsafe.Pointer(&in.Hosts)) - out.SecretName = in.SecretName - return nil -} - -func Convert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error { - return autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in, out, s) -} - -func autoConvert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy, out *extensions.NetworkPolicy, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy, out *extensions.NetworkPolicy, s conversion.Scope) error { - return autoConvert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in, out, s) -} - -func autoConvert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in *extensions.NetworkPolicy, out *NetworkPolicy, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in *extensions.NetworkPolicy, out *NetworkPolicy, s conversion.Scope) error { - return autoConvert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in, out, s) -} - -func autoConvert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in *NetworkPolicyIngressRule, out *extensions.NetworkPolicyIngressRule, s conversion.Scope) error { - out.Ports = *(*[]extensions.NetworkPolicyPort)(unsafe.Pointer(&in.Ports)) - out.From = *(*[]extensions.NetworkPolicyPeer)(unsafe.Pointer(&in.From)) - return nil -} - -func Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in *NetworkPolicyIngressRule, out *extensions.NetworkPolicyIngressRule, s conversion.Scope) error { - return autoConvert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in, out, s) -} - -func autoConvert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in *extensions.NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, s conversion.Scope) error { - out.Ports = *(*[]NetworkPolicyPort)(unsafe.Pointer(&in.Ports)) - out.From = *(*[]NetworkPolicyPeer)(unsafe.Pointer(&in.From)) - return nil -} - -func Convert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in *extensions.NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, s conversion.Scope) error { - return 
autoConvert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in, out, s) -} - -func autoConvert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in *NetworkPolicyList, out *extensions.NetworkPolicyList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]extensions.NetworkPolicy)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in *NetworkPolicyList, out *extensions.NetworkPolicyList, s conversion.Scope) error { - return autoConvert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in, out, s) -} - -func autoConvert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]NetworkPolicy, 0) - } else { - out.Items = *(*[]NetworkPolicy)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error { - return autoConvert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in, out, s) -} - -func autoConvert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in *NetworkPolicyPeer, out *extensions.NetworkPolicyPeer, s conversion.Scope) error { - out.PodSelector = (*v1.LabelSelector)(unsafe.Pointer(in.PodSelector)) - out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) - return nil -} - -func Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in *NetworkPolicyPeer, out *extensions.NetworkPolicyPeer, s conversion.Scope) error { - return autoConvert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in, out, s) -} - -func autoConvert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *extensions.NetworkPolicyPeer, out *NetworkPolicyPeer, s conversion.Scope) error { - out.PodSelector = (*v1.LabelSelector)(unsafe.Pointer(in.PodSelector)) - out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) - return nil -} - -func Convert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *extensions.NetworkPolicyPeer, out *NetworkPolicyPeer, s conversion.Scope) error { - return autoConvert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in, out, s) -} - -func autoConvert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in *NetworkPolicyPort, out *extensions.NetworkPolicyPort, s conversion.Scope) error { - out.Protocol = (*api.Protocol)(unsafe.Pointer(in.Protocol)) - out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port)) - return nil -} - -func Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in *NetworkPolicyPort, out *extensions.NetworkPolicyPort, s conversion.Scope) error { - return autoConvert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in, out, s) -} - -func autoConvert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *extensions.NetworkPolicyPort, out *NetworkPolicyPort, s conversion.Scope) error { - out.Protocol = (*api_v1.Protocol)(unsafe.Pointer(in.Protocol)) - out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port)) - return nil -} - -func Convert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *extensions.NetworkPolicyPort, out *NetworkPolicyPort, s conversion.Scope) error { - return autoConvert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in, out, s) -} - -func 
autoConvert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in *NetworkPolicySpec, out *extensions.NetworkPolicySpec, s conversion.Scope) error { - out.PodSelector = in.PodSelector - out.Ingress = *(*[]extensions.NetworkPolicyIngressRule)(unsafe.Pointer(&in.Ingress)) - return nil -} - -func Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in *NetworkPolicySpec, out *extensions.NetworkPolicySpec, s conversion.Scope) error { - return autoConvert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in, out, s) -} - -func autoConvert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *extensions.NetworkPolicySpec, out *NetworkPolicySpec, s conversion.Scope) error { - out.PodSelector = in.PodSelector - out.Ingress = *(*[]NetworkPolicyIngressRule)(unsafe.Pointer(&in.Ingress)) - return nil -} - -func Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *extensions.NetworkPolicySpec, out *NetworkPolicySpec, s conversion.Scope) error { - return autoConvert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in, out, s) -} - -func autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error { - return autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in, out, s) -} - -func autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error { - return autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in, out, s) -} - -func autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in *PodSecurityPolicyList, out *extensions.PodSecurityPolicyList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]extensions.PodSecurityPolicy, len(*in)) - for i := range *in { - if err := Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in *PodSecurityPolicyList, out *extensions.PodSecurityPolicyList, s conversion.Scope) error { - return autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in, out, s) -} - -func autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(*in)) - for i := range *in { - if err := Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(&(*in)[i], 
&(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]PodSecurityPolicy, 0) - } - return nil -} - -func Convert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s conversion.Scope) error { - return autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in, out, s) -} - -func autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in *PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error { - out.Privileged = in.Privileged - out.DefaultAddCapabilities = *(*[]api.Capability)(unsafe.Pointer(&in.DefaultAddCapabilities)) - out.RequiredDropCapabilities = *(*[]api.Capability)(unsafe.Pointer(&in.RequiredDropCapabilities)) - out.AllowedCapabilities = *(*[]api.Capability)(unsafe.Pointer(&in.AllowedCapabilities)) - out.Volumes = *(*[]extensions.FSType)(unsafe.Pointer(&in.Volumes)) - out.HostNetwork = in.HostNetwork - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]extensions.HostPortRange, len(*in)) - for i := range *in { - if err := Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.HostPorts = nil - } - out.HostPID = in.HostPID - out.HostIPC = in.HostIPC - if err := Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil { - return err - } - if err := Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { - return err - } - if err := Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { - return err - } - if err := Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, s); err != nil { - return err - } - out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem - return nil -} - -func Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in *PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error { - return autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in, out, s) -} - -func autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error { - out.Privileged = in.Privileged - out.DefaultAddCapabilities = *(*[]api_v1.Capability)(unsafe.Pointer(&in.DefaultAddCapabilities)) - out.RequiredDropCapabilities = *(*[]api_v1.Capability)(unsafe.Pointer(&in.RequiredDropCapabilities)) - out.AllowedCapabilities = *(*[]api_v1.Capability)(unsafe.Pointer(&in.AllowedCapabilities)) - out.Volumes = *(*[]FSType)(unsafe.Pointer(&in.Volumes)) - out.HostNetwork = in.HostNetwork - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(*in)) - for i := range *in { - if err := Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.HostPorts = nil - } - out.HostPID = in.HostPID - out.HostIPC = in.HostIPC - if err := Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil { - return err - } - if err := 
Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { - return err - } - if err := Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { - return err - } - if err := Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, s); err != nil { - return err - } - out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem - return nil -} - -func Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error { - return autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in, out, s) -} - -func autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { - return autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in, out, s) -} - -func autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in, out, s) -} - -func autoConvert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { - out.Type = extensions.ReplicaSetConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { - return autoConvert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in, out, s) -} - -func autoConvert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *ReplicaSetCondition, s conversion.Scope) error { - out.Type = ReplicaSetConditionType(in.Type) - out.Status = api_v1.ConditionStatus(in.Status) - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *ReplicaSetCondition, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in, out, s) -} - -func autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in 
*ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]extensions.ReplicaSet, len(*in)) - for i := range *in { - if err := Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { - return autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in, out, s) -} - -func autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicaSet, len(*in)) - for i := range *in { - if err := Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]ReplicaSet, 0) - } - return nil -} - -func Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in, out, s) -} - -func autoConvert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func autoConvert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*[]extensions.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -func Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { - return autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in, out, s) -} - -func autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration - out.Conditions = 
*(*[]ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -func Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in, out, s) -} - -func autoConvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in *ReplicationControllerDummy, out *extensions.ReplicationControllerDummy, s conversion.Scope) error { - return nil -} - -func Convert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in *ReplicationControllerDummy, out *extensions.ReplicationControllerDummy, s conversion.Scope) error { - return autoConvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in, out, s) -} - -func autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error { - return nil -} - -func Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error { - return autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in, out, s) -} - -func autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { - out.Revision = in.Revision - return nil -} - -func Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { - return autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in, out, s) -} - -func autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error { - out.Revision = in.Revision - return nil -} - -func Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error { - return autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s) -} - -func autoConvert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) - return nil -} - -func autoConvert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *RollingUpdateDaemonSet, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) - return nil -} - -func autoConvert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) - // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) - return 
nil -} - -func autoConvert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { - // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) - // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) - return nil -} - -func autoConvert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in *RunAsUserStrategyOptions, out *extensions.RunAsUserStrategyOptions, s conversion.Scope) error { - out.Rule = extensions.RunAsUserStrategy(in.Rule) - out.Ranges = *(*[]extensions.IDRange)(unsafe.Pointer(&in.Ranges)) - return nil -} - -func Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in *RunAsUserStrategyOptions, out *extensions.RunAsUserStrategyOptions, s conversion.Scope) error { - return autoConvert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in, out, s) -} - -func autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error { - out.Rule = RunAsUserStrategy(in.Rule) - out.Ranges = *(*[]IDRange)(unsafe.Pointer(&in.Ranges)) - return nil -} - -func Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error { - return autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in, out, s) -} - -func autoConvert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in *SELinuxStrategyOptions, out *extensions.SELinuxStrategyOptions, s conversion.Scope) error { - out.Rule = extensions.SELinuxStrategy(in.Rule) - out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - return nil -} - -func Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in *SELinuxStrategyOptions, out *extensions.SELinuxStrategyOptions, s conversion.Scope) error { - return autoConvert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in, out, s) -} - -func autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error { - out.Rule = SELinuxStrategy(in.Rule) - out.SELinuxOptions = (*api_v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - return nil -} - -func Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error { - return autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in, out, s) -} - -func autoConvert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, s conversion.Scope) error { - return 
autoConvert_v1beta1_Scale_To_extensions_Scale(in, out, s) -} - -func autoConvert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error { - return autoConvert_extensions_Scale_To_v1beta1_Scale(in, out, s) -} - -func autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -func Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { - return autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in, out, s) -} - -func autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -func Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { - return autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) -} - -func autoConvert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) - // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) - return nil -} - -func autoConvert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error { - out.Rule = extensions.SupplementalGroupsStrategyType(in.Rule) - out.Ranges = *(*[]extensions.IDRange)(unsafe.Pointer(&in.Ranges)) - return nil -} - -func Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error { - return autoConvert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in, out, s) -} - -func autoConvert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in *extensions.SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, s conversion.Scope) error { - out.Rule = SupplementalGroupsStrategyType(in.Rule) - out.Ranges = *(*[]IDRange)(unsafe.Pointer(&in.Ranges)) - return nil -} - -func Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in *extensions.SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, s conversion.Scope) error { - return 
autoConvert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in, out, s) -} - -func autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Description = in.Description - out.Versions = *(*[]extensions.APIVersion)(unsafe.Pointer(&in.Versions)) - return nil -} - -func Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error { - return autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in, out, s) -} - -func autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Description = in.Description - out.Versions = *(*[]APIVersion)(unsafe.Pointer(&in.Versions)) - return nil -} - -func Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error { - return autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in, out, s) -} - -func autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in *ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) - return nil -} - -func Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in *ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error { - return autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in, out, s) -} - -func autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) - return nil -} - -func Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error { - return autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in, out, s) -} - -func autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in *ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]extensions.ThirdPartyResourceData)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in *ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error { - return autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in, out, s) -} - -func autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]ThirdPartyResourceData, 0) - } else { - out.Items = *(*[]ThirdPartyResourceData)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in 
*extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error { - return autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in, out, s) -} - -func autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in *ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]extensions.ThirdPartyResource)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in *ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error { - return autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in, out, s) -} - -func autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]ThirdPartyResource, 0) - } else { - out.Items = *(*[]ThirdPartyResource)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error { - return autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index a8088c8fb..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,1087 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" - api_v1 "k8s.io/client-go/pkg/api/v1" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_APIVersion, InType: reflect.TypeOf(&APIVersion{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricCurrentStatus, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricCurrentStatusList, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricTarget, InType: reflect.TypeOf(&CustomMetricTarget{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricTargetList, InType: reflect.TypeOf(&CustomMetricTargetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSet, InType: reflect.TypeOf(&DaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetList, InType: reflect.TypeOf(&DaemonSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetSpec, InType: reflect.TypeOf(&DaemonSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetStatus, InType: reflect.TypeOf(&DaemonSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetUpdateStrategy, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Deployment, InType: reflect.TypeOf(&Deployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentRollback, InType: reflect.TypeOf(&DeploymentRollback{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentSpec, InType: reflect.TypeOf(&DeploymentSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStatus, InType: reflect.TypeOf(&DeploymentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStrategy, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_FSGroupStrategyOptions, InType: reflect.TypeOf(&FSGroupStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HTTPIngressPath, InType: reflect.TypeOf(&HTTPIngressPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HTTPIngressRuleValue, InType: reflect.TypeOf(&HTTPIngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HostPortRange, InType: reflect.TypeOf(&HostPortRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IDRange, InType: reflect.TypeOf(&IDRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Ingress, InType: reflect.TypeOf(&Ingress{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressBackend, InType: reflect.TypeOf(&IngressBackend{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressList, InType: reflect.TypeOf(&IngressList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressRule, InType: reflect.TypeOf(&IngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressRuleValue, InType: reflect.TypeOf(&IngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressSpec, InType: reflect.TypeOf(&IngressSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressStatus, InType: reflect.TypeOf(&IngressStatus{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressTLS, InType: reflect.TypeOf(&IngressTLS{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicy, InType: reflect.TypeOf(&NetworkPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicyIngressRule, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicyList, InType: reflect.TypeOf(&NetworkPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicyPeer, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicyPort, InType: reflect.TypeOf(&NetworkPolicyPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicySpec, InType: reflect.TypeOf(&NetworkPolicySpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodSecurityPolicy, InType: reflect.TypeOf(&PodSecurityPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodSecurityPolicyList, InType: reflect.TypeOf(&PodSecurityPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodSecurityPolicySpec, InType: reflect.TypeOf(&PodSecurityPolicySpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSet, InType: reflect.TypeOf(&ReplicaSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSetCondition, InType: reflect.TypeOf(&ReplicaSetCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSetList, InType: reflect.TypeOf(&ReplicaSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSetSpec, InType: reflect.TypeOf(&ReplicaSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSetStatus, InType: reflect.TypeOf(&ReplicaSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicationControllerDummy, InType: reflect.TypeOf(&ReplicationControllerDummy{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateDaemonSet, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RunAsUserStrategyOptions, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SELinuxStrategyOptions, InType: reflect.TypeOf(&SELinuxStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Scale, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SupplementalGroupsStrategyOptions, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResource, InType: reflect.TypeOf(&ThirdPartyResource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResourceData, InType: reflect.TypeOf(&ThirdPartyResourceData{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResourceDataList, InType: reflect.TypeOf(&ThirdPartyResourceDataList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResourceList, InType: 
reflect.TypeOf(&ThirdPartyResourceList{})}, - ) -} - -func DeepCopy_v1beta1_APIVersion(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIVersion) - out := out.(*APIVersion) - *out = *in - return nil - } -} - -func DeepCopy_v1beta1_CustomMetricCurrentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CustomMetricCurrentStatus) - out := out.(*CustomMetricCurrentStatus) - *out = *in - out.CurrentValue = in.CurrentValue.DeepCopy() - return nil - } -} - -func DeepCopy_v1beta1_CustomMetricCurrentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CustomMetricCurrentStatusList) - out := out.(*CustomMetricCurrentStatusList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricCurrentStatus, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_CustomMetricCurrentStatus(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_CustomMetricTarget(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CustomMetricTarget) - out := out.(*CustomMetricTarget) - *out = *in - out.TargetValue = in.TargetValue.DeepCopy() - return nil - } -} - -func DeepCopy_v1beta1_CustomMetricTargetList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CustomMetricTargetList) - out := out.(*CustomMetricTargetList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricTarget, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_CustomMetricTarget(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_DaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DaemonSet) - out := out.(*DaemonSet) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_DaemonSetList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DaemonSetList) - out := out.(*DaemonSetList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DaemonSet, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_DaemonSet(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_DaemonSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DaemonSetSpec) - out := out.(*DaemonSetSpec) - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_DaemonSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DaemonSetStatus) - out := out.(*DaemonSetStatus) - *out = *in - return nil - } -} - -func DeepCopy_v1beta1_DaemonSetUpdateStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DaemonSetUpdateStrategy) - 
out := out.(*DaemonSetUpdateStrategy) - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDaemonSet) - if err := DeepCopy_v1beta1_RollingUpdateDaemonSet(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1beta1_Deployment(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Deployment) - out := out.(*Deployment) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_DeploymentStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_DeploymentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentCondition) - out := out.(*DeploymentCondition) - *out = *in - out.LastUpdateTime = in.LastUpdateTime.DeepCopy() - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - return nil - } -} - -func DeepCopy_v1beta1_DeploymentList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentList) - out := out.(*DeploymentList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Deployment, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_Deployment(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_DeploymentRollback(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentRollback) - out := out.(*DeploymentRollback) - *out = *in - if in.UpdatedAnnotations != nil { - in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - return nil - } -} - -func DeepCopy_v1beta1_DeploymentSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentSpec) - out := out.(*DeploymentSpec) - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, c); err != nil { - return err - } - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - if in.RollbackTo != nil { - in, out := &in.RollbackTo, &out.RollbackTo - *out = new(RollbackConfig) - **out = **in - } - if in.ProgressDeadlineSeconds != nil { - in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_v1beta1_DeploymentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentStatus) - out := out.(*DeploymentStatus) - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]DeploymentCondition, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_DeploymentCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } 
-} - -func DeepCopy_v1beta1_DeploymentStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentStrategy) - out := out.(*DeploymentStrategy) - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDeployment) - if err := DeepCopy_v1beta1_RollingUpdateDeployment(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_v1beta1_FSGroupStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*FSGroupStrategyOptions) - out := out.(*FSGroupStrategyOptions) - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1beta1_HTTPIngressPath(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HTTPIngressPath) - out := out.(*HTTPIngressPath) - *out = *in - return nil - } -} - -func DeepCopy_v1beta1_HTTPIngressRuleValue(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HTTPIngressRuleValue) - out := out.(*HTTPIngressRuleValue) - *out = *in - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]HTTPIngressPath, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1beta1_HostPortRange(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HostPortRange) - out := out.(*HostPortRange) - *out = *in - return nil - } -} - -func DeepCopy_v1beta1_IDRange(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IDRange) - out := out.(*IDRange) - *out = *in - return nil - } -} - -func DeepCopy_v1beta1_Ingress(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Ingress) - out := out.(*Ingress) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_IngressSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_IngressStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_IngressBackend(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IngressBackend) - out := out.(*IngressBackend) - *out = *in - return nil - } -} - -func DeepCopy_v1beta1_IngressList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IngressList) - out := out.(*IngressList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Ingress, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_Ingress(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_IngressRule(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IngressRule) - out := out.(*IngressRule) - *out = *in - if err := DeepCopy_v1beta1_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_IngressRuleValue(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IngressRuleValue) - out := out.(*IngressRuleValue) - *out = *in - if in.HTTP != nil { - in, out := &in.HTTP, &out.HTTP - *out = new(HTTPIngressRuleValue) - if err := DeepCopy_v1beta1_HTTPIngressRuleValue(*in, *out, c); err != nil { - return err - } - } - return nil - } -} - -func 
DeepCopy_v1beta1_IngressSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IngressSpec) - out := out.(*IngressSpec) - *out = *in - if in.Backend != nil { - in, out := &in.Backend, &out.Backend - *out = new(IngressBackend) - **out = **in - } - if in.TLS != nil { - in, out := &in.TLS, &out.TLS - *out = make([]IngressTLS, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_IngressTLS(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]IngressRule, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_IngressRule(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_IngressStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IngressStatus) - out := out.(*IngressStatus) - *out = *in - if err := api_v1.DeepCopy_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_IngressTLS(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IngressTLS) - out := out.(*IngressTLS) - *out = *in - if in.Hosts != nil { - in, out := &in.Hosts, &out.Hosts - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1beta1_NetworkPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NetworkPolicy) - out := out.(*NetworkPolicy) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_NetworkPolicySpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_NetworkPolicyIngressRule(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NetworkPolicyIngressRule) - out := out.(*NetworkPolicyIngressRule) - *out = *in - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]NetworkPolicyPort, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_NetworkPolicyPort(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.From != nil { - in, out := &in.From, &out.From - *out = make([]NetworkPolicyPeer, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_NetworkPolicyPeer(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_NetworkPolicyList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NetworkPolicyList) - out := out.(*NetworkPolicyList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NetworkPolicy, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_NetworkPolicy(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_NetworkPolicyPeer(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NetworkPolicyPeer) - out := out.(*NetworkPolicyPeer) - *out = *in - if in.PodSelector != nil { - in, out := &in.PodSelector, &out.PodSelector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - return nil - } -} - 
-func DeepCopy_v1beta1_NetworkPolicyPort(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NetworkPolicyPort) - out := out.(*NetworkPolicyPort) - *out = *in - if in.Protocol != nil { - in, out := &in.Protocol, &out.Protocol - *out = new(api_v1.Protocol) - **out = **in - } - if in.Port != nil { - in, out := &in.Port, &out.Port - *out = new(intstr.IntOrString) - **out = **in - } - return nil - } -} - -func DeepCopy_v1beta1_NetworkPolicySpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NetworkPolicySpec) - out := out.(*NetworkPolicySpec) - *out = *in - if newVal, err := c.DeepCopy(&in.PodSelector); err != nil { - return err - } else { - out.PodSelector = *newVal.(*v1.LabelSelector) - } - if in.Ingress != nil { - in, out := &in.Ingress, &out.Ingress - *out = make([]NetworkPolicyIngressRule, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_NetworkPolicyIngressRule(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_PodSecurityPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodSecurityPolicy) - out := out.(*PodSecurityPolicy) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_PodSecurityPolicySpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_PodSecurityPolicyList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodSecurityPolicyList) - out := out.(*PodSecurityPolicyList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_PodSecurityPolicy(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_PodSecurityPolicySpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodSecurityPolicySpec) - out := out.(*PodSecurityPolicySpec) - *out = *in - if in.DefaultAddCapabilities != nil { - in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]api_v1.Capability, len(*in)) - copy(*out, *in) - } - if in.RequiredDropCapabilities != nil { - in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]api_v1.Capability, len(*in)) - copy(*out, *in) - } - if in.AllowedCapabilities != nil { - in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]api_v1.Capability, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]FSType, len(*in)) - copy(*out, *in) - } - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(*in)) - copy(*out, *in) - } - if err := DeepCopy_v1beta1_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_ReplicaSet(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := 
in.(*ReplicaSet) - out := out.(*ReplicaSet) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_ReplicaSetCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicaSetCondition) - out := out.(*ReplicaSetCondition) - *out = *in - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - return nil - } -} - -func DeepCopy_v1beta1_ReplicaSetList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicaSetList) - out := out.(*ReplicaSetList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicaSet, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_ReplicaSet(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_ReplicaSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicaSetSpec) - out := out.(*ReplicaSetSpec) - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_ReplicaSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicaSetStatus) - out := out.(*ReplicaSetStatus) - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ReplicaSetCondition, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_ReplicaSetCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_ReplicationControllerDummy(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicationControllerDummy) - out := out.(*ReplicationControllerDummy) - *out = *in - return nil - } -} - -func DeepCopy_v1beta1_RollbackConfig(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RollbackConfig) - out := out.(*RollbackConfig) - *out = *in - return nil - } -} - -func DeepCopy_v1beta1_RollingUpdateDaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RollingUpdateDaemonSet) - out := out.(*RollingUpdateDaemonSet) - *out = *in - if in.MaxUnavailable != nil { - in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in - } - return nil - } -} - -func DeepCopy_v1beta1_RollingUpdateDeployment(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RollingUpdateDeployment) - out := out.(*RollingUpdateDeployment) - *out = *in - if in.MaxUnavailable != nil { - in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in - } - if in.MaxSurge != nil { - in, out := &in.MaxSurge, &out.MaxSurge - *out = new(intstr.IntOrString) - **out = **in - } - return nil - } -} - -func DeepCopy_v1beta1_RunAsUserStrategyOptions(in interface{}, out interface{}, c 
*conversion.Cloner) error { - { - in := in.(*RunAsUserStrategyOptions) - out := out.(*RunAsUserStrategyOptions) - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1beta1_SELinuxStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SELinuxStrategyOptions) - out := out.(*SELinuxStrategyOptions) - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(api_v1.SELinuxOptions) - **out = **in - } - return nil - } -} - -func DeepCopy_v1beta1_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Scale) - out := out.(*Scale) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_ScaleStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ScaleSpec) - out := out.(*ScaleSpec) - *out = *in - return nil - } -} - -func DeepCopy_v1beta1_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ScaleStatus) - out := out.(*ScaleStatus) - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - return nil - } -} - -func DeepCopy_v1beta1_SupplementalGroupsStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SupplementalGroupsStrategyOptions) - out := out.(*SupplementalGroupsStrategyOptions) - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1beta1_ThirdPartyResource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ThirdPartyResource) - out := out.(*ThirdPartyResource) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]APIVersion, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1beta1_ThirdPartyResourceData(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ThirdPartyResourceData) - out := out.(*ThirdPartyResourceData) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1beta1_ThirdPartyResourceDataList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ThirdPartyResourceDataList) - out := out.(*ThirdPartyResourceDataList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResourceData, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_ThirdPartyResourceData(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_ThirdPartyResourceList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ThirdPartyResourceList) - out := out.(*ThirdPartyResourceList) 
- *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResource, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_ThirdPartyResource(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.defaults.go deleted file mode 100644 index 770faa513..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.defaults.go +++ /dev/null @@ -1,475 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by defaulter-gen. Do not edit it manually! - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/client-go/pkg/api/v1" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&DaemonSet{}, func(obj interface{}) { SetObjectDefaults_DaemonSet(obj.(*DaemonSet)) }) - scheme.AddTypeDefaultingFunc(&DaemonSetList{}, func(obj interface{}) { SetObjectDefaults_DaemonSetList(obj.(*DaemonSetList)) }) - scheme.AddTypeDefaultingFunc(&Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*Deployment)) }) - scheme.AddTypeDefaultingFunc(&DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*DeploymentList)) }) - scheme.AddTypeDefaultingFunc(&NetworkPolicy{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicy(obj.(*NetworkPolicy)) }) - scheme.AddTypeDefaultingFunc(&NetworkPolicyList{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicyList(obj.(*NetworkPolicyList)) }) - scheme.AddTypeDefaultingFunc(&ReplicaSet{}, func(obj interface{}) { SetObjectDefaults_ReplicaSet(obj.(*ReplicaSet)) }) - scheme.AddTypeDefaultingFunc(&ReplicaSetList{}, func(obj interface{}) { SetObjectDefaults_ReplicaSetList(obj.(*ReplicaSetList)) }) - return nil -} - -func SetObjectDefaults_DaemonSet(in *DaemonSet) { - SetDefaults_DaemonSet(in) - v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) - for i := range in.Spec.Template.Spec.Volumes { - a := &in.Spec.Template.Spec.Volumes[i] - v1.SetDefaults_Volume(a) - if a.VolumeSource.Secret != nil { - v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if 
a.VolumeSource.ConfigMap != nil { - v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - if a.VolumeSource.Projected != nil { - v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) - for j := range a.VolumeSource.Projected.Sources { - b := &a.VolumeSource.Projected.Sources[j] - if b.DownwardAPI != nil { - for k := range b.DownwardAPI.Items { - c := &b.DownwardAPI.Items[k] - if c.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(c.FieldRef) - } - } - } - } - } - if a.VolumeSource.ScaleIO != nil { - v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) - } - } - for i := range in.Spec.Template.Spec.InitContainers { - a := &in.Spec.Template.Spec.InitContainers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Spec.Template.Spec.Containers { - a := &in.Spec.Template.Spec.Containers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } -} - -func SetObjectDefaults_DaemonSetList(in *DaemonSetList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_DaemonSet(a) - } -} - -func SetObjectDefaults_Deployment(in *Deployment) { - SetDefaults_Deployment(in) - v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) - for i := range in.Spec.Template.Spec.Volumes { - a := &in.Spec.Template.Spec.Volumes[i] - v1.SetDefaults_Volume(a) - if a.VolumeSource.Secret != nil { - 
v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - if a.VolumeSource.Projected != nil { - v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) - for j := range a.VolumeSource.Projected.Sources { - b := &a.VolumeSource.Projected.Sources[j] - if b.DownwardAPI != nil { - for k := range b.DownwardAPI.Items { - c := &b.DownwardAPI.Items[k] - if c.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(c.FieldRef) - } - } - } - } - } - if a.VolumeSource.ScaleIO != nil { - v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) - } - } - for i := range in.Spec.Template.Spec.InitContainers { - a := &in.Spec.Template.Spec.InitContainers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Spec.Template.Spec.Containers { - a := &in.Spec.Template.Spec.Containers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if 
a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } -} - -func SetObjectDefaults_DeploymentList(in *DeploymentList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Deployment(a) - } -} - -func SetObjectDefaults_NetworkPolicy(in *NetworkPolicy) { - SetDefaults_NetworkPolicy(in) -} - -func SetObjectDefaults_NetworkPolicyList(in *NetworkPolicyList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_NetworkPolicy(a) - } -} - -func SetObjectDefaults_ReplicaSet(in *ReplicaSet) { - SetDefaults_ReplicaSet(in) - v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) - for i := range in.Spec.Template.Spec.Volumes { - a := &in.Spec.Template.Spec.Volumes[i] - v1.SetDefaults_Volume(a) - if a.VolumeSource.Secret != nil { - v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - if a.VolumeSource.Projected != nil { - v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) - for j := range a.VolumeSource.Projected.Sources { - b := &a.VolumeSource.Projected.Sources[j] - if b.DownwardAPI != nil { - for k := range b.DownwardAPI.Items { - c := &b.DownwardAPI.Items[k] - if c.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(c.FieldRef) - } - } - } - } - } - if a.VolumeSource.ScaleIO != nil { - v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) - } - } - for i := range in.Spec.Template.Spec.InitContainers { - a := &in.Spec.Template.Spec.InitContainers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Spec.Template.Spec.Containers { - a := &in.Spec.Template.Spec.Containers[i] - v1.SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - v1.SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if 
b.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - v1.SetDefaults_ResourceList(&a.Resources.Limits) - v1.SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - v1.SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - v1.SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } -} - -func SetObjectDefaults_ReplicaSetList(in *ReplicaSetList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_ReplicaSet(a) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/extensions/zz_generated.deepcopy.go deleted file mode 100644 index 93409cb61..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/extensions/zz_generated.deepcopy.go +++ /dev/null @@ -1,1059 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package extensions - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/client-go/pkg/api" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_APIVersion, InType: reflect.TypeOf(&APIVersion{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricCurrentStatus, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricCurrentStatusList, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricTarget, InType: reflect.TypeOf(&CustomMetricTarget{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricTargetList, InType: reflect.TypeOf(&CustomMetricTargetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSet, InType: reflect.TypeOf(&DaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetList, InType: reflect.TypeOf(&DaemonSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetSpec, InType: reflect.TypeOf(&DaemonSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetStatus, InType: reflect.TypeOf(&DaemonSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetUpdateStrategy, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Deployment, InType: reflect.TypeOf(&Deployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentRollback, InType: reflect.TypeOf(&DeploymentRollback{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentSpec, InType: reflect.TypeOf(&DeploymentSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentStatus, InType: reflect.TypeOf(&DeploymentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentStrategy, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_FSGroupStrategyOptions, InType: reflect.TypeOf(&FSGroupStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_HTTPIngressPath, InType: reflect.TypeOf(&HTTPIngressPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_HTTPIngressRuleValue, InType: reflect.TypeOf(&HTTPIngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_HostPortRange, InType: reflect.TypeOf(&HostPortRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IDRange, InType: reflect.TypeOf(&IDRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Ingress, InType: reflect.TypeOf(&Ingress{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressBackend, InType: reflect.TypeOf(&IngressBackend{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressList, InType: reflect.TypeOf(&IngressList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressRule, InType: reflect.TypeOf(&IngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressRuleValue, InType: reflect.TypeOf(&IngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressSpec, InType: reflect.TypeOf(&IngressSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_extensions_IngressStatus, InType: reflect.TypeOf(&IngressStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressTLS, InType: reflect.TypeOf(&IngressTLS{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicy, InType: reflect.TypeOf(&NetworkPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyIngressRule, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyList, InType: reflect.TypeOf(&NetworkPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyPeer, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyPort, InType: reflect.TypeOf(&NetworkPolicyPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicySpec, InType: reflect.TypeOf(&NetworkPolicySpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_PodSecurityPolicy, InType: reflect.TypeOf(&PodSecurityPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_PodSecurityPolicyList, InType: reflect.TypeOf(&PodSecurityPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_PodSecurityPolicySpec, InType: reflect.TypeOf(&PodSecurityPolicySpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSet, InType: reflect.TypeOf(&ReplicaSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetCondition, InType: reflect.TypeOf(&ReplicaSetCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetList, InType: reflect.TypeOf(&ReplicaSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetSpec, InType: reflect.TypeOf(&ReplicaSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetStatus, InType: reflect.TypeOf(&ReplicaSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicationControllerDummy, InType: reflect.TypeOf(&ReplicationControllerDummy{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollingUpdateDaemonSet, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RunAsUserStrategyOptions, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_SELinuxStrategyOptions, InType: reflect.TypeOf(&SELinuxStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Scale, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_SupplementalGroupsStrategyOptions, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResource, InType: reflect.TypeOf(&ThirdPartyResource{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResourceData, InType: reflect.TypeOf(&ThirdPartyResourceData{})}, - conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_extensions_ThirdPartyResourceDataList, InType: reflect.TypeOf(&ThirdPartyResourceDataList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResourceList, InType: reflect.TypeOf(&ThirdPartyResourceList{})}, - ) -} - -func DeepCopy_extensions_APIVersion(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIVersion) - out := out.(*APIVersion) - *out = *in - return nil - } -} - -func DeepCopy_extensions_CustomMetricCurrentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CustomMetricCurrentStatus) - out := out.(*CustomMetricCurrentStatus) - *out = *in - out.CurrentValue = in.CurrentValue.DeepCopy() - return nil - } -} - -func DeepCopy_extensions_CustomMetricCurrentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CustomMetricCurrentStatusList) - out := out.(*CustomMetricCurrentStatusList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricCurrentStatus, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_CustomMetricCurrentStatus(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_extensions_CustomMetricTarget(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CustomMetricTarget) - out := out.(*CustomMetricTarget) - *out = *in - out.TargetValue = in.TargetValue.DeepCopy() - return nil - } -} - -func DeepCopy_extensions_CustomMetricTargetList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*CustomMetricTargetList) - out := out.(*CustomMetricTargetList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricTarget, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_CustomMetricTarget(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_extensions_DaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DaemonSet) - out := out.(*DaemonSet) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_extensions_DaemonSetSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_extensions_DaemonSetList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DaemonSetList) - out := out.(*DaemonSetList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DaemonSet, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_DaemonSet(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_extensions_DaemonSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DaemonSetSpec) - out := out.(*DaemonSetSpec) - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - if err := DeepCopy_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_extensions_DaemonSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := 
in.(*DaemonSetStatus) - out := out.(*DaemonSetStatus) - *out = *in - return nil - } -} - -func DeepCopy_extensions_DaemonSetUpdateStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DaemonSetUpdateStrategy) - out := out.(*DaemonSetUpdateStrategy) - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDaemonSet) - **out = **in - } - return nil - } -} - -func DeepCopy_extensions_Deployment(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Deployment) - out := out.(*Deployment) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_extensions_DeploymentSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_extensions_DeploymentStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_extensions_DeploymentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentCondition) - out := out.(*DeploymentCondition) - *out = *in - out.LastUpdateTime = in.LastUpdateTime.DeepCopy() - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - return nil - } -} - -func DeepCopy_extensions_DeploymentList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentList) - out := out.(*DeploymentList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Deployment, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_Deployment(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_extensions_DeploymentRollback(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentRollback) - out := out.(*DeploymentRollback) - *out = *in - if in.UpdatedAnnotations != nil { - in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - return nil - } -} - -func DeepCopy_extensions_DeploymentSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentSpec) - out := out.(*DeploymentSpec) - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - if err := DeepCopy_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, c); err != nil { - return err - } - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - if in.RollbackTo != nil { - in, out := &in.RollbackTo, &out.RollbackTo - *out = new(RollbackConfig) - **out = **in - } - if in.ProgressDeadlineSeconds != nil { - in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds - *out = new(int32) - **out = **in - } - return nil - } -} - -func DeepCopy_extensions_DeploymentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentStatus) - out := out.(*DeploymentStatus) - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]DeploymentCondition, len(*in)) - for i := range *in { - if err := 
DeepCopy_extensions_DeploymentCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_extensions_DeploymentStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeploymentStrategy) - out := out.(*DeploymentStrategy) - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDeployment) - **out = **in - } - return nil - } -} - -func DeepCopy_extensions_FSGroupStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*FSGroupStrategyOptions) - out := out.(*FSGroupStrategyOptions) - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_extensions_HTTPIngressPath(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HTTPIngressPath) - out := out.(*HTTPIngressPath) - *out = *in - return nil - } -} - -func DeepCopy_extensions_HTTPIngressRuleValue(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HTTPIngressRuleValue) - out := out.(*HTTPIngressRuleValue) - *out = *in - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]HTTPIngressPath, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_extensions_HostPortRange(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*HostPortRange) - out := out.(*HostPortRange) - *out = *in - return nil - } -} - -func DeepCopy_extensions_IDRange(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IDRange) - out := out.(*IDRange) - *out = *in - return nil - } -} - -func DeepCopy_extensions_Ingress(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Ingress) - out := out.(*Ingress) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_extensions_IngressSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_extensions_IngressStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_extensions_IngressBackend(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IngressBackend) - out := out.(*IngressBackend) - *out = *in - return nil - } -} - -func DeepCopy_extensions_IngressList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IngressList) - out := out.(*IngressList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Ingress, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_Ingress(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_extensions_IngressRule(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IngressRule) - out := out.(*IngressRule) - *out = *in - if err := DeepCopy_extensions_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_extensions_IngressRuleValue(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IngressRuleValue) - out := out.(*IngressRuleValue) - *out = *in - if in.HTTP != nil { - in, out := &in.HTTP, &out.HTTP - *out = new(HTTPIngressRuleValue) - if err := DeepCopy_extensions_HTTPIngressRuleValue(*in, 
*out, c); err != nil { - return err - } - } - return nil - } -} - -func DeepCopy_extensions_IngressSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IngressSpec) - out := out.(*IngressSpec) - *out = *in - if in.Backend != nil { - in, out := &in.Backend, &out.Backend - *out = new(IngressBackend) - **out = **in - } - if in.TLS != nil { - in, out := &in.TLS, &out.TLS - *out = make([]IngressTLS, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_IngressTLS(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]IngressRule, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_IngressRule(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_extensions_IngressStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IngressStatus) - out := out.(*IngressStatus) - *out = *in - if err := api.DeepCopy_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_extensions_IngressTLS(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*IngressTLS) - out := out.(*IngressTLS) - *out = *in - if in.Hosts != nil { - in, out := &in.Hosts, &out.Hosts - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_extensions_NetworkPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NetworkPolicy) - out := out.(*NetworkPolicy) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_extensions_NetworkPolicySpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_extensions_NetworkPolicyIngressRule(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NetworkPolicyIngressRule) - out := out.(*NetworkPolicyIngressRule) - *out = *in - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]NetworkPolicyPort, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_NetworkPolicyPort(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.From != nil { - in, out := &in.From, &out.From - *out = make([]NetworkPolicyPeer, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_NetworkPolicyPeer(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_extensions_NetworkPolicyList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NetworkPolicyList) - out := out.(*NetworkPolicyList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NetworkPolicy, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_NetworkPolicy(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_extensions_NetworkPolicyPeer(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NetworkPolicyPeer) - out := out.(*NetworkPolicyPeer) - *out = *in - if in.PodSelector != nil { - in, out := &in.PodSelector, &out.PodSelector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - if newVal, err := 
c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - return nil - } -} - -func DeepCopy_extensions_NetworkPolicyPort(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NetworkPolicyPort) - out := out.(*NetworkPolicyPort) - *out = *in - if in.Protocol != nil { - in, out := &in.Protocol, &out.Protocol - *out = new(api.Protocol) - **out = **in - } - if in.Port != nil { - in, out := &in.Port, &out.Port - *out = new(intstr.IntOrString) - **out = **in - } - return nil - } -} - -func DeepCopy_extensions_NetworkPolicySpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*NetworkPolicySpec) - out := out.(*NetworkPolicySpec) - *out = *in - if newVal, err := c.DeepCopy(&in.PodSelector); err != nil { - return err - } else { - out.PodSelector = *newVal.(*v1.LabelSelector) - } - if in.Ingress != nil { - in, out := &in.Ingress, &out.Ingress - *out = make([]NetworkPolicyIngressRule, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_NetworkPolicyIngressRule(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_extensions_PodSecurityPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodSecurityPolicy) - out := out.(*PodSecurityPolicy) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_extensions_PodSecurityPolicySpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_extensions_PodSecurityPolicyList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodSecurityPolicyList) - out := out.(*PodSecurityPolicyList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_PodSecurityPolicy(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_extensions_PodSecurityPolicySpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodSecurityPolicySpec) - out := out.(*PodSecurityPolicySpec) - *out = *in - if in.DefaultAddCapabilities != nil { - in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]api.Capability, len(*in)) - copy(*out, *in) - } - if in.RequiredDropCapabilities != nil { - in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]api.Capability, len(*in)) - copy(*out, *in) - } - if in.AllowedCapabilities != nil { - in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]api.Capability, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]FSType, len(*in)) - copy(*out, *in) - } - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(*in)) - copy(*out, *in) - } - if err := DeepCopy_extensions_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, c); err != nil { - return err - } - if err := DeepCopy_extensions_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, c); err != nil { - return err - } - if err := DeepCopy_extensions_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, c); err != nil { - return err - } - if err := DeepCopy_extensions_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, c); err != nil { - return err - 
} - return nil - } -} - -func DeepCopy_extensions_ReplicaSet(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicaSet) - out := out.(*ReplicaSet) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_extensions_ReplicaSetStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_extensions_ReplicaSetCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicaSetCondition) - out := out.(*ReplicaSetCondition) - *out = *in - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - return nil - } -} - -func DeepCopy_extensions_ReplicaSetList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicaSetList) - out := out.(*ReplicaSetList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicaSet, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_ReplicaSet(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_extensions_ReplicaSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicaSetSpec) - out := out.(*ReplicaSetSpec) - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_extensions_ReplicaSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicaSetStatus) - out := out.(*ReplicaSetStatus) - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ReplicaSetCondition, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_ReplicaSetCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_extensions_ReplicationControllerDummy(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ReplicationControllerDummy) - out := out.(*ReplicationControllerDummy) - *out = *in - return nil - } -} - -func DeepCopy_extensions_RollbackConfig(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RollbackConfig) - out := out.(*RollbackConfig) - *out = *in - return nil - } -} - -func DeepCopy_extensions_RollingUpdateDaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RollingUpdateDaemonSet) - out := out.(*RollingUpdateDaemonSet) - *out = *in - return nil - } -} - -func DeepCopy_extensions_RollingUpdateDeployment(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RollingUpdateDeployment) - out := out.(*RollingUpdateDeployment) - *out = *in - return nil - } -} - -func DeepCopy_extensions_RunAsUserStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RunAsUserStrategyOptions) - out := out.(*RunAsUserStrategyOptions) - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_extensions_SELinuxStrategyOptions(in interface{}, out 
interface{}, c *conversion.Cloner) error { - { - in := in.(*SELinuxStrategyOptions) - out := out.(*SELinuxStrategyOptions) - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(api.SELinuxOptions) - **out = **in - } - return nil - } -} - -func DeepCopy_extensions_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Scale) - out := out.(*Scale) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_extensions_ScaleStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_extensions_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ScaleSpec) - out := out.(*ScaleSpec) - *out = *in - return nil - } -} - -func DeepCopy_extensions_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ScaleStatus) - out := out.(*ScaleStatus) - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - return nil - } -} - -func DeepCopy_extensions_SupplementalGroupsStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*SupplementalGroupsStrategyOptions) - out := out.(*SupplementalGroupsStrategyOptions) - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_extensions_ThirdPartyResource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ThirdPartyResource) - out := out.(*ThirdPartyResource) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]APIVersion, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_extensions_ThirdPartyResourceData(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ThirdPartyResourceData) - out := out.(*ThirdPartyResourceData) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_extensions_ThirdPartyResourceDataList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ThirdPartyResourceDataList) - out := out.(*ThirdPartyResourceDataList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResourceData, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_ThirdPartyResourceData(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_extensions_ThirdPartyResourceList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ThirdPartyResourceList) - out := out.(*ThirdPartyResourceList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResource, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_ThirdPartyResource(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} 
diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/OWNERS b/vendor/k8s.io/client-go/pkg/apis/policy/OWNERS deleted file mode 100755 index 8d0eea516..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/policy/OWNERS +++ /dev/null @@ -1,14 +0,0 @@ -reviewers: -- thockin -- lavalamp -- smarterclayton -- deads2k -- caesarxuchao -- sttts -- ncdc -- timothysc -- dims -- mml -- mbohlool -- david-mcmahon -- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/doc.go b/vendor/k8s.io/client-go/pkg/apis/policy/doc.go deleted file mode 100644 index 8feeef8e4..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/policy/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package policy diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/install/install.go b/vendor/k8s.io/client-go/pkg/apis/policy/install/install.go deleted file mode 100644 index 6253d5bc2..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/policy/install/install.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the experimental API group, making it available as -// an option to all of the API encoding/decoding machinery. 
-package install - -import ( - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/apis/policy" - "k8s.io/client-go/pkg/apis/policy/v1beta1" -) - -func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: policy.GroupName, - VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, - ImportPrefix: "k8s.io/client-go/pkg/apis/policy", - AddInternalObjectsToScheme: policy.AddToScheme, - }, - announced.VersionToSchemeFunc{ - v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, - }, - ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/register.go b/vendor/k8s.io/client-go/pkg/apis/policy/register.go deleted file mode 100644 index 5aadc3f1b..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/policy/register.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package policy - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "policy" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - // TODO this gets cleaned up when the types are fixed - scheme.AddKnownTypes(SchemeGroupVersion, - &PodDisruptionBudget{}, - &PodDisruptionBudgetList{}, - &Eviction{}, - ) - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/types.go b/vendor/k8s.io/client-go/pkg/apis/policy/types.go deleted file mode 100644 index ef2ff713a..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/policy/types.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package policy - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. -type PodDisruptionBudgetSpec struct { - // An eviction is allowed if at least "minAvailable" pods selected by - // "selector" will still be available after the eviction, i.e. even in the - // absence of the evicted pod. So for example you can prevent all voluntary - // evictions by specifying "100%". - // +optional - MinAvailable intstr.IntOrString - - // Label query over pods whose evictions are managed by the disruption - // budget. - // +optional - Selector *metav1.LabelSelector -} - -// PodDisruptionBudgetStatus represents information about the status of a -// PodDisruptionBudget. Status may trail the actual state of a system. -type PodDisruptionBudgetStatus struct { - // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other - // status informatio is valid only if observedGeneration equals to PDB's object generation. - // +optional - ObservedGeneration int64 - - // DisruptedPods contains information about pods whose eviction was - // processed by the API server eviction subresource handler but has not - // yet been observed by the PodDisruptionBudget controller. - // A pod will be in this map from the time when the API server processed the - // eviction request to the time when the pod is seen by PDB controller - // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod - // and the value is the time when the API server processed the eviction request. If - // the deletion didn't occur and a pod is still there it will be removed from - // the list automatically by PodDisruptionBudget controller after some time. - // If everything goes smooth this map should be empty for the most of the time. - // Large number of entries in the map may indicate problems with pod deletions. - DisruptedPods map[string]metav1.Time - - // Number of pod disruptions that are currently allowed. - PodDisruptionsAllowed int32 - - // current number of healthy pods - CurrentHealthy int32 - - // minimum desired number of healthy pods - DesiredHealthy int32 - - // total number of pods counted by this disruption budget - ExpectedPods int32 -} - -// +genclient=true - -// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods -type PodDisruptionBudget struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Specification of the desired behavior of the PodDisruptionBudget. - // +optional - Spec PodDisruptionBudgetSpec - // Most recently observed status of the PodDisruptionBudget. - // +optional - Status PodDisruptionBudgetStatus -} - -// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. -type PodDisruptionBudgetList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - Items []PodDisruptionBudget -} - -// +genclient=true -// +noMethods=true - -// Eviction evicts a pod from its node subject to certain policies and safety constraints. -// This is a subresource of Pod. 
A request to cause such an eviction is -// created by POSTing to .../pods//eviction. -type Eviction struct { - metav1.TypeMeta - - // ObjectMeta describes the pod that is being evicted. - // +optional - metav1.ObjectMeta - - // DeleteOptions may be provided - // +optional - DeleteOptions *metav1.DeleteOptions -} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.pb.go deleted file mode 100644 index 3b68f56fa..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.pb.go +++ /dev/null @@ -1,1375 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.proto - - It has these top-level messages: - Eviction - PodDisruptionBudget - PodDisruptionBudgetList - PodDisruptionBudgetSpec - PodDisruptionBudgetStatus -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import strings "strings" -import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *Eviction) Reset() { *m = Eviction{} } -func (*Eviction) ProtoMessage() {} -func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } -func (*PodDisruptionBudget) ProtoMessage() {} -func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } -func (*PodDisruptionBudgetList) ProtoMessage() {} -func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } -func (*PodDisruptionBudgetSpec) ProtoMessage() {} -func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } -func (*PodDisruptionBudgetStatus) ProtoMessage() {} -func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} -} - -func init() { - proto.RegisterType((*Eviction)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.Eviction") - proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.PodDisruptionBudget") - proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.PodDisruptionBudgetList") - proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.PodDisruptionBudgetSpec") - proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.PodDisruptionBudgetStatus") -} -func (m *Eviction) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Eviction) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - if m.DeleteOptions != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.DeleteOptions.Size())) - n2, err := m.DeleteOptions.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - } - return i, nil -} - -func (m *PodDisruptionBudget) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodDisruptionBudget) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - return i, nil -} - -func (m *PodDisruptionBudgetList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - 
return data[:n], nil -} - -func (m *PodDisruptionBudgetList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodDisruptionBudgetSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodDisruptionBudgetSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.MinAvailable.Size())) - n7, err := m.MinAvailable.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n7 - if m.Selector != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n8 - } - return i, nil -} - -func (m *PodDisruptionBudgetStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodDisruptionBudgetStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) - if len(m.DisruptedPods) > 0 { - for k := range m.DisruptedPods { - data[i] = 0x12 - i++ - v := m.DisruptedPods[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n9, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n9 - } - } - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.PodDisruptionsAllowed)) - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentHealthy)) - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.DesiredHealthy)) - data[i] = 0x30 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ExpectedPods)) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *Eviction) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - if m.DeleteOptions != nil { - l = m.DeleteOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PodDisruptionBudget) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodDisruptionBudgetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodDisruptionBudgetSpec) Size() (n int) { - var l int - _ = l - l = m.MinAvailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PodDisruptionBudgetStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - if len(m.DisruptedPods) > 0 { - for k, v := range m.DisruptedPods { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - n += 1 + sovGenerated(uint64(m.PodDisruptionsAllowed)) - n += 1 + sovGenerated(uint64(m.CurrentHealthy)) - n += 1 + sovGenerated(uint64(m.DesiredHealthy)) - n += 1 + sovGenerated(uint64(m.ExpectedPods)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Eviction) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Eviction{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudget) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodDisruptionBudget{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodDisruptionBudgetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, - `MinAvailable:` + 
strings.Replace(strings.Replace(this.MinAvailable.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetStatus) String() string { - if this == nil { - return "nil" - } - keysForDisruptedPods := make([]string, 0, len(this.DisruptedPods)) - for k := range this.DisruptedPods { - keysForDisruptedPods = append(keysForDisruptedPods, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - mapStringForDisruptedPods := "map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time{" - for _, k := range keysForDisruptedPods { - mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) - } - mapStringForDisruptedPods += "}" - s := strings.Join([]string{`&PodDisruptionBudgetStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `DisruptedPods:` + mapStringForDisruptedPods + `,`, - `PodDisruptionsAllowed:` + fmt.Sprintf("%v", this.PodDisruptionsAllowed) + `,`, - `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, - `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, - `ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Eviction) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Eviction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeleteOptions == nil { - m.DeleteOptions = &k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions{} - } - if err := 
m.DeleteOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudget) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - 
} - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodDisruptionBudget{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.MinAvailable.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", 
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.DisruptedPods == nil { - m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time) - } - m.DisruptedPods[mapkey] = *mapvalue - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PodDisruptionsAllowed", wireType) - } - m.PodDisruptionsAllowed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.PodDisruptionsAllowed |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) - } - m.CurrentHealthy = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.CurrentHealthy |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) - } - m.DesiredHealthy = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.DesiredHealthy |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) - } - m.ExpectedPods = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.ExpectedPods |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] 
< 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -var fileDescriptorGenerated = []byte{ - // 773 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x94, 0xcb, 0x6e, 0xf3, 0x44, - 0x18, 0x86, 0xe3, 0x26, 0x29, 0x61, 0x9a, 0x54, 0x65, 0xa0, 0x10, 0x22, 0xe1, 0xa2, 0xac, 0x5a, - 0x04, 0x63, 0xda, 0x22, 0x54, 0x58, 0x54, 0xd4, 0xa4, 0x82, 0xa2, 0x56, 0xa9, 0x5c, 0x24, 0x24, - 0x04, 0x12, 0x63, 0xfb, 0xc3, 0x99, 0xc6, 0x27, 0x8d, 0xc7, 0xa1, 0xd9, 0x71, 0x09, 0x2c, 0xb8, - 0xa8, 0x4a, 0x6c, 0xba, 0x44, 0x08, 0x55, 0x34, 0x70, 0x0b, 0xec, 0x91, 0xc7, 0x93, 0x83, 0x9b, - 0x44, 0x0d, 0xea, 0xaf, 0x7f, 0xe7, 0x39, 0x3c, 0xef, 0xfb, 0x9d, 0xc6, 0xe8, 0x93, 0xfe, 0x51, - 0x42, 0x58, 0x64, 0xf4, 0x53, 0x1b, 0x78, 0x08, 0x02, 0x12, 0x23, 0xee, 0x7b, 0x06, 0x8d, 0x59, - 0x62, 0xc4, 0x91, 0xcf, 0x9c, 0xa1, 0x31, 0xd8, 0xb7, 0x41, 0xd0, 0x7d, 0xc3, 0x83, 0x10, 0x38, - 0x15, 0xe0, 0x92, 0x98, 0x47, 0x22, 0xc2, 0x7b, 0x39, 0x4a, 0xa6, 0x28, 0x89, 0xfb, 0x1e, 0xc9, - 0x50, 0x92, 0xa3, 0x44, 0xa1, 0xad, 0x0f, 0x3c, 0x26, 0x7a, 0xa9, 0x4d, 0x9c, 0x28, 0x30, 0xbc, - 0xc8, 0x8b, 0x0c, 0xa9, 0x60, 0xa7, 0x3f, 0xca, 0x95, 0x5c, 0xc8, 0xaf, 0x5c, 0xb9, 0xf5, 0x91, - 0x0a, 0x8a, 0xc6, 0x2c, 0xa0, 0x4e, 0x8f, 0x85, 0xc0, 0x87, 0xd3, 0xb0, 0x02, 0x10, 0xd4, 0x18, - 0xcc, 0xc5, 0xd3, 0x32, 0x96, 0x51, 0x3c, 0x0d, 0x05, 0x0b, 0x60, 0x0e, 0xf8, 0xf8, 0x29, 0x20, - 0x71, 0x7a, 0x10, 0xd0, 0x39, 0xee, 0x70, 0x19, 0x97, 0x0a, 0xe6, 0x1b, 0x2c, 0x14, 0x89, 0xe0, - 0x73, 0xd0, 0x4c, 0x4e, 0x09, 0xf0, 0x01, 0xf0, 0x69, 0x42, 0x70, 0x43, 0x83, 0xd8, 0x87, 0x45, - 0x39, 0xbd, 0xbf, 0xb4, 0x3d, 0x0b, 0x6e, 0xb7, 0xff, 0xd0, 0x50, 0xed, 0x74, 0xc0, 0x1c, 0xc1, - 0xa2, 0x10, 0xff, 0x80, 0x6a, 0x59, 0xa5, 0x5c, 0x2a, 0x68, 0x53, 0x7b, 0x57, 0xdb, 0xdd, 0x38, - 0xf8, 0x90, 0xa8, 0x8e, 0xcd, 0x06, 0x3e, 0xed, 0x59, 0x76, 0x9b, 0x0c, 0xf6, 0x49, 0xd7, 0xbe, - 0x06, 0x47, 0x5c, 0x80, 0xa0, 0x26, 0xbe, 0xbd, 0xdf, 0x29, 0x8d, 0xee, 0x77, 0xd0, 0x74, 0xcf, - 0x9a, 0xa8, 0x62, 0x1f, 0x35, 0x5c, 0xf0, 0x41, 0x40, 0x37, 0xce, 0x1c, 0x93, 0xe6, 0x9a, 0xb4, - 0x39, 0x5c, 0xcd, 0xa6, 0x33, 0x8b, 0x9a, 0xaf, 0x8d, 0xee, 0x77, 0x1a, 0x85, 0x2d, 0xab, 0x28, - 0xde, 0xfe, 0x6d, 0x0d, 0xbd, 0x7e, 0x19, 0xb9, 0x1d, 0x96, 
0xf0, 0x54, 0x6e, 0x99, 0xa9, 0xeb, - 0x81, 0x78, 0x09, 0x79, 0xba, 0xa8, 0x92, 0xc4, 0xe0, 0xa8, 0xf4, 0x4c, 0xb2, 0xf2, 0xdc, 0x93, - 0x05, 0xf1, 0x5e, 0xc5, 0xe0, 0x98, 0x75, 0xe5, 0x57, 0xc9, 0x56, 0x96, 0x54, 0xc7, 0x3e, 0x5a, - 0x4f, 0x04, 0x15, 0x69, 0xd2, 0x2c, 0x4b, 0x9f, 0xce, 0x33, 0x7d, 0xa4, 0x96, 0xb9, 0xa9, 0x9c, - 0xd6, 0xf3, 0xb5, 0xa5, 0x3c, 0xda, 0x7f, 0x6a, 0xe8, 0xad, 0x05, 0xd4, 0x39, 0x4b, 0x04, 0xfe, - 0x6e, 0xae, 0xa2, 0x64, 0xb5, 0x8a, 0x66, 0xb4, 0xac, 0xe7, 0x96, 0x72, 0xad, 0x8d, 0x77, 0x66, - 0xaa, 0xe9, 0xa0, 0x2a, 0x13, 0x10, 0x64, 0xd3, 0x52, 0xde, 0xdd, 0x38, 0x38, 0x7e, 0x5e, 0x9a, - 0x66, 0x43, 0x59, 0x55, 0xcf, 0x32, 0x51, 0x2b, 0xd7, 0x6e, 0xff, 0xb3, 0x38, 0xbd, 0xac, 0xdc, - 0xf8, 0x1a, 0xd5, 0x03, 0x16, 0x9e, 0x0c, 0x28, 0xf3, 0xa9, 0xed, 0xc3, 0x93, 0x43, 0x93, 0xbd, - 0x6a, 0x92, 0xbf, 0x6a, 0x72, 0x16, 0x8a, 0x2e, 0xbf, 0x12, 0x9c, 0x85, 0x9e, 0xf9, 0x86, 0x72, - 0xae, 0x5f, 0xcc, 0xa8, 0x59, 0x05, 0x6d, 0xfc, 0x3d, 0xaa, 0x25, 0xe0, 0x83, 0x23, 0x22, 0xfe, - 0xff, 0x5e, 0xc7, 0x39, 0xb5, 0xc1, 0xbf, 0x52, 0xa8, 0x59, 0xcf, 0x6a, 0x39, 0x5e, 0x59, 0x13, - 0xc9, 0xf6, 0xbf, 0x15, 0xf4, 0xf6, 0xd2, 0xde, 0xe3, 0xaf, 0x10, 0x8e, 0x6c, 0xf9, 0xb3, 0x71, - 0xbf, 0xc8, 0xff, 0x14, 0x2c, 0x0a, 0x65, 0xba, 0x65, 0xb3, 0xa5, 0x82, 0xc7, 0xdd, 0xb9, 0x1b, - 0xd6, 0x02, 0x0a, 0xff, 0xaa, 0xa1, 0x86, 0x9b, 0xdb, 0x80, 0x7b, 0x19, 0xb9, 0xe3, 0xf6, 0x7d, - 0xf3, 0x22, 0xa6, 0x94, 0x74, 0x66, 0x95, 0x4f, 0x43, 0xc1, 0x87, 0xe6, 0xb6, 0x0a, 0xb0, 0x51, - 0x38, 0xb3, 0x8a, 0x41, 0xe0, 0x0b, 0x84, 0xdd, 0x89, 0x64, 0x72, 0xe2, 0xfb, 0xd1, 0x4f, 0xe0, - 0xca, 0x07, 0x54, 0x35, 0xdf, 0x51, 0x0a, 0xdb, 0x05, 0xdf, 0xf1, 0x25, 0x6b, 0x01, 0x88, 0x8f, - 0xd1, 0xa6, 0x93, 0x72, 0x0e, 0xa1, 0xf8, 0x12, 0xa8, 0x2f, 0x7a, 0xc3, 0x66, 0x45, 0x4a, 0xbd, - 0xa9, 0xa4, 0x36, 0x3f, 0x2f, 0x9c, 0x5a, 0x8f, 0x6e, 0x67, 0xbc, 0x0b, 0x09, 0xe3, 0xe0, 0x8e, - 0xf9, 0x6a, 0x91, 0xef, 0x14, 0x4e, 0xad, 0x47, 0xb7, 0xf1, 0x11, 0xaa, 0xc3, 0x4d, 0x0c, 0xce, - 0xb8, 0xc6, 0xeb, 0x92, 0x9e, 0x0c, 0xda, 0xe9, 0xcc, 0x99, 0x55, 0xb8, 0xd9, 0xf2, 0x11, 0x9e, - 0x2f, 0x22, 0xde, 0x42, 0xe5, 0x3e, 0x0c, 0x65, 0xcb, 0x5f, 0xb5, 0xb2, 0x4f, 0xfc, 0x19, 0xaa, - 0x0e, 0xa8, 0x9f, 0x82, 0x9a, 0xc6, 0xf7, 0x56, 0x9b, 0xc6, 0xaf, 0x59, 0x00, 0x56, 0x0e, 0x7e, - 0xba, 0x76, 0xa4, 0x99, 0x7b, 0xb7, 0x0f, 0x7a, 0xe9, 0xee, 0x41, 0x2f, 0xfd, 0xfe, 0xa0, 0x97, - 0x7e, 0x1e, 0xe9, 0xda, 0xed, 0x48, 0xd7, 0xee, 0x46, 0xba, 0xf6, 0xd7, 0x48, 0xd7, 0x7e, 0xf9, - 0x5b, 0x2f, 0x7d, 0xfb, 0x8a, 0x6a, 0xfa, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xe9, 0x2f, - 0xf1, 0x61, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.proto deleted file mode 100644 index d14d5de30..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.proto +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. 
Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.policy.v1beta1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// Eviction evicts a pod from its node subject to certain policies and safety constraints. -// This is a subresource of Pod. A request to cause such an eviction is -// created by POSTing to .../pods//evictions. -message Eviction { - // ObjectMeta describes the pod that is being evicted. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // DeleteOptions may be provided - optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; -} - -// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods -message PodDisruptionBudget { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the PodDisruptionBudget. - optional PodDisruptionBudgetSpec spec = 2; - - // Most recently observed status of the PodDisruptionBudget. - optional PodDisruptionBudgetStatus status = 3; -} - -// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. -message PodDisruptionBudgetList { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - repeated PodDisruptionBudget items = 2; -} - -// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. -message PodDisruptionBudgetSpec { - // An eviction is allowed if at least "minAvailable" pods selected by - // "selector" will still be available after the eviction, i.e. even in the - // absence of the evicted pod. So for example you can prevent all voluntary - // evictions by specifying "100%". - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; - - // Label query over pods whose evictions are managed by the disruption - // budget. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; -} - -// PodDisruptionBudgetStatus represents information about the status of a -// PodDisruptionBudget. Status may trail the actual state of a system. -message PodDisruptionBudgetStatus { - // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other - // status informatio is valid only if observedGeneration equals to PDB's object generation. - // +optional - optional int64 observedGeneration = 1; - - // DisruptedPods contains information about pods whose eviction was - // processed by the API server eviction subresource handler but has not - // yet been observed by the PodDisruptionBudget controller. - // A pod will be in this map from the time when the API server processed the - // eviction request to the time when the pod is seen by PDB controller - // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod - // and the value is the time when the API server processed the eviction request. If - // the deletion didn't occur and a pod is still there it will be removed from - // the list automatically by PodDisruptionBudget controller after some time. 
- // If everything goes smooth this map should be empty for the most of the time. - // Large number of entries in the map may indicate problems with pod deletions. - map disruptedPods = 2; - - // Number of pod disruptions that are currently allowed. - optional int32 disruptionsAllowed = 3; - - // current number of healthy pods - optional int32 currentHealthy = 4; - - // minimum desired number of healthy pods - optional int32 desiredHealthy = 5; - - // total number of pods counted by this disruption budget - optional int32 expectedPods = 6; -} - diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.generated.go deleted file mode 100644 index 049bfe8e6..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.generated.go +++ /dev/null @@ -1,2203 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1beta1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkg3_types "k8s.io/apimachinery/pkg/types" - pkg1_intstr "k8s.io/apimachinery/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_v1.LabelSelector - var v1 pkg3_types.UID - var v2 pkg1_intstr.IntOrString - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *PodDisruptionBudgetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = x.Selector != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.MinAvailable - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else if !yym5 && z.IsJSONHandle() { - z.EncJSONMarshal(yy4) - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minAvailable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.MinAvailable - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(yy6) - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudgetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "minAvailable": - if r.TryDecodeAsNil() { - x.MinAvailable = pkg1_intstr.IntOrString{} - } else { - yyv4 := &x.MinAvailable - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4) - } else { - z.DecFallback(yyv4, false) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_v1.LabelSelector) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinAvailable = pkg1_intstr.IntOrString{} - } else { - yyv9 := &x.MinAvailable - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_v1.LabelSelector) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ObservedGeneration != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else 
{ - yynn2 = 5 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.DisruptedPods == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encMapstringv1_Time((map[string]pkg2_v1.Time)(x.DisruptedPods), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("disruptedPods")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DisruptedPods == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encMapstringv1_Time((map[string]pkg2_v1.Time)(x.DisruptedPods), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.PodDisruptionsAllowed)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("disruptionsAllowed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.PodDisruptionsAllowed)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.CurrentHealthy)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentHealthy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.CurrentHealthy)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.DesiredHealthy)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredHealthy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.DesiredHealthy)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.ExpectedPods)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("expectedPods")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.ExpectedPods)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudgetStatus) 
CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - yyv4 := &x.ObservedGeneration - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(yyv4)) = int64(r.DecodeInt(64)) - } - } - case "disruptedPods": - if r.TryDecodeAsNil() { - x.DisruptedPods = nil - } else { - yyv6 := &x.DisruptedPods - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decMapstringv1_Time((*map[string]pkg2_v1.Time)(yyv6), d) - } - } - case "disruptionsAllowed": - if r.TryDecodeAsNil() { - x.PodDisruptionsAllowed = 0 - } else { - yyv8 := &x.PodDisruptionsAllowed - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(yyv8)) = int32(r.DecodeInt(32)) - } - } - case "currentHealthy": - if r.TryDecodeAsNil() { - x.CurrentHealthy = 0 - } else { - yyv10 := &x.CurrentHealthy - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(yyv10)) = int32(r.DecodeInt(32)) - } - } - case "desiredHealthy": - if r.TryDecodeAsNil() { - x.DesiredHealthy = 0 - } else { - yyv12 := &x.DesiredHealthy - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int32)(yyv12)) = int32(r.DecodeInt(32)) - } - } - case "expectedPods": - if r.TryDecodeAsNil() { - x.ExpectedPods = 0 - } else { - yyv14 := &x.ExpectedPods - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*int32)(yyv14)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - yyv17 := &x.ObservedGeneration - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - 
*((*int64)(yyv17)) = int64(r.DecodeInt(64)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DisruptedPods = nil - } else { - yyv19 := &x.DisruptedPods - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decMapstringv1_Time((*map[string]pkg2_v1.Time)(yyv19), d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodDisruptionsAllowed = 0 - } else { - yyv21 := &x.PodDisruptionsAllowed - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int32)(yyv21)) = int32(r.DecodeInt(32)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentHealthy = 0 - } else { - yyv23 := &x.CurrentHealthy - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*int32)(yyv23)) = int32(r.DecodeInt(32)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DesiredHealthy = 0 - } else { - yyv25 := &x.DesiredHealthy - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*int32)(yyv25)) = int32(r.DecodeInt(32)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExpectedPods = 0 - } else { - yyv27 := &x.ExpectedPods - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*int32)(yyv27)) = int32(r.DecodeInt(32)) - } - } - for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj16-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodDisruptionBudget) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = true - yyq2[4] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Spec - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Spec - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Status - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Status - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudget) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodDisruptionBudget) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PodDisruptionBudgetSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PodDisruptionBudgetStatus{} - } else { - yyv11 := &x.Status - yyv11.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PodDisruptionBudgetSpec{} - } else { - yyv19 := &x.Spec - yyv19.CodecDecodeSelf(d) - 
} - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PodDisruptionBudgetStatus{} - } else { - yyv20 := &x.Status - yyv20.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodDisruptionBudgetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) - } - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudgetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodDisruptionBudgetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudgetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if 
false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Eviction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[3] = x.DeleteOptions != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := 
z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.DeleteOptions == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeleteOptions) { - } else { - z.EncFallback(x.DeleteOptions) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deleteOptions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DeleteOptions == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeleteOptions) { - } else { - z.EncFallback(x.DeleteOptions) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Eviction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Eviction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if 
r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "deleteOptions": - if r.TryDecodeAsNil() { - if x.DeleteOptions != nil { - x.DeleteOptions = nil - } - } else { - if x.DeleteOptions == nil { - x.DeleteOptions = new(pkg2_v1.DeleteOptions) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeleteOptions) { - } else { - z.DecFallback(x.DeleteOptions, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Eviction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeleteOptions != nil { - x.DeleteOptions = nil - } - } else { - if x.DeleteOptions == nil { - x.DeleteOptions = new(pkg2_v1.DeleteOptions) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeleteOptions) { - } else { - z.DecFallback(x.DeleteOptions, false) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encMapstringv1_Time(v map[string]pkg2_v1.Time, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(yyk1)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3 := &yyv1 - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(yy3) { - } else if yym4 { - z.EncBinaryMarshal(yy3) - } else if !yym4 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3) - } else { - z.EncFallback(yy3) - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringv1_Time(v *map[string]pkg2_v1.Time, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyl1 := r.ReadMapStart() - yybh1 := z.DecBasicHandle() - if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) - yyv1 = make(map[string]pkg2_v1.Time, yyrl1) - *v = yyv1 - } - var yymk1 string - var yymv1 pkg2_v1.Time - var yymg1 bool - if yybh1.MapValueReset { - yymg1 = true - } - if yyl1 > 0 { - for yyj1 := 0; yyj1 < yyl1; yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yyv2 := &yymk1 - yym3 := z.DecBinary() - _ = yym3 - if false { - } else { - *((*string)(yyv2)) = r.DecodeString() - } - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = pkg2_v1.Time{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = pkg2_v1.Time{} - } else { - yyv4 := &yymv1 - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else if yym5 { - z.DecBinaryUnmarshal(yyv4) - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4) - } else { - z.DecFallback(yyv4, false) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } else if yyl1 < 0 { - for yyj1 := 0; !r.CheckBreak(); yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yyv6 := &yymk1 - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = pkg2_v1.Time{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = pkg2_v1.Time{} - } else { - yyv8 := &yymv1 - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } // else len==0: TODO: Should we clear map entries? 
- z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encSlicePodDisruptionBudget(v []PodDisruptionBudget, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodDisruptionBudget(v *[]PodDisruptionBudget, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PodDisruptionBudget{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PodDisruptionBudget, yyrl1) - } - } else { - yyv1 = make([]PodDisruptionBudget, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodDisruptionBudget{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodDisruptionBudget{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodDisruptionBudget{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodDisruptionBudget{}) // var yyz1 PodDisruptionBudget - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodDisruptionBudget{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodDisruptionBudget{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.go deleted file mode 100644 index ca5ea3730..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. -type PodDisruptionBudgetSpec struct { - // An eviction is allowed if at least "minAvailable" pods selected by - // "selector" will still be available after the eviction, i.e. even in the - // absence of the evicted pod. So for example you can prevent all voluntary - // evictions by specifying "100%". - MinAvailable intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"` - - // Label query over pods whose evictions are managed by the disruption - // budget. - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` -} - -// PodDisruptionBudgetStatus represents information about the status of a -// PodDisruptionBudget. Status may trail the actual state of a system. -type PodDisruptionBudgetStatus struct { - // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other - // status informatio is valid only if observedGeneration equals to PDB's object generation. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // DisruptedPods contains information about pods whose eviction was - // processed by the API server eviction subresource handler but has not - // yet been observed by the PodDisruptionBudget controller. - // A pod will be in this map from the time when the API server processed the - // eviction request to the time when the pod is seen by PDB controller - // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod - // and the value is the time when the API server processed the eviction request. If - // the deletion didn't occur and a pod is still there it will be removed from - // the list automatically by PodDisruptionBudget controller after some time. - // If everything goes smooth this map should be empty for the most of the time. - // Large number of entries in the map may indicate problems with pod deletions. - DisruptedPods map[string]metav1.Time `json:"disruptedPods" protobuf:"bytes,2,rep,name=disruptedPods"` - - // Number of pod disruptions that are currently allowed. - PodDisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,3,opt,name=disruptionsAllowed"` - - // current number of healthy pods - CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,4,opt,name=currentHealthy"` - - // minimum desired number of healthy pods - DesiredHealthy int32 `json:"desiredHealthy" protobuf:"varint,5,opt,name=desiredHealthy"` - - // total number of pods counted by this disruption budget - ExpectedPods int32 `json:"expectedPods" protobuf:"varint,6,opt,name=expectedPods"` -} - -// +genclient=true - -// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods -type PodDisruptionBudget struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the PodDisruptionBudget. - Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Most recently observed status of the PodDisruptionBudget. - Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. 
-type PodDisruptionBudgetList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient=true -// +noMethods=true - -// Eviction evicts a pod from its node subject to certain policies and safety constraints. -// This is a subresource of Pod. A request to cause such an eviction is -// created by POSTing to .../pods//evictions. -type Eviction struct { - metav1.TypeMeta `json:",inline"` - - // ObjectMeta describes the pod that is being evicted. - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // DeleteOptions may be provided - DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` -} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index d919856b2..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_Eviction = map[string]string{ - "": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. 
A request to cause such an eviction is created by POSTing to .../pods//evictions.", - "metadata": "ObjectMeta describes the pod that is being evicted.", - "deleteOptions": "DeleteOptions may be provided", -} - -func (Eviction) SwaggerDoc() map[string]string { - return map_Eviction -} - -var map_PodDisruptionBudget = map[string]string{ - "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", - "spec": "Specification of the desired behavior of the PodDisruptionBudget.", - "status": "Most recently observed status of the PodDisruptionBudget.", -} - -func (PodDisruptionBudget) SwaggerDoc() map[string]string { - return map_PodDisruptionBudget -} - -var map_PodDisruptionBudgetList = map[string]string{ - "": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.", -} - -func (PodDisruptionBudgetList) SwaggerDoc() map[string]string { - return map_PodDisruptionBudgetList -} - -var map_PodDisruptionBudgetSpec = map[string]string{ - "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", - "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", - "selector": "Label query over pods whose evictions are managed by the disruption budget.", -} - -func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { - return map_PodDisruptionBudgetSpec -} - -var map_PodDisruptionBudgetStatus = map[string]string{ - "": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", - "observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status informatio is valid only if observedGeneration equals to PDB's object generation.", - "disruptedPods": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. 
Large number of entries in the map may indicate problems with pod deletions.", - "disruptionsAllowed": "Number of pod disruptions that are currently allowed.", - "currentHealthy": "current number of healthy pods", - "desiredHealthy": "minimum desired number of healthy pods", - "expectedPods": "total number of pods counted by this disruption budget", -} - -func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { - return map_PodDisruptionBudgetStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.conversion.go deleted file mode 100644 index 5a54f5039..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,172 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - policy "k8s.io/client-go/pkg/apis/policy" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_Eviction_To_policy_Eviction, - Convert_policy_Eviction_To_v1beta1_Eviction, - Convert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget, - Convert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget, - Convert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList, - Convert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList, - Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec, - Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec, - Convert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus, - Convert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus, - ) -} - -func autoConvert_v1beta1_Eviction_To_policy_Eviction(in *Eviction, out *policy.Eviction, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.DeleteOptions = (*v1.DeleteOptions)(unsafe.Pointer(in.DeleteOptions)) - return nil -} - -func Convert_v1beta1_Eviction_To_policy_Eviction(in *Eviction, out *policy.Eviction, s conversion.Scope) error { - return autoConvert_v1beta1_Eviction_To_policy_Eviction(in, out, s) -} - -func autoConvert_policy_Eviction_To_v1beta1_Eviction(in *policy.Eviction, out *Eviction, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.DeleteOptions = (*v1.DeleteOptions)(unsafe.Pointer(in.DeleteOptions)) - return nil -} - -func Convert_policy_Eviction_To_v1beta1_Eviction(in *policy.Eviction, out *Eviction, s conversion.Scope) error { - return autoConvert_policy_Eviction_To_v1beta1_Eviction(in, out, s) -} - -func autoConvert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error { - return autoConvert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in, out, s) -} - -func autoConvert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *PodDisruptionBudget, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *PodDisruptionBudget, s conversion.Scope) error { - return autoConvert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget(in, out, s) -} - -func autoConvert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]policy.PodDisruptionBudget)(unsafe.Pointer(&in.Items)) - return nil -} - -func 
Convert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error { - return autoConvert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in, out, s) -} - -func autoConvert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *PodDisruptionBudgetList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]PodDisruptionBudget, 0) - } else { - out.Items = *(*[]PodDisruptionBudget)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *PodDisruptionBudgetList, s conversion.Scope) error { - return autoConvert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList(in, out, s) -} - -func autoConvert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error { - out.MinAvailable = in.MinAvailable - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - return nil -} - -func Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error { - return autoConvert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in, out, s) -} - -func autoConvert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, s conversion.Scope) error { - out.MinAvailable = in.MinAvailable - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - return nil -} - -func Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, s conversion.Scope) error { - return autoConvert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(in, out, s) -} - -func autoConvert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error { - out.ObservedGeneration = in.ObservedGeneration - out.DisruptedPods = *(*map[string]v1.Time)(unsafe.Pointer(&in.DisruptedPods)) - out.PodDisruptionsAllowed = in.PodDisruptionsAllowed - out.CurrentHealthy = in.CurrentHealthy - out.DesiredHealthy = in.DesiredHealthy - out.ExpectedPods = in.ExpectedPods - return nil -} - -func Convert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error { - return autoConvert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in, out, s) -} - -func autoConvert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s conversion.Scope) error { - out.ObservedGeneration = in.ObservedGeneration - out.DisruptedPods = *(*map[string]v1.Time)(unsafe.Pointer(&in.DisruptedPods)) - out.PodDisruptionsAllowed = in.PodDisruptionsAllowed - out.CurrentHealthy = in.CurrentHealthy - out.DesiredHealthy = in.DesiredHealthy - out.ExpectedPods = in.ExpectedPods - return nil -} - -func Convert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s 
conversion.Scope) error { - return autoConvert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 395e6689e..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,137 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Eviction, InType: reflect.TypeOf(&Eviction{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodDisruptionBudget, InType: reflect.TypeOf(&PodDisruptionBudget{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodDisruptionBudgetList, InType: reflect.TypeOf(&PodDisruptionBudgetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodDisruptionBudgetSpec, InType: reflect.TypeOf(&PodDisruptionBudgetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodDisruptionBudgetStatus, InType: reflect.TypeOf(&PodDisruptionBudgetStatus{})}, - ) -} - -func DeepCopy_v1beta1_Eviction(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Eviction) - out := out.(*Eviction) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.DeleteOptions != nil { - in, out := &in.DeleteOptions, &out.DeleteOptions - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.DeleteOptions) - } - } - return nil - } -} - -func DeepCopy_v1beta1_PodDisruptionBudget(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodDisruptionBudget) - out := out.(*PodDisruptionBudget) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1beta1_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_PodDisruptionBudgetStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_PodDisruptionBudgetList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodDisruptionBudgetList) - out := out.(*PodDisruptionBudgetList) - *out 
= *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodDisruptionBudget, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_PodDisruptionBudget(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_PodDisruptionBudgetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodDisruptionBudgetSpec) - out := out.(*PodDisruptionBudgetSpec) - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - return nil - } -} - -func DeepCopy_v1beta1_PodDisruptionBudgetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodDisruptionBudgetStatus) - out := out.(*PodDisruptionBudgetStatus) - *out = *in - if in.DisruptedPods != nil { - in, out := &in.DisruptedPods, &out.DisruptedPods - *out = make(map[string]v1.Time) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/policy/zz_generated.deepcopy.go deleted file mode 100644 index 298ec5fda..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/policy/zz_generated.deepcopy.go +++ /dev/null @@ -1,137 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package policy - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_policy_Eviction, InType: reflect.TypeOf(&Eviction{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_policy_PodDisruptionBudget, InType: reflect.TypeOf(&PodDisruptionBudget{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_policy_PodDisruptionBudgetList, InType: reflect.TypeOf(&PodDisruptionBudgetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_policy_PodDisruptionBudgetSpec, InType: reflect.TypeOf(&PodDisruptionBudgetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_policy_PodDisruptionBudgetStatus, InType: reflect.TypeOf(&PodDisruptionBudgetStatus{})}, - ) -} - -func DeepCopy_policy_Eviction(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Eviction) - out := out.(*Eviction) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.DeleteOptions != nil { - in, out := &in.DeleteOptions, &out.DeleteOptions - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.DeleteOptions) - } - } - return nil - } -} - -func DeepCopy_policy_PodDisruptionBudget(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodDisruptionBudget) - out := out.(*PodDisruptionBudget) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_policy_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_policy_PodDisruptionBudgetStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_policy_PodDisruptionBudgetList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodDisruptionBudgetList) - out := out.(*PodDisruptionBudgetList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodDisruptionBudget, len(*in)) - for i := range *in { - if err := DeepCopy_policy_PodDisruptionBudget(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_policy_PodDisruptionBudgetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodDisruptionBudgetSpec) - out := out.(*PodDisruptionBudgetSpec) - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*v1.LabelSelector) - } - } - return nil - } -} - -func DeepCopy_policy_PodDisruptionBudgetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodDisruptionBudgetStatus) - out := out.(*PodDisruptionBudgetStatus) - *out = *in - if in.DisruptedPods != nil { - in, out := &in.DisruptedPods, &out.DisruptedPods - *out = make(map[string]v1.Time) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/OWNERS b/vendor/k8s.io/client-go/pkg/apis/rbac/OWNERS deleted file mode 100755 index a35477b92..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/OWNERS +++ /dev/null @@ -1,17 +0,0 @@ -reviewers: -- thockin -- lavalamp -- smarterclayton -- deads2k -- sttts -- ncdc -- timothysc -- dims -- krousey -- mml -- mbohlool -- david-mcmahon -- ericchiang -- lixiaobing10051267 -- jianhuiz -- 
liggitt diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/helpers.go b/vendor/k8s.io/client-go/pkg/apis/rbac/helpers.go deleted file mode 100644 index a2ec9e8a5..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/helpers.go +++ /dev/null @@ -1,340 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rbac - -import ( - "fmt" - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" -) - -func RoleRefGroupKind(roleRef RoleRef) schema.GroupKind { - return schema.GroupKind{Group: roleRef.APIGroup, Kind: roleRef.Kind} -} - -func VerbMatches(rule PolicyRule, requestedVerb string) bool { - for _, ruleVerb := range rule.Verbs { - if ruleVerb == VerbAll { - return true - } - if ruleVerb == requestedVerb { - return true - } - } - - return false -} - -func APIGroupMatches(rule PolicyRule, requestedGroup string) bool { - for _, ruleGroup := range rule.APIGroups { - if ruleGroup == APIGroupAll { - return true - } - if ruleGroup == requestedGroup { - return true - } - } - - return false -} - -func ResourceMatches(rule PolicyRule, requestedResource string) bool { - for _, ruleResource := range rule.Resources { - if ruleResource == ResourceAll { - return true - } - if ruleResource == requestedResource { - return true - } - } - - return false -} - -func ResourceNameMatches(rule PolicyRule, requestedName string) bool { - if len(rule.ResourceNames) == 0 { - return true - } - - for _, ruleName := range rule.ResourceNames { - if ruleName == requestedName { - return true - } - } - - return false -} - -func NonResourceURLMatches(rule PolicyRule, requestedURL string) bool { - for _, ruleURL := range rule.NonResourceURLs { - if ruleURL == NonResourceAll { - return true - } - if ruleURL == requestedURL { - return true - } - if strings.HasSuffix(ruleURL, "*") && strings.HasPrefix(requestedURL, strings.TrimRight(ruleURL, "*")) { - return true - } - } - - return false -} - -// subjectsStrings returns users, groups, serviceaccounts, unknown for display purposes. -func SubjectsStrings(subjects []Subject) ([]string, []string, []string, []string) { - users := []string{} - groups := []string{} - sas := []string{} - others := []string{} - - for _, subject := range subjects { - switch subject.Kind { - case ServiceAccountKind: - sas = append(sas, fmt.Sprintf("%s/%s", subject.Namespace, subject.Name)) - - case UserKind: - users = append(users, subject.Name) - - case GroupKind: - groups = append(groups, subject.Name) - - default: - others = append(others, fmt.Sprintf("%s/%s/%s", subject.Kind, subject.Namespace, subject.Name)) - } - } - - return users, groups, sas, others -} - -// PolicyRuleBuilder let's us attach methods. A no-no for API types. -// We use it to construct rules in code. 
It's more compact than trying to write them -// out in a literal and allows us to perform some basic checking during construction -type PolicyRuleBuilder struct { - PolicyRule PolicyRule -} - -func NewRule(verbs ...string) *PolicyRuleBuilder { - return &PolicyRuleBuilder{ - PolicyRule: PolicyRule{Verbs: sets.NewString(verbs...).List()}, - } -} - -func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder { - r.PolicyRule.APIGroups = combine(r.PolicyRule.APIGroups, groups) - return r -} - -func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder { - r.PolicyRule.Resources = combine(r.PolicyRule.Resources, resources) - return r -} - -func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder { - r.PolicyRule.ResourceNames = combine(r.PolicyRule.ResourceNames, names) - return r -} - -func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder { - r.PolicyRule.NonResourceURLs = combine(r.PolicyRule.NonResourceURLs, urls) - return r -} - -func (r *PolicyRuleBuilder) RuleOrDie() PolicyRule { - ret, err := r.Rule() - if err != nil { - panic(err) - } - return ret -} - -func combine(s1, s2 []string) []string { - s := sets.NewString(s1...) - s.Insert(s2...) - return s.List() -} - -func (r *PolicyRuleBuilder) Rule() (PolicyRule, error) { - if len(r.PolicyRule.Verbs) == 0 { - return PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule) - } - - switch { - case len(r.PolicyRule.NonResourceURLs) > 0: - if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 { - return PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule) - } - case len(r.PolicyRule.Resources) > 0: - if len(r.PolicyRule.NonResourceURLs) != 0 { - return PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule) - } - if len(r.PolicyRule.APIGroups) == 0 { - // this a common bug - return PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule) - } - default: - return PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule) - } - - return r.PolicyRule, nil -} - -// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. -// We use it to construct bindings in code. It's more compact than trying to write them -// out in a literal. 
-type ClusterRoleBindingBuilder struct { - ClusterRoleBinding ClusterRoleBinding -} - -func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder { - return &ClusterRoleBindingBuilder{ - ClusterRoleBinding: ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName}, - RoleRef: RoleRef{ - APIGroup: GroupName, - Kind: "ClusterRole", - Name: clusterRoleName, - }, - }, - } -} - -func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder { - for _, group := range groups { - r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: GroupKind, APIGroup: GroupName, Name: group}) - } - return r -} - -func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder { - for _, user := range users { - r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: UserKind, APIGroup: GroupName, Name: user}) - } - return r -} - -func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder { - for _, saName := range serviceAccountNames { - r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) - } - return r -} - -func (r *ClusterRoleBindingBuilder) BindingOrDie() ClusterRoleBinding { - ret, err := r.Binding() - if err != nil { - panic(err) - } - return ret -} - -func (r *ClusterRoleBindingBuilder) Binding() (ClusterRoleBinding, error) { - if len(r.ClusterRoleBinding.Subjects) == 0 { - return ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding) - } - - return r.ClusterRoleBinding, nil -} - -// RoleBindingBuilder let's us attach methods. It is similar to -// ClusterRoleBindingBuilder above. -type RoleBindingBuilder struct { - RoleBinding RoleBinding -} - -// NewRoleBinding creates a RoleBinding builder that can be used -// to define the subjects of a role binding. At least one of -// the `Groups`, `Users` or `SAs` method must be called before -// calling the `Binding*` methods. -func NewRoleBinding(roleName, namespace string) *RoleBindingBuilder { - return &RoleBindingBuilder{ - RoleBinding: RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: roleName, - Namespace: namespace, - }, - RoleRef: RoleRef{ - APIGroup: GroupName, - Kind: "Role", - Name: roleName, - }, - }, - } -} - -func NewRoleBindingForClusterRole(roleName, namespace string) *RoleBindingBuilder { - return &RoleBindingBuilder{ - RoleBinding: RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: roleName, - Namespace: namespace, - }, - RoleRef: RoleRef{ - APIGroup: GroupName, - Kind: "ClusterRole", - Name: roleName, - }, - }, - } -} - -// Groups adds the specified groups as the subjects of the RoleBinding. -func (r *RoleBindingBuilder) Groups(groups ...string) *RoleBindingBuilder { - for _, group := range groups { - r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: GroupKind, Name: group}) - } - return r -} - -// Users adds the specified users as the subjects of the RoleBinding. -func (r *RoleBindingBuilder) Users(users ...string) *RoleBindingBuilder { - for _, user := range users { - r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: UserKind, Name: user}) - } - return r -} - -// SAs adds the specified service accounts as the subjects of the -// RoleBinding. 
-func (r *RoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *RoleBindingBuilder { - for _, saName := range serviceAccountNames { - r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) - } - return r -} - -// BindingOrDie calls the binding method and panics if there is an error. -func (r *RoleBindingBuilder) BindingOrDie() RoleBinding { - ret, err := r.Binding() - if err != nil { - panic(err) - } - return ret -} - -// Binding builds and returns the RoleBinding API object from the builder -// object. -func (r *RoleBindingBuilder) Binding() (RoleBinding, error) { - if len(r.RoleBinding.Subjects) == 0 { - return RoleBinding{}, fmt.Errorf("subjects are required: %#v", r.RoleBinding) - } - - return r.RoleBinding, nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/install/install.go b/vendor/k8s.io/client-go/pkg/apis/rbac/install/install.go deleted file mode 100644 index f92c11e06..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/install/install.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the batch API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/apis/rbac" - "k8s.io/client-go/pkg/apis/rbac/v1alpha1" - "k8s.io/client-go/pkg/apis/rbac/v1beta1" -) - -func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: rbac.GroupName, - VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version, v1alpha1.SchemeGroupVersion.Version}, - ImportPrefix: "k8s.io/client-go/pkg/apis/rbac", - RootScopedKinds: sets.NewString("ClusterRole", "ClusterRoleBinding"), - AddInternalObjectsToScheme: rbac.AddToScheme, - }, - announced.VersionToSchemeFunc{ - v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, - v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme, - }, - ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/conversion.go deleted file mode 100644 index 22b3c4076..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/conversion.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime/schema" - api "k8s.io/client-go/pkg/apis/rbac" -) - -// allAuthenticated matches k8s.io/apiserver/pkg/authentication/user.AllAuthenticated, -// but we don't want an client library (which must include types), depending on a server library -const allAuthenticated = "system:authenticated" - -func Convert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *api.Subject, s conversion.Scope) error { - if err := autoConvert_v1alpha1_Subject_To_rbac_Subject(in, out, s); err != nil { - return err - } - - // specifically set the APIGroup for the three subjects recognized in v1alpha1 - switch { - case in.Kind == ServiceAccountKind: - out.APIGroup = "" - case in.Kind == UserKind: - out.APIGroup = GroupName - case in.Kind == GroupKind: - out.APIGroup = GroupName - default: - // For unrecognized kinds, use the group portion of the APIVersion if we can get it - if gv, err := schema.ParseGroupVersion(in.APIVersion); err == nil { - out.APIGroup = gv.Group - } - } - - // User * in v1alpha1 will only match all authenticated users - // This is only for compatibility with old RBAC bindings - // Special treatment for * should not be included in v1beta1 - if out.Kind == UserKind && out.APIGroup == GroupName && out.Name == "*" { - out.Kind = GroupKind - out.Name = allAuthenticated - } - - return nil -} - -func Convert_rbac_Subject_To_v1alpha1_Subject(in *api.Subject, out *Subject, s conversion.Scope) error { - if err := autoConvert_rbac_Subject_To_v1alpha1_Subject(in, out, s); err != nil { - return err - } - - switch { - case in.Kind == ServiceAccountKind && in.APIGroup == "": - // Make service accounts v1 - out.APIVersion = "v1" - case in.Kind == UserKind && in.APIGroup == GroupName: - // users in the rbac API group get v1alpha - out.APIVersion = SchemeGroupVersion.String() - case in.Kind == GroupKind && in.APIGroup == GroupName: - // groups in the rbac API group get v1alpha - out.APIVersion = SchemeGroupVersion.String() - default: - // otherwise, they get an unspecified version of a group - out.APIVersion = schema.GroupVersion{Group: in.APIGroup}.String() - } - - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/defaults.go deleted file mode 100644 index 49e934916..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/defaults.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - RegisterDefaults(scheme) - return scheme.AddDefaultingFuncs( - SetDefaults_ClusterRoleBinding, - SetDefaults_RoleBinding, - SetDefaults_Subject, - ) -} - -func SetDefaults_ClusterRoleBinding(obj *ClusterRoleBinding) { - if len(obj.RoleRef.APIGroup) == 0 { - obj.RoleRef.APIGroup = GroupName - } -} -func SetDefaults_RoleBinding(obj *RoleBinding) { - if len(obj.RoleRef.APIGroup) == 0 { - obj.RoleRef.APIGroup = GroupName - } -} -func SetDefaults_Subject(obj *Subject) { - if len(obj.APIVersion) == 0 { - switch obj.Kind { - case ServiceAccountKind: - obj.APIVersion = "v1" - case UserKind: - obj.APIVersion = SchemeGroupVersion.String() - case GroupKind: - obj.APIVersion = SchemeGroupVersion.String() - } - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/helpers.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/helpers.go deleted file mode 100644 index f417f3be0..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/helpers.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// PolicyRuleBuilder let's us attach methods. A no-no for API types. -// We use it to construct rules in code. It's more compact than trying to write them -// out in a literal and allows us to perform some basic checking during construction -type PolicyRuleBuilder struct { - PolicyRule PolicyRule `protobuf:"bytes,1,opt,name=policyRule"` -} - -func NewRule(verbs ...string) *PolicyRuleBuilder { - return &PolicyRuleBuilder{ - PolicyRule: PolicyRule{Verbs: verbs}, - } -} - -func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder { - r.PolicyRule.APIGroups = append(r.PolicyRule.APIGroups, groups...) - return r -} - -func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder { - r.PolicyRule.Resources = append(r.PolicyRule.Resources, resources...) - return r -} - -func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder { - r.PolicyRule.ResourceNames = append(r.PolicyRule.ResourceNames, names...) - return r -} - -func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder { - r.PolicyRule.NonResourceURLs = append(r.PolicyRule.NonResourceURLs, urls...) 
- return r -} - -func (r *PolicyRuleBuilder) RuleOrDie() PolicyRule { - ret, err := r.Rule() - if err != nil { - panic(err) - } - return ret -} - -func (r *PolicyRuleBuilder) Rule() (PolicyRule, error) { - if len(r.PolicyRule.Verbs) == 0 { - return PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule) - } - - switch { - case len(r.PolicyRule.NonResourceURLs) > 0: - if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 { - return PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule) - } - case len(r.PolicyRule.Resources) > 0: - if len(r.PolicyRule.NonResourceURLs) != 0 { - return PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule) - } - if len(r.PolicyRule.APIGroups) == 0 { - // this a common bug - return PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule) - } - default: - return PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule) - } - - return r.PolicyRule, nil -} - -// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. -// We use it to construct bindings in code. It's more compact than trying to write them -// out in a literal. -type ClusterRoleBindingBuilder struct { - ClusterRoleBinding ClusterRoleBinding `protobuf:"bytes,1,opt,name=clusterRoleBinding"` -} - -func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder { - return &ClusterRoleBindingBuilder{ - ClusterRoleBinding: ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName}, - RoleRef: RoleRef{ - APIGroup: GroupName, - Kind: "ClusterRole", - Name: clusterRoleName, - }, - }, - } -} - -func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder { - for _, group := range groups { - r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: GroupKind, Name: group}) - } - return r -} - -func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder { - for _, user := range users { - r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: UserKind, Name: user}) - } - return r -} - -func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder { - for _, saName := range serviceAccountNames { - r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) - } - return r -} - -func (r *ClusterRoleBindingBuilder) BindingOrDie() ClusterRoleBinding { - ret, err := r.Binding() - if err != nil { - panic(err) - } - return ret -} - -func (r *ClusterRoleBindingBuilder) Binding() (ClusterRoleBinding, error) { - if len(r.ClusterRoleBinding.Subjects) == 0 { - return ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding) - } - - return r.ClusterRoleBinding, nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.generated.go deleted file mode 100644 index 64ba53a9b..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.generated.go +++ /dev/null @@ -1,4879 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1alpha1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkg2_types "k8s.io/apimachinery/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_v1.TypeMeta - var v1 pkg2_types.UID - var v2 time.Time - _, _, _ = v0, v1, v2 - } -} - -func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.APIGroups) != 0 - yyq2[2] = len(x.Resources) != 0 - yyq2[3] = len(x.ResourceNames) != 0 - yyq2[4] = len(x.NonResourceURLs) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Verbs == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Verbs, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("verbs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Verbs == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Verbs, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.APIGroups == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - z.F.EncSliceStringV(x.APIGroups, false, e) - } - } - } else { - 
r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiGroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.APIGroups == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - z.F.EncSliceStringV(x.APIGroups, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Resources == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncSliceStringV(x.Resources, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Resources == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncSliceStringV(x.Resources, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.ResourceNames == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - z.F.EncSliceStringV(x.ResourceNames, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceNames")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceNames == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - z.F.EncSliceStringV(x.ResourceNames, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.NonResourceURLs == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - z.F.EncSliceStringV(x.NonResourceURLs, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nonResourceURLs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NonResourceURLs == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - z.F.EncSliceStringV(x.NonResourceURLs, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PolicyRule) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PolicyRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "verbs": - if r.TryDecodeAsNil() { - x.Verbs = nil - } else { - yyv4 := &x.Verbs - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "apiGroups": - if r.TryDecodeAsNil() { - x.APIGroups = nil - } else { - yyv6 := &x.APIGroups - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - case "resources": - if r.TryDecodeAsNil() { - x.Resources = nil - } else { - yyv8 := &x.Resources - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecSliceStringX(yyv8, false, d) - } - } - case "resourceNames": - if r.TryDecodeAsNil() { - x.ResourceNames = nil - } else { - yyv10 := &x.ResourceNames - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - z.F.DecSliceStringX(yyv10, false, d) - } - } - case "nonResourceURLs": - if r.TryDecodeAsNil() { - x.NonResourceURLs = nil - } else { - yyv12 := &x.NonResourceURLs - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecSliceStringX(yyv12, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Verbs = nil - } else { - yyv15 := &x.Verbs - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - z.F.DecSliceStringX(yyv15, false, d) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIGroups = nil - } else { - yyv17 := &x.APIGroups - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - z.F.DecSliceStringX(yyv17, false, d) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resources = nil - } else { - yyv19 := &x.Resources - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - z.F.DecSliceStringX(yyv19, false, d) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceNames = nil - } else { - yyv21 := &x.ResourceNames - yym22 := z.DecBinary() - _ = yym22 - if false { - } else 
{ - z.F.DecSliceStringX(yyv21, false, d) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NonResourceURLs = nil - } else { - yyv23 := &x.NonResourceURLs - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - z.F.DecSliceStringX(yyv23, false, d) - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Subject) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.APIVersion != "" - yyq2[3] = x.Namespace != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Subject) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Subject) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv8 := &x.Name - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - yyv10 := &x.Namespace - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Subject) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv17 := &x.Name - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - yyv19 := &x.Namespace - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RoleRef) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiGroup")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RoleRef) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RoleRef) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "apiGroup": - if r.TryDecodeAsNil() { - x.APIGroup = "" - } else { - yyv4 := &x.APIGroup - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv6 := &x.Kind - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv8 := &x.Name - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RoleRef) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIGroup = "" - } else { - yyv11 := &x.APIGroup - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv15 := &x.Name - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Role) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Rules == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rules")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Rules == nil { - r.EncodeNil() - } else { - 
yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Role) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Role) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "rules": - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv10 := &x.Rules - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Role) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv19 := &x.Rules - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { 
- z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Subjects == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceSubject(([]Subject)(x.Subjects), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subjects")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Subjects == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceSubject(([]Subject)(x.Subjects), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy18 := &x.RoleRef - yy18.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("roleRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy20 := &x.RoleRef - yy20.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if 
z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "subjects": - if r.TryDecodeAsNil() { - x.Subjects = nil - } else { - yyv10 := &x.Subjects - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceSubject((*[]Subject)(yyv10), d) - } - } - case "roleRef": - if r.TryDecodeAsNil() { - x.RoleRef = RoleRef{} - } else { - yyv12 := &x.RoleRef - yyv12.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv14 := &x.Kind - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv16 := &x.APIVersion - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv18 := &x.ObjectMeta - yym19 := z.DecBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.DecExt(yyv18) { - } else { - z.DecFallback(yyv18, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subjects = nil - } else { - yyv20 := &x.Subjects - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - h.decSliceSubject((*[]Subject)(yyv20), d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RoleRef = RoleRef{} - } else { - yyv22 := &x.RoleRef - yyv22.CodecDecodeSelf(d) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := 
z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 
== codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceRoleBinding((*[]RoleBinding)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceRoleBinding((*[]RoleBinding)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RoleList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceRole(([]Role)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceRole(([]Role)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RoleList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceRole((*[]Role)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceRole((*[]Role)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterRole) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := 
&x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Rules == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rules")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Rules == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterRole) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterRole) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "rules": - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv10 := &x.Rules - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - 
h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterRole) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv19 := &x.Rules - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterRoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Subjects == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceSubject(([]Subject)(x.Subjects), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subjects")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Subjects == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceSubject(([]Subject)(x.Subjects), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy18 := &x.RoleRef - yy18.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("roleRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy20 := &x.RoleRef - yy20.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterRoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterRoleBinding) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "subjects": - if r.TryDecodeAsNil() { - x.Subjects = nil - } else { - yyv10 := &x.Subjects - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceSubject((*[]Subject)(yyv10), d) - } - } - case "roleRef": - if r.TryDecodeAsNil() { - x.RoleRef = RoleRef{} - } else { - yyv12 := &x.RoleRef - yyv12.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterRoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv14 := &x.Kind - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv16 := &x.APIVersion - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv18 := &x.ObjectMeta - yym19 := z.DecBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.DecExt(yyv18) { - } else { - z.DecFallback(yyv18, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subjects = nil - } else { - yyv20 
:= &x.Subjects - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - h.decSliceSubject((*[]Subject)(yyv20), d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RoleRef = RoleRef{} - } else { - yyv22 := &x.RoleRef - yyv22.CodecDecodeSelf(d) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterRoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false 
{ - } else { - h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterRoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterRoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterRoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterRoleList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceClusterRole(([]ClusterRole)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceClusterRole(([]ClusterRole)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterRoleList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterRoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := 
z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceClusterRole((*[]ClusterRole)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterRoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceClusterRole((*[]ClusterRole)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSlicePolicyRule(v []PolicyRule, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePolicyRule(v *[]PolicyRule, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PolicyRule{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } 
else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PolicyRule, yyrl1) - } - } else { - yyv1 = make([]PolicyRule, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PolicyRule{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PolicyRule{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PolicyRule{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PolicyRule{}) // var yyz1 PolicyRule - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PolicyRule{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PolicyRule{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceSubject(v []Subject, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceSubject(v *[]Subject, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Subject{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Subject, yyrl1) - } - } else { - yyv1 = make([]Subject, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Subject{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Subject{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Subject{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Subject{}) // var yyz1 Subject - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] 
= Subject{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Subject{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceRoleBinding(v []RoleBinding, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceRoleBinding(v *[]RoleBinding, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []RoleBinding{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]RoleBinding, yyrl1) - } - } else { - yyv1 = make([]RoleBinding, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = RoleBinding{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, RoleBinding{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = RoleBinding{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, RoleBinding{}) // var yyz1 RoleBinding - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = RoleBinding{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []RoleBinding{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceRole(v []Role, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceRole(v *[]Role, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Role{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) 
{ - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Role, yyrl1) - } - } else { - yyv1 = make([]Role, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Role{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Role{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Role{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Role{}) // var yyz1 Role - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Role{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Role{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceClusterRoleBinding(v []ClusterRoleBinding, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceClusterRoleBinding(v *[]ClusterRoleBinding, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ClusterRoleBinding{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ClusterRoleBinding, yyrl1) - } - } else { - yyv1 = make([]ClusterRoleBinding, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRoleBinding{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ClusterRoleBinding{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRoleBinding{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ClusterRoleBinding{}) // var yyz1 ClusterRoleBinding - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRoleBinding{} - } else { - yyv4 := 
&yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ClusterRoleBinding{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceClusterRole(v []ClusterRole, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceClusterRole(v *[]ClusterRole, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ClusterRole{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ClusterRole, yyrl1) - } - } else { - yyv1 = make([]ClusterRole, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRole{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ClusterRole{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRole{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ClusterRole{}) // var yyz1 ClusterRole - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRole{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ClusterRole{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index 1471d529f..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,445 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1alpha1 - -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - rbac "k8s.io/client-go/pkg/apis/rbac" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole, - Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole, - Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding, - Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding, - Convert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder, - Convert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder, - Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList, - Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList, - Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList, - Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList, - Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule, - Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule, - Convert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder, - Convert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder, - Convert_v1alpha1_Role_To_rbac_Role, - Convert_rbac_Role_To_v1alpha1_Role, - Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding, - Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding, - Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList, - Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList, - Convert_v1alpha1_RoleList_To_rbac_RoleList, - Convert_rbac_RoleList_To_v1alpha1_RoleList, - Convert_v1alpha1_RoleRef_To_rbac_RoleRef, - Convert_rbac_RoleRef_To_v1alpha1_RoleRef, - Convert_v1alpha1_Subject_To_rbac_Subject, - Convert_rbac_Subject_To_v1alpha1_Subject, - ) -} - -func autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) - return nil -} - -func Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in, out, s) -} - -func autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if in.Rules == nil { - out.Rules = make([]PolicyRule, 0) - } else { - out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) - } - return nil -} - -func Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { - return autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in, out, s) -} - -func autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]rbac.Subject, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Subjects = nil - } - 
if err := Convert_v1alpha1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in, out, s) -} - -func autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - for i := range *in { - if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Subjects = make([]Subject, 0) - } - if err := Convert_rbac_RoleRef_To_v1alpha1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { - return err - } - return nil -} - -func Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { - return autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in, out, s) -} - -func autoConvert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { - if err := Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in, out, s) -} - -func autoConvert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { - if err := Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { - return err - } - return nil -} - -func Convert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { - return autoConvert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder(in, out, s) -} - -func autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]rbac.ClusterRoleBinding, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s) -} - -func autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, 
&out.Items - *out = make([]ClusterRoleBinding, len(*in)) - for i := range *in { - if err := Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]ClusterRoleBinding, 0) - } - return nil -} - -func Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { - return autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in, out, s) -} - -func autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]rbac.ClusterRole)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s) -} - -func autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]ClusterRole, 0) - } else { - out.Items = *(*[]ClusterRole)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { - return autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in, out, s) -} - -func autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { - out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) - out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) - out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) - out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) - out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) - return nil -} - -func Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { - return autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in, out, s) -} - -func autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { - if in.Verbs == nil { - out.Verbs = make([]string, 0) - } else { - out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) - } - out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) - out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) - out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) - out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) - return nil -} - -func Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { - return autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in, out, s) -} - -func autoConvert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { - if err := Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { - return autoConvert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in, out, s) -} - -func 
autoConvert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { - if err := Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { - return err - } - return nil -} - -func Convert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { - return autoConvert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder(in, out, s) -} - -func autoConvert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) - return nil -} - -func Convert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { - return autoConvert_v1alpha1_Role_To_rbac_Role(in, out, s) -} - -func autoConvert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if in.Rules == nil { - out.Rules = make([]PolicyRule, 0) - } else { - out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) - } - return nil -} - -func Convert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { - return autoConvert_rbac_Role_To_v1alpha1_Role(in, out, s) -} - -func autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]rbac.Subject, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Subjects = nil - } - if err := Convert_v1alpha1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { - return autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in, out, s) -} - -func autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - for i := range *in { - if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Subjects = make([]Subject, 0) - } - if err := Convert_rbac_RoleRef_To_v1alpha1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { - return err - } - return nil -} - -func Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { - return autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in, out, s) -} - -func autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]rbac.RoleBinding, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { - return 
autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in, out, s) -} - -func autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]RoleBinding, len(*in)) - for i := range *in { - if err := Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]RoleBinding, 0) - } - return nil -} - -func Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { - return autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in, out, s) -} - -func autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]rbac.Role)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { - return autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in, out, s) -} - -func autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]Role, 0) - } else { - out.Items = *(*[]Role)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { - return autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in, out, s) -} - -func autoConvert_v1alpha1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error { - out.APIGroup = in.APIGroup - out.Kind = in.Kind - out.Name = in.Name - return nil -} - -func Convert_v1alpha1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error { - return autoConvert_v1alpha1_RoleRef_To_rbac_RoleRef(in, out, s) -} - -func autoConvert_rbac_RoleRef_To_v1alpha1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error { - out.APIGroup = in.APIGroup - out.Kind = in.Kind - out.Name = in.Name - return nil -} - -func Convert_rbac_RoleRef_To_v1alpha1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error { - return autoConvert_rbac_RoleRef_To_v1alpha1_RoleRef(in, out, s) -} - -func autoConvert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { - out.Kind = in.Kind - // INFO: in.APIVersion opted out of conversion generation - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} - -func autoConvert_rbac_Subject_To_v1alpha1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { - out.Kind = in.Kind - // WARNING: in.APIGroup requires manual conversion: does not exist in peer-type - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index f4dfd3ca7..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,258 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ClusterRole, InType: reflect.TypeOf(&ClusterRole{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ClusterRoleBinding, InType: reflect.TypeOf(&ClusterRoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ClusterRoleBindingList, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ClusterRoleList, InType: reflect.TypeOf(&ClusterRoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_PolicyRule, InType: reflect.TypeOf(&PolicyRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_Role, InType: reflect.TypeOf(&Role{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_RoleBinding, InType: reflect.TypeOf(&RoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_RoleBindingList, InType: reflect.TypeOf(&RoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_RoleList, InType: reflect.TypeOf(&RoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_RoleRef, InType: reflect.TypeOf(&RoleRef{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_Subject, InType: reflect.TypeOf(&Subject{})}, - ) -} - -func DeepCopy_v1alpha1_ClusterRole(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterRole) - out := out.(*ClusterRole) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - if err := DeepCopy_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1alpha1_ClusterRoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterRoleBinding) - out := out.(*ClusterRoleBinding) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1alpha1_ClusterRoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterRoleBindingList) - out := out.(*ClusterRoleBindingList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterRoleBinding, len(*in)) - for i := range *in { - 
if err := DeepCopy_v1alpha1_ClusterRoleBinding(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1alpha1_ClusterRoleList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterRoleList) - out := out.(*ClusterRoleList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterRole, len(*in)) - for i := range *in { - if err := DeepCopy_v1alpha1_ClusterRole(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1alpha1_PolicyRule(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PolicyRule) - out := out.(*PolicyRule) - *out = *in - if in.Verbs != nil { - in, out := &in.Verbs, &out.Verbs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.APIGroups != nil { - in, out := &in.APIGroups, &out.APIGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ResourceNames != nil { - in, out := &in.ResourceNames, &out.ResourceNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.NonResourceURLs != nil { - in, out := &in.NonResourceURLs, &out.NonResourceURLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1alpha1_Role(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Role) - out := out.(*Role) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - if err := DeepCopy_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1alpha1_RoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RoleBinding) - out := out.(*RoleBinding) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1alpha1_RoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RoleBindingList) - out := out.(*RoleBindingList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]RoleBinding, len(*in)) - for i := range *in { - if err := DeepCopy_v1alpha1_RoleBinding(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1alpha1_RoleList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RoleList) - out := out.(*RoleList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Role, len(*in)) - for i := range *in { - if err := DeepCopy_v1alpha1_Role(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1alpha1_RoleRef(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RoleRef) - out := out.(*RoleRef) - *out = *in - return nil - } -} - -func DeepCopy_v1alpha1_Subject(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Subject) - out := out.(*Subject) - *out = *in - return 
nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go deleted file mode 100644 index 1a5749be3..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go +++ /dev/null @@ -1,66 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by defaulter-gen. Do not edit it manually! - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&ClusterRoleBinding{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBinding(obj.(*ClusterRoleBinding)) }) - scheme.AddTypeDefaultingFunc(&ClusterRoleBindingList{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBindingList(obj.(*ClusterRoleBindingList)) }) - scheme.AddTypeDefaultingFunc(&RoleBinding{}, func(obj interface{}) { SetObjectDefaults_RoleBinding(obj.(*RoleBinding)) }) - scheme.AddTypeDefaultingFunc(&RoleBindingList{}, func(obj interface{}) { SetObjectDefaults_RoleBindingList(obj.(*RoleBindingList)) }) - return nil -} - -func SetObjectDefaults_ClusterRoleBinding(in *ClusterRoleBinding) { - SetDefaults_ClusterRoleBinding(in) - for i := range in.Subjects { - a := &in.Subjects[i] - SetDefaults_Subject(a) - } -} - -func SetObjectDefaults_ClusterRoleBindingList(in *ClusterRoleBindingList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_ClusterRoleBinding(a) - } -} - -func SetObjectDefaults_RoleBinding(in *RoleBinding) { - SetDefaults_RoleBinding(in) - for i := range in.Subjects { - a := &in.Subjects[i] - SetDefaults_Subject(a) - } -} - -func SetObjectDefaults_RoleBindingList(in *RoleBindingList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_RoleBinding(a) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/defaults.go deleted file mode 100644 index 6c29ae500..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/defaults.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - RegisterDefaults(scheme) - return scheme.AddDefaultingFuncs( - SetDefaults_ClusterRoleBinding, - SetDefaults_RoleBinding, - SetDefaults_Subject, - ) -} - -func SetDefaults_ClusterRoleBinding(obj *ClusterRoleBinding) { - if len(obj.RoleRef.APIGroup) == 0 { - obj.RoleRef.APIGroup = GroupName - } -} -func SetDefaults_RoleBinding(obj *RoleBinding) { - if len(obj.RoleRef.APIGroup) == 0 { - obj.RoleRef.APIGroup = GroupName - } -} -func SetDefaults_Subject(obj *Subject) { - if len(obj.APIGroup) == 0 { - switch obj.Kind { - case ServiceAccountKind: - obj.APIGroup = "" - case UserKind: - obj.APIGroup = GroupName - case GroupKind: - obj.APIGroup = GroupName - } - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/helpers.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/helpers.go deleted file mode 100644 index 063b9ed3f..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/helpers.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// PolicyRuleBuilder let's us attach methods. A no-no for API types. -// We use it to construct rules in code. It's more compact than trying to write them -// out in a literal and allows us to perform some basic checking during construction -type PolicyRuleBuilder struct { - PolicyRule PolicyRule `protobuf:"bytes,1,opt,name=policyRule"` -} - -func NewRule(verbs ...string) *PolicyRuleBuilder { - return &PolicyRuleBuilder{ - PolicyRule: PolicyRule{Verbs: verbs}, - } -} - -func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder { - r.PolicyRule.APIGroups = append(r.PolicyRule.APIGroups, groups...) - return r -} - -func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder { - r.PolicyRule.Resources = append(r.PolicyRule.Resources, resources...) - return r -} - -func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder { - r.PolicyRule.ResourceNames = append(r.PolicyRule.ResourceNames, names...) - return r -} - -func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder { - r.PolicyRule.NonResourceURLs = append(r.PolicyRule.NonResourceURLs, urls...) 
- return r -} - -func (r *PolicyRuleBuilder) RuleOrDie() PolicyRule { - ret, err := r.Rule() - if err != nil { - panic(err) - } - return ret -} - -func (r *PolicyRuleBuilder) Rule() (PolicyRule, error) { - if len(r.PolicyRule.Verbs) == 0 { - return PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule) - } - - switch { - case len(r.PolicyRule.NonResourceURLs) > 0: - if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 { - return PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule) - } - case len(r.PolicyRule.Resources) > 0: - if len(r.PolicyRule.NonResourceURLs) != 0 { - return PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule) - } - if len(r.PolicyRule.APIGroups) == 0 { - // this a common bug - return PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule) - } - default: - return PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule) - } - - return r.PolicyRule, nil -} - -// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. -// We use it to construct bindings in code. It's more compact than trying to write them -// out in a literal. -type ClusterRoleBindingBuilder struct { - ClusterRoleBinding ClusterRoleBinding `protobuf:"bytes,1,opt,name=clusterRoleBinding"` -} - -func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder { - return &ClusterRoleBindingBuilder{ - ClusterRoleBinding: ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName}, - RoleRef: RoleRef{ - APIGroup: GroupName, - Kind: "ClusterRole", - Name: clusterRoleName, - }, - }, - } -} - -func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder { - for _, group := range groups { - r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: GroupKind, Name: group}) - } - return r -} - -func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder { - for _, user := range users { - r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: UserKind, Name: user}) - } - return r -} - -func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder { - for _, saName := range serviceAccountNames { - r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) - } - return r -} - -func (r *ClusterRoleBindingBuilder) BindingOrDie() ClusterRoleBinding { - ret, err := r.Binding() - if err != nil { - panic(err) - } - return ret -} - -func (r *ClusterRoleBindingBuilder) Binding() (ClusterRoleBinding, error) { - if len(r.ClusterRoleBinding.Subjects) == 0 { - return ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding) - } - - return r.ClusterRoleBinding, nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.generated.go deleted file mode 100644 index 56de3e3e7..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.generated.go +++ /dev/null @@ -1,4879 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1beta1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkg2_types "k8s.io/apimachinery/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_v1.TypeMeta - var v1 pkg2_types.UID - var v2 time.Time - _, _, _ = v0, v1, v2 - } -} - -func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.APIGroups) != 0 - yyq2[2] = len(x.Resources) != 0 - yyq2[3] = len(x.ResourceNames) != 0 - yyq2[4] = len(x.NonResourceURLs) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Verbs == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Verbs, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("verbs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Verbs == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Verbs, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.APIGroups == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - z.F.EncSliceStringV(x.APIGroups, false, e) - } - } - } else { - 
r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiGroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.APIGroups == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - z.F.EncSliceStringV(x.APIGroups, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Resources == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncSliceStringV(x.Resources, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Resources == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncSliceStringV(x.Resources, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.ResourceNames == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - z.F.EncSliceStringV(x.ResourceNames, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceNames")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceNames == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - z.F.EncSliceStringV(x.ResourceNames, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.NonResourceURLs == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - z.F.EncSliceStringV(x.NonResourceURLs, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nonResourceURLs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NonResourceURLs == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - z.F.EncSliceStringV(x.NonResourceURLs, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PolicyRule) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PolicyRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "verbs": - if r.TryDecodeAsNil() { - x.Verbs = nil - } else { - yyv4 := &x.Verbs - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "apiGroups": - if r.TryDecodeAsNil() { - x.APIGroups = nil - } else { - yyv6 := &x.APIGroups - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - case "resources": - if r.TryDecodeAsNil() { - x.Resources = nil - } else { - yyv8 := &x.Resources - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecSliceStringX(yyv8, false, d) - } - } - case "resourceNames": - if r.TryDecodeAsNil() { - x.ResourceNames = nil - } else { - yyv10 := &x.ResourceNames - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - z.F.DecSliceStringX(yyv10, false, d) - } - } - case "nonResourceURLs": - if r.TryDecodeAsNil() { - x.NonResourceURLs = nil - } else { - yyv12 := &x.NonResourceURLs - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecSliceStringX(yyv12, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Verbs = nil - } else { - yyv15 := &x.Verbs - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - z.F.DecSliceStringX(yyv15, false, d) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIGroups = nil - } else { - yyv17 := &x.APIGroups - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - z.F.DecSliceStringX(yyv17, false, d) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resources = nil - } else { - yyv19 := &x.Resources - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - z.F.DecSliceStringX(yyv19, false, d) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceNames = nil - } else { - yyv21 := &x.ResourceNames - yym22 := z.DecBinary() - _ = yym22 - if false { - } else 
{ - z.F.DecSliceStringX(yyv21, false, d) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NonResourceURLs = nil - } else { - yyv23 := &x.NonResourceURLs - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - z.F.DecSliceStringX(yyv23, false, d) - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Subject) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.APIGroup != "" - yyq2[3] = x.Namespace != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiGroup")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Subject) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Subject) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiGroup": - if r.TryDecodeAsNil() { - x.APIGroup = "" - } else { - yyv6 := &x.APIGroup - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv8 := &x.Name - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - yyv10 := &x.Namespace - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Subject) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.APIGroup = "" - } else { - yyv15 := &x.APIGroup - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv17 := &x.Name - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - yyv19 := &x.Namespace - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RoleRef) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiGroup")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RoleRef) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RoleRef) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "apiGroup": - if r.TryDecodeAsNil() { - x.APIGroup = "" - } else { - yyv4 := &x.APIGroup - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv6 := &x.Kind - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv8 := &x.Name - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RoleRef) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIGroup = "" - } else { - yyv11 := &x.APIGroup - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*string)(yyv11)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Name = "" - } else { - yyv15 := &x.Name - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Role) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Rules == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rules")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Rules == nil { - r.EncodeNil() - } else { - 
yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Role) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Role) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "rules": - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv10 := &x.Rules - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Role) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv19 := &x.Rules - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { 
- z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Subjects == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceSubject(([]Subject)(x.Subjects), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subjects")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Subjects == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceSubject(([]Subject)(x.Subjects), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy18 := &x.RoleRef - yy18.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("roleRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy20 := &x.RoleRef - yy20.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if 
z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "subjects": - if r.TryDecodeAsNil() { - x.Subjects = nil - } else { - yyv10 := &x.Subjects - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceSubject((*[]Subject)(yyv10), d) - } - } - case "roleRef": - if r.TryDecodeAsNil() { - x.RoleRef = RoleRef{} - } else { - yyv12 := &x.RoleRef - yyv12.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv14 := &x.Kind - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv16 := &x.APIVersion - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv18 := &x.ObjectMeta - yym19 := z.DecBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.DecExt(yyv18) { - } else { - z.DecFallback(yyv18, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subjects = nil - } else { - yyv20 := &x.Subjects - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - h.decSliceSubject((*[]Subject)(yyv20), d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RoleRef = RoleRef{} - } else { - yyv22 := &x.RoleRef - yyv22.CodecDecodeSelf(d) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := 
z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 
== codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceRoleBinding((*[]RoleBinding)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceRoleBinding((*[]RoleBinding)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RoleList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceRole(([]Role)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceRole(([]Role)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RoleList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceRole((*[]Role)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceRole((*[]Role)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterRole) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := 
&x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Rules == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rules")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Rules == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterRole) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterRole) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "rules": - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv10 := &x.Rules - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - 
h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterRole) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv17 := &x.ObjectMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv19 := &x.Rules - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterRoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Subjects == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceSubject(([]Subject)(x.Subjects), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subjects")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Subjects == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceSubject(([]Subject)(x.Subjects), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy18 := &x.RoleRef - yy18.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("roleRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy20 := &x.RoleRef - yy20.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterRoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterRoleBinding) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "subjects": - if r.TryDecodeAsNil() { - x.Subjects = nil - } else { - yyv10 := &x.Subjects - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceSubject((*[]Subject)(yyv10), d) - } - } - case "roleRef": - if r.TryDecodeAsNil() { - x.RoleRef = RoleRef{} - } else { - yyv12 := &x.RoleRef - yyv12.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterRoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv14 := &x.Kind - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*string)(yyv14)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv16 := &x.APIVersion - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*string)(yyv16)) = r.DecodeString() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv18 := &x.ObjectMeta - yym19 := z.DecBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.DecExt(yyv18) { - } else { - z.DecFallback(yyv18, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subjects = nil - } else { - yyv20 
:= &x.Subjects - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - h.decSliceSubject((*[]Subject)(yyv20), d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RoleRef = RoleRef{} - } else { - yyv22 := &x.RoleRef - yyv22.CodecDecodeSelf(d) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterRoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false 
{ - } else { - h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterRoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterRoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterRoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterRoleList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceClusterRole(([]ClusterRole)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceClusterRole(([]ClusterRole)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterRoleList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterRoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := 
z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceClusterRole((*[]ClusterRole)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterRoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceClusterRole((*[]ClusterRole)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSlicePolicyRule(v []PolicyRule, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePolicyRule(v *[]PolicyRule, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PolicyRule{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } 
else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PolicyRule, yyrl1) - } - } else { - yyv1 = make([]PolicyRule, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PolicyRule{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PolicyRule{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PolicyRule{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PolicyRule{}) // var yyz1 PolicyRule - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PolicyRule{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PolicyRule{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceSubject(v []Subject, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceSubject(v *[]Subject, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Subject{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Subject, yyrl1) - } - } else { - yyv1 = make([]Subject, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Subject{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Subject{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Subject{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Subject{}) // var yyz1 Subject - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] 
= Subject{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Subject{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceRoleBinding(v []RoleBinding, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceRoleBinding(v *[]RoleBinding, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []RoleBinding{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]RoleBinding, yyrl1) - } - } else { - yyv1 = make([]RoleBinding, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = RoleBinding{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, RoleBinding{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = RoleBinding{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, RoleBinding{}) // var yyz1 RoleBinding - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = RoleBinding{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []RoleBinding{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceRole(v []Role, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceRole(v *[]Role, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Role{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) 
{ - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Role, yyrl1) - } - } else { - yyv1 = make([]Role, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Role{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Role{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Role{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Role{}) // var yyz1 Role - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Role{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Role{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceClusterRoleBinding(v []ClusterRoleBinding, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceClusterRoleBinding(v *[]ClusterRoleBinding, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ClusterRoleBinding{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ClusterRoleBinding, yyrl1) - } - } else { - yyv1 = make([]ClusterRoleBinding, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRoleBinding{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ClusterRoleBinding{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRoleBinding{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ClusterRoleBinding{}) // var yyz1 ClusterRoleBinding - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRoleBinding{} - } else { - yyv4 := 
&yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ClusterRoleBinding{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceClusterRole(v []ClusterRole, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceClusterRole(v *[]ClusterRole, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ClusterRole{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ClusterRole, yyrl1) - } - } else { - yyv1 = make([]ClusterRole, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRole{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ClusterRole{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRole{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ClusterRole{}) // var yyz1 ClusterRole - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRole{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ClusterRole{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.conversion.go deleted file mode 100644 index 6a9b31f9d..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,389 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1beta1 - -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - rbac "k8s.io/client-go/pkg/apis/rbac" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_ClusterRole_To_rbac_ClusterRole, - Convert_rbac_ClusterRole_To_v1beta1_ClusterRole, - Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding, - Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding, - Convert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder, - Convert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder, - Convert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList, - Convert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList, - Convert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList, - Convert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList, - Convert_v1beta1_PolicyRule_To_rbac_PolicyRule, - Convert_rbac_PolicyRule_To_v1beta1_PolicyRule, - Convert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder, - Convert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder, - Convert_v1beta1_Role_To_rbac_Role, - Convert_rbac_Role_To_v1beta1_Role, - Convert_v1beta1_RoleBinding_To_rbac_RoleBinding, - Convert_rbac_RoleBinding_To_v1beta1_RoleBinding, - Convert_v1beta1_RoleBindingList_To_rbac_RoleBindingList, - Convert_rbac_RoleBindingList_To_v1beta1_RoleBindingList, - Convert_v1beta1_RoleList_To_rbac_RoleList, - Convert_rbac_RoleList_To_v1beta1_RoleList, - Convert_v1beta1_RoleRef_To_rbac_RoleRef, - Convert_rbac_RoleRef_To_v1beta1_RoleRef, - Convert_v1beta1_Subject_To_rbac_Subject, - Convert_rbac_Subject_To_v1beta1_Subject, - ) -} - -func autoConvert_v1beta1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) - return nil -} - -func Convert_v1beta1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterRole_To_rbac_ClusterRole(in, out, s) -} - -func autoConvert_rbac_ClusterRole_To_v1beta1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if in.Rules == nil { - out.Rules = make([]PolicyRule, 0) - } else { - out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) - } - return nil -} - -func Convert_rbac_ClusterRole_To_v1beta1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { - return autoConvert_rbac_ClusterRole_To_v1beta1_ClusterRole(in, out, s) -} - -func autoConvert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects)) - if err := Convert_v1beta1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out 
*rbac.ClusterRoleBinding, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in, out, s) -} - -func autoConvert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if in.Subjects == nil { - out.Subjects = make([]Subject, 0) - } else { - out.Subjects = *(*[]Subject)(unsafe.Pointer(&in.Subjects)) - } - if err := Convert_rbac_RoleRef_To_v1beta1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { - return err - } - return nil -} - -func Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { - return autoConvert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in, out, s) -} - -func autoConvert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { - if err := Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in, out, s) -} - -func autoConvert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { - if err := Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { - return err - } - return nil -} - -func Convert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { - return autoConvert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder(in, out, s) -} - -func autoConvert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]rbac.ClusterRoleBinding)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s) -} - -func autoConvert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]ClusterRoleBinding, 0) - } else { - out.Items = *(*[]ClusterRoleBinding)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { - return autoConvert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in, out, s) -} - -func autoConvert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]rbac.ClusterRole)(unsafe.Pointer(&in.Items)) - return 
nil -} - -func Convert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s) -} - -func autoConvert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]ClusterRole, 0) - } else { - out.Items = *(*[]ClusterRole)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { - return autoConvert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in, out, s) -} - -func autoConvert_v1beta1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { - out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) - out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) - out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) - out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) - out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) - return nil -} - -func Convert_v1beta1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { - return autoConvert_v1beta1_PolicyRule_To_rbac_PolicyRule(in, out, s) -} - -func autoConvert_rbac_PolicyRule_To_v1beta1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { - if in.Verbs == nil { - out.Verbs = make([]string, 0) - } else { - out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) - } - out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) - out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) - out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) - out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) - return nil -} - -func Convert_rbac_PolicyRule_To_v1beta1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { - return autoConvert_rbac_PolicyRule_To_v1beta1_PolicyRule(in, out, s) -} - -func autoConvert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { - if err := Convert_v1beta1_PolicyRule_To_rbac_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { - return autoConvert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in, out, s) -} - -func autoConvert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { - if err := Convert_rbac_PolicyRule_To_v1beta1_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { - return err - } - return nil -} - -func Convert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { - return autoConvert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder(in, out, s) -} - -func autoConvert_v1beta1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) - return nil -} - -func Convert_v1beta1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { - return 
autoConvert_v1beta1_Role_To_rbac_Role(in, out, s) -} - -func autoConvert_rbac_Role_To_v1beta1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if in.Rules == nil { - out.Rules = make([]PolicyRule, 0) - } else { - out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) - } - return nil -} - -func Convert_rbac_Role_To_v1beta1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { - return autoConvert_rbac_Role_To_v1beta1_Role(in, out, s) -} - -func autoConvert_v1beta1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects)) - if err := Convert_v1beta1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { - return autoConvert_v1beta1_RoleBinding_To_rbac_RoleBinding(in, out, s) -} - -func autoConvert_rbac_RoleBinding_To_v1beta1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if in.Subjects == nil { - out.Subjects = make([]Subject, 0) - } else { - out.Subjects = *(*[]Subject)(unsafe.Pointer(&in.Subjects)) - } - if err := Convert_rbac_RoleRef_To_v1beta1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { - return err - } - return nil -} - -func Convert_rbac_RoleBinding_To_v1beta1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { - return autoConvert_rbac_RoleBinding_To_v1beta1_RoleBinding(in, out, s) -} - -func autoConvert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]rbac.RoleBinding)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { - return autoConvert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in, out, s) -} - -func autoConvert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]RoleBinding, 0) - } else { - out.Items = *(*[]RoleBinding)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { - return autoConvert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in, out, s) -} - -func autoConvert_v1beta1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]rbac.Role)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1beta1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { - return autoConvert_v1beta1_RoleList_To_rbac_RoleList(in, out, s) -} - -func autoConvert_rbac_RoleList_To_v1beta1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]Role, 0) - } else { - out.Items = *(*[]Role)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_rbac_RoleList_To_v1beta1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { - return autoConvert_rbac_RoleList_To_v1beta1_RoleList(in, 
out, s) -} - -func autoConvert_v1beta1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error { - out.APIGroup = in.APIGroup - out.Kind = in.Kind - out.Name = in.Name - return nil -} - -func Convert_v1beta1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error { - return autoConvert_v1beta1_RoleRef_To_rbac_RoleRef(in, out, s) -} - -func autoConvert_rbac_RoleRef_To_v1beta1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error { - out.APIGroup = in.APIGroup - out.Kind = in.Kind - out.Name = in.Name - return nil -} - -func Convert_rbac_RoleRef_To_v1beta1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error { - return autoConvert_rbac_RoleRef_To_v1beta1_RoleRef(in, out, s) -} - -func autoConvert_v1beta1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { - out.Kind = in.Kind - out.APIGroup = in.APIGroup - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} - -func Convert_v1beta1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { - return autoConvert_v1beta1_Subject_To_rbac_Subject(in, out, s) -} - -func autoConvert_rbac_Subject_To_v1beta1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { - out.Kind = in.Kind - out.APIGroup = in.APIGroup - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} - -func Convert_rbac_Subject_To_v1beta1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { - return autoConvert_rbac_Subject_To_v1beta1_Subject(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index dccae9b4f..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,258 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRole, InType: reflect.TypeOf(&ClusterRole{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRoleBinding, InType: reflect.TypeOf(&ClusterRoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRoleBindingList, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRoleList, InType: reflect.TypeOf(&ClusterRoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PolicyRule, InType: reflect.TypeOf(&PolicyRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Role, InType: reflect.TypeOf(&Role{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleBinding, InType: reflect.TypeOf(&RoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleBindingList, InType: reflect.TypeOf(&RoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleList, InType: reflect.TypeOf(&RoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleRef, InType: reflect.TypeOf(&RoleRef{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Subject, InType: reflect.TypeOf(&Subject{})}, - ) -} - -func DeepCopy_v1beta1_ClusterRole(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterRole) - out := out.(*ClusterRole) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_ClusterRoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterRoleBinding) - out := out.(*ClusterRoleBinding) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1beta1_ClusterRoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterRoleBindingList) - out := out.(*ClusterRoleBindingList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterRoleBinding, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_ClusterRoleBinding(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_ClusterRoleList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterRoleList) - out := out.(*ClusterRoleList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterRole, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_ClusterRole(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_PolicyRule(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PolicyRule) - out := out.(*PolicyRule) - *out = *in - if in.Verbs != nil { - in, out := &in.Verbs, &out.Verbs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.APIGroups != nil { - 
in, out := &in.APIGroups, &out.APIGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ResourceNames != nil { - in, out := &in.ResourceNames, &out.ResourceNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.NonResourceURLs != nil { - in, out := &in.NonResourceURLs, &out.NonResourceURLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1beta1_Role(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Role) - out := out.(*Role) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_RoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RoleBinding) - out := out.(*RoleBinding) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_v1beta1_RoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RoleBindingList) - out := out.(*RoleBindingList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]RoleBinding, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_RoleBinding(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_RoleList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RoleList) - out := out.(*RoleList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Role, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_Role(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1beta1_RoleRef(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RoleRef) - out := out.(*RoleRef) - *out = *in - return nil - } -} - -func DeepCopy_v1beta1_Subject(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Subject) - out := out.(*Subject) - *out = *in - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.defaults.go deleted file mode 100644 index fa5bfb6ab..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.defaults.go +++ /dev/null @@ -1,66 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by defaulter-gen. Do not edit it manually! - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&ClusterRoleBinding{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBinding(obj.(*ClusterRoleBinding)) }) - scheme.AddTypeDefaultingFunc(&ClusterRoleBindingList{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBindingList(obj.(*ClusterRoleBindingList)) }) - scheme.AddTypeDefaultingFunc(&RoleBinding{}, func(obj interface{}) { SetObjectDefaults_RoleBinding(obj.(*RoleBinding)) }) - scheme.AddTypeDefaultingFunc(&RoleBindingList{}, func(obj interface{}) { SetObjectDefaults_RoleBindingList(obj.(*RoleBindingList)) }) - return nil -} - -func SetObjectDefaults_ClusterRoleBinding(in *ClusterRoleBinding) { - SetDefaults_ClusterRoleBinding(in) - for i := range in.Subjects { - a := &in.Subjects[i] - SetDefaults_Subject(a) - } -} - -func SetObjectDefaults_ClusterRoleBindingList(in *ClusterRoleBindingList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_ClusterRoleBinding(a) - } -} - -func SetObjectDefaults_RoleBinding(in *RoleBinding) { - SetDefaults_RoleBinding(in) - for i := range in.Subjects { - a := &in.Subjects[i] - SetDefaults_Subject(a) - } -} - -func SetObjectDefaults_RoleBindingList(in *RoleBindingList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_RoleBinding(a) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/rbac/zz_generated.deepcopy.go deleted file mode 100644 index db5d08cdf..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/rbac/zz_generated.deepcopy.go +++ /dev/null @@ -1,258 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package rbac - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_ClusterRole, InType: reflect.TypeOf(&ClusterRole{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_ClusterRoleBinding, InType: reflect.TypeOf(&ClusterRoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_ClusterRoleBindingList, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_ClusterRoleList, InType: reflect.TypeOf(&ClusterRoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_PolicyRule, InType: reflect.TypeOf(&PolicyRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_Role, InType: reflect.TypeOf(&Role{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_RoleBinding, InType: reflect.TypeOf(&RoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_RoleBindingList, InType: reflect.TypeOf(&RoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_RoleList, InType: reflect.TypeOf(&RoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_RoleRef, InType: reflect.TypeOf(&RoleRef{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_Subject, InType: reflect.TypeOf(&Subject{})}, - ) -} - -func DeepCopy_rbac_ClusterRole(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterRole) - out := out.(*ClusterRole) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - if err := DeepCopy_rbac_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_rbac_ClusterRoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterRoleBinding) - out := out.(*ClusterRoleBinding) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_rbac_ClusterRoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterRoleBindingList) - out := out.(*ClusterRoleBindingList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterRoleBinding, len(*in)) - for i := range *in { - if err := DeepCopy_rbac_ClusterRoleBinding(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_rbac_ClusterRoleList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterRoleList) - out := out.(*ClusterRoleList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterRole, len(*in)) - for i := range *in { - if err := DeepCopy_rbac_ClusterRole(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_rbac_PolicyRule(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PolicyRule) - out := out.(*PolicyRule) - *out = *in - if in.Verbs != nil { - in, out := &in.Verbs, &out.Verbs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.APIGroups != nil { - in, out := &in.APIGroups, &out.APIGroups - *out = 
make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ResourceNames != nil { - in, out := &in.ResourceNames, &out.ResourceNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.NonResourceURLs != nil { - in, out := &in.NonResourceURLs, &out.NonResourceURLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_rbac_Role(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Role) - out := out.(*Role) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - if err := DeepCopy_rbac_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_rbac_RoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RoleBinding) - out := out.(*RoleBinding) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - copy(*out, *in) - } - return nil - } -} - -func DeepCopy_rbac_RoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RoleBindingList) - out := out.(*RoleBindingList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]RoleBinding, len(*in)) - for i := range *in { - if err := DeepCopy_rbac_RoleBinding(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_rbac_RoleList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RoleList) - out := out.(*RoleList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Role, len(*in)) - for i := range *in { - if err := DeepCopy_rbac_Role(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_rbac_RoleRef(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RoleRef) - out := out.(*RoleRef) - *out = *in - return nil - } -} - -func DeepCopy_rbac_Subject(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Subject) - out := out.(*Subject) - *out = *in - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/doc.go b/vendor/k8s.io/client-go/pkg/apis/settings/doc.go deleted file mode 100644 index 90ccf882a..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/settings/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// +groupName=settings.k8s.io -package settings diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/install/install.go b/vendor/k8s.io/client-go/pkg/apis/settings/install/install.go deleted file mode 100644 index ccdc70400..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/settings/install/install.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the settings API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/apis/settings" - "k8s.io/client-go/pkg/apis/settings/v1alpha1" -) - -func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: settings.GroupName, - VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version}, - ImportPrefix: "k8s.io/client-go/pkg/apis/settings", - AddInternalObjectsToScheme: settings.AddToScheme, - }, - announced.VersionToSchemeFunc{ - v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme, - }, - ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/register.go b/vendor/k8s.io/client-go/pkg/apis/settings/register.go deleted file mode 100644 index 858470127..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/settings/register.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package settings - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// GroupName is the group name use in this package -const GroupName = "settings.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &PodPreset{}, - &PodPresetList{}, - ) - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/types.go b/vendor/k8s.io/client-go/pkg/apis/settings/types.go deleted file mode 100644 index f89a16d25..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/settings/types.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package settings - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api" -) - -// +genclient=true - -// PodPreset is a policy resource that defines additional runtime -// requirements for a Pod. -type PodPreset struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // +optional - Spec PodPresetSpec -} - -// PodPresetSpec is a description of a pod injection policy. -type PodPresetSpec struct { - // Selector is a label query over a set of resources, in this case pods. - // Required. - Selector metav1.LabelSelector - // Env defines the collection of EnvVar to inject into containers. - // +optional - Env []api.EnvVar - // EnvFrom defines the collection of EnvFromSource to inject into containers. - // +optional - EnvFrom []api.EnvFromSource - // Volumes defines the collection of Volume to inject into the pod. - // +optional - Volumes []api.Volume - // VolumeMounts defines the collection of VolumeMount to inject into containers. - // +optional - VolumeMounts []api.VolumeMount -} - -// PodPresetList is a list of PodPreset objects. -type PodPresetList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []PodPreset -} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index 304af7ea0..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,159 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1alpha1 - -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/client-go/pkg/api" - v1 "k8s.io/client-go/pkg/api/v1" - settings "k8s.io/client-go/pkg/apis/settings" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1alpha1_PodPreset_To_settings_PodPreset, - Convert_settings_PodPreset_To_v1alpha1_PodPreset, - Convert_v1alpha1_PodPresetList_To_settings_PodPresetList, - Convert_settings_PodPresetList_To_v1alpha1_PodPresetList, - Convert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec, - Convert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec, - ) -} - -func autoConvert_v1alpha1_PodPreset_To_settings_PodPreset(in *PodPreset, out *settings.PodPreset, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_PodPreset_To_settings_PodPreset(in *PodPreset, out *settings.PodPreset, s conversion.Scope) error { - return autoConvert_v1alpha1_PodPreset_To_settings_PodPreset(in, out, s) -} - -func autoConvert_settings_PodPreset_To_v1alpha1_PodPreset(in *settings.PodPreset, out *PodPreset, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_settings_PodPreset_To_v1alpha1_PodPreset(in *settings.PodPreset, out *PodPreset, s conversion.Scope) error { - return autoConvert_settings_PodPreset_To_v1alpha1_PodPreset(in, out, s) -} - -func autoConvert_v1alpha1_PodPresetList_To_settings_PodPresetList(in *PodPresetList, out *settings.PodPresetList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]settings.PodPreset, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_PodPreset_To_settings_PodPreset(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1alpha1_PodPresetList_To_settings_PodPresetList(in *PodPresetList, out *settings.PodPresetList, s conversion.Scope) error { - return autoConvert_v1alpha1_PodPresetList_To_settings_PodPresetList(in, out, s) -} - -func autoConvert_settings_PodPresetList_To_v1alpha1_PodPresetList(in *settings.PodPresetList, out *PodPresetList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodPreset, len(*in)) - for i := range *in { - if err := 
Convert_settings_PodPreset_To_v1alpha1_PodPreset(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = make([]PodPreset, 0) - } - return nil -} - -func Convert_settings_PodPresetList_To_v1alpha1_PodPresetList(in *settings.PodPresetList, out *PodPresetList, s conversion.Scope) error { - return autoConvert_settings_PodPresetList_To_v1alpha1_PodPresetList(in, out, s) -} - -func autoConvert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(in *PodPresetSpec, out *settings.PodPresetSpec, s conversion.Scope) error { - out.Selector = in.Selector - out.Env = *(*[]api.EnvVar)(unsafe.Pointer(&in.Env)) - out.EnvFrom = *(*[]api.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]api.Volume, len(*in)) - for i := range *in { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - out.VolumeMounts = *(*[]api.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) - return nil -} - -func Convert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(in *PodPresetSpec, out *settings.PodPresetSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(in, out, s) -} - -func autoConvert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(in *settings.PodPresetSpec, out *PodPresetSpec, s conversion.Scope) error { - out.Selector = in.Selector - out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) - out.EnvFrom = *(*[]v1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) - for i := range *in { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) - return nil -} - -func Convert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(in *settings.PodPresetSpec, out *PodPresetSpec, s conversion.Scope) error { - return autoConvert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 017d8ccb0..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,124 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
- -package v1alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - api_v1 "k8s.io/client-go/pkg/api/v1" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_PodPreset, InType: reflect.TypeOf(&PodPreset{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_PodPresetList, InType: reflect.TypeOf(&PodPresetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_PodPresetSpec, InType: reflect.TypeOf(&PodPresetSpec{})}, - ) -} - -func DeepCopy_v1alpha1_PodPreset(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodPreset) - out := out.(*PodPreset) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_v1alpha1_PodPresetSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1alpha1_PodPresetList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodPresetList) - out := out.(*PodPresetList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodPreset, len(*in)) - for i := range *in { - if err := DeepCopy_v1alpha1_PodPreset(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_v1alpha1_PodPresetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodPresetSpec) - out := out.(*PodPresetSpec) - *out = *in - if newVal, err := c.DeepCopy(&in.Selector); err != nil { - return err - } else { - out.Selector = *newVal.(*v1.LabelSelector) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]api_v1.EnvVar, len(*in)) - for i := range *in { - if err := api_v1.DeepCopy_v1_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.EnvFrom != nil { - in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]api_v1.EnvFromSource, len(*in)) - for i := range *in { - if err := api_v1.DeepCopy_v1_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]api_v1.Volume, len(*in)) - for i := range *in { - if err := api_v1.DeepCopy_v1_Volume(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]api_v1.VolumeMount, len(*in)) - copy(*out, *in) - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.defaults.go deleted file mode 100644 index c178a3072..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.defaults.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by defaulter-gen. Do not edit it manually! - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/client-go/pkg/api/v1" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&PodPreset{}, func(obj interface{}) { SetObjectDefaults_PodPreset(obj.(*PodPreset)) }) - scheme.AddTypeDefaultingFunc(&PodPresetList{}, func(obj interface{}) { SetObjectDefaults_PodPresetList(obj.(*PodPresetList)) }) - return nil -} - -func SetObjectDefaults_PodPreset(in *PodPreset) { - for i := range in.Spec.Env { - a := &in.Spec.Env[i] - if a.ValueFrom != nil { - if a.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(a.ValueFrom.FieldRef) - } - } - } - for i := range in.Spec.Volumes { - a := &in.Spec.Volumes[i] - v1.SetDefaults_Volume(a) - if a.VolumeSource.Secret != nil { - v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - if a.VolumeSource.Projected != nil { - v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) - for j := range a.VolumeSource.Projected.Sources { - b := &a.VolumeSource.Projected.Sources[j] - if b.DownwardAPI != nil { - for k := range b.DownwardAPI.Items { - c := &b.DownwardAPI.Items[k] - if c.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(c.FieldRef) - } - } - } - } - } - if a.VolumeSource.ScaleIO != nil { - v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) - } - } -} - -func SetObjectDefaults_PodPresetList(in *PodPresetList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_PodPreset(a) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/settings/zz_generated.deepcopy.go deleted file mode 100644 index eb109b553..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/settings/zz_generated.deepcopy.go +++ /dev/null @@ -1,124 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package settings - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/client-go/pkg/api" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_settings_PodPreset, InType: reflect.TypeOf(&PodPreset{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_settings_PodPresetList, InType: reflect.TypeOf(&PodPresetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_settings_PodPresetSpec, InType: reflect.TypeOf(&PodPresetSpec{})}, - ) -} - -func DeepCopy_settings_PodPreset(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodPreset) - out := out.(*PodPreset) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_settings_PodPresetSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_settings_PodPresetList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodPresetList) - out := out.(*PodPresetList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodPreset, len(*in)) - for i := range *in { - if err := DeepCopy_settings_PodPreset(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} - -func DeepCopy_settings_PodPresetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*PodPresetSpec) - out := out.(*PodPresetSpec) - *out = *in - if newVal, err := c.DeepCopy(&in.Selector); err != nil { - return err - } else { - out.Selector = *newVal.(*v1.LabelSelector) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]api.EnvVar, len(*in)) - for i := range *in { - if err := api.DeepCopy_api_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.EnvFrom != nil { - in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]api.EnvFromSource, len(*in)) - for i := range *in { - if err := api.DeepCopy_api_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]api.Volume, len(*in)) - for i := range *in { - if err := api.DeepCopy_api_Volume(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]api.VolumeMount, len(*in)) - copy(*out, *in) - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/OWNERS b/vendor/k8s.io/client-go/pkg/apis/storage/OWNERS deleted file mode 100755 index d59ed6e1d..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/OWNERS +++ 
/dev/null @@ -1,3 +0,0 @@ -reviewers: -- deads2k -- mbohlool diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/install/install.go b/vendor/k8s.io/client-go/pkg/apis/storage/install/install.go deleted file mode 100644 index fb590fecd..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/install/install.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the experimental API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/apis/storage" - "k8s.io/client-go/pkg/apis/storage/v1" - "k8s.io/client-go/pkg/apis/storage/v1beta1" -) - -func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: storage.GroupName, - // TODO: change the order when GKE supports v1 - VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version, v1.SchemeGroupVersion.Version}, - ImportPrefix: "k8s.io/client-go/pkg/apis/storage", - RootScopedKinds: sets.NewString("StorageClass"), - AddInternalObjectsToScheme: storage.AddToScheme, - }, - announced.VersionToSchemeFunc{ - v1.SchemeGroupVersion.Version: v1.AddToScheme, - v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, - }, - ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/register.go b/vendor/k8s.io/client-go/pkg/apis/storage/register.go deleted file mode 100644 index aaa619b4d..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/register.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package storage - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "storage.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &StorageClass{}, - &StorageClassList{}, - ) - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/types.go b/vendor/k8s.io/client-go/pkg/apis/storage/types.go deleted file mode 100644 index 3d589de5c..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/types.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient=true -// +nonNamespaced=true - -// StorageClass describes a named "class" of storage offered in a cluster. -// Different classes might map to quality-of-service levels, or to backup policies, -// or to arbitrary policies determined by the cluster administrators. Kubernetes -// itself is unopinionated about what classes represent. This concept is sometimes -// called "profiles" in other storage systems. -// The name of a StorageClass object is significant, and is how users can request a particular class. -type StorageClass struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // provisioner is the driver expected to handle this StorageClass. - // This is an optionally-prefixed name, like a label key. - // For example: "kubernetes.io/gce-pd" or "kubernetes.io/aws-ebs". - // This value may not be empty. - Provisioner string - - // parameters holds parameters for the provisioner. - // These values are opaque to the system and are passed directly - // to the provisioner. The only validation done on keys is that they are - // not empty. The maximum number of parameters is - // 512, with a cumulative max size of 256K - // +optional - Parameters map[string]string -} - -// StorageClassList is a collection of storage classes. 
-type StorageClassList struct { - metav1.TypeMeta - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta - - // Items is the list of StorageClasses - Items []StorageClass -} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.pb.go deleted file mode 100644 index 531282ba6..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.pb.go +++ /dev/null @@ -1,730 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/storage/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/apis/storage/v1/generated.proto - - It has these top-level messages: - StorageClass - StorageClassList -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *StorageClass) Reset() { *m = StorageClass{} } -func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *StorageClassList) Reset() { *m = StorageClassList{} } -func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func init() { - proto.RegisterType((*StorageClass)(nil), "k8s.io.client-go.pkg.apis.storage.v1.StorageClass") - proto.RegisterType((*StorageClassList)(nil), "k8s.io.client-go.pkg.apis.storage.v1.StorageClassList") -} -func (m *StorageClass) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *StorageClass) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Provisioner))) - i += copy(data[i:], m.Provisioner) - if len(m.Parameters) > 0 { - for k := range m.Parameters { - data[i] = 0x1a - i++ - v := m.Parameters[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - return i, nil -} - -func (m *StorageClassList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *StorageClassList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *StorageClass) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Provisioner) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Parameters) > 0 { - for k, v := range m.Parameters { - _ = k - 
_ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *StorageClassList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *StorageClass) String() string { - if this == nil { - return "nil" - } - keysForParameters := make([]string, 0, len(this.Parameters)) - for k := range this.Parameters { - keysForParameters = append(keysForParameters, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - mapStringForParameters := "map[string]string{" - for _, k := range keysForParameters { - mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) - } - mapStringForParameters += "}" - s := strings.Join([]string{`&StorageClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, - `Parameters:` + mapStringForParameters + `,`, - `}`, - }, "") - return s -} -func (this *StorageClassList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *StorageClass) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = 
%d for field Provisioner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provisioner = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.Parameters == nil { - m.Parameters = make(map[string]string) - } - m.Parameters[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StorageClassList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx 
>= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, StorageClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err 
:= skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -var fileDescriptorGenerated = []byte{ - // 474 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x92, 0x4f, 0x6f, 0xd3, 0x30, - 0x18, 0xc6, 0xe3, 0x54, 0x95, 0x36, 0x17, 0x44, 0x15, 0x38, 0x54, 0x3d, 0x64, 0xd5, 0x04, 0x52, - 0x2f, 0xd8, 0x74, 0x63, 0x68, 0x42, 0xe2, 0xd2, 0x89, 0x03, 0x12, 0x88, 0x29, 0x5c, 0x10, 0xe2, - 0x80, 0xdb, 0xbd, 0xa4, 0x26, 0x4d, 0x1c, 0xd9, 0x6f, 0x02, 0xbd, 0xf1, 0x11, 0xf8, 0x58, 0x15, - 0xa7, 0x1d, 0x39, 0x0d, 0x1a, 0xbe, 0x08, 0xca, 0x1f, 0x96, 0x88, 0x6c, 0xa2, 0xda, 0x2d, 0xaf, - 0xed, 0xdf, 0xe3, 0xe7, 0x79, 0x1c, 0x7a, 0x14, 0x1c, 0x1b, 0x26, 0x15, 0x0f, 0x92, 0x19, 0xe8, - 0x08, 0x10, 0x0c, 0x8f, 0x03, 0x9f, 0x8b, 0x58, 0x1a, 0x6e, 0x50, 0x69, 0xe1, 0x03, 0x4f, 0x27, - 0xdc, 0x87, 0x08, 0xb4, 0x40, 0x38, 0x63, 0xb1, 0x56, 0xa8, 0x9c, 0x07, 0x25, 0xc6, 0x6a, 0x8c, - 0xc5, 0x81, 0xcf, 0x72, 0x8c, 0x55, 0x18, 0x4b, 0x27, 0xc3, 0x87, 0xbe, 0xc4, 0x45, 0x32, 0x63, - 0x73, 0x15, 0x72, 0x5f, 0xf9, 0x8a, 0x17, 0xf4, 0x2c, 0xf9, 0x58, 0x4c, 0xc5, 0x50, 0x7c, 0x95, - 0xaa, 0xc3, 0xc7, 0x95, 0x19, 0x11, 0xcb, 0x50, 0xcc, 0x17, 0x32, 0x02, 0xbd, 0xaa, 0xed, 0x84, - 0x80, 0xe2, 0x0a, 0x2f, 0x43, 0x7e, 0x1d, 0xa5, 0x93, 0x08, 0x65, 0x08, 0x2d, 0xe0, 0xc9, 0xff, - 0x00, 0x33, 0x5f, 0x40, 0x28, 0x5a, 0xdc, 0xe1, 0x75, 0x5c, 0x82, 0x72, 0xc9, 0x65, 0x84, 0x06, - 0x75, 0x0b, 0x6a, 0x64, 0x32, 0xa0, 0x53, 0xd0, 0x75, 0x20, 0xf8, 0x22, 0xc2, 0x78, 0x79, 0x55, - 0xbf, 0xfb, 0x3f, 0x6d, 0x7a, 0xeb, 0x4d, 0xd9, 0xe3, 0xc9, 0x52, 0x18, 0xe3, 0x7c, 0xa0, 0x3b, - 0x79, 0xfe, 0x33, 0x81, 0x62, 0x40, 0x46, 0x64, 0xdc, 0x3b, 0x78, 0xc4, 0xaa, 0x37, 0x68, 0xda, - 0xa9, 0x5f, 0x21, 0x3f, 0xcd, 0xd2, 0x09, 0x7b, 0x3d, 0xfb, 0x04, 0x73, 0x7c, 0x05, 0x28, 0xa6, - 0xce, 0xfa, 0x62, 0xcf, 0xca, 0x2e, 0xf6, 0x68, 0xbd, 0xe6, 0x5d, 0xaa, 0x3a, 0x47, 0xb4, 0x17, - 0x6b, 0x95, 0x4a, 0x23, 0x55, 0x04, 0x7a, 0x60, 0x8f, 0xc8, 0x78, 0x77, 0x7a, 0xb7, 0x42, 0x7a, - 0xa7, 0xf5, 0x96, 0xd7, 0x3c, 0xe7, 0x7c, 0xa6, 0x34, 0x16, 0x5a, 0x84, 0x80, 0xa0, 0xcd, 0xa0, - 0x33, 0xea, 0x8c, 0x7b, 0x07, 0x27, 0x6c, 0xab, 0xdf, 0x83, 0x35, 0x13, 0xb2, 0xd3, 0x4b, 0x95, - 0xe7, 0x11, 0xea, 0x55, 0xed, 0xb6, 0xde, 0xf0, 0x1a, 0x57, 0x0d, 0x9f, 0xd1, 0x3b, 0xff, 0x20, - 0x4e, 0x9f, 0x76, 0x02, 0x58, 0x15, 0xfd, 0xec, 0x7a, 0xf9, 0xa7, 0x73, 0x8f, 0x76, 0x53, 0xb1, - 0x4c, 0xa0, 0x8c, 0xe3, 0x95, 0xc3, 0x53, 0xfb, 0x98, 0xec, 0x7f, 0x27, 0xb4, 0xdf, 0xbc, 0xff, - 0xa5, 0x34, 0xe8, 0xbc, 0x6f, 0xb5, 0xcc, 0xb6, 0x6b, 0x39, 0xa7, 0x8b, 0x8e, 0xfb, 0x95, 0xeb, - 0x9d, 0xbf, 0x2b, 0x8d, 0x86, 0xdf, 0xd2, 0xae, 0x44, 0x08, 0xcd, 0xc0, 0x2e, 0x5a, 0x3a, 0xbc, - 0x41, 0x4b, 0xd3, 0xdb, 0x95, 0x7e, 0xf7, 0x45, 0xae, 0xe4, 0x95, 0x82, 0xd3, 0xfb, 0xeb, 0x8d, - 0x6b, 0x9d, 0x6f, 0x5c, 0xeb, 0xc7, 0xc6, 0xb5, 0xbe, 0x66, 0x2e, 0x59, 0x67, 0x2e, 0x39, 0xcf, - 0x5c, 0xf2, 0x2b, 0x73, 0xc9, 0xb7, 0xdf, 0xae, 0xf5, 0xce, 0x4e, 0x27, 0x7f, 0x02, 0x00, 0x00, - 0xff, 0xff, 0xe8, 0xe1, 0xb9, 0x93, 0xec, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/types.go 
b/vendor/k8s.io/client-go/pkg/apis/storage/v1/types.go deleted file mode 100644 index 0591b397c..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1/types.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient=true -// +nonNamespaced=true - -// StorageClass describes the parameters for a class of storage for -// which PersistentVolumes can be dynamically provisioned. -// -// StorageClasses are non-namespaced; the name of the storage class -// according to etcd is in ObjectMeta.Name. -type StorageClass struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Provisioner indicates the type of the provisioner. - Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - - // Parameters holds the parameters for the provisioner that should - // create volumes of this storage class. - // +optional - Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` -} - -// StorageClassList is a collection of storage classes. -type StorageClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of StorageClasses - Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/types_swagger_doc_generated.go deleted file mode 100644 index 6f2717708..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) 
if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_StorageClass = map[string]string{ - "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", -} - -func (StorageClass) SwaggerDoc() map[string]string { - return map_StorageClass -} - -var map_StorageClassList = map[string]string{ - "": "StorageClassList is a collection of storage classes.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", -} - -func (StorageClassList) SwaggerDoc() map[string]string { - return map_StorageClassList -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.conversion.go deleted file mode 100644 index 3c0c5d4b3..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.conversion.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1 - -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - storage "k8s.io/client-go/pkg/apis/storage" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1_StorageClass_To_storage_StorageClass, - Convert_storage_StorageClass_To_v1_StorageClass, - Convert_v1_StorageClassList_To_storage_StorageClassList, - Convert_storage_StorageClassList_To_v1_StorageClassList, - ) -} - -func autoConvert_v1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Provisioner = in.Provisioner - out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) - return nil -} - -func Convert_v1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error { - return autoConvert_v1_StorageClass_To_storage_StorageClass(in, out, s) -} - -func autoConvert_storage_StorageClass_To_v1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Provisioner = in.Provisioner - out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) - return nil -} - -func Convert_storage_StorageClass_To_v1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error { - return autoConvert_storage_StorageClass_To_v1_StorageClass(in, out, s) -} - -func autoConvert_v1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]storage.StorageClass)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error { - return autoConvert_v1_StorageClassList_To_storage_StorageClassList(in, out, s) -} - -func autoConvert_storage_StorageClassList_To_v1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]StorageClass, 0) - } else { - out.Items = *(*[]StorageClass)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_storage_StorageClassList_To_v1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error { - return autoConvert_storage_StorageClassList_To_v1_StorageClassList(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.deepcopy.go deleted file mode 100644 index 8b3786ab1..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,80 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
- -package v1 - -import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_StorageClass, InType: reflect.TypeOf(&StorageClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_StorageClassList, InType: reflect.TypeOf(&StorageClassList{})}, - ) -} - -func DeepCopy_v1_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StorageClass) - out := out.(*StorageClass) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - return nil - } -} - -func DeepCopy_v1_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StorageClassList) - out := out.(*StorageClassList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StorageClass, len(*in)) - for i := range *in { - if err := DeepCopy_v1_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/doc.go deleted file mode 100644 index 364321710..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +groupName=storage.k8s.io -package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.pb.go deleted file mode 100644 index f1130cf37..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.pb.go +++ /dev/null @@ -1,731 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto -// DO NOT EDIT! 
- -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto - - It has these top-level messages: - StorageClass - StorageClassList -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *StorageClass) Reset() { *m = StorageClass{} } -func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *StorageClassList) Reset() { *m = StorageClassList{} } -func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func init() { - proto.RegisterType((*StorageClass)(nil), "k8s.io.client-go.pkg.apis.storage.v1beta1.StorageClass") - proto.RegisterType((*StorageClassList)(nil), "k8s.io.client-go.pkg.apis.storage.v1beta1.StorageClassList") -} -func (m *StorageClass) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *StorageClass) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Provisioner))) - i += copy(data[i:], m.Provisioner) - if len(m.Parameters) > 0 { - for k := range m.Parameters { - data[i] = 0x1a - i++ - v := m.Parameters[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - return i, nil -} - -func (m *StorageClassList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *StorageClassList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = 
uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *StorageClass) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Provisioner) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Parameters) > 0 { - for k, v := range m.Parameters { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *StorageClassList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *StorageClass) String() string { - if this == nil { - return "nil" - } - keysForParameters := make([]string, 0, len(this.Parameters)) - for k := range this.Parameters { - keysForParameters = append(keysForParameters, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - mapStringForParameters := "map[string]string{" - for _, k := range keysForParameters { - mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) - } - mapStringForParameters += "}" - s := strings.Join([]string{`&StorageClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, - `Parameters:` + mapStringForParameters + `,`, - `}`, - }, "") - return s -} -func (this *StorageClassList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *StorageClass) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provisioner = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - 
mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.Parameters == nil { - m.Parameters = make(map[string]string) - } - m.Parameters[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StorageClassList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, StorageClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, 
nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -var fileDescriptorGenerated = []byte{ - // 486 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x92, 0xcf, 0x8b, 0xd3, 0x40, - 0x14, 0xc7, 0x33, 0x5d, 0x8a, 0xbb, 0x53, 0xc5, 0x12, 0x3d, 0x94, 0x1e, 0xb2, 0x65, 0x4f, 0x55, - 0x74, 0xc6, 0xae, 0x3f, 0x28, 0x0b, 0x5e, 0x2a, 0x82, 0x82, 0xe2, 0x12, 0x6f, 0xa2, 0xe0, 0xa4, - 0xfb, 0x4c, 0xc7, 0x34, 0x99, 0x30, 0xf3, 0x12, 0x2c, 0x78, 0xf0, 0x4f, 0xf0, 0xcf, 0xea, 0xcd, - 0x3d, 0x7a, 0x5a, 0x6c, 0xf4, 0x0f, 0x91, 0xfc, 0x70, 0x13, 0xcc, 0x16, 0x17, 0x6f, 0x99, 0x99, - 0xf7, 0xf9, 0xbe, 0xef, 0xfb, 0xbe, 0xd0, 0xa3, 0x60, 0x6a, 0x98, 0x54, 0x3c, 0x48, 0x3c, 0xd0, - 0x11, 0x20, 0x18, 0x1e, 0x07, 0x3e, 0x17, 0xb1, 0x34, 0xdc, 0xa0, 0xd2, 0xc2, 0x07, 0x9e, 0x4e, - 0x3c, 0x40, 0x31, 0xe1, 0x3e, 0x44, 0xa0, 0x05, 0xc2, 0x09, 0x8b, 0xb5, 0x42, 0x65, 0xdf, 0x2e, - 0x59, 0x56, 0xb3, 0x2c, 0x0e, 0x7c, 0x96, 0xb3, 0xac, 0x62, 0x59, 0xc5, 0x0e, 0xef, 0xfa, 0x12, - 0x17, 0x89, 0xc7, 0xe6, 0x2a, 0xe4, 0xbe, 0xf2, 0x15, 0x2f, 0x24, 0xbc, 0xe4, 0x43, 0x71, 0x2a, - 0x0e, 0xc5, 0x57, 0x29, 0x3d, 0x7c, 0x50, 0xd9, 0x12, 0xb1, 0x0c, 0xc5, 0x7c, 0x21, 0x23, 0xd0, - 0xab, 0xda, 0x58, 0x08, 0x28, 0x78, 0xda, 0x32, 0x34, 0xe4, 0xdb, 0x28, 0x9d, 0x44, 0x28, 0x43, - 0x68, 0x01, 0x8f, 0xfe, 0x05, 0x98, 0xf9, 0x02, 0x42, 0xd1, 0xe2, 0xee, 0x6f, 0xe3, 0x12, 0x94, - 0x4b, 0x2e, 0x23, 0x34, 0xa8, 0x5b, 0x50, 0x63, 0x26, 0x03, 0x3a, 0x05, 0x5d, 0x0f, 0x04, 0x9f, - 0x44, 0x18, 0x2f, 0xe1, 0xa2, 0x99, 0xee, 0x6c, 0x5d, 0xd0, 0x05, 0xd5, 0x07, 0xbf, 0x3a, 0xf4, - 0xea, 0xeb, 0x32, 0xfa, 0x27, 0x4b, 0x61, 0x8c, 0xfd, 0x9e, 0xee, 0xe6, 0x69, 0x9d, 0x08, 0x14, - 0x03, 0x32, 0x22, 0xe3, 0xde, 0xe1, 0x3d, 0x56, 0xad, 0xad, 0x69, 0xbe, 0x5e, 0x5c, 0x5e, 0xcd, - 0xd2, 0x09, 0x7b, 0xe5, 0x7d, 0x84, 0x39, 0xbe, 0x04, 0x14, 0x33, 0x7b, 0x7d, 0xb6, 0x6f, 0x65, - 0x67, 0xfb, 0xb4, 0xbe, 0x73, 0xcf, 0x55, 0xed, 0x87, 0xb4, 0x17, 0x6b, 0x95, 0x4a, 0x23, 0x55, - 0x04, 0x7a, 0xd0, 0x19, 0x91, 0xf1, 0xde, 0xec, 0x46, 0x85, 0xf4, 0x8e, 0xeb, 0x27, 0xb7, 0x59, - 0x67, 0x7f, 0xa6, 0x34, 0x16, 0x5a, 0x84, 0x80, 0xa0, 0xcd, 0x60, 0x67, 0xb4, 0x33, 0xee, 0x1d, - 0x3e, 0x63, 0x97, 0xff, 0xa3, 0x58, 0x73, 0x4c, 0x76, 0x7c, 0x2e, 0xf5, 0x34, 0x42, 0xbd, 0xaa, - 0x2d, 0xd7, 0x0f, 0x6e, 0xa3, 0xdf, 0xf0, 0x31, 
0xbd, 0xfe, 0x17, 0x62, 0xf7, 0xe9, 0x4e, 0x00, - 0xab, 0x22, 0xa4, 0x3d, 0x37, 0xff, 0xb4, 0x6f, 0xd2, 0x6e, 0x2a, 0x96, 0x09, 0x94, 0x33, 0xb9, - 0xe5, 0xe1, 0xa8, 0x33, 0x25, 0x07, 0xdf, 0x08, 0xed, 0x37, 0xfb, 0xbf, 0x90, 0x06, 0xed, 0xb7, - 0xad, 0xa8, 0xd9, 0xe5, 0xa2, 0xce, 0xe9, 0x22, 0xe8, 0x7e, 0xe5, 0x7a, 0xf7, 0xcf, 0x4d, 0x23, - 0xe6, 0x77, 0xb4, 0x2b, 0x11, 0x42, 0x33, 0xe8, 0x14, 0x51, 0x4d, 0xff, 0x37, 0xaa, 0xd9, 0xb5, - 0xaa, 0x49, 0xf7, 0x79, 0x2e, 0xe7, 0x96, 0xaa, 0xb3, 0x5b, 0xeb, 0x8d, 0x63, 0x9d, 0x6e, 0x1c, - 0xeb, 0xfb, 0xc6, 0xb1, 0xbe, 0x64, 0x0e, 0x59, 0x67, 0x0e, 0x39, 0xcd, 0x1c, 0xf2, 0x23, 0x73, - 0xc8, 0xd7, 0x9f, 0x8e, 0xf5, 0xe6, 0x4a, 0xa5, 0xf6, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x99, 0xdf, - 0x66, 0x94, 0x33, 0x04, 0x00, 0x00, -} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.proto deleted file mode 100644 index 0b9dbaead..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.proto +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.storage.v1beta1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; -import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// StorageClass describes the parameters for a class of storage for -// which PersistentVolumes can be dynamically provisioned. -// -// StorageClasses are non-namespaced; the name of the storage class -// according to etcd is in ObjectMeta.Name. -message StorageClass { - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Provisioner indicates the type of the provisioner. - optional string provisioner = 2; - - // Parameters holds the parameters for the provisioner that should - // create volumes of this storage class. - // +optional - map parameters = 3; -} - -// StorageClassList is a collection of storage classes. 
-message StorageClassList { - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of StorageClasses - repeated StorageClass items = 2; -} - diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.generated.go deleted file mode 100644 index ddc516b28..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.generated.go +++ /dev/null @@ -1,985 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1beta1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkg2_types "k8s.io/apimachinery/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_v1.TypeMeta - var v1 pkg2_types.UID - var v2 time.Time - _, _, _ = v0, v1, v2 - } -} - -func (x *StorageClass) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - yyq2[4] = len(x.Parameters) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ObjectMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ObjectMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("provisioner")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Parameters == nil { 
- r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - z.F.EncMapStringStringV(x.Parameters, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("parameters")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Parameters == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - z.F.EncMapStringStringV(x.Parameters, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StorageClass) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StorageClass) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv8 := &x.ObjectMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "provisioner": - if r.TryDecodeAsNil() { - x.Provisioner = "" - } else { - yyv10 := &x.Provisioner - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } - } - case "parameters": - if r.TryDecodeAsNil() { - x.Parameters = nil - } else { - yyv12 := &x.Parameters - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecMapStringStringX(yyv12, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StorageClass) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 
bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv15 := &x.Kind - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv17 := &x.APIVersion - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv19 := &x.ObjectMeta - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(yyv19) { - } else { - z.DecFallback(yyv19, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Provisioner = "" - } else { - yyv21 := &x.Provisioner - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Parameters = nil - } else { - yyv23 := &x.Parameters - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - z.F.DecMapStringStringX(yyv23, false, d) - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *StorageClassList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.ListMeta - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.ListMeta - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceStorageClass(([]StorageClass)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceStorageClass(([]StorageClass)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StorageClassList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StorageClassList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } 
- z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv4 := &x.Kind - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv6 := &x.APIVersion - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv8 := &x.ListMeta - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else { - z.DecFallback(yyv8, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv10 := &x.Items - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceStorageClass((*[]StorageClass)(yyv10), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StorageClassList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - yyv13 := &x.Kind - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - yyv15 := &x.APIVersion - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_v1.ListMeta{} - } else { - yyv17 := &x.ListMeta - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv19 := &x.Items - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceStorageClass((*[]StorageClass)(yyv19), d) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceStorageClass(v 
[]StorageClass, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceStorageClass(v *[]StorageClass, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []StorageClass{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]StorageClass, yyrl1) - } - } else { - yyv1 = make([]StorageClass, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = StorageClass{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, StorageClass{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = StorageClass{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, StorageClass{}) // var yyz1 StorageClass - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = StorageClass{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []StorageClass{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.go deleted file mode 100644 index afbd5bcdb..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient=true -// +nonNamespaced=true - -// StorageClass describes the parameters for a class of storage for -// which PersistentVolumes can be dynamically provisioned. 
-// -// StorageClasses are non-namespaced; the name of the storage class -// according to etcd is in ObjectMeta.Name. -type StorageClass struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Provisioner indicates the type of the provisioner. - Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - - // Parameters holds the parameters for the provisioner that should - // create volumes of this storage class. - // +optional - Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` -} - -// StorageClassList is a collection of storage classes. -type StorageClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of StorageClasses - Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index e8362e381..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_StorageClass = map[string]string{ - "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", - "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", -} - -func (StorageClass) SwaggerDoc() map[string]string { - return map_StorageClass -} - -var map_StorageClassList = map[string]string{ - "": "StorageClassList is a collection of storage classes.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", -} - -func (StorageClassList) SwaggerDoc() map[string]string { - return map_StorageClassList -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.conversion.go deleted file mode 100644 index f5b1b2aa4..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1beta1 - -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - storage "k8s.io/client-go/pkg/apis/storage" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_StorageClass_To_storage_StorageClass, - Convert_storage_StorageClass_To_v1beta1_StorageClass, - Convert_v1beta1_StorageClassList_To_storage_StorageClassList, - Convert_storage_StorageClassList_To_v1beta1_StorageClassList, - ) -} - -func autoConvert_v1beta1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Provisioner = in.Provisioner - out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) - return nil -} - -func Convert_v1beta1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error { - return autoConvert_v1beta1_StorageClass_To_storage_StorageClass(in, out, s) -} - -func autoConvert_storage_StorageClass_To_v1beta1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Provisioner = in.Provisioner - out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) - return nil -} - -func Convert_storage_StorageClass_To_v1beta1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error { - return autoConvert_storage_StorageClass_To_v1beta1_StorageClass(in, out, s) -} - -func autoConvert_v1beta1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]storage.StorageClass)(unsafe.Pointer(&in.Items)) - return nil -} - -func Convert_v1beta1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error { - return autoConvert_v1beta1_StorageClassList_To_storage_StorageClassList(in, out, s) -} - -func autoConvert_storage_StorageClassList_To_v1beta1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items == nil { - out.Items = make([]StorageClass, 0) - } else { - out.Items = *(*[]StorageClass)(unsafe.Pointer(&in.Items)) - } - return nil -} - -func Convert_storage_StorageClassList_To_v1beta1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error { - return autoConvert_storage_StorageClassList_To_v1beta1_StorageClassList(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index f2ea4f2fb..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,80 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
- -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StorageClass, InType: reflect.TypeOf(&StorageClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StorageClassList, InType: reflect.TypeOf(&StorageClassList{})}, - ) -} - -func DeepCopy_v1beta1_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StorageClass) - out := out.(*StorageClass) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - return nil - } -} - -func DeepCopy_v1beta1_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StorageClassList) - out := out.(*StorageClassList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StorageClass, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/storage/zz_generated.deepcopy.go deleted file mode 100644 index 2ef9f1767..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/storage/zz_generated.deepcopy.go +++ /dev/null @@ -1,80 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package storage - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_storage_StorageClass, InType: reflect.TypeOf(&StorageClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_storage_StorageClassList, InType: reflect.TypeOf(&StorageClassList{})}, - ) -} - -func DeepCopy_storage_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StorageClass) - out := out.(*StorageClass) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - return nil - } -} - -func DeepCopy_storage_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StorageClassList) - out := out.(*StorageClassList) - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StorageClass, len(*in)) - for i := range *in { - if err := DeepCopy_storage_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/util/doc.go b/vendor/k8s.io/client-go/pkg/util/doc.go deleted file mode 100644 index 1747db550..000000000 --- a/vendor/k8s.io/client-go/pkg/util/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package util implements various utility functions used in both testing and implementation -// of Kubernetes. Package util may not depend on any other package in the Kubernetes -// package tree. -package util diff --git a/vendor/k8s.io/client-go/pkg/util/parsers/parsers.go b/vendor/k8s.io/client-go/pkg/util/parsers/parsers.go deleted file mode 100644 index 4e70cc682..000000000 --- a/vendor/k8s.io/client-go/pkg/util/parsers/parsers.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parsers - -import ( - "fmt" - - dockerref "github.com/docker/distribution/reference" -) - -const ( - DefaultImageTag = "latest" -) - -// ParseImageName parses a docker image string into three parts: repo, tag and digest. -// If both tag and digest are empty, a default image tag will be returned. 
-func ParseImageName(image string) (string, string, string, error) { - named, err := dockerref.ParseNamed(image) - if err != nil { - return "", "", "", fmt.Errorf("couldn't parse image name: %v", err) - } - - repoToPull := named.Name() - var tag, digest string - - tagged, ok := named.(dockerref.Tagged) - if ok { - tag = tagged.Tag() - } - - digested, ok := named.(dockerref.Digested) - if ok { - digest = digested.Digest().String() - } - // If no tag was specified, use the default "latest". - if len(tag) == 0 && len(digest) == 0 { - tag = DefaultImageTag - } - return repoToPull, tag, digest, nil -} diff --git a/vendor/k8s.io/client-go/pkg/util/template.go b/vendor/k8s.io/client-go/pkg/util/template.go deleted file mode 100644 index d09d7dc86..000000000 --- a/vendor/k8s.io/client-go/pkg/util/template.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "bytes" - "go/doc" - "io" - "strings" - "text/template" -) - -func wrap(indent string, s string) string { - var buf bytes.Buffer - doc.ToText(&buf, s, indent, indent+" ", 80-len(indent)) - return buf.String() -} - -// ExecuteTemplate executes templateText with data and output written to w. -func ExecuteTemplate(w io.Writer, templateText string, data interface{}) error { - t := template.New("top") - t.Funcs(template.FuncMap{ - "trim": strings.TrimSpace, - "wrap": wrap, - }) - template.Must(t.Parse(templateText)) - return t.Execute(w, data) -} - -func ExecuteTemplateToString(templateText string, data interface{}) (string, error) { - b := bytes.Buffer{} - err := ExecuteTemplate(&b, templateText, data) - return b.String(), err -} diff --git a/vendor/k8s.io/client-go/pkg/util/util.go b/vendor/k8s.io/client-go/pkg/util/util.go deleted file mode 100644 index 356b295a3..000000000 --- a/vendor/k8s.io/client-go/pkg/util/util.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "fmt" - "os" - "reflect" - "regexp" -) - -// Takes a list of strings and compiles them into a list of regular expressions -func CompileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { - regexps := []*regexp.Regexp{} - for _, regexpStr := range regexpStrings { - r, err := regexp.Compile(regexpStr) - if err != nil { - return []*regexp.Regexp{}, err - } - regexps = append(regexps, r) - } - return regexps, nil -} - -// Detects if using systemd as the init system -// Please note that simply reading /proc/1/cmdline can be misleading because -// some installation of various init programs can automatically make /sbin/init -// a symlink or even a renamed version of their main program. -// TODO(dchen1107): realiably detects the init system using on the system: -// systemd, upstart, initd, etc. -func UsingSystemdInitSystem() bool { - if _, err := os.Stat("/run/systemd/system"); err == nil { - return true - } - - return false -} - -// Tests whether all pointer fields in a struct are nil. This is useful when, -// for example, an API struct is handled by plugins which need to distinguish -// "no plugin accepted this spec" from "this spec is empty". -// -// This function is only valid for structs and pointers to structs. Any other -// type will cause a panic. Passing a typed nil pointer will return true. -func AllPtrFieldsNil(obj interface{}) bool { - v := reflect.ValueOf(obj) - if !v.IsValid() { - panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) - } - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return true - } - v = v.Elem() - } - for i := 0; i < v.NumField(); i++ { - if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { - return false - } - } - return true -} - -func FileExists(filename string) (bool, error) { - if _, err := os.Stat(filename); os.IsNotExist(err) { - return false, nil - } else if err != nil { - return false, err - } - return true, nil -} - -// ReadDirNoStat returns a string of files/directories contained -// in dirname without calling lstat on them. -func ReadDirNoStat(dirname string) ([]string, error) { - if dirname == "" { - dirname = "." - } - - f, err := os.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - return f.Readdirnames(-1) -} - -// IntPtr returns a pointer to an int -func IntPtr(i int) *int { - o := i - return &o -} - -// Int32Ptr returns a pointer to an int32 -func Int32Ptr(i int32) *int32 { - o := i - return &o -} - -// IntPtrDerefOr dereference the int ptr and returns it i not nil, -// else returns def. -func IntPtrDerefOr(ptr *int, def int) int { - if ptr != nil { - return *ptr - } - return def -} - -// Int32PtrDerefOr dereference the int32 ptr and returns it i not nil, -// else returns def. -func Int32PtrDerefOr(ptr *int32, def int32) int32 { - if ptr != nil { - return *ptr - } - return def -} diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go index f0f7338c7..9b4c79f89 100644 --- a/vendor/k8s.io/client-go/pkg/version/base.go +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -39,11 +39,11 @@ var ( // them irrelevant. (Next we'll take it out, which may muck with // scripts consuming the kubectl version output - but most of // these should be looking at gitVersion already anyways.) 
- gitMajor string = "1" // major version, always numeric - gitMinor string = "6+" // minor version, numeric possibly followed by "+" + gitMajor string = "" // major version, always numeric + gitMinor string = "" // minor version, numeric possibly followed by "+" // semantic version, derived by build scripts (see - // https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md + // https://git.k8s.io/community/contributors/design-proposals/release/versioning.md // for a detailed discussion of this field) // // TODO: This field is still called "gitVersion" for legacy @@ -51,9 +51,13 @@ var ( // semantic version is a git hash, but the version itself is no // longer the direct output of "git describe", but a slight // translation to be semver compliant. - gitVersion string = "v1.6.1-beta.0+$Format:%h$" - gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) - gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" + + // NOTE: The $Format strings are replaced during 'git archive' thanks to the + // companion .gitattributes file containing 'export-subst' in this same + // directory. See also https://git-scm.com/docs/gitattributes + gitVersion string = "v0.0.0-master+$Format:%h$" + gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) + gitTreeState string = "" // state of git tree, either "clean" or "dirty" buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') ) diff --git a/vendor/k8s.io/client-go/pkg/version/def.bzl b/vendor/k8s.io/client-go/pkg/version/def.bzl new file mode 100644 index 000000000..9c018a4ef --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/version/def.bzl @@ -0,0 +1,38 @@ +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Implements hack/lib/version.sh's kube::version::ldflags() for Bazel. +def version_x_defs(): + # This should match the list of packages in kube::version::ldflag + stamp_pkgs = [ + "k8s.io/kubernetes/pkg/version", + # In hack/lib/version.sh, this has a vendor/ prefix. That isn't needed here? + "k8s.io/client-go/pkg/version", + ] + # This should match the list of vars in kube::version::ldflags + # It should also match the list of vars set in hack/print-workspace-status.sh. + stamp_vars = [ + "buildDate", + "gitCommit", + "gitMajor", + "gitMinor", + "gitTreeState", + "gitVersion", + ] + # Generate the cross-product. + x_defs = {} + for pkg in stamp_pkgs: + for var in stamp_vars: + x_defs["%s.%s" % (pkg, var)] = "{%s}" % var + return x_defs diff --git a/vendor/k8s.io/client-go/pkg/version/doc.go b/vendor/k8s.io/client-go/pkg/version/doc.go index 4bf139ce8..30399fb02 100644 --- a/vendor/k8s.io/client-go/pkg/version/doc.go +++ b/vendor/k8s.io/client-go/pkg/version/doc.go @@ -16,4 +16,5 @@ limitations under the License. // Package version supplies version information collected at build time to // kubernetes components. 
+// +k8s:openapi-gen=true package version // import "k8s.io/client-go/pkg/version" diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go new file mode 100644 index 000000000..2d05ac622 --- /dev/null +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -0,0 +1,370 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import ( + "bytes" + "context" + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "os" + "os/exec" + "reflect" + "sync" + "time" + + "github.com/golang/glog" + "golang.org/x/crypto/ssh/terminal" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/pkg/apis/clientauthentication" + "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" + "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" + "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/transport" + "k8s.io/client-go/util/connrotation" +) + +const execInfoEnv = "KUBERNETES_EXEC_INFO" + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + v1alpha1.AddToScheme(scheme) + v1beta1.AddToScheme(scheme) + clientauthentication.AddToScheme(scheme) +} + +var ( + // Since transports can be constantly re-initialized by programs like kubectl, + // keep a cache of initialized authenticators keyed by a hash of their config. + globalCache = newCache() + // The list of API versions we accept. + apiVersions = map[string]schema.GroupVersion{ + v1alpha1.SchemeGroupVersion.String(): v1alpha1.SchemeGroupVersion, + v1beta1.SchemeGroupVersion.String(): v1beta1.SchemeGroupVersion, + } +) + +func newCache() *cache { + return &cache{m: make(map[string]*Authenticator)} +} + +func cacheKey(c *api.ExecConfig) string { + return fmt.Sprintf("%#v", c) +} + +type cache struct { + mu sync.Mutex + m map[string]*Authenticator +} + +func (c *cache) get(s string) (*Authenticator, bool) { + c.mu.Lock() + defer c.mu.Unlock() + a, ok := c.m[s] + return a, ok +} + +// put inserts an authenticator into the cache. If an authenticator is already +// associated with the key, the first one is returned instead. +func (c *cache) put(s string, a *Authenticator) *Authenticator { + c.mu.Lock() + defer c.mu.Unlock() + existing, ok := c.m[s] + if ok { + return existing + } + c.m[s] = a + return a +} + +// GetAuthenticator returns an exec-based plugin for providing client credentials. 
+func GetAuthenticator(config *api.ExecConfig) (*Authenticator, error) { + return newAuthenticator(globalCache, config) +} + +func newAuthenticator(c *cache, config *api.ExecConfig) (*Authenticator, error) { + key := cacheKey(config) + if a, ok := c.get(key); ok { + return a, nil + } + + gv, ok := apiVersions[config.APIVersion] + if !ok { + return nil, fmt.Errorf("exec plugin: invalid apiVersion %q", config.APIVersion) + } + + a := &Authenticator{ + cmd: config.Command, + args: config.Args, + group: gv, + + stdin: os.Stdin, + stderr: os.Stderr, + interactive: terminal.IsTerminal(int(os.Stdout.Fd())), + now: time.Now, + environ: os.Environ, + } + + for _, env := range config.Env { + a.env = append(a.env, env.Name+"="+env.Value) + } + + return c.put(key, a), nil +} + +// Authenticator is a client credential provider that rotates credentials by executing a plugin. +// The plugin input and output are defined by the API group client.authentication.k8s.io. +type Authenticator struct { + // Set by the config + cmd string + args []string + group schema.GroupVersion + env []string + + // Stubbable for testing + stdin io.Reader + stderr io.Writer + interactive bool + now func() time.Time + environ func() []string + + // Cached results. + // + // The mutex also guards calling the plugin. Since the plugin could be + // interactive we want to make sure it's only called once. + mu sync.Mutex + cachedCreds *credentials + exp time.Time + + onRotate func() +} + +type credentials struct { + token string + cert *tls.Certificate +} + +// UpdateTransportConfig updates the transport.Config to use credentials +// returned by the plugin. +func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { + wt := c.WrapTransport + c.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + if wt != nil { + rt = wt(rt) + } + return &roundTripper{a, rt} + } + + getCert := c.TLS.GetCert + c.TLS.GetCert = func() (*tls.Certificate, error) { + // If previous GetCert is present and returns a valid non-nil + // certificate, use that. Otherwise use cert from exec plugin. + if getCert != nil { + cert, err := getCert() + if err != nil { + return nil, err + } + if cert != nil { + return cert, nil + } + } + return a.cert() + } + + var dial func(ctx context.Context, network, addr string) (net.Conn, error) + if c.Dial != nil { + dial = c.Dial + } else { + dial = (&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext + } + d := connrotation.NewDialer(dial) + a.onRotate = d.CloseAll + c.Dial = d.DialContext + + return nil +} + +type roundTripper struct { + a *Authenticator + base http.RoundTripper +} + +func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + // If a user has already set credentials, use that. This makes commands like + // "kubectl get --token (token) pods" work. 
+ if req.Header.Get("Authorization") != "" { + return r.base.RoundTrip(req) + } + + creds, err := r.a.getCreds() + if err != nil { + return nil, fmt.Errorf("getting credentials: %v", err) + } + if creds.token != "" { + req.Header.Set("Authorization", "Bearer "+creds.token) + } + + res, err := r.base.RoundTrip(req) + if err != nil { + return nil, err + } + if res.StatusCode == http.StatusUnauthorized { + resp := &clientauthentication.Response{ + Header: res.Header, + Code: int32(res.StatusCode), + } + if err := r.a.maybeRefreshCreds(creds, resp); err != nil { + glog.Errorf("refreshing credentials: %v", err) + } + } + return res, nil +} + +func (a *Authenticator) credsExpired() bool { + if a.exp.IsZero() { + return false + } + return a.now().After(a.exp) +} + +func (a *Authenticator) cert() (*tls.Certificate, error) { + creds, err := a.getCreds() + if err != nil { + return nil, err + } + return creds.cert, nil +} + +func (a *Authenticator) getCreds() (*credentials, error) { + a.mu.Lock() + defer a.mu.Unlock() + if a.cachedCreds != nil && !a.credsExpired() { + return a.cachedCreds, nil + } + + if err := a.refreshCredsLocked(nil); err != nil { + return nil, err + } + return a.cachedCreds, nil +} + +// maybeRefreshCreds executes the plugin to force a rotation of the +// credentials, unless they were rotated already. +func (a *Authenticator) maybeRefreshCreds(creds *credentials, r *clientauthentication.Response) error { + a.mu.Lock() + defer a.mu.Unlock() + + // Since we're not making a new pointer to a.cachedCreds in getCreds, no + // need to do deep comparison. + if creds != a.cachedCreds { + // Credentials already rotated. + return nil + } + + return a.refreshCredsLocked(r) +} + +// refreshCredsLocked executes the plugin and reads the credentials from +// stdout. It must be called while holding the Authenticator's mutex. +func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) error { + cred := &clientauthentication.ExecCredential{ + Spec: clientauthentication.ExecCredentialSpec{ + Response: r, + Interactive: a.interactive, + }, + } + + env := append(a.environ(), a.env...) + if a.group == v1alpha1.SchemeGroupVersion { + // Input spec disabled for beta due to lack of use. Possibly re-enable this later if + // someone wants it back. + // + // See: https://github.com/kubernetes/kubernetes/issues/61796 + data, err := runtime.Encode(codecs.LegacyCodec(a.group), cred) + if err != nil { + return fmt.Errorf("encode ExecCredentials: %v", err) + } + env = append(env, fmt.Sprintf("%s=%s", execInfoEnv, data)) + } + + stdout := &bytes.Buffer{} + cmd := exec.Command(a.cmd, a.args...) 
+ cmd.Env = env + cmd.Stderr = a.stderr + cmd.Stdout = stdout + if a.interactive { + cmd.Stdin = a.stdin + } + + if err := cmd.Run(); err != nil { + return fmt.Errorf("exec: %v", err) + } + + _, gvk, err := codecs.UniversalDecoder(a.group).Decode(stdout.Bytes(), nil, cred) + if err != nil { + return fmt.Errorf("decoding stdout: %v", err) + } + if gvk.Group != a.group.Group || gvk.Version != a.group.Version { + return fmt.Errorf("exec plugin is configured to use API version %s, plugin returned version %s", + a.group, schema.GroupVersion{Group: gvk.Group, Version: gvk.Version}) + } + + if cred.Status == nil { + return fmt.Errorf("exec plugin didn't return a status field") + } + if cred.Status.Token == "" && cred.Status.ClientCertificateData == "" && cred.Status.ClientKeyData == "" { + return fmt.Errorf("exec plugin didn't return a token or cert/key pair") + } + if (cred.Status.ClientCertificateData == "") != (cred.Status.ClientKeyData == "") { + return fmt.Errorf("exec plugin returned only certificate or key, not both") + } + + if cred.Status.ExpirationTimestamp != nil { + a.exp = cred.Status.ExpirationTimestamp.Time + } else { + a.exp = time.Time{} + } + + newCreds := &credentials{ + token: cred.Status.Token, + } + if cred.Status.ClientKeyData != "" && cred.Status.ClientCertificateData != "" { + cert, err := tls.X509KeyPair([]byte(cred.Status.ClientCertificateData), []byte(cred.Status.ClientKeyData)) + if err != nil { + return fmt.Errorf("failed parsing client key/certificate: %v", err) + } + newCreds.cert = &cert + } + + oldCreds := a.cachedCreds + a.cachedCreds = newCreds + // Only close all connections when TLS cert rotates. Token rotation doesn't + // need the extra noise. + if a.onRotate != nil && oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) { + a.onRotate() + } + return nil +} diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go index 524e0d8eb..927403cb2 100644 --- a/vendor/k8s.io/client-go/rest/client.go +++ b/vendor/k8s.io/client-go/rest/client.go @@ -222,9 +222,9 @@ func (c *RESTClient) Verb(verb string) *Request { backoff := c.createBackoffMgr() if c.Client == nil { - return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) + return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, 0) } - return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) + return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, c.Client.Timeout) } // Post begins a POST request. Short for c.Verb("POST"). diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 2a2c03dff..7934a0196 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -17,12 +17,13 @@ limitations under the License. package rest import ( + "context" "fmt" "io/ioutil" "net" "net/http" "os" - "path" + "path/filepath" gruntime "runtime" "strings" "time" @@ -32,7 +33,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/pkg/api" "k8s.io/client-go/pkg/version" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" certutil "k8s.io/client-go/util/cert" @@ -54,9 +54,6 @@ type Config struct { Host string // APIPath is a sub-path that points to an API root. 
APIPath string - // Prefix is the sub path of the server. If not specified, the client will set - // a default value. Use "/" to indicate the server root should be used - Prefix string // ContentConfig contains settings that affect how objects are transformed when // sent to the server. @@ -80,6 +77,9 @@ type Config struct { // Callback to persist config for AuthProvider. AuthConfigPersister AuthProviderConfigPersister + // Exec-based authentication provider. + ExecProvider *clientcmdapi.ExecConfig + // TLSClientConfig contains settings to enable transport layer security TLSClientConfig @@ -110,6 +110,9 @@ type Config struct { // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout. Timeout time.Duration + // Dial specifies the dial function for creating unencrypted TCP connections. + Dial func(ctx context.Context, network, address string) (net.Conn, error) + // Version forces a specific version to be used (if registered) // Do we need this? // Version string @@ -126,6 +129,7 @@ type ImpersonationConfig struct { Extra map[string][]string } +// +k8s:deepcopy-gen=true // TLSClientConfig contains settings to enable transport layer security type TLSClientConfig struct { // Server should be accessed without verifying the TLS certificate. For testing only. @@ -255,19 +259,51 @@ func SetKubernetesDefaults(config *Config) error { return nil } -// DefaultKubernetesUserAgent returns the default user agent that clients can use. +// adjustCommit returns sufficient significant figures of the commit's git hash. +func adjustCommit(c string) string { + if len(c) == 0 { + return "unknown" + } + if len(c) > 7 { + return c[:7] + } + return c +} + +// adjustVersion strips "alpha", "beta", etc. from version in form +// major.minor.patch-[alpha|beta|etc]. +func adjustVersion(v string) string { + if len(v) == 0 { + return "unknown" + } + seg := strings.SplitN(v, "-", 2) + return seg[0] +} + +// adjustCommand returns the last component of the +// OS-specific command path for use in User-Agent. +func adjustCommand(p string) string { + // Unlikely, but better than returning "". + if len(p) == 0 { + return "unknown" + } + return filepath.Base(p) +} + +// buildUserAgent builds a User-Agent string from given args. +func buildUserAgent(command, version, os, arch, commit string) string { + return fmt.Sprintf( + "%s/%s (%s/%s) kubernetes/%s", command, version, os, arch, commit) +} + +// DefaultKubernetesUserAgent returns a User-Agent string built from static global vars. 
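The helpers above compose the agent string as command/version (GOOS/GOARCH) kubernetes/commit; a standalone sketch with assumed example inputs (the path, version and commit below are illustrative, not taken from this patch):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// Mirrors buildUserAgent and the adjust* helpers introduced above (illustration only).
func buildUserAgent(command, version, os, arch, commit string) string {
	return fmt.Sprintf("%s/%s (%s/%s) kubernetes/%s", command, version, os, arch, commit)
}

func main() {
	command := filepath.Base("/usr/local/bin/kubectl")     // adjustCommand
	version := strings.SplitN("v1.11.0-beta.2", "-", 2)[0] // adjustVersion
	commit := "0123456789abcdef"[:7]                       // adjustCommit
	fmt.Println(buildUserAgent(command, version, "linux", "amd64", commit))
	// Prints: kubectl/v1.11.0 (linux/amd64) kubernetes/0123456
}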
func DefaultKubernetesUserAgent() string { - commit := version.Get().GitCommit - if len(commit) > 7 { - commit = commit[:7] - } - if len(commit) == 0 { - commit = "unknown" - } - version := version.Get().GitVersion - seg := strings.SplitN(version, "-", 2) - version = seg[0] - return fmt.Sprintf("%s/%s (%s/%s) kubernetes/%s", path.Base(os.Args[0]), version, gruntime.GOOS, gruntime.GOARCH, commit) + return buildUserAgent( + adjustCommand(os.Args[0]), + adjustVersion(version.Get().GitVersion), + gruntime.GOOS, + gruntime.GOARCH, + adjustCommit(version.Get().GitCommit)) } // InClusterConfig returns a config object which uses the service account @@ -280,12 +316,12 @@ func InClusterConfig() (*Config, error) { return nil, fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined") } - token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountTokenKey) + token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token") if err != nil { return nil, err } tlsClientConfig := TLSClientConfig{} - rootCAFile := "/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountRootCAKey + rootCAFile := "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" if _, err := certutil.NewPool(rootCAFile); err != nil { glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) } else { @@ -365,7 +401,6 @@ func AnonymousClientConfig(config *Config) *Config { return &Config{ Host: config.Host, APIPath: config.APIPath, - Prefix: config.Prefix, ContentConfig: config.ContentConfig, TLSClientConfig: TLSClientConfig{ Insecure: config.Insecure, @@ -380,5 +415,44 @@ func AnonymousClientConfig(config *Config) *Config { QPS: config.QPS, Burst: config.Burst, Timeout: config.Timeout, + Dial: config.Dial, + } +} + +// CopyConfig returns a copy of the given config +func CopyConfig(config *Config) *Config { + return &Config{ + Host: config.Host, + APIPath: config.APIPath, + ContentConfig: config.ContentConfig, + Username: config.Username, + Password: config.Password, + BearerToken: config.BearerToken, + Impersonate: ImpersonationConfig{ + Groups: config.Impersonate.Groups, + Extra: config.Impersonate.Extra, + UserName: config.Impersonate.UserName, + }, + AuthProvider: config.AuthProvider, + AuthConfigPersister: config.AuthConfigPersister, + ExecProvider: config.ExecProvider, + TLSClientConfig: TLSClientConfig{ + Insecure: config.TLSClientConfig.Insecure, + ServerName: config.TLSClientConfig.ServerName, + CertFile: config.TLSClientConfig.CertFile, + KeyFile: config.TLSClientConfig.KeyFile, + CAFile: config.TLSClientConfig.CAFile, + CertData: config.TLSClientConfig.CertData, + KeyData: config.TLSClientConfig.KeyData, + CAData: config.TLSClientConfig.CAData, + }, + UserAgent: config.UserAgent, + Transport: config.Transport, + WrapTransport: config.WrapTransport, + QPS: config.QPS, + Burst: config.Burst, + RateLimiter: config.RateLimiter, + Timeout: config.Timeout, + Dial: config.Dial, } } diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index b87ddaff5..09ffd76dd 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -33,27 +33,20 @@ import ( "time" "github.com/golang/glog" + "golang.org/x/net/http2" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" 
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/streaming" "k8s.io/apimachinery/pkg/util/net" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/pkg/api/v1" restclientwatch "k8s.io/client-go/rest/watch" "k8s.io/client-go/tools/metrics" "k8s.io/client-go/util/flowcontrol" ) var ( - // specialParams lists parameters that are handled specially and which users of Request - // are therefore not allowed to set manually. - specialParams = sets.NewString("timeout") - // longThrottleLatency defines threshold for logging requests. All requests being // throttle for more than longThrottleLatency will be logged. longThrottleLatency = 50 * time.Millisecond @@ -119,7 +112,7 @@ type Request struct { } // NewRequest creates a new request helper object for accessing runtime.Objects on a server. -func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter) *Request { +func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request { if backoff == nil { glog.V(2).Infof("Not implementing request backoff strategy.") backoff = &NoBackoff{} @@ -138,6 +131,7 @@ func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPa serializers: serializers, backoffMgr: backoff, throttle: throttle, + timeout: timeout, } switch { case len(content.AcceptContentTypes) > 0: @@ -186,6 +180,24 @@ func (r *Request) Resource(resource string) *Request { return r } +// BackOff sets the request's backoff manager to the one specified, +// or defaults to the stub implementation if nil is provided +func (r *Request) BackOff(manager BackoffManager) *Request { + if manager == nil { + r.backoffMgr = &NoBackoff{} + return r + } + + r.backoffMgr = manager + return r +} + +// Throttle receives a rate-limiter and sets or replaces an existing request limiter +func (r *Request) Throttle(limiter flowcontrol.RateLimiter) *Request { + r.throttle = limiter + return r +} + // SubResource sets a sub-resource path which can be multiple segments segment after the resource // name but before the suffix. func (r *Request) SubResource(subresources ...string) *Request { @@ -269,7 +281,7 @@ func (r *Request) AbsPath(segments ...string) *Request { } // RequestURI overwrites existing path and parameters with the value of the provided server relative -// URI. Some parameters (those in specialParameters) cannot be overwritten. +// URI. func (r *Request) RequestURI(uri string) *Request { if r.err != nil { return r @@ -291,143 +303,6 @@ func (r *Request) RequestURI(uri string) *Request { return r } -const ( - // A constant that clients can use to refer in a field selector to the object name field. - // Will be automatically emitted as the correct name for the API version. 
- nodeUnschedulable = "spec.unschedulable" - objectNameField = "metadata.name" - podHost = "spec.nodeName" - podStatus = "status.phase" - secretType = "type" - - eventReason = "reason" - eventSource = "source" - eventType = "type" - eventInvolvedKind = "involvedObject.kind" - eventInvolvedNamespace = "involvedObject.namespace" - eventInvolvedName = "involvedObject.name" - eventInvolvedUID = "involvedObject.uid" - eventInvolvedAPIVersion = "involvedObject.apiVersion" - eventInvolvedResourceVersion = "involvedObject.resourceVersion" - eventInvolvedFieldPath = "involvedObject.fieldPath" -) - -type clientFieldNameToAPIVersionFieldName map[string]string - -func (c clientFieldNameToAPIVersionFieldName) filterField(field, value string) (newField, newValue string, err error) { - newFieldName, ok := c[field] - if !ok { - return "", "", fmt.Errorf("%v - %v - no field mapping defined", field, value) - } - return newFieldName, value, nil -} - -type resourceTypeToFieldMapping map[string]clientFieldNameToAPIVersionFieldName - -func (r resourceTypeToFieldMapping) filterField(resourceType, field, value string) (newField, newValue string, err error) { - fMapping, ok := r[resourceType] - if !ok { - return "", "", fmt.Errorf("%v - %v - %v - no field mapping defined", resourceType, field, value) - } - return fMapping.filterField(field, value) -} - -type versionToResourceToFieldMapping map[schema.GroupVersion]resourceTypeToFieldMapping - -// filterField transforms the given field/value selector for the given groupVersion and resource -func (v versionToResourceToFieldMapping) filterField(groupVersion *schema.GroupVersion, resourceType, field, value string) (newField, newValue string, err error) { - rMapping, ok := v[*groupVersion] - if !ok { - // no groupVersion overrides registered, default to identity mapping - return field, value, nil - } - newField, newValue, err = rMapping.filterField(resourceType, field, value) - if err != nil { - // no groupVersionResource overrides registered, default to identity mapping - return field, value, nil - } - return newField, newValue, nil -} - -var fieldMappings = versionToResourceToFieldMapping{ - v1.SchemeGroupVersion: resourceTypeToFieldMapping{ - "nodes": clientFieldNameToAPIVersionFieldName{ - objectNameField: objectNameField, - nodeUnschedulable: nodeUnschedulable, - }, - "pods": clientFieldNameToAPIVersionFieldName{ - objectNameField: objectNameField, - podHost: podHost, - podStatus: podStatus, - }, - "secrets": clientFieldNameToAPIVersionFieldName{ - secretType: secretType, - }, - "serviceAccounts": clientFieldNameToAPIVersionFieldName{ - objectNameField: objectNameField, - }, - "endpoints": clientFieldNameToAPIVersionFieldName{ - objectNameField: objectNameField, - }, - "events": clientFieldNameToAPIVersionFieldName{ - objectNameField: objectNameField, - eventReason: eventReason, - eventSource: eventSource, - eventType: eventType, - eventInvolvedKind: eventInvolvedKind, - eventInvolvedNamespace: eventInvolvedNamespace, - eventInvolvedName: eventInvolvedName, - eventInvolvedUID: eventInvolvedUID, - eventInvolvedAPIVersion: eventInvolvedAPIVersion, - eventInvolvedResourceVersion: eventInvolvedResourceVersion, - eventInvolvedFieldPath: eventInvolvedFieldPath, - }, - }, -} - -// FieldsSelectorParam adds the given selector as a query parameter with the name paramName. 
-func (r *Request) FieldsSelectorParam(s fields.Selector) *Request { - if r.err != nil { - return r - } - if s == nil { - return r - } - if s.Empty() { - return r - } - s2, err := s.Transform(func(field, value string) (newField, newValue string, err error) { - return fieldMappings.filterField(r.content.GroupVersion, r.resource, field, value) - }) - if err != nil { - r.err = err - return r - } - return r.setParam(metav1.FieldSelectorQueryParam(r.content.GroupVersion.String()), s2.String()) -} - -// LabelsSelectorParam adds the given selector as a query parameter -func (r *Request) LabelsSelectorParam(s labels.Selector) *Request { - if r.err != nil { - return r - } - if s == nil { - return r - } - if s.Empty() { - return r - } - return r.setParam(metav1.LabelSelectorQueryParam(r.content.GroupVersion.String()), s.String()) -} - -// UintParam creates a query parameter with the given value. -func (r *Request) UintParam(paramName string, u uint64) *Request { - if r.err != nil { - return r - } - return r.setParam(paramName, strconv.FormatUint(u, 10)) -} - // Param creates a query parameter with the given string value. func (r *Request) Param(paramName, s string) *Request { if r.err != nil { @@ -439,62 +314,31 @@ func (r *Request) Param(paramName, s string) *Request { // VersionedParams will take the provided object, serialize it to a map[string][]string using the // implicit RESTClient API version and the default parameter codec, and then add those as parameters // to the request. Use this to provide versioned query parameters from client libraries. +// VersionedParams will not write query parameters that have omitempty set and are empty. If a +// parameter has already been set it is appended to (Params and VersionedParams are additive). func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request { + return r.SpecificallyVersionedParams(obj, codec, *r.content.GroupVersion) +} + +func (r *Request) SpecificallyVersionedParams(obj runtime.Object, codec runtime.ParameterCodec, version schema.GroupVersion) *Request { if r.err != nil { return r } - params, err := codec.EncodeParameters(obj, *r.content.GroupVersion) + params, err := codec.EncodeParameters(obj, version) if err != nil { r.err = err return r } for k, v := range params { - for _, value := range v { - // TODO: Move it to setParam method, once we get rid of - // FieldSelectorParam & LabelSelectorParam methods. - if k == metav1.LabelSelectorQueryParam(r.content.GroupVersion.String()) && value == "" { - // Don't set an empty selector for backward compatibility. - // Since there is no way to get the difference between empty - // and unspecified string, we don't set it to avoid having - // labelSelector= param in every request. - continue - } - if k == metav1.FieldSelectorQueryParam(r.content.GroupVersion.String()) { - if len(value) == 0 { - // Don't set an empty selector for backward compatibility. - // Since there is no way to get the difference between empty - // and unspecified string, we don't set it to avoid having - // fieldSelector= param in every request. - continue - } - // TODO: Filtering should be handled somewhere else. 
- selector, err := fields.ParseSelector(value) - if err != nil { - r.err = fmt.Errorf("unparsable field selector: %v", err) - return r - } - filteredSelector, err := selector.Transform( - func(field, value string) (newField, newValue string, err error) { - return fieldMappings.filterField(r.content.GroupVersion, r.resource, field, value) - }) - if err != nil { - r.err = fmt.Errorf("untransformable field selector: %v", err) - return r - } - value = filteredSelector.String() - } - - r.setParam(k, value) + if r.params == nil { + r.params = make(url.Values) } + r.params[k] = append(r.params[k], v...) } return r } func (r *Request) setParam(paramName, value string) *Request { - if specialParams.Has(paramName) { - r.err = fmt.Errorf("must set %v through the corresponding function, not directly.", paramName) - return r - } if r.params == nil { r.params = make(url.Values) } @@ -502,16 +346,19 @@ func (r *Request) setParam(paramName, value string) *Request { return r } -func (r *Request) SetHeader(key, value string) *Request { +func (r *Request) SetHeader(key string, values ...string) *Request { if r.headers == nil { r.headers = http.Header{} } - r.headers.Set(key, value) + r.headers.Del(key) + for _, value := range values { + r.headers.Add(key, value) + } return r } -// Timeout makes the request use the given duration as a timeout. Sets the "timeout" -// parameter. +// Timeout makes the request use the given duration as an overall timeout for the +// request. Additionally, if set passes the value as "timeout" parameter in URL. func (r *Request) Timeout(d time.Duration) *Request { if r.err != nil { return r @@ -609,7 +456,7 @@ func (r *Request) URL() *url.URL { // finalURLTemplate is similar to URL(), but will make all specific parameter values equal // - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query // parameters will be reset. This creates a copy of the request so as not to change the -// underyling object. This means some useful request info (like the types of field +// underlying object. This means some useful request info (like the types of field // selectors in use) will be lost. // TODO: preserve field selector keys func (r Request) finalURLTemplate() url.URL { @@ -642,6 +489,19 @@ func (r *Request) tryThrottle() { // Watch attempts to begin watching the requested location. // Returns a watch.Interface, or an error. func (r *Request) Watch() (watch.Interface, error) { + return r.WatchWithSpecificDecoders( + func(body io.ReadCloser) streaming.Decoder { + framer := r.serializers.Framer.NewFrameReader(body) + return streaming.NewDecoder(framer, r.serializers.StreamingSerializer) + }, + r.serializers.Decoder, + ) +} + +// WatchWithSpecificDecoders attempts to begin watching the requested location with a *different* decoder. +// Turns out that you want one "standard" decoder for the watch event and one "personal" decoder for the content +// Returns a watch.Interface, or an error. +func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) streaming.Decoder, embeddedDecoder runtime.Decoder) (watch.Interface, error) { // We specifically don't want to rate limit watches, so we // don't use r.throttle here. 
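The request-builder hunks above make Param and VersionedParams additive and let SetHeader accept several values for one key. A minimal sketch of the resulting behaviour, assuming the standard clientset scheme for parameter encoding; the host, resource, and selector values are placeholders and no request is actually sent:

    package main

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/runtime/schema"
    	"k8s.io/client-go/kubernetes/scheme"
    	"k8s.io/client-go/rest"
    )

    func main() {
    	// Placeholder config; only the request URL is built here.
    	cfg := &rest.Config{Host: "https://example.invalid"}
    	cfg.GroupVersion = &schema.GroupVersion{Version: "v1"}
    	cfg.APIPath = "/api"
    	cfg.NegotiatedSerializer = scheme.Codecs

    	client, err := rest.RESTClientFor(cfg)
    	if err != nil {
    		panic(err)
    	}

    	// VersionedParams and Param now accumulate, and SetHeader takes
    	// multiple values for the same header key.
    	req := client.Get().
    		Resource("pods").
    		Namespace("default").
    		VersionedParams(&metav1.ListOptions{LabelSelector: "app=web"}, scheme.ParameterCodec).
    		Param("limit", "10").
    		SetHeader("Accept", "application/json", "application/vnd.kubernetes.protobuf")

    	fmt.Println(req.URL()) // query carries both labelSelector and limit
    }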
if r.err != nil { @@ -689,9 +549,8 @@ func (r *Request) Watch() (watch.Interface, error) { } return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode) } - framer := r.serializers.Framer.NewFrameReader(resp.Body) - decoder := streaming.NewDecoder(framer, r.serializers.StreamingSerializer) - return watch.NewStreamWatcher(restclientwatch.NewDecoder(decoder, r.serializers.Decoder)), nil + wrapperDecoder := wrapperDecoderFn(resp.Body) + return watch.NewStreamWatcher(restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder)), nil } // updateURLMetrics is a convenience function for pushing metrics. @@ -797,7 +656,6 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { } // Right now we make about ten retry attempts if we get a Retry-After response. - // TODO: Change to a timeout based approach. maxRetries := 10 retries := 0 for { @@ -806,6 +664,14 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { if err != nil { return err } + if r.timeout > 0 { + if r.ctx == nil { + r.ctx = context.Background() + } + var cancelFn context.CancelFunc + r.ctx, cancelFn = context.WithTimeout(r.ctx, r.timeout) + defer cancelFn() + } if r.ctx != nil { req = req.WithContext(r.ctx) } @@ -921,8 +787,29 @@ func (r *Request) DoRaw() ([]byte, error) { func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result { var body []byte if resp.Body != nil { - if data, err := ioutil.ReadAll(resp.Body); err == nil { + data, err := ioutil.ReadAll(resp.Body) + switch err.(type) { + case nil: body = data + case http2.StreamError: + // This is trying to catch the scenario that the server may close the connection when sending the + // response body. This can be caused by server timeout due to a slow network connection. + // TODO: Add test for this. Steps may be: + // 1. client-go (or kubectl) sends a GET request. + // 2. Apiserver sends back the headers and then part of the body + // 3. Apiserver closes connection. + // 4. client-go should catch this and return an error. + glog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) + streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. Please retry.", err) + return Result{ + err: streamErr, + } + default: + glog.Errorf("Unexpected error when reading response body: %#v", err) + unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. Please retry.", err) + return Result{ + err: unexpectedErr, + } } } @@ -978,6 +865,25 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu } } +// truncateBody decides if the body should be truncated, based on the glog Verbosity. +func truncateBody(body string) string { + max := 0 + switch { + case bool(glog.V(10)): + return body + case bool(glog.V(9)): + max = 10240 + case bool(glog.V(8)): + max = 1024 + } + + if len(body) <= max { + return body + } + + return body[:max] + fmt.Sprintf(" [truncated %d chars]", len(body)-max) +} + // glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against // allocating a new string for the body output unless necessary. Uses a simple heuristic to determine // whether the body is printable. 
@@ -986,9 +892,9 @@ func glogBody(prefix string, body []byte) { if bytes.IndexFunc(body, func(r rune) bool { return r < 0x0a }) != -1 { - glog.Infof("%s:\n%s", prefix, hex.Dump(body)) + glog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) } else { - glog.Infof("%s: %s", prefix, string(body)) + glog.Infof("%s: %s", prefix, truncateBody(string(body))) } } } @@ -1069,7 +975,7 @@ func isTextResponse(resp *http.Response) bool { func checkWait(resp *http.Response) (int, bool) { switch r := resp.StatusCode; { // any 500 error code and 429 can trigger a wait - case r == errors.StatusTooManyRequests, r >= 500: + case r == http.StatusTooManyRequests, r >= 500: default: return 0, false } @@ -1148,6 +1054,9 @@ func (r Result) Into(obj runtime.Object) error { if r.decoder == nil { return fmt.Errorf("serializer for %s doesn't exist", r.contentType) } + if len(r.body) == 0 { + return fmt.Errorf("0-length response") + } out, _, err := r.decoder.Decode(r.body, nil, obj) if err != nil || out == obj { diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go index ba43752bc..25c1801b6 100644 --- a/vendor/k8s.io/client-go/rest/transport.go +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -18,8 +18,10 @@ package rest import ( "crypto/tls" + "errors" "net/http" + "k8s.io/client-go/plugin/pkg/client/auth/exec" "k8s.io/client-go/transport" ) @@ -58,25 +60,10 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip // TransportConfig converts a client config to an appropriate transport config. func (c *Config) TransportConfig() (*transport.Config, error) { - wt := c.WrapTransport - if c.AuthProvider != nil { - provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister) - if err != nil { - return nil, err - } - if wt != nil { - previousWT := wt - wt = func(rt http.RoundTripper) http.RoundTripper { - return provider.WrapTransport(previousWT(rt)) - } - } else { - wt = provider.WrapTransport - } - } - return &transport.Config{ + conf := &transport.Config{ UserAgent: c.UserAgent, Transport: c.Transport, - WrapTransport: wt, + WrapTransport: c.WrapTransport, TLS: transport.TLSConfig{ Insecure: c.Insecure, ServerName: c.ServerName, @@ -95,5 +82,35 @@ func (c *Config) TransportConfig() (*transport.Config, error) { Groups: c.Impersonate.Groups, Extra: c.Impersonate.Extra, }, - }, nil + Dial: c.Dial, + } + + if c.ExecProvider != nil && c.AuthProvider != nil { + return nil, errors.New("execProvider and authProvider cannot be used in combination") + } + + if c.ExecProvider != nil { + provider, err := exec.GetAuthenticator(c.ExecProvider) + if err != nil { + return nil, err + } + if err := provider.UpdateTransportConfig(conf); err != nil { + return nil, err + } + } + if c.AuthProvider != nil { + provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister) + if err != nil { + return nil, err + } + wt := conf.WrapTransport + if wt != nil { + conf.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + return provider.WrapTransport(wt(rt)) + } + } else { + conf.WrapTransport = provider.WrapTransport + } + } + return conf, nil } diff --git a/vendor/k8s.io/client-go/rest/url_utils.go b/vendor/k8s.io/client-go/rest/url_utils.go index 14f94650a..a56d1838d 100644 --- a/vendor/k8s.io/client-go/rest/url_utils.go +++ b/vendor/k8s.io/client-go/rest/url_utils.go @@ -56,6 +56,14 @@ func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, de // hostURL.Path should be blank. 
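The reworked TransportConfig above wires in exec-plugin credentials and rejects configs that set both an exec provider and an auth provider before either one is instantiated. A hedged sketch of that error path; the host, provider name, and command are illustrative only:

    package main

    import (
    	"fmt"

    	"k8s.io/client-go/rest"
    	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    )

    func main() {
    	cfg := &rest.Config{
    		Host:         "https://example.invalid",
    		AuthProvider: &clientcmdapi.AuthProviderConfig{Name: "gcp"},
    		ExecProvider: &clientcmdapi.ExecConfig{Command: "example-credential-helper"},
    	}

    	// The combination is rejected up front.
    	if _, err := cfg.TransportConfig(); err != nil {
    		fmt.Println(err) // execProvider and authProvider cannot be used in combination
    	}
    }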
// // versionedAPIPath, a path relative to baseURL.Path, points to a versioned API base + versionedAPIPath := DefaultVersionedAPIPath(apiPath, groupVersion) + + return hostURL, versionedAPIPath, nil +} + +// DefaultVersionedAPIPathFor constructs the default path for the given group version, assuming the given +// API path, following the standard conventions of the Kubernetes API. +func DefaultVersionedAPIPath(apiPath string, groupVersion schema.GroupVersion) string { versionedAPIPath := path.Join("/", apiPath) // Add the version to the end of the path @@ -64,10 +72,9 @@ func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, de } else { versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Version) - } - return hostURL, versionedAPIPath, nil + return versionedAPIPath } // defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It diff --git a/vendor/k8s.io/client-go/rest/versions.go b/vendor/k8s.io/client-go/rest/versions.go deleted file mode 100644 index 9d41812f2..000000000 --- a/vendor/k8s.io/client-go/rest/versions.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rest - -import ( - "encoding/json" - "fmt" - "net/http" - "path" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - legacyAPIPath = "/api" - defaultAPIPath = "/apis" -) - -// TODO: Is this obsoleted by the discovery client? - -// ServerAPIVersions returns the GroupVersions supported by the API server. -// It creates a RESTClient based on the passed in config, but it doesn't rely -// on the Version and Codec of the config, because it uses AbsPath and -// takes the raw response. -func ServerAPIVersions(c *Config) (groupVersions []string, err error) { - transport, err := TransportFor(c) - if err != nil { - return nil, err - } - client := http.Client{Transport: transport} - - configCopy := *c - configCopy.GroupVersion = nil - configCopy.APIPath = "" - baseURL, _, err := defaultServerUrlFor(&configCopy) - if err != nil { - return nil, err - } - // Get the groupVersions exposed at /api - originalPath := baseURL.Path - baseURL.Path = path.Join(originalPath, legacyAPIPath) - resp, err := client.Get(baseURL.String()) - if err != nil { - return nil, err - } - var v metav1.APIVersions - defer resp.Body.Close() - err = json.NewDecoder(resp.Body).Decode(&v) - if err != nil { - return nil, fmt.Errorf("unexpected error: %v", err) - } - - groupVersions = append(groupVersions, v.Versions...) 
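The newly extracted DefaultVersionedAPIPath helper above can be called on its own; a quick sketch of its output for the legacy and grouped API paths:

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/runtime/schema"
    	"k8s.io/client-go/rest"
    )

    func main() {
    	fmt.Println(rest.DefaultVersionedAPIPath("/api", schema.GroupVersion{Version: "v1"}))
    	// /api/v1
    	fmt.Println(rest.DefaultVersionedAPIPath("/apis", schema.GroupVersion{Group: "apps", Version: "v1"}))
    	// /apis/apps/v1
    }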
- // Get the groupVersions exposed at /apis - baseURL.Path = path.Join(originalPath, defaultAPIPath) - resp2, err := client.Get(baseURL.String()) - if err != nil { - return nil, err - } - var apiGroupList metav1.APIGroupList - defer resp2.Body.Close() - err = json.NewDecoder(resp2.Body).Decode(&apiGroupList) - if err != nil { - return nil, fmt.Errorf("unexpected error: %v", err) - } - - for _, g := range apiGroupList.Groups { - for _, gv := range g.Versions { - groupVersions = append(groupVersions, gv.GroupVersion) - } - } - - return groupVersions, nil -} diff --git a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go new file mode 100644 index 000000000..c1ab45f33 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go @@ -0,0 +1,52 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package rest + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSClientConfig) DeepCopyInto(out *TLSClientConfig) { + *out = *in + if in.CertData != nil { + in, out := &in.CertData, &out.CertData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CAData != nil { + in, out := &in.CAData, &out.CAData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSClientConfig. 
+func (in *TLSClientConfig) DeepCopy() *TLSClientConfig { + if in == nil { + return nil + } + out := new(TLSClientConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go index fdcebc9bd..b99f231c8 100644 --- a/vendor/k8s.io/client-go/testing/actions.go +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -47,20 +47,43 @@ func NewGetAction(resource schema.GroupVersionResource, namespace, name string) return action } -func NewRootListAction(resource schema.GroupVersionResource, opts interface{}) ListActionImpl { +func NewGetSubresourceAction(resource schema.GroupVersionResource, namespace, subresource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewRootGetSubresourceAction(resource schema.GroupVersionResource, subresource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Subresource = subresource + action.Name = name + + return action +} + +func NewRootListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, opts interface{}) ListActionImpl { action := ListActionImpl{} action.Verb = "list" action.Resource = resource + action.Kind = kind labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} return action } -func NewListAction(resource schema.GroupVersionResource, namespace string, opts interface{}) ListActionImpl { +func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, namespace string, opts interface{}) ListActionImpl { action := ListActionImpl{} action.Verb = "list" action.Resource = resource + action.Kind = kind action.Namespace = namespace labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} @@ -87,6 +110,29 @@ func NewCreateAction(resource schema.GroupVersionResource, namespace string, obj return action } +func NewRootCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Subresource = subresource + action.Name = name + action.Object = object + + return action +} + +func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Namespace = namespace + action.Subresource = subresource + action.Name = name + action.Object = object + + return action +} + func NewRootUpdateAction(resource schema.GroupVersionResource, object runtime.Object) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" @@ -179,6 +225,16 @@ func NewRootDeleteAction(resource schema.GroupVersionResource, name string) Dele return action } +func NewRootDeleteSubresourceAction(resource schema.GroupVersionResource, subresource string, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Subresource = subresource + action.Name = name + + return action +} + func NewDeleteAction(resource schema.GroupVersionResource, namespace, name string) 
DeleteActionImpl { action := DeleteActionImpl{} action.Verb = "delete" @@ -189,6 +245,17 @@ func NewDeleteAction(resource schema.GroupVersionResource, namespace, name strin return action } +func NewDeleteSubresourceAction(resource schema.GroupVersionResource, subresource, namespace, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Name = name + + return action +} + func NewRootDeleteCollectionAction(resource schema.GroupVersionResource, opts interface{}) DeleteCollectionActionImpl { action := DeleteCollectionActionImpl{} action.Verb = "delete-collection" @@ -285,6 +352,10 @@ type Action interface { GetResource() schema.GroupVersionResource GetSubresource() string Matches(verb, resource string) bool + + // DeepCopy is used to copy an action to avoid any risk of accidental mutation. Most people never need to call this + // because the invocation logic deep copies before calls to storage and reactors. + DeepCopy() Action } type GenericAction interface { @@ -317,6 +388,17 @@ type DeleteAction interface { GetName() string } +type DeleteCollectionAction interface { + Action + GetListRestrictions() ListRestrictions +} + +type PatchAction interface { + Action + GetName() string + GetPatch() []byte +} + type WatchAction interface { Action GetWatchRestrictions() WatchRestrictions @@ -354,6 +436,10 @@ func (a ActionImpl) Matches(verb, resource string) bool { return strings.ToLower(verb) == strings.ToLower(a.Verb) && strings.ToLower(resource) == strings.ToLower(a.Resource.Resource) } +func (a ActionImpl) DeepCopy() Action { + ret := a + return ret +} type GenericActionImpl struct { ActionImpl @@ -364,6 +450,14 @@ func (a GenericActionImpl) GetValue() interface{} { return a.Value } +func (a GenericActionImpl) DeepCopy() Action { + return GenericActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + // TODO this is wrong, but no worse than before + Value: a.Value, + } +} + type GetActionImpl struct { ActionImpl Name string @@ -373,17 +467,43 @@ func (a GetActionImpl) GetName() string { return a.Name } +func (a GetActionImpl) DeepCopy() Action { + return GetActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + } +} + type ListActionImpl struct { ActionImpl + Kind schema.GroupVersionKind + Name string ListRestrictions ListRestrictions } +func (a ListActionImpl) GetKind() schema.GroupVersionKind { + return a.Kind +} + func (a ListActionImpl) GetListRestrictions() ListRestrictions { return a.ListRestrictions } +func (a ListActionImpl) DeepCopy() Action { + return ListActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Kind: a.Kind, + Name: a.Name, + ListRestrictions: ListRestrictions{ + Labels: a.ListRestrictions.Labels.DeepCopySelector(), + Fields: a.ListRestrictions.Fields.DeepCopySelector(), + }, + } +} + type CreateActionImpl struct { ActionImpl + Name string Object runtime.Object } @@ -391,6 +511,14 @@ func (a CreateActionImpl) GetObject() runtime.Object { return a.Object } +func (a CreateActionImpl) DeepCopy() Action { + return CreateActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + Object: a.Object.DeepCopyObject(), + } +} + type UpdateActionImpl struct { ActionImpl Object runtime.Object @@ -400,6 +528,13 @@ func (a UpdateActionImpl) GetObject() runtime.Object { return a.Object } +func (a UpdateActionImpl) DeepCopy() Action { + return UpdateActionImpl{ + ActionImpl: 
a.ActionImpl.DeepCopy().(ActionImpl), + Object: a.Object.DeepCopyObject(), + } +} + type PatchActionImpl struct { ActionImpl Name string @@ -414,6 +549,16 @@ func (a PatchActionImpl) GetPatch() []byte { return a.Patch } +func (a PatchActionImpl) DeepCopy() Action { + patch := make([]byte, len(a.Patch)) + copy(patch, a.Patch) + return PatchActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + Patch: patch, + } +} + type DeleteActionImpl struct { ActionImpl Name string @@ -423,6 +568,13 @@ func (a DeleteActionImpl) GetName() string { return a.Name } +func (a DeleteActionImpl) DeepCopy() Action { + return DeleteActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + } +} + type DeleteCollectionActionImpl struct { ActionImpl ListRestrictions ListRestrictions @@ -432,6 +584,16 @@ func (a DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions { return a.ListRestrictions } +func (a DeleteCollectionActionImpl) DeepCopy() Action { + return DeleteCollectionActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + ListRestrictions: ListRestrictions{ + Labels: a.ListRestrictions.Labels.DeepCopySelector(), + Fields: a.ListRestrictions.Fields.DeepCopySelector(), + }, + } +} + type WatchActionImpl struct { ActionImpl WatchRestrictions WatchRestrictions @@ -441,6 +603,17 @@ func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions { return a.WatchRestrictions } +func (a WatchActionImpl) DeepCopy() Action { + return WatchActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + WatchRestrictions: WatchRestrictions{ + Labels: a.WatchRestrictions.Labels.DeepCopySelector(), + Fields: a.WatchRestrictions.Fields.DeepCopySelector(), + ResourceVersion: a.WatchRestrictions.ResourceVersion, + }, + } +} + type ProxyGetActionImpl struct { ActionImpl Scheme string @@ -469,3 +642,18 @@ func (a ProxyGetActionImpl) GetPath() string { func (a ProxyGetActionImpl) GetParams() map[string]string { return a.Params } + +func (a ProxyGetActionImpl) DeepCopy() Action { + params := map[string]string{} + for k, v := range a.Params { + params[k] = v + } + return ProxyGetActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Scheme: a.Scheme, + Name: a.Name, + Port: a.Port, + Path: a.Path, + Params: params, + } +} diff --git a/vendor/k8s.io/client-go/testing/fake.go b/vendor/k8s.io/client-go/testing/fake.go index da47b23b9..8b3f31eaf 100644 --- a/vendor/k8s.io/client-go/testing/fake.go +++ b/vendor/k8s.io/client-go/testing/fake.go @@ -22,10 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/version" "k8s.io/apimachinery/pkg/watch" - kubeversion "k8s.io/client-go/pkg/version" restclient "k8s.io/client-go/rest" ) @@ -134,13 +131,13 @@ func (c *Fake) Invokes(action Action, defaultReturnObj runtime.Object) (runtime. 
c.Lock() defer c.Unlock() - c.actions = append(c.actions, action) + c.actions = append(c.actions, action.DeepCopy()) for _, reactor := range c.ReactionChain { if !reactor.Handles(action) { continue } - handled, ret, err := reactor.React(action) + handled, ret, err := reactor.React(action.DeepCopy()) if !handled { continue } @@ -157,13 +154,13 @@ func (c *Fake) InvokesWatch(action Action) (watch.Interface, error) { c.Lock() defer c.Unlock() - c.actions = append(c.actions, action) + c.actions = append(c.actions, action.DeepCopy()) for _, reactor := range c.WatchReactionChain { if !reactor.Handles(action) { continue } - handled, ret, err := reactor.React(action) + handled, ret, err := reactor.React(action.DeepCopy()) if !handled { continue } @@ -180,13 +177,13 @@ func (c *Fake) InvokesProxy(action Action) restclient.ResponseWrapper { c.Lock() defer c.Unlock() - c.actions = append(c.actions, action) + c.actions = append(c.actions, action.DeepCopy()) for _, reactor := range c.ProxyReactionChain { if !reactor.Handles(action) { continue } - handled, ret, err := reactor.React(action) + handled, ret, err := reactor.React(action.DeepCopy()) if !handled || err != nil { continue } @@ -214,46 +211,3 @@ func (c *Fake) Actions() []Action { copy(fa, c.actions) return fa } - -// TODO: this probably should be moved to somewhere else. -type FakeDiscovery struct { - *Fake -} - -func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { - action := ActionImpl{ - Verb: "get", - Resource: schema.GroupVersionResource{Resource: "resource"}, - } - c.Invokes(action, nil) - for _, rl := range c.Resources { - if rl.GroupVersion == groupVersion { - return rl, nil - } - } - - return nil, fmt.Errorf("GroupVersion %q not found", groupVersion) -} - -func (c *FakeDiscovery) ServerResources() ([]*metav1.APIResourceList, error) { - action := ActionImpl{ - Verb: "get", - Resource: schema.GroupVersionResource{Resource: "resource"}, - } - c.Invokes(action, nil) - return c.Resources, nil -} - -func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { - return nil, nil -} - -func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { - action := ActionImpl{} - action.Verb = "get" - action.Resource = schema.GroupVersionResource{Resource: "version"} - - c.Invokes(action, nil) - versionInfo := kubeversion.Get() - return &versionInfo, nil -} diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go index f68585357..00c4c49fc 100644 --- a/vendor/k8s.io/client-go/testing/fixture.go +++ b/vendor/k8s.io/client-go/testing/fixture.go @@ -22,10 +22,11 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apimachinery/registered" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" ) @@ -39,52 +40,40 @@ type ObjectTracker interface { Add(obj runtime.Object) error // Get retrieves the object by its kind, namespace and name. - Get(gvk schema.GroupVersionKind, ns, name string) (runtime.Object, error) + Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) // Create adds an object to the tracker in the specified namespace. 
- Create(obj runtime.Object, ns string) error + Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error // Update updates an existing object in the tracker in the specified namespace. - Update(obj runtime.Object, ns string) error + Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error // List retrieves all objects of a given kind in the given // namespace. Only non-List kinds are accepted. - List(gvk schema.GroupVersionKind, ns string) (runtime.Object, error) + List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) // Delete deletes an existing object from the tracker. If object // didn't exist in the tracker prior to deletion, Delete returns // no error. - Delete(gvk schema.GroupVersionKind, ns, name string) error + Delete(gvr schema.GroupVersionResource, ns, name string) error + + // Watch watches objects from the tracker. Watch returns a channel + // which will push added / modified / deleted object. + Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) } // ObjectScheme abstracts the implementation of common operations on objects. type ObjectScheme interface { runtime.ObjectCreater - runtime.ObjectCopier runtime.ObjectTyper } // ObjectReaction returns a ReactionFunc that applies core.Action to // the given tracker. -func ObjectReaction(tracker ObjectTracker, mapper meta.RESTMapper) ReactionFunc { +func ObjectReaction(tracker ObjectTracker) ReactionFunc { return func(action Action) (bool, runtime.Object, error) { ns := action.GetNamespace() gvr := action.GetResource() - - gvk, err := mapper.KindFor(gvr) - if err != nil { - return false, nil, fmt.Errorf("error getting kind for resource %q: %s", gvr, err) - } - - // This is a temporary fix. Because there is no internal resource, so - // the caller has no way to express that it expects to get an internal - // kind back. A more proper fix will be directly specify the Kind when - // build the action. - gvk.Version = gvr.Version - if len(gvk.Version) == 0 { - gvk.Version = runtime.APIVersionInternal - } - // Here and below we need to switch on implementation types, // not on interfaces, as some interfaces are identical // (e.g. UpdateAction and CreateAction), so if we use them, @@ -92,11 +81,11 @@ func ObjectReaction(tracker ObjectTracker, mapper meta.RESTMapper) ReactionFunc switch action := action.(type) { case ListActionImpl: - obj, err := tracker.List(gvk, ns) + obj, err := tracker.List(gvr, action.GetKind(), ns) return true, obj, err case GetActionImpl: - obj, err := tracker.Get(gvk, ns, action.GetName()) + obj, err := tracker.Get(gvr, ns, action.GetName()) return true, obj, err case CreateActionImpl: @@ -105,17 +94,17 @@ func ObjectReaction(tracker ObjectTracker, mapper meta.RESTMapper) ReactionFunc return true, nil, err } if action.GetSubresource() == "" { - err = tracker.Create(action.GetObject(), ns) + err = tracker.Create(gvr, action.GetObject(), ns) } else { // TODO: Currently we're handling subresource creation as an update // on the enclosing resource. This works for some subresources but // might not be generic enough. 
- err = tracker.Update(action.GetObject(), ns) + err = tracker.Update(gvr, action.GetObject(), ns) } if err != nil { return true, nil, err } - obj, err := tracker.Get(gvk, ns, objMeta.GetName()) + obj, err := tracker.Get(gvr, ns, objMeta.GetName()) return true, obj, err case UpdateActionImpl: @@ -123,20 +112,48 @@ func ObjectReaction(tracker ObjectTracker, mapper meta.RESTMapper) ReactionFunc if err != nil { return true, nil, err } - err = tracker.Update(action.GetObject(), ns) + err = tracker.Update(gvr, action.GetObject(), ns) if err != nil { return true, nil, err } - obj, err := tracker.Get(gvk, ns, objMeta.GetName()) + obj, err := tracker.Get(gvr, ns, objMeta.GetName()) return true, obj, err case DeleteActionImpl: - err := tracker.Delete(gvk, ns, action.GetName()) + err := tracker.Delete(gvr, ns, action.GetName()) if err != nil { return true, nil, err } return true, nil, nil + case PatchActionImpl: + obj, err := tracker.Get(gvr, ns, action.GetName()) + if err != nil { + // object is not registered + return false, nil, err + } + + old, err := json.Marshal(obj) + if err != nil { + return true, nil, err + } + // Only supports strategic merge patch + // TODO: Add support for other Patch types + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return true, nil, err + } + + if err = json.Unmarshal(mergedByte, obj); err != nil { + return true, nil, err + } + + if err = tracker.Update(gvr, obj, ns); err != nil { + return true, nil, err + } + + return true, obj, nil + default: return false, nil, fmt.Errorf("no reaction implemented for %s", action) } @@ -144,32 +161,42 @@ func ObjectReaction(tracker ObjectTracker, mapper meta.RESTMapper) ReactionFunc } type tracker struct { - registry *registered.APIRegistrationManager - scheme ObjectScheme - decoder runtime.Decoder - lock sync.RWMutex - objects map[schema.GroupVersionKind][]runtime.Object + scheme ObjectScheme + decoder runtime.Decoder + lock sync.RWMutex + objects map[schema.GroupVersionResource][]runtime.Object + // The value type of watchers is a map of which the key is either a namespace or + // all/non namespace aka "" and its value is list of fake watchers. + // Manipulations on resources will broadcast the notification events into the + // watchers' channel. Note that too many unhandled events (currently 100, + // see apimachinery/pkg/watch.DefaultChanSize) will cause a panic. + watchers map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher } var _ ObjectTracker = &tracker{} // NewObjectTracker returns an ObjectTracker that can be used to keep track // of objects for the fake clientset. Mostly useful for unit tests. -func NewObjectTracker(registry *registered.APIRegistrationManager, scheme ObjectScheme, decoder runtime.Decoder) ObjectTracker { +func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracker { return &tracker{ - registry: registry, scheme: scheme, decoder: decoder, - objects: make(map[schema.GroupVersionKind][]runtime.Object), + objects: make(map[schema.GroupVersionResource][]runtime.Object), + watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher), } } -func (t *tracker) List(gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { +func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { // Heuristic for list kind: original kind + List suffix. 
Might // not always be true but this tracker has a pretty limited // understanding of the actual API model. listGVK := gvk listGVK.Kind = listGVK.Kind + "List" + // GVK does have the concept of "internal version". The scheme recognizes + // the runtime.APIVersionInternal, but not the empty string. + if listGVK.Version == "" { + listGVK.Version = runtime.APIVersionInternal + } list, err := t.scheme.New(listGVK) if err != nil { @@ -183,7 +210,7 @@ func (t *tracker) List(gvk schema.GroupVersionKind, ns string) (runtime.Object, t.lock.RLock() defer t.lock.RUnlock() - objs, ok := t.objects[gvk] + objs, ok := t.objects[gvr] if !ok { return list, nil } @@ -195,23 +222,29 @@ func (t *tracker) List(gvk schema.GroupVersionKind, ns string) (runtime.Object, if err := meta.SetList(list, matchingObjs); err != nil { return nil, err } - if list, err = t.scheme.Copy(list); err != nil { - return nil, err - } - return list, nil + return list.DeepCopyObject(), nil } -func (t *tracker) Get(gvk schema.GroupVersionKind, ns, name string) (runtime.Object, error) { - if err := checkNamespace(t.registry, gvk, ns); err != nil { - return nil, err - } +func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { + t.lock.Lock() + defer t.lock.Unlock() - errNotFound := errors.NewNotFound(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, name) + fakewatcher := watch.NewRaceFreeFake() + + if _, exists := t.watchers[gvr]; !exists { + t.watchers[gvr] = make(map[string][]*watch.RaceFreeFakeWatcher) + } + t.watchers[gvr][ns] = append(t.watchers[gvr][ns], fakewatcher) + return fakewatcher, nil +} + +func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { + errNotFound := errors.NewNotFound(gvr.GroupResource(), name) t.lock.RLock() defer t.lock.RUnlock() - objs, ok := t.objects[gvk] + objs, ok := t.objects[gvr] if !ok { return nil, errNotFound } @@ -224,21 +257,14 @@ func (t *tracker) Get(gvk schema.GroupVersionKind, ns, name string) (runtime.Obj return nil, errNotFound } if len(matchingObjs) > 1 { - return nil, fmt.Errorf("more than one object matched gvk %s, ns: %q name: %q", gvk, ns, name) + return nil, fmt.Errorf("more than one object matched gvr %s, ns: %q name: %q", gvr, ns, name) } // Only one object should match in the tracker if it works // correctly, as Add/Update methods enforce kind/namespace/name // uniqueness. 
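The fixture changes above key the tracker by GroupVersionResource, drop the RESTMapper from ObjectReaction, and add fake watch support. A hedged sketch of driving the tracker directly in a test, assuming the standard clientset scheme; the object names and namespace are placeholders:

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/runtime/schema"
    	"k8s.io/client-go/kubernetes/scheme"
    	clienttesting "k8s.io/client-go/testing"
    )

    func main() {
    	tracker := clienttesting.NewObjectTracker(scheme.Scheme, scheme.Codecs.UniversalDecoder())

    	gvr := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
    	gvk := schema.GroupVersionKind{Version: "v1", Kind: "Pod"}

    	// Watches registered on the tracker receive later add/update/delete events.
    	w, _ := tracker.Watch(gvr, "default")

    	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
    	if err := tracker.Create(gvr, pod, "default"); err != nil {
    		panic(err)
    	}
    	fmt.Println((<-w.ResultChan()).Type) // ADDED

    	// ObjectReaction no longer needs a RESTMapper; list actions carry the kind.
    	react := clienttesting.ObjectReaction(tracker)
    	_, list, err := react(clienttesting.NewListAction(gvr, gvk, "default", metav1.ListOptions{}))
    	fmt.Println(list, err)
    }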
- obj, err := t.scheme.Copy(matchingObjs[0]) - if err != nil { - return nil, err - } - + obj := matchingObjs[0].DeepCopyObject() if status, ok := obj.(*metav1.Status); ok { - if status.Details != nil { - status.Details.Kind = gvk.Kind - } if status.Status != metav1.StatusSuccess { return nil, &errors.StatusError{ErrStatus: *status} } @@ -251,23 +277,10 @@ func (t *tracker) Add(obj runtime.Object) error { if meta.IsListType(obj) { return t.addList(obj, false) } - objMeta, err := meta.Accessor(obj) if err != nil { return err } - return t.add(obj, objMeta.GetNamespace(), false) -} - -func (t *tracker) Create(obj runtime.Object, ns string) error { - return t.add(obj, ns, false) -} - -func (t *tracker) Update(obj runtime.Object, ns string) error { - return t.add(obj, ns, true) -} - -func (t *tracker) add(obj runtime.Object, ns string, replaceExisting bool) error { gvks, _, err := t.scheme.ObjectKinds(obj) if err != nil { return err @@ -275,64 +288,99 @@ func (t *tracker) add(obj runtime.Object, ns string, replaceExisting bool) error if len(gvks) == 0 { return fmt.Errorf("no registered kinds for %v", obj) } + for _, gvk := range gvks { + // NOTE: UnsafeGuessKindToResource is a heuristic and default match. The + // actual registration in apiserver can specify arbitrary route for a + // gvk. If a test uses such objects, it cannot preset the tracker with + // objects via Add(). Instead, it should trigger the Create() function + // of the tracker, where an arbitrary gvr can be specified. + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + // Resource doesn't have the concept of "__internal" version, just set it to "". + if gvr.Version == runtime.APIVersionInternal { + gvr.Version = "" + } + err := t.add(gvr, obj, objMeta.GetNamespace(), false) + if err != nil { + return err + } + } + return nil +} + +func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + return t.add(gvr, obj, ns, false) +} + +func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + return t.add(gvr, obj, ns, true) +} + +func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watch.RaceFreeFakeWatcher { + watches := []*watch.RaceFreeFakeWatcher{} + if t.watchers[gvr] != nil { + if w := t.watchers[gvr][ns]; w != nil { + watches = append(watches, w...) + } + if w := t.watchers[gvr][""]; w != nil { + watches = append(watches, w...) + } + } + return watches +} + +func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns string, replaceExisting bool) error { t.lock.Lock() defer t.lock.Unlock() - for _, gvk := range gvks { - gr := schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind} + gr := gvr.GroupResource() - // To avoid the object from being accidentally modified by caller - // after it's been added to the tracker, we always store the deep - // copy. - obj, err = t.scheme.Copy(obj) + // To avoid the object from being accidentally modified by caller + // after it's been added to the tracker, we always store the deep + // copy. + obj = obj.DeepCopyObject() + + newMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + + // Propagate namespace to the new object if hasn't already been set. 
+ if len(newMeta.GetNamespace()) == 0 { + newMeta.SetNamespace(ns) + } + + if ns != newMeta.GetNamespace() { + msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace()) + return errors.NewBadRequest(msg) + } + + for i, existingObj := range t.objects[gvr] { + oldMeta, err := meta.Accessor(existingObj) if err != nil { return err } - - if status, ok := obj.(*metav1.Status); ok && status.Details != nil { - gvk.Kind = status.Details.Kind - } - - newMeta, err := meta.Accessor(obj) - if err != nil { - return err - } - - // Propagate namespace to the new object if hasn't already been set. - if len(newMeta.GetNamespace()) == 0 { - newMeta.SetNamespace(ns) - } - - if ns != newMeta.GetNamespace() { - msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace()) - return errors.NewBadRequest(msg) - } - - if err := checkNamespace(t.registry, gvk, newMeta.GetNamespace()); err != nil { - return err - } - - for i, existingObj := range t.objects[gvk] { - oldMeta, err := meta.Accessor(existingObj) - if err != nil { - return err - } - if oldMeta.GetNamespace() == newMeta.GetNamespace() && oldMeta.GetName() == newMeta.GetName() { - if replaceExisting { - t.objects[gvk][i] = obj - return nil + if oldMeta.GetNamespace() == newMeta.GetNamespace() && oldMeta.GetName() == newMeta.GetName() { + if replaceExisting { + for _, w := range t.getWatches(gvr, ns) { + w.Modify(obj) } - return errors.NewAlreadyExists(gr, newMeta.GetName()) + t.objects[gvr][i] = obj + return nil } + return errors.NewAlreadyExists(gr, newMeta.GetName()) } + } - if replaceExisting { - // Tried to update but no matching object was found. - return errors.NewNotFound(gr, newMeta.GetName()) - } + if replaceExisting { + // Tried to update but no matching object was found. + return errors.NewNotFound(gr, newMeta.GetName()) + } - t.objects[gvk] = append(t.objects[gvk], obj) + t.objects[gvr] = append(t.objects[gvr], obj) + + for _, w := range t.getWatches(gvr, ns) { + w.Add(obj) } return nil @@ -348,35 +396,30 @@ func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error { return errs[0] } for _, obj := range list { - objMeta, err := meta.Accessor(obj) - if err != nil { - return err - } - err = t.add(obj, objMeta.GetNamespace(), replaceExisting) - if err != nil { + if err := t.Add(obj); err != nil { return err } } return nil } -func (t *tracker) Delete(gvk schema.GroupVersionKind, ns, name string) error { - if err := checkNamespace(t.registry, gvk, ns); err != nil { - return err - } - +func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error { t.lock.Lock() defer t.lock.Unlock() found := false - for i, existingObj := range t.objects[gvk] { + for i, existingObj := range t.objects[gvr] { objMeta, err := meta.Accessor(existingObj) if err != nil { return err } if objMeta.GetNamespace() == ns && objMeta.GetName() == name { - t.objects[gvk] = append(t.objects[gvk][:i], t.objects[gvk][i+1:]...) + obj := t.objects[gvr][i] + t.objects[gvr] = append(t.objects[gvr][:i], t.objects[gvr][i+1:]...) 
+ for _, w := range t.getWatches(gvr, ns) { + w.Delete(obj) + } found = true break } @@ -386,7 +429,7 @@ func (t *tracker) Delete(gvk schema.GroupVersionKind, ns, name string) error { return nil } - return errors.NewNotFound(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, name) + return errors.NewNotFound(gvr.GroupResource(), name) } // filterByNamespaceAndName returns all objects in the collection that @@ -412,37 +455,6 @@ func filterByNamespaceAndName(objs []runtime.Object, ns, name string) ([]runtime return res, nil } -// checkNamespace makes sure that the scope of gvk matches ns. It -// returns an error if namespace is empty but gvk is a namespaced -// kind, or if ns is non-empty and gvk is a namespaced kind. -func checkNamespace(registry *registered.APIRegistrationManager, gvk schema.GroupVersionKind, ns string) error { - group, err := registry.Group(gvk.Group) - if err != nil { - return err - } - mapping, err := group.RESTMapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return err - } - switch mapping.Scope.Name() { - case meta.RESTScopeNameRoot: - if ns != "" { - return fmt.Errorf("namespace specified for a non-namespaced kind %s", gvk) - } - case meta.RESTScopeNameNamespace: - if ns == "" { - // Skipping this check for Events, since - // controllers emit events that have no namespace, - // even though Event is a namespaced resource. - if gvk.Kind != "Event" { - return fmt.Errorf("no namespace specified for a namespaced kind %s", gvk) - } - } - } - - return nil -} - func DefaultWatchReactor(watchInterface watch.Interface, err error) WatchReactionFunc { return func(action Action) (bool, watch.Interface, error) { return true, watchInterface, err diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS index e923c7709..727377f9d 100755 --- a/vendor/k8s.io/client-go/tools/cache/OWNERS +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -1,3 +1,12 @@ +approvers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- caesarxuchao +- liggitt +- ncdc reviewers: - thockin - lavalamp @@ -10,7 +19,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - erictune - davidopp - pmorie @@ -39,3 +47,4 @@ reviewers: - mqliang - feihujiang - sdminonne +- ncdc diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index 06bc04f85..028c75e8e 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -21,9 +21,9 @@ import ( "time" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/clock" ) // Config contains all the settings for a Controller. @@ -116,7 +116,10 @@ func (c *controller) Run(stopCh <-chan struct{}) { c.reflector = r c.reflectorMutex.Unlock() - r.RunUntil(stopCh) + var wg wait.Group + defer wg.Wait() + + wg.StartWithChannel(stopCh, r.Run) wait.Until(c.processLoop, time.Second, stopCh) } @@ -207,6 +210,47 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) { } } +// FilteringResourceEventHandler applies the provided filter to all events coming +// in, ensuring the appropriate nested handler method is invoked. An object +// that starts passing the filter after an update is considered an add, and an +// object that stops passing the filter after an update is considered a delete. 
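The FilteringResourceEventHandler documented above reports filter transitions as adds and deletes. A minimal sketch with a toy string filter; the real informer wiring is omitted:

    package main

    import (
    	"fmt"

    	"k8s.io/client-go/tools/cache"
    )

    func main() {
    	h := cache.FilteringResourceEventHandler{
    		FilterFunc: func(obj interface{}) bool { return len(obj.(string)) > 3 },
    		Handler: cache.ResourceEventHandlerFuncs{
    			AddFunc:    func(obj interface{}) { fmt.Println("add:", obj) },
    			UpdateFunc: func(oldObj, newObj interface{}) { fmt.Println("update:", newObj) },
    			DeleteFunc: func(obj interface{}) { fmt.Println("delete:", obj) },
    		},
    	}

    	h.OnUpdate("pod", "pod-1")   // starts passing the filter -> reported as an add
    	h.OnUpdate("pod-1", "pod")   // stops passing the filter  -> reported as a delete
    	h.OnUpdate("pod-1", "pod-2") // keeps passing             -> reported as an update
    }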
+type FilteringResourceEventHandler struct { + FilterFunc func(obj interface{}) bool + Handler ResourceEventHandler +} + +// OnAdd calls the nested handler only if the filter succeeds +func (r FilteringResourceEventHandler) OnAdd(obj interface{}) { + if !r.FilterFunc(obj) { + return + } + r.Handler.OnAdd(obj) +} + +// OnUpdate ensures the proper handler is called depending on whether the filter matches +func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) { + newer := r.FilterFunc(newObj) + older := r.FilterFunc(oldObj) + switch { + case newer && older: + r.Handler.OnUpdate(oldObj, newObj) + case newer && !older: + r.Handler.OnAdd(newObj) + case !newer && older: + r.Handler.OnDelete(oldObj) + default: + // do nothing + } +} + +// OnDelete calls the nested handler only if the filter succeeds +func (r FilteringResourceEventHandler) OnDelete(obj interface{}) { + if !r.FilterFunc(obj) { + return + } + r.Handler.OnDelete(obj) +} + // DeletionHandlingMetaNamespaceKeyFunc checks for // DeletedFinalStateUnknown objects before calling // MetaNamespaceKeyFunc. @@ -244,7 +288,7 @@ func NewInformer( // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. - fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState) + fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState) cfg := &Config{ Queue: fifo, @@ -296,6 +340,7 @@ func NewInformer( // long as possible (until the upstream source closes the watch or times out, // or you stop the controller). // * h is the object you want notifications sent to. +// * indexers is the indexer for the received object type. // func NewIndexerInformer( lw ListerWatcher, @@ -310,7 +355,7 @@ func NewIndexerInformer( // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. - fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState) + fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState) cfg := &Config{ Queue: fifo, diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index a71db6048..45c3b500d 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -31,11 +31,6 @@ import ( // keyFunc is used to figure out what key an object should have. (It's // exposed in the returned DeltaFIFO's KeyOf() method, with bonus features.) // -// 'compressor' may compress as many or as few items as it wants -// (including returning an empty slice), but it should do what it -// does quickly since it is called while the queue is locked. -// 'compressor' may be nil if you don't want any delta compression. -// // 'keyLister' is expected to return a list of keys that the consumer of // this queue "knows about". It is used to decide which items are missing // when Replace() is called; 'Deleted' deltas are produced for these items. @@ -43,18 +38,30 @@ import ( // TODO: consider merging keyLister with this object, tracking a list of // "known" keys when Pop() is called. Have to think about how that // affects error retrying. -// TODO(lavalamp): I believe there is a possible race only when using an -// external known object source that the above TODO would -// fix. +// NOTE: It is possible to misuse this and cause a race when using an +// external known object source. 
+// Whether there is a potential race depends on how the comsumer +// modifies knownObjects. In Pop(), process function is called under +// lock, so it is safe to update data structures in it that need to be +// in sync with the queue (e.g. knownObjects). +// +// Example: +// In case of sharedIndexInformer being a consumer +// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/ +// src/k8s.io/client-go/tools/cache/shared_informer.go#L192), +// there is no race as knownObjects (s.indexer) is modified safely +// under DeltaFIFO's lock. The only exceptions are GetStore() and +// GetIndexer() methods, which expose ways to modify the underlying +// storage. Currently these two methods are used for creating Lister +// and internal tests. // // Also see the comment on DeltaFIFO. -func NewDeltaFIFO(keyFunc KeyFunc, compressor DeltaCompressor, knownObjects KeyListerGetter) *DeltaFIFO { +func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO { f := &DeltaFIFO{ - items: map[string]Deltas{}, - queue: []string{}, - keyFunc: keyFunc, - deltaCompressor: compressor, - knownObjects: knownObjects, + items: map[string]Deltas{}, + queue: []string{}, + keyFunc: keyFunc, + knownObjects: knownObjects, } f.cond.L = &f.lock return f @@ -86,9 +93,6 @@ func NewDeltaFIFO(keyFunc KeyFunc, compressor DeltaCompressor, knownObjects KeyL // items have been deleted when Replace() or Delete() are called. The deleted // object will be included in the DeleteFinalStateUnknown markers. These objects // could be stale. -// -// You may provide a function to compress deltas (e.g., represent a -// series of Updates as a single Update). type DeltaFIFO struct { // lock/cond protects access to 'items' and 'queue'. lock sync.RWMutex @@ -110,10 +114,6 @@ type DeltaFIFO struct { // insertion and retrieval, and should be deterministic. keyFunc KeyFunc - // deltaCompressor tells us how to combine two or more - // deltas. It may be nil. - deltaCompressor DeltaCompressor - // knownObjects list keys that are "known", for the // purpose of figuring out which items have been deleted // when Replace() or Delete() is called. @@ -133,7 +133,6 @@ var ( var ( // ErrZeroLengthDeltasObject is returned in a KeyError if a Deltas // object with zero length is encountered (should be impossible, - // even if such an object is accidentally produced by a DeltaCompressor-- // but included for completeness). ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key") ) @@ -213,8 +212,6 @@ func (f *DeltaFIFO) Delete(obj interface{}) error { if err == nil && !exists && !itemsExist { // Presumably, this was deleted when a relist happened. // Don't provide a second report of the same deletion. - // TODO(lavalamp): This may be racy-- we aren't properly locked - // with knownObjects. return nil } } @@ -305,8 +302,8 @@ func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool { return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted } -// queueActionLocked appends to the delta list for the object, calling -// f.deltaCompressor if needed. Caller must lock first. +// queueActionLocked appends to the delta list for the object. +// Caller must lock first. 
func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error { id, err := f.KeyOf(obj) if err != nil { @@ -322,9 +319,6 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err newDeltas := append(f.items[id], Delta{actionType, obj}) newDeltas = dedupDeltas(newDeltas) - if f.deltaCompressor != nil { - newDeltas = f.deltaCompressor.Compress(newDeltas) - } _, exists := f.items[id] if len(newDeltas) > 0 { @@ -334,8 +328,7 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err f.items[id] = newDeltas f.cond.Broadcast() } else if exists { - // The compression step removed all deltas, so - // we need to remove this from our map (extra items + // We need to remove this from our map (extra items // in the queue are ignored if they are not in the // map). delete(f.items, id) @@ -355,8 +348,8 @@ func (f *DeltaFIFO) List() []interface{} { func (f *DeltaFIFO) listLocked() []interface{} { list := make([]interface{}, 0, len(f.items)) for _, item := range f.items { - // Copy item's slice so operations on this slice (delta - // compression) won't interfere with the object we return. + // Copy item's slice so operations on this slice + // won't interfere with the object we return. item = copyDeltas(item) list = append(list, item.Newest().Object) } @@ -394,8 +387,8 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err defer f.lock.RUnlock() d, exists := f.items[key] if exists { - // Copy item's slice so operations on this slice (delta - // compression) won't interfere with the object we return. + // Copy item's slice so operations on this slice + // won't interfere with the object we return. d = copyDeltas(d) } return d, exists, nil @@ -503,8 +496,6 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { } // Detect deletions not already in the queue. - // TODO(lavalamp): This may be racy-- we aren't properly locked - // with knownObjects. Unproven. knownKeys := f.knownObjects.ListKeys() queuedDeletions := 0 for _, k := range knownKeys { @@ -539,6 +530,10 @@ func (f *DeltaFIFO) Resync() error { f.lock.Lock() defer f.lock.Unlock() + if f.knownObjects == nil { + return nil + } + keys := f.knownObjects.ListKeys() for _, k := range keys { if err := f.syncKeyLocked(k); err != nil { @@ -599,23 +594,6 @@ type KeyGetter interface { GetByKey(key string) (interface{}, bool, error) } -// DeltaCompressor is an algorithm that removes redundant changes. -type DeltaCompressor interface { - Compress(Deltas) Deltas -} - -// DeltaCompressorFunc should remove redundant changes; but changes that -// are redundant depend on one's desired semantics, so this is an -// injectable function. -// -// DeltaCompressorFunc adapts a raw function to be a DeltaCompressor. -type DeltaCompressorFunc func(Deltas) Deltas - -// Compress just calls dc. -func (dc DeltaCompressorFunc) Compress(d Deltas) Deltas { - return dc(d) -} - // DeltaType is the type of a change (addition, deletion, etc) type DeltaType string @@ -664,7 +642,7 @@ func (d Deltas) Newest() *Delta { // copyDeltas returns a shallow copy of d; that is, it copies the slice but not // the objects in the slice. This allows Get/List to return an object that we -// know won't be clobbered by a subsequent call to a delta compressor. +// know won't be clobbered by a subsequent modifications. 
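With the delta compressor removed above, NewDeltaFIFO takes only a key function and an optional known-objects lister, and Resync becomes a no-op when that lister is nil. A small sketch of both cases:

    package main

    import (
    	"fmt"

    	"k8s.io/client-go/tools/cache"
    )

    func main() {
    	// A backing store that doubles as the known-objects key lister.
    	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
    	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, store)
    	fmt.Println(fifo.HasSynced()) // false: nothing has been delivered yet

    	// Without a known-objects store, Resync now simply returns nil.
    	standalone := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil)
    	fmt.Println(standalone.Resync()) // <nil>
    }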
func copyDeltas(d Deltas) Deltas { d2 := make(Deltas, len(d)) copy(d2, d) diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go index befac1c75..fa88fc407 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -21,7 +21,7 @@ import ( "time" "github.com/golang/glog" - "k8s.io/client-go/util/clock" + "k8s.io/apimachinery/pkg/util/clock" ) // ExpirationCache implements the store interface diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go index ab2f57687..a096765f6 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go @@ -17,8 +17,8 @@ limitations under the License. package cache import ( + "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/util/clock" ) type fakeThreadSafeMap struct { diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go index 3f6e2a948..e05c01ee2 100644 --- a/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -59,7 +59,7 @@ type Queue interface { // has since been added. AddIfNotPresent(interface{}) error - // Return true if the first batch of items has been popped + // HasSynced returns true if the first batch of items has been popped HasSynced() bool // Close queue @@ -169,7 +169,7 @@ func (f *FIFO) AddIfNotPresent(obj interface{}) error { return nil } -// addIfNotPresent assumes the fifo lock is already held and adds the the provided +// addIfNotPresent assumes the fifo lock is already held and adds the provided // item to the queue under id if it does not already exist. func (f *FIFO) addIfNotPresent(id string, obj interface{}) { f.populated = true diff --git a/vendor/k8s.io/client-go/tools/cache/heap.go b/vendor/k8s.io/client-go/tools/cache/heap.go new file mode 100644 index 000000000..78e492455 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/heap.go @@ -0,0 +1,323 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file implements a heap data structure. + +package cache + +import ( + "container/heap" + "fmt" + "sync" +) + +const ( + closedMsg = "heap is closed" +) + +type LessFunc func(interface{}, interface{}) bool +type heapItem struct { + obj interface{} // The object which is stored in the heap. + index int // The index of the object's key in the Heap.queue. +} + +type itemKeyValue struct { + key string + obj interface{} +} + +// heapData is an internal struct that implements the standard heap interface +// and keeps the data stored in the heap. +type heapData struct { + // items is a map from key of the objects to the objects and their index. + // We depend on the property that items in the map are in the queue and vice versa. 
+ items map[string]*heapItem + // queue implements a heap data structure and keeps the order of elements + // according to the heap invariant. The queue keeps the keys of objects stored + // in "items". + queue []string + + // keyFunc is used to make the key used for queued item insertion and retrieval, and + // should be deterministic. + keyFunc KeyFunc + // lessFunc is used to compare two objects in the heap. + lessFunc LessFunc +} + +var ( + _ = heap.Interface(&heapData{}) // heapData is a standard heap +) + +// Less compares two objects and returns true if the first one should go +// in front of the second one in the heap. +func (h *heapData) Less(i, j int) bool { + if i > len(h.queue) || j > len(h.queue) { + return false + } + itemi, ok := h.items[h.queue[i]] + if !ok { + return false + } + itemj, ok := h.items[h.queue[j]] + if !ok { + return false + } + return h.lessFunc(itemi.obj, itemj.obj) +} + +// Len returns the number of items in the Heap. +func (h *heapData) Len() int { return len(h.queue) } + +// Swap implements swapping of two elements in the heap. This is a part of standard +// heap interface and should never be called directly. +func (h *heapData) Swap(i, j int) { + h.queue[i], h.queue[j] = h.queue[j], h.queue[i] + item := h.items[h.queue[i]] + item.index = i + item = h.items[h.queue[j]] + item.index = j +} + +// Push is supposed to be called by heap.Push only. +func (h *heapData) Push(kv interface{}) { + keyValue := kv.(*itemKeyValue) + n := len(h.queue) + h.items[keyValue.key] = &heapItem{keyValue.obj, n} + h.queue = append(h.queue, keyValue.key) +} + +// Pop is supposed to be called by heap.Pop only. +func (h *heapData) Pop() interface{} { + key := h.queue[len(h.queue)-1] + h.queue = h.queue[0 : len(h.queue)-1] + item, ok := h.items[key] + if !ok { + // This is an error + return nil + } + delete(h.items, key) + return item.obj +} + +// Heap is a thread-safe producer/consumer queue that implements a heap data structure. +// It can be used to implement priority queues and similar data structures. +type Heap struct { + lock sync.RWMutex + cond sync.Cond + + // data stores objects and has a queue that keeps their ordering according + // to the heap invariant. + data *heapData + + // closed indicates that the queue is closed. + // It is mainly used to let Pop() exit its control loop while waiting for an item. + closed bool +} + +// Close the Heap and signals condition variables that may be waiting to pop +// items from the heap. +func (h *Heap) Close() { + h.lock.Lock() + defer h.lock.Unlock() + h.closed = true + h.cond.Broadcast() +} + +// Add inserts an item, and puts it in the queue. The item is updated if it +// already exists. +func (h *Heap) Add(obj interface{}) error { + key, err := h.data.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + h.lock.Lock() + defer h.lock.Unlock() + if h.closed { + return fmt.Errorf(closedMsg) + } + if _, exists := h.data.items[key]; exists { + h.data.items[key].obj = obj + heap.Fix(h.data, h.data.items[key].index) + } else { + h.addIfNotPresentLocked(key, obj) + } + h.cond.Broadcast() + return nil +} + +// Adds all the items in the list to the queue and then signals the condition +// variable. It is useful when the caller would like to add all of the items +// to the queue before consumer starts processing them. 
+func (h *Heap) BulkAdd(list []interface{}) error { + h.lock.Lock() + defer h.lock.Unlock() + if h.closed { + return fmt.Errorf(closedMsg) + } + for _, obj := range list { + key, err := h.data.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + if _, exists := h.data.items[key]; exists { + h.data.items[key].obj = obj + heap.Fix(h.data, h.data.items[key].index) + } else { + h.addIfNotPresentLocked(key, obj) + } + } + h.cond.Broadcast() + return nil +} + +// AddIfNotPresent inserts an item, and puts it in the queue. If an item with +// the key is present in the map, no changes is made to the item. +// +// This is useful in a single producer/consumer scenario so that the consumer can +// safely retry items without contending with the producer and potentially enqueueing +// stale items. +func (h *Heap) AddIfNotPresent(obj interface{}) error { + id, err := h.data.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + h.lock.Lock() + defer h.lock.Unlock() + if h.closed { + return fmt.Errorf(closedMsg) + } + h.addIfNotPresentLocked(id, obj) + h.cond.Broadcast() + return nil +} + +// addIfNotPresentLocked assumes the lock is already held and adds the the provided +// item to the queue if it does not already exist. +func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) { + if _, exists := h.data.items[key]; exists { + return + } + heap.Push(h.data, &itemKeyValue{key, obj}) +} + +// Update is the same as Add in this implementation. When the item does not +// exist, it is added. +func (h *Heap) Update(obj interface{}) error { + return h.Add(obj) +} + +// Delete removes an item. +func (h *Heap) Delete(obj interface{}) error { + key, err := h.data.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + h.lock.Lock() + defer h.lock.Unlock() + if item, ok := h.data.items[key]; ok { + heap.Remove(h.data, item.index) + return nil + } + return fmt.Errorf("object not found") +} + +// Pop waits until an item is ready. If multiple items are +// ready, they are returned in the order given by Heap.data.lessFunc. +func (h *Heap) Pop() (interface{}, error) { + h.lock.Lock() + defer h.lock.Unlock() + for len(h.data.queue) == 0 { + // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. + // When Close() is called, the h.closed is set and the condition is broadcast, + // which causes this loop to continue and return from the Pop(). + if h.closed { + return nil, fmt.Errorf("heap is closed") + } + h.cond.Wait() + } + obj := heap.Pop(h.data) + if obj != nil { + return obj, nil + } else { + return nil, fmt.Errorf("object was removed from heap data") + } +} + +// List returns a list of all the items. +func (h *Heap) List() []interface{} { + h.lock.RLock() + defer h.lock.RUnlock() + list := make([]interface{}, 0, len(h.data.items)) + for _, item := range h.data.items { + list = append(list, item.obj) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently in the Heap. +func (h *Heap) ListKeys() []string { + h.lock.RLock() + defer h.lock.RUnlock() + list := make([]string, 0, len(h.data.items)) + for key := range h.data.items { + list = append(list, key) + } + return list +} + +// Get returns the requested item, or sets exists=false. +func (h *Heap) Get(obj interface{}) (interface{}, bool, error) { + key, err := h.data.keyFunc(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + return h.GetByKey(key) +} + +// GetByKey returns the requested item, or sets exists=false. 
+func (h *Heap) GetByKey(key string) (interface{}, bool, error) { + h.lock.RLock() + defer h.lock.RUnlock() + item, exists := h.data.items[key] + if !exists { + return nil, false, nil + } + return item.obj, true, nil +} + +// IsClosed returns true if the queue is closed. +func (h *Heap) IsClosed() bool { + h.lock.RLock() + defer h.lock.RUnlock() + if h.closed { + return true + } + return false +} + +// NewHeap returns a Heap which can be used to queue up items to process. +func NewHeap(keyFn KeyFunc, lessFn LessFunc) *Heap { + h := &Heap{ + data: &heapData{ + items: map[string]*heapItem{}, + queue: []string{}, + keyFunc: keyFn, + lessFunc: lessFn, + }, + } + h.cond.L = &h.lock + return h +} diff --git a/vendor/k8s.io/client-go/tools/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go index 42fc6c7a2..15acb168e 100644 --- a/vendor/k8s.io/client-go/tools/cache/index.go +++ b/vendor/k8s.io/client-go/tools/cache/index.go @@ -28,6 +28,8 @@ type Indexer interface { Store // Retrieve list of objects that match on the named indexing function Index(indexName string, obj interface{}) ([]interface{}, error) + // IndexKeys returns the set of keys that match on the named indexing function. + IndexKeys(indexName, indexKey string) ([]string, error) // ListIndexFuncValues returns the list of generated values of an Index func ListIndexFuncValues(indexName string) []string // ByIndex lists object that match on the named indexing function with the exact key diff --git a/vendor/k8s.io/client-go/tools/cache/listwatch.go b/vendor/k8s.io/client-go/tools/cache/listwatch.go index af01d4745..8bf41f517 100644 --- a/vendor/k8s.io/client-go/tools/cache/listwatch.go +++ b/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -17,14 +17,17 @@ limitations under the License. package cache import ( + "context" "time" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/pager" ) // ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. @@ -48,6 +51,8 @@ type WatchFunc func(options metav1.ListOptions) (watch.Interface, error) type ListWatch struct { ListFunc ListFunc WatchFunc WatchFunc + // DisableChunking requests no chunking for this list watcher. + DisableChunking bool } // Getter interface knows how to access Get method from RESTClient. @@ -57,22 +62,32 @@ type Getter interface { // NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector. func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch { + optionsModifier := func(options *metav1.ListOptions) { + options.FieldSelector = fieldSelector.String() + } + return NewFilteredListWatchFromClient(c, resource, namespace, optionsModifier) +} + +// NewFilteredListWatchFromClient creates a new ListWatch from the specified client, resource, namespace, and option modifier. +// Option modifier is a function takes a ListOptions and modifies the consumed ListOptions. Provide customized modifier function +// to apply modification to ListOptions with a field selector, a label selector, or any other desired options. 
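The new heap.go above adds a thread-safe Heap keyed by a KeyFunc and ordered by a LessFunc. A small usage sketch; the task type and its priority field are made up purely for illustration:

package main

import (
    "fmt"

    "k8s.io/client-go/tools/cache"
)

// task is a hypothetical item type; only Name (the key) and Priority matter here.
type task struct {
    Name     string
    Priority int
}

func main() {
    keyFn := func(obj interface{}) (string, error) { return obj.(*task).Name, nil }
    lessFn := func(a, b interface{}) bool { return a.(*task).Priority < b.(*task).Priority }

    h := cache.NewHeap(keyFn, lessFn)
    _ = h.Add(&task{Name: "low", Priority: 10})
    _ = h.Add(&task{Name: "high", Priority: 1})

    // Pop blocks until an item is available and returns the smallest item
    // according to lessFn; here that is the Priority=1 task.
    obj, err := h.Pop()
    fmt.Println(obj, err)

    h.Close() // unblocks any Pop that is still waiting
}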
+func NewFilteredListWatchFromClient(c Getter, resource string, namespace string, optionsModifier func(options *metav1.ListOptions)) *ListWatch { listFunc := func(options metav1.ListOptions) (runtime.Object, error) { + optionsModifier(&options) return c.Get(). Namespace(namespace). Resource(resource). VersionedParams(&options, metav1.ParameterCodec). - FieldsSelectorParam(fieldSelector). Do(). Get() } watchFunc := func(options metav1.ListOptions) (watch.Interface, error) { options.Watch = true + optionsModifier(&options) return c.Get(). Namespace(namespace). Resource(resource). VersionedParams(&options, metav1.ParameterCodec). - FieldsSelectorParam(fieldSelector). Watch() } return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc} @@ -87,6 +102,9 @@ func timeoutFromListOptions(options metav1.ListOptions) time.Duration { // List a set of apiserver resources func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) { + if !lw.DisableChunking { + return pager.New(pager.SimplePageFunc(lw.ListFunc)).List(context.TODO(), options) + } return lw.ListFunc(options) } @@ -95,6 +113,8 @@ func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) return lw.WatchFunc(options) } +// ListWatchUntil checks the provided conditions against the items returned by the list watcher, returning wait.ErrWaitTimeout +// if timeout is exceeded without all conditions returning true, or an error if an error occurs. // TODO: check for watch expired error and retry watch from latest point? Same issue exists for Until. func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch.ConditionFunc) (*watch.Event, error) { if len(conditions) == 0 { @@ -158,5 +178,10 @@ func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch return nil, err } - return watch.Until(timeout, watchInterface, remainingConditions...) + evt, err := watch.Until(timeout, watchInterface, remainingConditions...) + if err == watch.ErrWatchClosed { + // present a consistent error interface to callers + err = wait.ErrWaitTimeout + } + return evt, err } diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go new file mode 100644 index 000000000..cbb6434eb --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go @@ -0,0 +1,261 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "strconv" + "sync" + "time" + + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + utilcache "k8s.io/apimachinery/pkg/util/cache" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" +) + +// MutationCache is able to take the result of update operations and stores them in an LRU +// that can be used to provide a more current view of a requested object. It requires interpreting +// resourceVersions for comparisons. +// Implementations must be thread-safe. 
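NewFilteredListWatchFromClient above replaces the fixed field-selector argument with an options-modifier callback that is applied to every List and Watch. A hedged sketch of how a caller might build a pod ListWatch filtered by a field and a label selector; the clientset wiring and the selector values are examples, not part of the patch:

package example

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
)

func newPodListWatch(clientset *kubernetes.Clientset) *cache.ListWatch {
    // The modifier mutates the ListOptions used for both List and Watch calls.
    optionsModifier := func(options *metav1.ListOptions) {
        options.FieldSelector = "status.phase=Running"
        options.LabelSelector = "app=example"
    }
    return cache.NewFilteredListWatchFromClient(
        clientset.CoreV1().RESTClient(), // satisfies the Getter interface
        "pods",
        metav1.NamespaceAll,
        optionsModifier,
    )
}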
+// TODO find a way to layer this into an informer/lister +type MutationCache interface { + GetByKey(key string) (interface{}, bool, error) + ByIndex(indexName, indexKey string) ([]interface{}, error) + Mutation(interface{}) +} + +type ResourceVersionComparator interface { + CompareResourceVersion(lhs, rhs runtime.Object) int +} + +// NewIntegerResourceVersionMutationCache returns a MutationCache that understands how to +// deal with objects that have a resource version that: +// +// - is an integer +// - increases when updated +// - is comparable across the same resource in a namespace +// +// Most backends will have these semantics. Indexer may be nil. ttl controls how long an item +// remains in the mutation cache before it is removed. +// +// If includeAdds is true, objects in the mutation cache will be returned even if they don't exist +// in the underlying store. This is only safe if your use of the cache can handle mutation entries +// remaining in the cache for up to ttl when mutations and deletes occur very closely in time. +func NewIntegerResourceVersionMutationCache(backingCache Store, indexer Indexer, ttl time.Duration, includeAdds bool) MutationCache { + return &mutationCache{ + backingCache: backingCache, + indexer: indexer, + mutationCache: utilcache.NewLRUExpireCache(100), + comparator: etcdObjectVersioner{}, + ttl: ttl, + includeAdds: includeAdds, + } +} + +// mutationCache doesn't guarantee that it returns values added via Mutation since they can page out and +// since you can't distinguish between, "didn't observe create" and "was deleted after create", +// if the key is missing from the backing cache, we always return it as missing +type mutationCache struct { + lock sync.Mutex + backingCache Store + indexer Indexer + mutationCache *utilcache.LRUExpireCache + includeAdds bool + ttl time.Duration + + comparator ResourceVersionComparator +} + +// GetByKey is never guaranteed to return back the value set in Mutation. It could be paged out, it could +// be older than another copy, the backingCache may be more recent or, you might have written twice into the same key. +// You get a value that was valid at some snapshot of time and will always return the newer of backingCache and mutationCache. +func (c *mutationCache) GetByKey(key string) (interface{}, bool, error) { + c.lock.Lock() + defer c.lock.Unlock() + + obj, exists, err := c.backingCache.GetByKey(key) + if err != nil { + return nil, false, err + } + if !exists { + if !c.includeAdds { + // we can't distinguish between, "didn't observe create" and "was deleted after create", so + // if the key is missing, we always return it as missing + return nil, false, nil + } + obj, exists = c.mutationCache.Get(key) + if !exists { + return nil, false, nil + } + } + objRuntime, ok := obj.(runtime.Object) + if !ok { + return obj, true, nil + } + return c.newerObject(key, objRuntime), true, nil +} + +// ByIndex returns the newer objects that match the provided index and indexer key. +// Will return an error if no indexer was provided. 
+func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, error) { + c.lock.Lock() + defer c.lock.Unlock() + if c.indexer == nil { + return nil, fmt.Errorf("no indexer has been provided to the mutation cache") + } + keys, err := c.indexer.IndexKeys(name, indexKey) + if err != nil { + return nil, err + } + var items []interface{} + keySet := sets.NewString() + for _, key := range keys { + keySet.Insert(key) + obj, exists, err := c.indexer.GetByKey(key) + if err != nil { + return nil, err + } + if !exists { + continue + } + if objRuntime, ok := obj.(runtime.Object); ok { + items = append(items, c.newerObject(key, objRuntime)) + } else { + items = append(items, obj) + } + } + + if c.includeAdds { + fn := c.indexer.GetIndexers()[name] + // Keys() is returned oldest to newest, so full traversal does not alter the LRU behavior + for _, key := range c.mutationCache.Keys() { + updated, ok := c.mutationCache.Get(key) + if !ok { + continue + } + if keySet.Has(key.(string)) { + continue + } + elements, err := fn(updated) + if err != nil { + glog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err) + continue + } + for _, inIndex := range elements { + if inIndex != indexKey { + continue + } + items = append(items, updated) + break + } + } + } + + return items, nil +} + +// newerObject checks the mutation cache for a newer object and returns one if found. If the +// mutated object is older than the backing object, it is removed from the Must be +// called while the lock is held. +func (c *mutationCache) newerObject(key string, backing runtime.Object) runtime.Object { + mutatedObj, exists := c.mutationCache.Get(key) + if !exists { + return backing + } + mutatedObjRuntime, ok := mutatedObj.(runtime.Object) + if !ok { + return backing + } + if c.comparator.CompareResourceVersion(backing, mutatedObjRuntime) >= 0 { + c.mutationCache.Remove(key) + return backing + } + return mutatedObjRuntime +} + +// Mutation adds a change to the cache that can be returned in GetByKey if it is newer than the backingCache +// copy. If you call Mutation twice with the same object on different threads, one will win, but its not defined +// which one. This doesn't affect correctness, since the GetByKey guaranteed of "later of these two caches" is +// preserved, but you may not get the version of the object you want. The object you get is only guaranteed to +// "one that was valid at some point in time", not "the one that I want". +func (c *mutationCache) Mutation(obj interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + key, err := DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + // this is a "nice to have", so failures shouldn't do anything weird + utilruntime.HandleError(err) + return + } + + if objRuntime, ok := obj.(runtime.Object); ok { + if mutatedObj, exists := c.mutationCache.Get(key); exists { + if mutatedObjRuntime, ok := mutatedObj.(runtime.Object); ok { + if c.comparator.CompareResourceVersion(objRuntime, mutatedObjRuntime) < 0 { + return + } + } + } + } + c.mutationCache.Add(key, obj, c.ttl) +} + +// etcdObjectVersioner implements versioning and extracting etcd node information +// for objects that have an embedded ObjectMeta or ListMeta field. 
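The new mutation_cache.go above layers an LRU of locally observed writes in front of a backing store; its ByIndex path relies on the IndexKeys method this patch adds to Indexer. A rough usage sketch, assuming a controller that records its own update results so later reads prefer them over a stale backing copy (the informer and the update call site are placeholders):

package example

import (
    "time"

    "k8s.io/client-go/tools/cache"
)

func newExampleMutationCache(informer cache.SharedIndexInformer) cache.MutationCache {
    // Back the mutation cache with the informer's store and indexer.
    // includeAdds=false means objects absent from the backing store stay absent.
    return cache.NewIntegerResourceVersionMutationCache(
        informer.GetStore(),
        informer.GetIndexer(),
        time.Hour, // ttl for mutation entries
        false,     // includeAdds
    )
}

// recordUpdate stores the object returned by a successful update so GetByKey
// returns it until the watch delivers an equal or newer resource version.
func recordUpdate(mc cache.MutationCache, updated interface{}) {
    mc.Mutation(updated)
}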
+type etcdObjectVersioner struct{} + +// ObjectResourceVersion implements Versioner +func (a etcdObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return 0, err + } + version := accessor.GetResourceVersion() + if len(version) == 0 { + return 0, nil + } + return strconv.ParseUint(version, 10, 64) +} + +// CompareResourceVersion compares etcd resource versions. Outside this API they are all strings, +// but etcd resource versions are special, they're actually ints, so we can easily compare them. +func (a etcdObjectVersioner) CompareResourceVersion(lhs, rhs runtime.Object) int { + lhsVersion, err := a.ObjectResourceVersion(lhs) + if err != nil { + // coder error + panic(err) + } + rhsVersion, err := a.ObjectResourceVersion(rhs) + if err != nil { + // coder error + panic(err) + } + + if lhsVersion == rhsVersion { + return 0 + } + if lhsVersion < rhsVersion { + return -1 + } + + return 1 +} diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go index cc7399114..e2aa44848 100644 --- a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go @@ -24,9 +24,10 @@ import ( "sync" "time" + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/diff" - "k8s.io/client-go/pkg/api" ) var mutationDetectionEnabled = false @@ -44,6 +45,7 @@ func NewCacheMutationDetector(name string) CacheMutationDetector { if !mutationDetectionEnabled { return dummyMutationDetector{} } + glog.Warningln("Mutation detector is enabled, this will result in memory leakage.") return &defaultCacheMutationDetector{name: name, period: 1 * time.Second} } @@ -79,17 +81,15 @@ type cacheObj struct { func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) { // we DON'T want protection from panics. If we're running this code, we want to die - go func() { - for { - d.CompareObjects() + for { + d.CompareObjects() - select { - case <-stopCh: - return - case <-time.After(d.period): - } + select { + case <-stopCh: + return + case <-time.After(d.period): } - }() + } } // AddObject makes a deep copy of the object for later comparison. 
It only works on runtime.Object @@ -98,18 +98,13 @@ func (d *defaultCacheMutationDetector) AddObject(obj interface{}) { if _, ok := obj.(DeletedFinalStateUnknown); ok { return } - if _, ok := obj.(runtime.Object); !ok { - return - } + if obj, ok := obj.(runtime.Object); ok { + copiedObj := obj.DeepCopyObject() - copiedObj, err := api.Scheme.Copy(obj.(runtime.Object)) - if err != nil { - return + d.lock.Lock() + defer d.lock.Unlock() + d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj}) } - - d.lock.Lock() - defer d.lock.Unlock() - d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj}) } func (d *defaultCacheMutationDetector) CompareObjects() { diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index b43f43a5e..054a7373c 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -30,6 +30,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "syscall" "time" @@ -38,16 +39,18 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/util/clock" ) // Reflector watches a specified resource and causes all changes to be reflected in the given store. type Reflector struct { // name identifies this reflector. By default it will be a file:line if possible. name string + // metrics tracks basic metric information about the reflector + metrics *reflectorMetrics // The type of object we expect to place in the store. expectedType reflect.Type @@ -96,10 +99,17 @@ func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyn return NewNamedReflector(getDefaultReflectorName(internalPackages...), lw, expectedType, store, resyncPeriod) } +// reflectorDisambiguator is used to disambiguate started reflectors. +// initialized to an unstable value to ensure meaning isn't attributed to the suffix. +var reflectorDisambiguator = int64(time.Now().UnixNano() % 12345) + // NewNamedReflector same as NewReflector, but with a specified name for logging func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1) r := &Reflector{ - name: name, + name: name, + // we need this to be unique per process (some names are still the same) but obvious who it belongs to + metrics: newReflectorMetrics(makeValidPrometheusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))), listerWatcher: lw, store: store, expectedType: reflect.TypeOf(expectedType), @@ -110,6 +120,11 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, return r } +func makeValidPrometheusMetricLabel(in string) string { + // this isn't perfect, but it removes our common characters + return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in) +} + // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common // call chains to NewReflector, so they'd be low entropy names for reflectors var internalPackages = []string{"client-go/tools/cache/", "/runtime/asm_"} @@ -182,21 +197,10 @@ func extractStackCreator() (string, int, bool) { } // Run starts a watch and handles watch events. 
Will restart the watch if it is closed. -// Run starts a goroutine and returns immediately. -func (r *Reflector) Run() { +// Run will exit when stopCh is closed. +func (r *Reflector) Run(stopCh <-chan struct{}) { glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) - go wait.Until(func() { - if err := r.ListAndWatch(wait.NeverStop); err != nil { - utilruntime.HandleError(err) - } - }, r.period, wait.NeverStop) -} - -// RunUntil starts a watch and handles watch events. Will restart the watch if it is closed. -// RunUntil starts a goroutine and returns immediately. It will exit when stopCh is closed. -func (r *Reflector) RunUntil(stopCh <-chan struct{}) { - glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) - go wait.Until(func() { + wait.Until(func() { if err := r.ListAndWatch(stopCh); err != nil { utilruntime.HandleError(err) } @@ -235,17 +239,18 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name) var resourceVersion string - resyncCh, cleanup := r.resyncChan() - defer cleanup() // Explicitly set "0" as resource version - it's fine for the List() // to be served from cache and potentially be delayed relative to // etcd contents. Reflector framework will catch up via Watch() eventually. options := metav1.ListOptions{ResourceVersion: "0"} + r.metrics.numberOfLists.Inc() + start := r.clock.Now() list, err := r.listerWatcher.List(options) if err != nil { return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err) } + r.metrics.listDuration.Observe(time.Since(start).Seconds()) listMetaInterface, err := meta.ListAccessor(list) if err != nil { return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err) @@ -255,6 +260,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { if err != nil { return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err) } + r.metrics.numberOfItemsInList.Observe(float64(len(items))) if err := r.syncWith(items, resourceVersion); err != nil { return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err) } @@ -264,6 +270,10 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { cancelCh := make(chan struct{}) defer close(cancelCh) go func() { + resyncCh, cleanup := r.resyncChan() + defer func() { + cleanup() // Call the last one written into cleanup + }() for { select { case <-resyncCh: @@ -285,14 +295,22 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { }() for { - timemoutseconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors + select { + case <-stopCh: + return nil + default: + } + + timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) options = metav1.ListOptions{ ResourceVersion: resourceVersion, // We want to avoid situations of hanging watchers. Stop any wachers that do not // receive any events within the timeout window. 
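The reflector hunk above merges Run and RunUntil: Run now takes a stop channel and blocks until it is closed, so callers that relied on the old fire-and-forget behaviour must start it in their own goroutine. A migration sketch; the lister-watcher and store are assumed to exist already and the reflector name is illustrative:

package example

import (
    "time"

    v1 "k8s.io/api/core/v1"
    "k8s.io/client-go/tools/cache"
)

func startReflector(lw cache.ListerWatcher, store cache.Store, stopCh <-chan struct{}) {
    r := cache.NewNamedReflector("example-pod-reflector", lw, &v1.Pod{}, store, 30*time.Second)

    // Before this change: r.Run() or r.RunUntil(stopCh) started a goroutine internally.
    // Now Run blocks, so run it on a goroutine and let stopCh end it.
    go r.Run(stopCh)
}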
- TimeoutSeconds: &timemoutseconds, + TimeoutSeconds: &timeoutSeconds, } + r.metrics.numberOfWatches.Inc() w, err := r.listerWatcher.Watch(options) if err != nil { switch err { @@ -344,6 +362,11 @@ func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, err // Stopping the watcher should be idempotent and if we return from this function there's no way // we're coming back in with the same watch interface. defer w.Stop() + // update metrics + defer func() { + r.metrics.numberOfItemsInWatch.Observe(float64(eventCount)) + r.metrics.watchDuration.Observe(time.Since(start).Seconds()) + }() loop: for { @@ -399,8 +422,8 @@ loop: watchDuration := r.clock.Now().Sub(start) if watchDuration < 1*time.Second && eventCount == 0 { - glog.V(4).Infof("%s: Unexpected watch close - watch lasted less than a second and no items received", r.name) - return errors.New("very short watch") + r.metrics.numberOfShortWatches.Inc() + return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name) } glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount) return nil @@ -418,4 +441,9 @@ func (r *Reflector) setLastSyncResourceVersion(v string) { r.lastSyncResourceVersionMutex.Lock() defer r.lastSyncResourceVersionMutex.Unlock() r.lastSyncResourceVersion = v + + rv, err := strconv.Atoi(v) + if err == nil { + r.metrics.lastResourceVersion.Set(float64(rv)) + } } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go new file mode 100644 index 000000000..0945e5c3a --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go @@ -0,0 +1,119 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file provides abstractions for setting the provider (e.g., prometheus) +// of metrics. + +package cache + +import ( + "sync" +) + +// GaugeMetric represents a single numerical value that can arbitrarily go up +// and down. +type GaugeMetric interface { + Set(float64) +} + +// CounterMetric represents a single numerical value that only ever +// goes up. +type CounterMetric interface { + Inc() +} + +// SummaryMetric captures individual observations. +type SummaryMetric interface { + Observe(float64) +} + +type noopMetric struct{} + +func (noopMetric) Inc() {} +func (noopMetric) Dec() {} +func (noopMetric) Observe(float64) {} +func (noopMetric) Set(float64) {} + +type reflectorMetrics struct { + numberOfLists CounterMetric + listDuration SummaryMetric + numberOfItemsInList SummaryMetric + + numberOfWatches CounterMetric + numberOfShortWatches CounterMetric + watchDuration SummaryMetric + numberOfItemsInWatch SummaryMetric + + lastResourceVersion GaugeMetric +} + +// MetricsProvider generates various metrics used by the reflector. 
+type MetricsProvider interface { + NewListsMetric(name string) CounterMetric + NewListDurationMetric(name string) SummaryMetric + NewItemsInListMetric(name string) SummaryMetric + + NewWatchesMetric(name string) CounterMetric + NewShortWatchesMetric(name string) CounterMetric + NewWatchDurationMetric(name string) SummaryMetric + NewItemsInWatchMetric(name string) SummaryMetric + + NewLastResourceVersionMetric(name string) GaugeMetric +} + +type noopMetricsProvider struct{} + +func (noopMetricsProvider) NewListsMetric(name string) CounterMetric { return noopMetric{} } +func (noopMetricsProvider) NewListDurationMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewItemsInListMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewWatchesMetric(name string) CounterMetric { return noopMetric{} } +func (noopMetricsProvider) NewShortWatchesMetric(name string) CounterMetric { return noopMetric{} } +func (noopMetricsProvider) NewWatchDurationMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewItemsInWatchMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewLastResourceVersionMetric(name string) GaugeMetric { + return noopMetric{} +} + +var metricsFactory = struct { + metricsProvider MetricsProvider + setProviders sync.Once +}{ + metricsProvider: noopMetricsProvider{}, +} + +func newReflectorMetrics(name string) *reflectorMetrics { + var ret *reflectorMetrics + if len(name) == 0 { + return ret + } + return &reflectorMetrics{ + numberOfLists: metricsFactory.metricsProvider.NewListsMetric(name), + listDuration: metricsFactory.metricsProvider.NewListDurationMetric(name), + numberOfItemsInList: metricsFactory.metricsProvider.NewItemsInListMetric(name), + numberOfWatches: metricsFactory.metricsProvider.NewWatchesMetric(name), + numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name), + watchDuration: metricsFactory.metricsProvider.NewWatchDurationMetric(name), + numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name), + lastResourceVersion: metricsFactory.metricsProvider.NewLastResourceVersionMetric(name), + } +} + +// SetReflectorMetricsProvider sets the metrics provider +func SetReflectorMetricsProvider(metricsProvider MetricsProvider) { + metricsFactory.setProviders.Do(func() { + metricsFactory.metricsProvider = metricsProvider + }) +} diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index 1349f335d..5f8c507f9 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -22,9 +22,11 @@ import ( "time" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/clock" + "k8s.io/client-go/util/buffer" + "k8s.io/client-go/util/retry" "github.com/golang/glog" ) @@ -92,11 +94,16 @@ func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEve // InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. 
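reflector_metrics.go above defines small CounterMetric/GaugeMetric/SummaryMetric interfaces plus a MetricsProvider hook installed once via SetReflectorMetricsProvider. A minimal sketch of a provider using a trivial in-memory metric instead of a real metrics library, purely to show the shape of the interface (a production provider would return Prometheus collectors):

package example

import (
    "sync/atomic"

    "k8s.io/client-go/tools/cache"
)

// simpleMetric satisfies CounterMetric, GaugeMetric and SummaryMetric with a
// single atomic counter; it only demonstrates the interface, not real metrics.
type simpleMetric struct{ value uint64 }

func (m *simpleMetric) Inc()              { atomic.AddUint64(&m.value, 1) }
func (m *simpleMetric) Set(v float64)     { atomic.StoreUint64(&m.value, uint64(v)) }
func (m *simpleMetric) Observe(v float64) { atomic.AddUint64(&m.value, 1) }

type simpleProvider struct{}

func (simpleProvider) NewListsMetric(name string) cache.CounterMetric         { return &simpleMetric{} }
func (simpleProvider) NewListDurationMetric(name string) cache.SummaryMetric  { return &simpleMetric{} }
func (simpleProvider) NewItemsInListMetric(name string) cache.SummaryMetric   { return &simpleMetric{} }
func (simpleProvider) NewWatchesMetric(name string) cache.CounterMetric       { return &simpleMetric{} }
func (simpleProvider) NewShortWatchesMetric(name string) cache.CounterMetric  { return &simpleMetric{} }
func (simpleProvider) NewWatchDurationMetric(name string) cache.SummaryMetric { return &simpleMetric{} }
func (simpleProvider) NewItemsInWatchMetric(name string) cache.SummaryMetric  { return &simpleMetric{} }
func (simpleProvider) NewLastResourceVersionMetric(name string) cache.GaugeMetric {
    return &simpleMetric{}
}

func init() {
    // Only the first call takes effect; later calls are ignored by the sync.Once guard.
    cache.SetReflectorMetricsProvider(simpleProvider{})
}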
type InformerSynced func() bool -// syncedPollPeriod controls how often you look at the status of your sync funcs -const syncedPollPeriod = 100 * time.Millisecond +const ( + // syncedPollPeriod controls how often you look at the status of your sync funcs + syncedPollPeriod = 100 * time.Millisecond + + // initialBufferSize is the initial number of event notifications that can be buffered. + initialBufferSize = 1024 +) // WaitForCacheSync waits for caches to populate. It returns true if it was successful, false -// if the contoller should shutdown +// if the controller should shutdown func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { err := wait.PollUntil(syncedPollPeriod, func() (bool, error) { @@ -138,19 +145,16 @@ type sharedIndexInformer struct { // clock allows for testability clock clock.Clock - started bool - startedLock sync.Mutex + started, stopped bool + startedLock sync.Mutex // blockDeltas gives a way to stop all event distribution so that a late event handler // can safely join the shared informer. blockDeltas sync.Mutex - // stopCh is the channel used to stop the main Run process. We have to track it so that - // late joiners can have a proper stop - stopCh <-chan struct{} } // dummyController hides the fact that a SharedInformer is different from a dedicated one -// where a caller can `Run`. The run method is disonnected in this case, because higher +// where a caller can `Run`. The run method is disconnected in this case, because higher // level logic will decide when to start the SharedInformer and related controller. // Because returning information back is always asynchronous, the legacy callers shouldn't // notice any change in behavior. @@ -185,7 +189,7 @@ type deleteNotification struct { func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() - fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, s.indexer) + fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, s.indexer) cfg := &Config{ Queue: fifo, @@ -207,16 +211,20 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { s.started = true }() - s.stopCh = stopCh - s.cacheMutationDetector.Run(stopCh) - s.processor.run(stopCh) - s.controller.Run(stopCh) -} + // Separate stop channel because Processor should be stopped strictly after controller + processorStopCh := make(chan struct{}) + var wg wait.Group + defer wg.Wait() // Wait for Processor to stop + defer close(processorStopCh) // Tell Processor to stop + wg.StartWithChannel(processorStopCh, s.cacheMutationDetector.Run) + wg.StartWithChannel(processorStopCh, s.processor.run) -func (s *sharedIndexInformer) isStarted() bool { - s.startedLock.Lock() - defer s.startedLock.Unlock() - return s.started + defer func() { + s.startedLock.Lock() + defer s.startedLock.Unlock() + s.stopped = true // Don't want any new listeners + }() + s.controller.Run(stopCh) } func (s *sharedIndexInformer) HasSynced() bool { @@ -287,6 +295,11 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv s.startedLock.Lock() defer s.startedLock.Unlock() + if s.stopped { + glog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler) + return + } + if resyncPeriod > 0 { if resyncPeriod < minimumResyncPeriod { glog.Warningf("resyncPeriod %d is too small. 
Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) @@ -307,7 +320,7 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv } } - listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now()) + listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize) if !s.started { s.processor.addListener(listener) @@ -323,13 +336,8 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv defer s.blockDeltas.Unlock() s.processor.addListener(listener) - - go listener.run(s.stopCh) - go listener.pop(s.stopCh) - - items := s.indexer.List() - for i := range items { - listener.add(addNotification{newObj: items[i]}) + for _, item := range s.indexer.List() { + listener.add(addNotification{newObj: item}) } } @@ -365,16 +373,26 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { } type sharedProcessor struct { + listenersStarted bool listenersLock sync.RWMutex listeners []*processorListener syncingListeners []*processorListener clock clock.Clock + wg wait.Group } func (p *sharedProcessor) addListener(listener *processorListener) { p.listenersLock.Lock() defer p.listenersLock.Unlock() + p.addListenerLocked(listener) + if p.listenersStarted { + p.wg.Start(listener.run) + p.wg.Start(listener.pop) + } +} + +func (p *sharedProcessor) addListenerLocked(listener *processorListener) { p.listeners = append(p.listeners, listener) p.syncingListeners = append(p.syncingListeners, listener) } @@ -395,13 +413,22 @@ func (p *sharedProcessor) distribute(obj interface{}, sync bool) { } func (p *sharedProcessor) run(stopCh <-chan struct{}) { + func() { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + for _, listener := range p.listeners { + p.wg.Start(listener.run) + p.wg.Start(listener.pop) + } + p.listenersStarted = true + }() + <-stopCh p.listenersLock.RLock() defer p.listenersLock.RUnlock() - for _, listener := range p.listeners { - go listener.run(stopCh) - go listener.pop(stopCh) + close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop } + p.wg.Wait() // Wait for all .pop() and .run() to stop } // shouldResync queries every listener to determine if any of them need a resync, based on each @@ -437,21 +464,18 @@ func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Durati } type processorListener struct { - // lock/cond protects access to 'pendingNotifications'. - lock sync.RWMutex - cond sync.Cond - - // pendingNotifications is an unbounded slice that holds all notifications not yet distributed - // there is one per listener, but a failing/stalled listener will have infinite pendingNotifications - // added until we OOM. - // TODO This is no worse that before, since reflectors were backed by unbounded DeltaFIFOs, but - // we should try to do something better - pendingNotifications []interface{} - nextCh chan interface{} + addCh chan interface{} handler ResourceEventHandler + // pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed. + // There is one per listener, but a failing/stalled listener will have infinite pendingNotifications + // added until we OOM. + // TODO: This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but + // we should try to do something better. 
+ pendingNotifications buffer.RingGrowing + // requestedResyncPeriod is how frequently the listener wants a full resync from the shared informer requestedResyncPeriod time.Duration // resyncPeriod is how frequently the listener wants a full resync from the shared informer. This @@ -464,93 +488,85 @@ type processorListener struct { resyncLock sync.Mutex } -func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time) *processorListener { +func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener { ret := &processorListener{ - pendingNotifications: []interface{}{}, nextCh: make(chan interface{}), + addCh: make(chan interface{}), handler: handler, + pendingNotifications: *buffer.NewRingGrowing(bufferSize), requestedResyncPeriod: requestedResyncPeriod, resyncPeriod: resyncPeriod, } - ret.cond.L = &ret.lock - ret.determineNextResync(now) return ret } func (p *processorListener) add(notification interface{}) { - p.lock.Lock() - defer p.lock.Unlock() - - p.pendingNotifications = append(p.pendingNotifications, notification) - p.cond.Broadcast() + p.addCh <- notification } -func (p *processorListener) pop(stopCh <-chan struct{}) { +func (p *processorListener) pop() { defer utilruntime.HandleCrash() + defer close(p.nextCh) // Tell .run() to stop + var nextCh chan<- interface{} + var notification interface{} for { - blockingGet := func() (interface{}, bool) { - p.lock.Lock() - defer p.lock.Unlock() - - for len(p.pendingNotifications) == 0 { - // check if we're shutdown - select { - case <-stopCh: - return nil, true - default: - } - p.cond.Wait() + select { + case nextCh <- notification: + // Notification dispatched + var ok bool + notification, ok = p.pendingNotifications.ReadOne() + if !ok { // Nothing to pop + nextCh = nil // Disable this select case + } + case notificationToAdd, ok := <-p.addCh: + if !ok { + return + } + if notification == nil { // No notification to pop (and pendingNotifications is empty) + // Optimize the case - skip adding to pendingNotifications + notification = notificationToAdd + nextCh = p.nextCh + } else { // There is already a notification waiting to be dispatched + p.pendingNotifications.WriteOne(notificationToAdd) } - - nt := p.pendingNotifications[0] - p.pendingNotifications = p.pendingNotifications[1:] - return nt, false - } - - notification, stopped := blockingGet() - if stopped { - return - } - - select { - case <-stopCh: - return - case p.nextCh <- notification: } } } -func (p *processorListener) run(stopCh <-chan struct{}) { - defer utilruntime.HandleCrash() +func (p *processorListener) run() { + // this call blocks until the channel is closed. When a panic happens during the notification + // we will catch it, **the offending item will be skipped!**, and after a short delay (one second) + // the next notification will be attempted. This is usually better than the alternative of never + // delivering again. 
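The processorListener rewrite above drops the locked pendingNotifications slice in favour of a RingGrowing buffer from k8s.io/client-go/util/buffer feeding a select loop. A small sketch of the WriteOne/ReadOne contract as used there; the values and initial size are illustrative:

package example

import (
    "fmt"

    "k8s.io/client-go/util/buffer"
)

func ringExample() {
    // The argument is only the initial capacity; the ring grows when writes
    // outpace reads, which is what keeps a slow listener from blocking adds.
    ring := buffer.NewRingGrowing(4)

    ring.WriteOne("first")
    ring.WriteOne("second")

    for {
        v, ok := ring.ReadOne()
        if !ok { // buffer drained
            break
        }
        fmt.Println(v)
    }
}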
+ stopCh := make(chan struct{}) + wait.Until(func() { + // this gives us a few quick retries before a long pause and then a few more quick retries + err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) { + for next := range p.nextCh { + switch notification := next.(type) { + case updateNotification: + p.handler.OnUpdate(notification.oldObj, notification.newObj) + case addNotification: + p.handler.OnAdd(notification.newObj) + case deleteNotification: + p.handler.OnDelete(notification.oldObj) + default: + utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next)) + } + } + // the only way to get here is if the p.nextCh is empty and closed + return true, nil + }) - for { - var next interface{} - select { - case <-stopCh: - func() { - p.lock.Lock() - defer p.lock.Unlock() - p.cond.Broadcast() - }() - return - case next = <-p.nextCh: + // the only way to get here is if the p.nextCh is empty and closed + if err == nil { + close(stopCh) } - - switch notification := next.(type) { - case updateNotification: - p.handler.OnUpdate(notification.oldObj, notification.newObj) - case addNotification: - p.handler.OnAdd(notification.newObj) - case deleteNotification: - p.handler.OnDelete(notification.oldObj) - default: - utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next)) - } - } + }, 1*time.Minute, stopCh) } // shouldResync deterimines if the listener needs a resync. If the listener's resyncPeriod is 0, diff --git a/vendor/k8s.io/client-go/tools/cache/store.go b/vendor/k8s.io/client-go/tools/cache/store.go index ee6b3bd46..4958987f0 100755 --- a/vendor/k8s.io/client-go/tools/cache/store.go +++ b/vendor/k8s.io/client-go/tools/cache/store.go @@ -172,6 +172,10 @@ func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error) return c.cacheStorage.Index(indexName, obj) } +func (c *cache) IndexKeys(indexName, indexKey string) ([]string, error) { + return c.cacheStorage.IndexKeys(indexName, indexKey) +} + // ListIndexFuncValues returns the list of generated values of an Index func func (c *cache) ListIndexFuncValues(indexName string) []string { return c.cacheStorage.ListIndexFuncValues(indexName) diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go index 4d6a01297..1c201efb6 100644 --- a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -43,6 +43,7 @@ type ThreadSafeStore interface { ListKeys() []string Replace(map[string]interface{}, string) Index(indexName string, obj interface{}) ([]interface{}, error) + IndexKeys(indexName, indexKey string) ([]string, error) ListIndexFuncValues(name string) []string ByIndex(indexName, indexKey string) ([]interface{}, error) GetIndexers() Indexers @@ -184,6 +185,23 @@ func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, erro return list, nil } +// IndexKeys returns a list of keys that match on the index function. +// IndexKeys is thread-safe so long as you treat all items as immutable. 
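Given the shared-informer changes above (Run now blocks and owns the processor lifecycle, handlers added after shutdown are rejected, and WaitForCacheSync polls HasSynced), a typical caller looks roughly like the following; the handler bodies are placeholders and the informer is assumed to be constructed elsewhere:

package example

import (
    "fmt"

    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/client-go/tools/cache"
)

func runInformer(informer cache.SharedIndexInformer, stopCh <-chan struct{}) {
    informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc:    func(obj interface{}) { fmt.Println("add") },
        UpdateFunc: func(oldObj, newObj interface{}) { fmt.Println("update") },
        DeleteFunc: func(obj interface{}) { fmt.Println("delete") },
    })

    // Run blocks until stopCh is closed, so start it on its own goroutine.
    go informer.Run(stopCh)

    // Wait for the initial List to be delivered before reading the cache.
    if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
        utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
        return
    }
}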
+func (c *threadSafeMap) IndexKeys(indexName, indexKey string) ([]string, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + indexFunc := c.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + index := c.indices[indexName] + + set := index[indexKey] + return set.List(), nil +} + func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string { c.lock.RLock() defer c.lock.RUnlock() @@ -223,7 +241,7 @@ func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error { // updateIndices modifies the objects location in the managed indexes, if this is an update, you must provide an oldObj // updateIndices must be called from a function that already has a lock on the cache -func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) error { +func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) { // if we got an old object, we need to remove it before we add it again if oldObj != nil { c.deleteFromIndices(oldObj, key) @@ -231,7 +249,7 @@ func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, ke for name, indexFunc := range c.indexers { indexValues, err := indexFunc(newObj) if err != nil { - return err + panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) } index := c.indices[name] if index == nil { @@ -248,16 +266,15 @@ func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, ke set.Insert(key) } } - return nil } // deleteFromIndices removes the object from each of the managed indexes // it is intended to be called from a function that already has a lock on the cache -func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) error { +func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) { for name, indexFunc := range c.indexers { indexValues, err := indexFunc(obj) if err != nil { - return err + panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) } index := c.indices[name] @@ -271,7 +288,6 @@ func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) error { } } } - return nil } func (c *threadSafeMap) Resync() error { diff --git a/vendor/k8s.io/client-go/pkg/api/v1/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go similarity index 91% rename from vendor/k8s.io/client-go/pkg/api/v1/doc.go rename to vendor/k8s.io/client-go/tools/clientcmd/api/doc.go index 0fdd87f75..0a081871a 100644 --- a/vendor/k8s.io/client-go/pkg/api/v1/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package v1 is the v1 version of the API. -package v1 +// +k8s:deepcopy-gen=package +package api diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go index fa9d40a9b..1391df702 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -25,16 +25,14 @@ import ( // Config holds the information needed to build connect to remote kubernetes clusters as a given user // IMPORTANT if you add fields to this struct, please update IsConfigEmpty() +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type Config struct { // Legacy field from pkg/api/types.go TypeMeta. 
// TODO(jlowdermilk): remove this after eliminating downstream dependencies. // +optional Kind string `json:"kind,omitempty"` - // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). - // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify - // a single value for the cluster version. - // This field isn't really needed anyway, so we are deprecating it without replacement. - // It will be ignored if it is present. + // Legacy field from pkg/api/types.go TypeMeta. + // TODO(jlowdermilk): remove this after eliminating downstream dependencies. // +optional APIVersion string `json:"apiVersion,omitempty"` // Preferences holds general information to be use for cli interactions @@ -67,9 +65,6 @@ type Cluster struct { LocationOfOrigin string // Server is the address of the kubernetes cluster (https://hostname:port). Server string `json:"server"` - // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). - // +optional - APIVersion string `json:"api-version,omitempty"` // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. // +optional InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` @@ -109,6 +104,12 @@ type AuthInfo struct { // Impersonate is the username to act-as. // +optional Impersonate string `json:"act-as,omitempty"` + // ImpersonateGroups is the groups to imperonate. + // +optional + ImpersonateGroups []string `json:"act-as-groups,omitempty"` + // ImpersonateUserExtra contains additional information for impersonated user. + // +optional + ImpersonateUserExtra map[string][]string `json:"act-as-user-extra,omitempty"` // Username is the username for basic authentication to the kubernetes cluster. // +optional Username string `json:"username,omitempty"` @@ -118,6 +119,9 @@ type AuthInfo struct { // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. // +optional AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` + // Exec specifies a custom exec-based authentication plugin for the kubernetes cluster. + // +optional + Exec *ExecConfig `json:"exec,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional Extensions map[string]runtime.Object `json:"extensions,omitempty"` @@ -146,6 +150,35 @@ type AuthProviderConfig struct { Config map[string]string `json:"config,omitempty"` } +// ExecConfig specifies a command to provide client credentials. The command is exec'd +// and outputs structured stdout holding credentials. +// +// See the client.authentiction.k8s.io API group for specifications of the exact input +// and output format +type ExecConfig struct { + // Command to execute. + Command string `json:"command"` + // Arguments to pass to the command when executing it. + // +optional + Args []string `json:"args"` + // Env defines additional environment variables to expose to the process. These + // are unioned with the host's environment, as well as variables client-go uses + // to pass argument to the plugin. + // +optional + Env []ExecEnvVar `json:"env"` + + // Preferred input version of the ExecInfo. The returned ExecCredentials MUST use + // the same encoding version as the input. 
+ APIVersion string `json:"apiVersion,omitempty"` +} + +// ExecEnvVar is used for setting environment variables when executing an exec-based +// credential plugin. +type ExecEnvVar struct { + Name string `json:"name"` + Value string `json:"value"` +} + // NewConfig is a convenience function that returns a new Config object with non-nil maps func NewConfig() *Config { return &Config{ @@ -157,22 +190,29 @@ func NewConfig() *Config { } } -// NewConfig is a convenience function that returns a new Config object with non-nil maps +// NewContext is a convenience function that returns a new Context +// object with non-nil maps func NewContext() *Context { return &Context{Extensions: make(map[string]runtime.Object)} } -// NewConfig is a convenience function that returns a new Config object with non-nil maps +// NewCluster is a convenience function that returns a new Cluster +// object with non-nil maps func NewCluster() *Cluster { return &Cluster{Extensions: make(map[string]runtime.Object)} } -// NewConfig is a convenience function that returns a new Config object with non-nil maps +// NewAuthInfo is a convenience function that returns a new AuthInfo +// object with non-nil maps func NewAuthInfo() *AuthInfo { - return &AuthInfo{Extensions: make(map[string]runtime.Object)} + return &AuthInfo{ + Extensions: make(map[string]runtime.Object), + ImpersonateUserExtra: make(map[string][]string), + } } -// NewConfig is a convenience function that returns a new Config object with non-nil maps +// NewPreferences is a convenience function that returns a new +// Preferences object with non-nil maps func NewPreferences() *Preferences { return &Preferences{Extensions: make(map[string]runtime.Object)} } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go new file mode 100644 index 000000000..b90aa8d74 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go @@ -0,0 +1,320 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package api + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
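For illustration only, a minimal sketch (not part of the patch) of wiring the ExecConfig and ExecEnvVar types added above into a clientcmd api.Config. The plugin command, arguments, environment values and API version string are assumed placeholders; DeepCopy comes from the generated zz_generated.deepcopy.go that follows.

    package example

    import (
        clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    )

    // execAuthConfig builds a Config whose user authenticates through a
    // hypothetical exec-based credential plugin.
    func execAuthConfig() *clientcmdapi.Config {
        authInfo := clientcmdapi.NewAuthInfo()
        authInfo.Exec = &clientcmdapi.ExecConfig{
            Command:    "example-credential-helper", // placeholder binary name
            Args:       []string{"get-token"},       // placeholder arguments
            Env:        []clientcmdapi.ExecEnvVar{{Name: "EXAMPLE_REGION", Value: "us-east-1"}},
            APIVersion: "client.authentication.k8s.io/v1alpha1", // assumed exec credential version
        }

        cfg := clientcmdapi.NewConfig()
        cfg.AuthInfos["example-user"] = authInfo

        // DeepCopy is provided by the generated deepcopy code added below.
        return cfg.DeepCopy()
    }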
+func (in *AuthInfo) DeepCopyInto(out *AuthInfo) { + *out = *in + if in.ClientCertificateData != nil { + in, out := &in.ClientCertificateData, &out.ClientCertificateData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.ClientKeyData != nil { + in, out := &in.ClientKeyData, &out.ClientKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.ImpersonateGroups != nil { + in, out := &in.ImpersonateGroups, &out.ImpersonateGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ImpersonateUserExtra != nil { + in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = make([]string, len(val)) + copy((*out)[key], val) + } + } + } + if in.AuthProvider != nil { + in, out := &in.AuthProvider, &out.AuthProvider + if *in == nil { + *out = nil + } else { + *out = new(AuthProviderConfig) + (*in).DeepCopyInto(*out) + } + } + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + if *in == nil { + *out = nil + } else { + *out = new(ExecConfig) + (*in).DeepCopyInto(*out) + } + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make(map[string]runtime.Object, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = val.DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthInfo. +func (in *AuthInfo) DeepCopy() *AuthInfo { + if in == nil { + return nil + } + out := new(AuthInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthProviderConfig) DeepCopyInto(out *AuthProviderConfig) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthProviderConfig. +func (in *AuthProviderConfig) DeepCopy() *AuthProviderConfig { + if in == nil { + return nil + } + out := new(AuthProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + if in.CertificateAuthorityData != nil { + in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make(map[string]runtime.Object, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = val.DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Config) DeepCopyInto(out *Config) { + *out = *in + in.Preferences.DeepCopyInto(&out.Preferences) + if in.Clusters != nil { + in, out := &in.Clusters, &out.Clusters + *out = make(map[string]*Cluster, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = new(Cluster) + val.DeepCopyInto((*out)[key]) + } + } + } + if in.AuthInfos != nil { + in, out := &in.AuthInfos, &out.AuthInfos + *out = make(map[string]*AuthInfo, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = new(AuthInfo) + val.DeepCopyInto((*out)[key]) + } + } + } + if in.Contexts != nil { + in, out := &in.Contexts, &out.Contexts + *out = make(map[string]*Context, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = new(Context) + val.DeepCopyInto((*out)[key]) + } + } + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make(map[string]runtime.Object, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = val.DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Config) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Context) DeepCopyInto(out *Context) { + *out = *in + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make(map[string]runtime.Object, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = val.DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Context. +func (in *Context) DeepCopy() *Context { + if in == nil { + return nil + } + out := new(Context) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecConfig) DeepCopyInto(out *ExecConfig) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]ExecEnvVar, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecConfig. +func (in *ExecConfig) DeepCopy() *ExecConfig { + if in == nil { + return nil + } + out := new(ExecConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar. +func (in *ExecEnvVar) DeepCopy() *ExecEnvVar { + if in == nil { + return nil + } + out := new(ExecEnvVar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Preferences) DeepCopyInto(out *Preferences) { + *out = *in + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make(map[string]runtime.Object, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = val.DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preferences. +func (in *Preferences) DeepCopy() *Preferences { + if in == nil { + return nil + } + out := new(Preferences) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/tools/pager/pager.go b/vendor/k8s.io/client-go/tools/pager/pager.go new file mode 100644 index 000000000..74ea3586a --- /dev/null +++ b/vendor/k8s.io/client-go/tools/pager/pager.go @@ -0,0 +1,117 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pager + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const defaultPageSize = 500 + +// ListPageFunc returns a list object for the given list options. +type ListPageFunc func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) + +// SimplePageFunc adapts a context-less list function into one that accepts a context. +func SimplePageFunc(fn func(opts metav1.ListOptions) (runtime.Object, error)) ListPageFunc { + return func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { + return fn(opts) + } +} + +// ListPager assists client code in breaking large list queries into multiple +// smaller chunks of PageSize or smaller. PageFn is expected to accept a +// metav1.ListOptions that supports paging and return a list. The pager does +// not alter the field or label selectors on the initial options list. +type ListPager struct { + PageSize int64 + PageFn ListPageFunc + + FullListIfExpired bool +} + +// New creates a new pager from the provided pager function using the default +// options. It will fall back to a full list if an expiration error is encountered +// as a last resort. +func New(fn ListPageFunc) *ListPager { + return &ListPager{ + PageSize: defaultPageSize, + PageFn: fn, + FullListIfExpired: true, + } +} + +// TODO: introduce other types of paging functions - such as those that retrieve from a list +// of namespaces. + +// List returns a single list object, but attempts to retrieve smaller chunks from the +// server to reduce the impact on the server. If the chunk attempt fails, it will load +// the full list instead. The Limit field on options, if unset, will default to the page size. 
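For illustration only, a rough sketch (not part of the patch) of driving the new ListPager from a typed clientset; the clientset, namespace and page size are assumptions, and the List method it relies on is the one added immediately below.

    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/pager"
    )

    // listAllPods retrieves pods in chunks of 200, transparently following
    // the continue token returned by the API server.
    func listAllPods(client kubernetes.Interface, namespace string) (runtime.Object, error) {
        p := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
            return client.CoreV1().Pods(namespace).List(opts)
        }))
        p.PageSize = 200
        return p.List(context.Background(), metav1.ListOptions{})
    }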
+func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if options.Limit == 0 { + options.Limit = p.PageSize + } + var list *metainternalversion.List + for { + obj, err := p.PageFn(ctx, options) + if err != nil { + if !errors.IsResourceExpired(err) || !p.FullListIfExpired { + return nil, err + } + // the list expired while we were processing, fall back to a full list + options.Limit = 0 + options.Continue = "" + return p.PageFn(ctx, options) + } + m, err := meta.ListAccessor(obj) + if err != nil { + return nil, fmt.Errorf("returned object must be a list: %v", err) + } + + // exit early and return the object we got if we haven't processed any pages + if len(m.GetContinue()) == 0 && list == nil { + return obj, nil + } + + // initialize the list and fill its contents + if list == nil { + list = &metainternalversion.List{Items: make([]runtime.Object, 0, options.Limit+1)} + list.ResourceVersion = m.GetResourceVersion() + list.SelfLink = m.GetSelfLink() + } + if err := meta.EachListItem(obj, func(obj runtime.Object) error { + list.Items = append(list.Items, obj) + return nil + }); err != nil { + return nil, err + } + + // if we have no more items, return the list + if len(m.GetContinue()) == 0 { + return list, nil + } + + // set the next loop up + options.Continue = m.GetContinue() + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/ref.go b/vendor/k8s.io/client-go/tools/reference/ref.go similarity index 77% rename from vendor/k8s.io/client-go/pkg/api/v1/ref.go rename to vendor/k8s.io/client-go/tools/reference/ref.go index 5d33719fe..573d948a9 100644 --- a/vendor/k8s.io/client-go/pkg/api/v1/ref.go +++ b/vendor/k8s.io/client-go/tools/reference/ref.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1 +package reference import ( "errors" @@ -22,9 +22,9 @@ import ( "net/url" "strings" - "k8s.io/apimachinery/pkg/runtime/schema" - + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -38,11 +38,11 @@ var ( // object, or an error if the object doesn't follow the conventions // that would allow this. // TODO: should take a meta.Interface see http://issue.k8s.io/7127 -func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, error) { +func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReference, error) { if obj == nil { return nil, ErrNilObject } - if ref, ok := obj.(*ObjectReference); ok { + if ref, ok := obj.(*v1.ObjectReference); ok { // Don't make a reference to a reference. 
 return ref, nil
 }
@@ -62,10 +62,10 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference,
 }
 // An object that implements only List has enough metadata to build a reference
- var listMeta meta.List
+ var listMeta metav1.Common
 objectMeta, err := meta.Accessor(obj)
 if err != nil {
- listMeta, err = meta.ListAccessor(obj)
+ listMeta, err = meta.CommonAccessor(obj)
 if err != nil {
 return nil, err
 }
@@ -86,22 +86,26 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference,
 }
 // example paths: /<prefix>/<version>/*
 parts := strings.Split(selfLinkUrl.Path, "/")
- if len(parts) < 3 {
+ if len(parts) < 4 {
 return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version)
 }
- version = parts[2]
+ if parts[1] == "api" {
+ version = parts[2]
+ } else {
+ version = parts[2] + "/" + parts[3]
+ }
 }
 // only has list metadata
 if objectMeta == nil {
- return &ObjectReference{
+ return &v1.ObjectReference{
 Kind: kind,
 APIVersion: version,
 ResourceVersion: listMeta.GetResourceVersion(),
 }, nil
 }
- return &ObjectReference{
+ return &v1.ObjectReference{
 Kind: kind,
 APIVersion: version,
 Name: objectMeta.GetName(),
@@ -112,7 +116,7 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference,
 }
 // GetPartialReference is exactly like GetReference, but allows you to set the FieldPath.
-func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*ObjectReference, error) {
+func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*v1.ObjectReference, error) {
 ref, err := GetReference(scheme, obj)
 if err != nil {
 return nil, err
@@ -120,14 +124,3 @@ func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath s
 ref.FieldPath = fieldPath
 return ref, nil
 }
-
-// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that
-// intend only to get a reference to that object. This simplifies the event recording interface.
-func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
- obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
-}
-func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind {
- return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
-}
-
-func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj }
diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go
index 8d76def34..7cffe2a5f 100644
--- a/vendor/k8s.io/client-go/transport/cache.go
+++ b/vendor/k8s.io/client-go/transport/cache.go
@@ -31,12 +31,30 @@ import (
 // the config has no custom TLS options, http.DefaultTransport is returned.
 type tlsTransportCache struct {
 mu sync.Mutex
- transports map[string]*http.Transport
+ transports map[tlsCacheKey]*http.Transport
 }
 const idleConnsPerHost = 25
-var tlsCache = &tlsTransportCache{transports: make(map[string]*http.Transport)}
+var tlsCache = &tlsTransportCache{transports: make(map[tlsCacheKey]*http.Transport)}
+
+type tlsCacheKey struct {
+ insecure bool
+ caData string
+ certData string
+ keyData string
+ getCert string
+ serverName string
+ dial string
+}
+
+func (t tlsCacheKey) String() string {
+ keyText := "<none>"
+ if len(t.keyData) > 0 {
+ keyText = "<redacted>"
+ }
+ return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial)
+}
 func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
 key, err := tlsConfigKey(config)
@@ -59,30 +77,41 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
 return nil, err
 }
 // The options didn't require a custom TLS config
- if tlsConfig == nil {
+ if tlsConfig == nil && config.Dial == nil {
 return http.DefaultTransport, nil
 }
+ dial := config.Dial
+ if dial == nil {
+ dial = (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext
+ }
 // Cache a single transport for these options
 c.transports[key] = utilnet.SetTransportDefaults(&http.Transport{
 Proxy: http.ProxyFromEnvironment,
 TLSHandshakeTimeout: 10 * time.Second,
 TLSClientConfig: tlsConfig,
 MaxIdleConnsPerHost: idleConnsPerHost,
- Dial: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }).Dial,
+ DialContext: dial,
 })
 return c.transports[key], nil
 }
 // tlsConfigKey returns a unique key for tls.Config objects returned from TLSConfigFor
-func tlsConfigKey(c *Config) (string, error) {
+func tlsConfigKey(c *Config) (tlsCacheKey, error) {
 // Make sure ca/key/cert content is loaded
 if err := loadTLSFiles(c); err != nil {
- return "", err
+ return tlsCacheKey{}, err
 }
- // Only include the things that actually affect the tls.Config
- return fmt.Sprintf("%v/%x/%x/%x", c.TLS.Insecure, c.TLS.CAData, c.TLS.CertData, c.TLS.KeyData), nil
+ return tlsCacheKey{
+ insecure: c.TLS.Insecure,
+ caData: string(c.TLS.CAData),
+ certData: string(c.TLS.CertData),
+ keyData: string(c.TLS.KeyData),
+ getCert: fmt.Sprintf("%p", c.TLS.GetCert),
+ serverName: c.TLS.ServerName,
+ dial: fmt.Sprintf("%p", c.Dial),
+ }, nil
 }
diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go
index 820594ba3..4081c23e7 100644
--- a/vendor/k8s.io/client-go/transport/config.go
+++ b/vendor/k8s.io/client-go/transport/config.go
@@ -16,7 +16,12 @@ limitations under the License.
 package transport
-import "net/http"
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "net/http"
+)
 // Config holds various options for establishing a transport.
 type Config struct {
@@ -48,6 +53,9 @@ type Config struct {
 // config may layer other RoundTrippers on top of the returned
 // RoundTripper.
 WrapTransport func(rt http.RoundTripper) http.RoundTripper
+
+ // Dial specifies the dial function for creating unencrypted TCP connections.
+ Dial func(ctx context.Context, network, address string) (net.Conn, error)
 }
 // ImpersonationConfig has all the available impersonation options
@@ -77,7 +85,12 @@ func (c *Config) HasTokenAuth() bool {
 // HasCertAuth returns whether the configuration has certificate authentication or not.
func (c *Config) HasCertAuth() bool { - return len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0 + return (len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0) && (len(c.TLS.KeyData) != 0 || len(c.TLS.KeyFile) != 0) +} + +// HasCertCallbacks returns whether the configuration has certificate callback or not. +func (c *Config) HasCertCallback() bool { + return c.TLS.GetCert != nil } // TLSConfig holds the information needed to set up a TLS transport. @@ -92,4 +105,6 @@ type TLSConfig struct { CAData []byte // Bytes of the PEM-encoded server trusted root certificates. Supercedes CAFile. CertData []byte // Bytes of the PEM-encoded client certificate. Supercedes CertFile. KeyData []byte // Bytes of the PEM-encoded client key. Supercedes KeyFile. + + GetCert func() (*tls.Certificate, error) // Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supercede this field. } diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index a6f396fbb..459a93760 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -23,6 +23,8 @@ import ( "time" "github.com/golang/glog" + + utilnet "k8s.io/apimachinery/pkg/util/net" ) // HTTPWrappersForConfig wraps a round tripper with any relevant layered @@ -105,7 +107,7 @@ func NewAuthProxyRoundTripper(username string, groups []string, extra map[string } func (rt *authProxyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - req = cloneRequest(req) + req = utilnet.CloneRequest(req) SetAuthProxyHeaders(req, rt.username, rt.groups, rt.extra) return rt.rt.RoundTrip(req) @@ -155,7 +157,7 @@ func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, e if len(req.Header.Get("User-Agent")) != 0 { return rt.rt.RoundTrip(req) } - req = cloneRequest(req) + req = utilnet.CloneRequest(req) req.Header.Set("User-Agent", rt.agent) return rt.rt.RoundTrip(req) } @@ -186,7 +188,7 @@ func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, e if len(req.Header.Get("Authorization")) != 0 { return rt.rt.RoundTrip(req) } - req = cloneRequest(req) + req = utilnet.CloneRequest(req) req.SetBasicAuth(rt.username, rt.password) return rt.rt.RoundTrip(req) } @@ -236,7 +238,7 @@ func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Respons if len(req.Header.Get(ImpersonateUserHeader)) != 0 { return rt.delegate.RoundTrip(req) } - req = cloneRequest(req) + req = utilnet.CloneRequest(req) req.Header.Set(ImpersonateUserHeader, rt.impersonate.UserName) for _, group := range rt.impersonate.Groups { @@ -277,7 +279,7 @@ func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, return rt.rt.RoundTrip(req) } - req = cloneRequest(req) + req = utilnet.CloneRequest(req) req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer)) return rt.rt.RoundTrip(req) } @@ -292,20 +294,6 @@ func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. 
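For illustration only, a rough sketch (not part of the patch) of using the Dial hook and GetCert callback added to transport.Config above; the certificate and key paths are placeholders, and the callback is wired into the TLS handshake by the TLSConfigFor changes later in this patch.

    package example

    import (
        "context"
        "crypto/tls"
        "net"
        "net/http"
        "time"

        "k8s.io/client-go/transport"
    )

    // newTransport builds an http.RoundTripper whose client certificate is
    // resolved lazily via GetCert and whose TCP connections go through a
    // custom dialer. File paths are placeholders.
    func newTransport(caFile string) (http.RoundTripper, error) {
        dialer := &net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}

        cfg := &transport.Config{
            TLS: transport.TLSConfig{
                CAFile: caFile,
                // Called whenever a handshake needs a client certificate.
                GetCert: func() (*tls.Certificate, error) {
                    cert, err := tls.LoadX509KeyPair("/var/run/client.crt", "/var/run/client.key")
                    if err != nil {
                        return nil, err
                    }
                    return &cert, nil
                },
            },
            Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
                return dialer.DialContext(ctx, network, address)
            },
        }
        return transport.New(cfg)
    }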
-func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - // requestInfo keeps track of information about a request/response combination type requestInfo struct { RequestHeaders http.Header @@ -347,7 +335,7 @@ func (r *requestInfo) toCurl() string { } } - return fmt.Sprintf("curl -k -v -X%s %s %s", r.RequestVerb, headers, r.RequestURL) + return fmt.Sprintf("curl -k -v -X%s %s '%s'", r.RequestVerb, headers, r.RequestURL) } // debuggingRoundTripper will display information about the requests passing diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go index 15be0a3e6..c19739fdf 100644 --- a/vendor/k8s.io/client-go/transport/transport.go +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -28,7 +28,7 @@ import ( // or transport level security defined by the provided Config. func New(config *Config) (http.RoundTripper, error) { // Set transport level security - if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.TLS.Insecure) { + if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.HasCertCallback() || config.TLS.Insecure) { return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed") } @@ -52,7 +52,7 @@ func New(config *Config) (http.RoundTripper, error) { // TLSConfigFor returns a tls.Config that will provide the transport level security defined // by the provided Config. Will return nil if no transport level security is requested. func TLSConfigFor(c *Config) (*tls.Config, error) { - if !(c.HasCA() || c.HasCertAuth() || c.TLS.Insecure) { + if !(c.HasCA() || c.HasCertAuth() || c.HasCertCallback() || c.TLS.Insecure || len(c.TLS.ServerName) > 0) { return nil, nil } if c.HasCA() && c.TLS.Insecure { @@ -75,12 +75,40 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { tlsConfig.RootCAs = rootCertPool(c.TLS.CAData) } + var staticCert *tls.Certificate if c.HasCertAuth() { + // If key/cert were provided, verify them before setting up + // tlsConfig.GetClientCertificate. cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData) if err != nil { return nil, err } - tlsConfig.Certificates = []tls.Certificate{cert} + staticCert = &cert + } + + if c.HasCertAuth() || c.HasCertCallback() { + tlsConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + // Note: static key/cert data always take precedence over cert + // callback. + if staticCert != nil { + return staticCert, nil + } + if c.HasCertCallback() { + cert, err := c.TLS.GetCert() + if err != nil { + return nil, err + } + // GetCert may return empty value, meaning no cert. + if cert != nil { + return cert, nil + } + } + + // Both c.TLS.CertData/KeyData were unset and GetCert didn't return + // anything. Return an empty tls.Certificate, no client cert will + // be sent to the server. + return &tls.Certificate{}, nil + } } return tlsConfig, nil diff --git a/vendor/k8s.io/client-go/util/buffer/ring_growing.go b/vendor/k8s.io/client-go/util/buffer/ring_growing.go new file mode 100644 index 000000000..86965a513 --- /dev/null +++ b/vendor/k8s.io/client-go/util/buffer/ring_growing.go @@ -0,0 +1,72 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package buffer + +// RingGrowing is a growing ring buffer. +// Not thread safe. +type RingGrowing struct { + data []interface{} + n int // Size of Data + beg int // First available element + readable int // Number of data items available +} + +// NewRingGrowing constructs a new RingGrowing instance with provided parameters. +func NewRingGrowing(initialSize int) *RingGrowing { + return &RingGrowing{ + data: make([]interface{}, initialSize), + n: initialSize, + } +} + +// ReadOne reads (consumes) first item from the buffer if it is available, otherwise returns false. +func (r *RingGrowing) ReadOne() (data interface{}, ok bool) { + if r.readable == 0 { + return nil, false + } + r.readable-- + element := r.data[r.beg] + r.data[r.beg] = nil // Remove reference to the object to help GC + if r.beg == r.n-1 { + // Was the last element + r.beg = 0 + } else { + r.beg++ + } + return element, true +} + +// WriteOne adds an item to the end of the buffer, growing it if it is full. +func (r *RingGrowing) WriteOne(data interface{}) { + if r.readable == r.n { + // Time to grow + newN := r.n * 2 + newData := make([]interface{}, newN) + to := r.beg + r.readable + if to <= r.n { + copy(newData, r.data[r.beg:to]) + } else { + copied := copy(newData, r.data[r.beg:]) + copy(newData[copied:], r.data[:(to%r.n)]) + } + r.beg = 0 + r.data = newData + r.n = newN + } + r.data[(r.readable+r.beg)%r.n] = data + r.readable++ +} diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index 6854d4152..fb7f5facc 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -38,7 +38,7 @@ const ( duration365d = time.Hour * 24 * 365 ) -// Config containes the basic fields required for creating a certificate +// Config contains the basic fields required for creating a certificate type Config struct { CommonName string Organization []string @@ -138,23 +138,50 @@ func MakeEllipticPrivateKeyPEM() ([]byte, error) { // Host may be an IP or a DNS name // You may also specify additional subject alt names (either ip or dns names) for the certificate func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) { + caKey, err := rsa.GenerateKey(cryptorand.Reader, 2048) + if err != nil { + return nil, nil, err + } + + caTemplate := x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + CommonName: fmt.Sprintf("%s-ca@%d", host, time.Now().Unix()), + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * 365), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + IsCA: true, + } + + caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey) + if err != nil { + return nil, nil, err + } + + caCertificate, err := x509.ParseCertificate(caDERBytes) + if err != nil { + return nil, nil, err + } + priv, err := 
rsa.GenerateKey(cryptorand.Reader, 2048) if err != nil { return nil, nil, err } template := x509.Certificate{ - SerialNumber: big.NewInt(1), + SerialNumber: big.NewInt(2), Subject: pkix.Name{ CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()), }, NotBefore: time.Now(), NotAfter: time.Now().Add(time.Hour * 24 * 365), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, BasicConstraintsValid: true, - IsCA: true, } if ip := net.ParseIP(host); ip != nil { @@ -166,16 +193,19 @@ func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS template.IPAddresses = append(template.IPAddresses, alternateIPs...) template.DNSNames = append(template.DNSNames, alternateDNS...) - derBytes, err := x509.CreateCertificate(cryptorand.Reader, &template, &template, &priv.PublicKey, priv) + derBytes, err := x509.CreateCertificate(cryptorand.Reader, &template, caCertificate, &priv.PublicKey, caKey) if err != nil { return nil, nil, err } - // Generate cert + // Generate cert, followed by ca certBuffer := bytes.Buffer{} if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: derBytes}); err != nil { return nil, nil, err } + if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: caDERBytes}); err != nil { + return nil, nil, err + } // Generate key keyBuffer := bytes.Buffer{} diff --git a/vendor/k8s.io/client-go/util/cert/csr.go b/vendor/k8s.io/client-go/util/cert/csr.go index 280d249fe..39a6751f7 100644 --- a/vendor/k8s.io/client-go/util/cert/csr.go +++ b/vendor/k8s.io/client-go/util/cert/csr.go @@ -51,7 +51,7 @@ func MakeCSRFromTemplate(privateKey interface{}, template *x509.CertificateReque } csrPemBlock := &pem.Block{ - Type: "CERTIFICATE REQUEST", + Type: CertificateRequestBlockType, Bytes: csrDER, } diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go index b6b690383..a57bf09d5 100644 --- a/vendor/k8s.io/client-go/util/cert/io.go +++ b/vendor/k8s.io/client-go/util/cert/io.go @@ -17,7 +17,11 @@ limitations under the License. package cert import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" "crypto/x509" + "encoding/pem" "fmt" "io/ioutil" "os" @@ -66,10 +70,7 @@ func WriteCert(certPath string, data []byte) error { if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil { return err } - if err := ioutil.WriteFile(certPath, data, os.FileMode(0644)); err != nil { - return err - } - return nil + return ioutil.WriteFile(certPath, data, os.FileMode(0644)) } // WriteKey writes the pem-encoded key data to keyPath. @@ -80,17 +81,15 @@ func WriteKey(keyPath string, data []byte) error { if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { return err } - if err := ioutil.WriteFile(keyPath, data, os.FileMode(0600)); err != nil { - return err - } - return nil + return ioutil.WriteFile(keyPath, data, os.FileMode(0600)) } // LoadOrGenerateKeyFile looks for a key in the file at the given path. If it // can't find one, it will generate a new key and store it there. func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) { loadedData, err := ioutil.ReadFile(keyPath) - if err == nil { + // Call verifyKeyData to ensure the file wasn't empty/corrupt. 
+ if err == nil && verifyKeyData(loadedData) { return loadedData, false, err } if !os.IsNotExist(err) { @@ -107,6 +106,27 @@ func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err return generatedData, true, nil } +// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to +// a PEM encoded block or returns an error. +func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) { + switch t := privateKey.(type) { + case *ecdsa.PrivateKey: + derBytes, err := x509.MarshalECPrivateKey(t) + if err != nil { + return nil, err + } + privateKeyPemBlock := &pem.Block{ + Type: ECPrivateKeyBlockType, + Bytes: derBytes, + } + return pem.EncodeToMemory(privateKeyPemBlock), nil + case *rsa.PrivateKey: + return EncodePrivateKeyPEM(t), nil + default: + return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey) + } +} + // NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file. // Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates func NewPool(filename string) (*x509.CertPool, error) { @@ -138,13 +158,36 @@ func CertsFromFile(file string) ([]*x509.Certificate, error) { // PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file. // Returns an error if the file could not be read or if the private key could not be parsed. func PrivateKeyFromFile(file string) (interface{}, error) { - pemBlock, err := ioutil.ReadFile(file) + data, err := ioutil.ReadFile(file) if err != nil { return nil, err } - key, err := ParsePrivateKeyPEM(pemBlock) + key, err := ParsePrivateKeyPEM(data) if err != nil { - return nil, fmt.Errorf("error reading %s: %v", file, err) + return nil, fmt.Errorf("error reading private key file %s: %v", file, err) } return key, nil } + +// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file. +// Reads public keys from both public and private key files. +func PublicKeysFromFile(file string) ([]interface{}, error) { + data, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + keys, err := ParsePublicKeysPEM(data) + if err != nil { + return nil, fmt.Errorf("error reading public key file %s: %v", file, err) + } + return keys, nil +} + +// verifyKeyData returns true if the provided data appears to be a valid private key. +func verifyKeyData(data []byte) bool { + if len(data) == 0 { + return false + } + _, err := ParsePrivateKeyPEM(data) + return err == nil +} diff --git a/vendor/k8s.io/client-go/util/cert/pem.go b/vendor/k8s.io/client-go/util/cert/pem.go index 899845857..b99e36651 100644 --- a/vendor/k8s.io/client-go/util/cert/pem.go +++ b/vendor/k8s.io/client-go/util/cert/pem.go @@ -17,6 +17,7 @@ limitations under the License. package cert import ( + "crypto/ecdsa" "crypto/rsa" "crypto/x509" "encoding/pem" @@ -29,17 +30,17 @@ const ( ECPrivateKeyBlockType = "EC PRIVATE KEY" // RSAPrivateKeyBlockType is a possible value for pem.Block.Type. RSAPrivateKeyBlockType = "RSA PRIVATE KEY" - // CertificateBlockType is a possible value for pem.Block.Type. - CertificateBlockType = "CERTIFICATE" - // CertificateRequestBlockType is a possible value for pem.Block.Type. - CertificateRequestBlockType = "CERTIFICATE REQUEST" // PrivateKeyBlockType is a possible value for pem.Block.Type. PrivateKeyBlockType = "PRIVATE KEY" // PublicKeyBlockType is a possible value for pem.Block.Type. 
PublicKeyBlockType = "PUBLIC KEY" + // CertificateBlockType is a possible value for pem.Block.Type. + CertificateBlockType = "CERTIFICATE" + // CertificateRequestBlockType is a possible value for pem.Block.Type. + CertificateRequestBlockType = "CERTIFICATE REQUEST" ) -// EncodePublicKeyPEM returns PEM-endcode public data +// EncodePublicKeyPEM returns PEM-encoded public data func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) { der, err := x509.MarshalPKIXPublicKey(key) if err != nil { @@ -106,6 +107,46 @@ func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) { return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key") } +// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array. +// Reads public keys from both public and private key files. +func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) { + var block *pem.Block + keys := []interface{}{} + for { + // read the next block + block, keyData = pem.Decode(keyData) + if block == nil { + break + } + + // test block against parsing functions + if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil { + keys = append(keys, &privateKey.PublicKey) + continue + } + if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil { + keys = append(keys, publicKey) + continue + } + if privateKey, err := parseECPrivateKey(block.Bytes); err == nil { + keys = append(keys, &privateKey.PublicKey) + continue + } + if publicKey, err := parseECPublicKey(block.Bytes); err == nil { + keys = append(keys, publicKey) + continue + } + + // tolerate non-key PEM blocks for backwards compatibility + // originally, only the first PEM block was parsed and expected to be a key block + } + + if len(keys) == 0 { + return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys") + } + return keys, nil +} + // ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array // Returns an error if a certificate could not be parsed, or if the data does not contain any certificates func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { @@ -132,7 +173,97 @@ func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { } if !ok { - return certs, errors.New("could not read any certificates") + return certs, errors.New("data does not contain any valid RSA or ECDSA certificates") } return certs, nil } + +// parseRSAPublicKey parses a single RSA public key from the provided data +func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { + if cert, err := x509.ParseCertificate(data); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + // Test if parsed key is an RSA Public Key + var pubKey *rsa.PublicKey + var ok bool + if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid RSA Public Key") + } + + return pubKey, nil +} + +// parseRSAPrivateKey parses a single RSA private key from the provided data +func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil { + return nil, err + } + } + + // Test if parsed key is an RSA Private Key + var privKey *rsa.PrivateKey + var ok bool + if 
privKey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid RSA Private Key") + } + + return privKey, nil +} + +// parseECPublicKey parses a single ECDSA public key from the provided data +func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { + if cert, err := x509.ParseCertificate(data); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + // Test if parsed key is an ECDSA Public Key + var pubKey *ecdsa.PublicKey + var ok bool + if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key") + } + + return pubKey, nil +} + +// parseECPrivateKey parses a single ECDSA private key from the provided data +func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(data); err != nil { + return nil, err + } + + // Test if parsed key is an ECDSA Private Key + var privKey *ecdsa.PrivateKey + var ok bool + if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key") + } + + return privKey, nil +} diff --git a/vendor/k8s.io/client-go/util/connrotation/connrotation.go b/vendor/k8s.io/client-go/util/connrotation/connrotation.go new file mode 100644 index 000000000..235a9e019 --- /dev/null +++ b/vendor/k8s.io/client-go/util/connrotation/connrotation.go @@ -0,0 +1,105 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package connrotation implements a connection dialer that tracks and can close +// all created connections. +// +// This is used for credential rotation of long-lived connections, when there's +// no way to re-authenticate on a live connection. +package connrotation + +import ( + "context" + "net" + "sync" +) + +// DialFunc is a shorthand for signature of net.DialContext. +type DialFunc func(ctx context.Context, network, address string) (net.Conn, error) + +// Dialer opens connections through Dial and tracks them. +type Dialer struct { + dial DialFunc + + mu sync.Mutex + conns map[*closableConn]struct{} +} + +// NewDialer creates a new Dialer instance. +// +// If dial is not nil, it will be used to create new underlying connections. +// Otherwise net.DialContext is used. +func NewDialer(dial DialFunc) *Dialer { + return &Dialer{ + dial: dial, + conns: make(map[*closableConn]struct{}), + } +} + +// CloseAll forcibly closes all tracked connections. +// +// Note: new connections may get created before CloseAll returns. +func (d *Dialer) CloseAll() { + d.mu.Lock() + conns := d.conns + d.conns = make(map[*closableConn]struct{}) + d.mu.Unlock() + + for conn := range conns { + conn.Close() + } +} + +// Dial creates a new tracked connection. 
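For illustration only, a rough sketch (not part of the patch) of pairing the connrotation dialer with an HTTP transport so that rotated credentials can force existing connections to be re-established; the transport wiring is an assumption, and Dial/DialContext below do the per-connection tracking.

    package example

    import (
        "net"
        "net/http"
        "time"

        "k8s.io/client-go/util/connrotation"
    )

    // newRotatableTransport returns an http.Transport plus a function that
    // force-closes all of its connections, e.g. after a client certificate
    // has been rotated.
    func newRotatableTransport() (*http.Transport, func()) {
        base := (&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext
        d := connrotation.NewDialer(base)

        t := &http.Transport{
            // All connections are created through the tracking dialer.
            DialContext: d.DialContext,
        }
        return t, d.CloseAll
    }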
+func (d *Dialer) Dial(network, address string) (net.Conn, error) { + return d.DialContext(context.Background(), network, address) +} + +// DialContext creates a new tracked connection. +func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + conn, err := d.dial(ctx, network, address) + if err != nil { + return nil, err + } + + closable := &closableConn{Conn: conn} + + // Start tracking the connection + d.mu.Lock() + d.conns[closable] = struct{}{} + d.mu.Unlock() + + // When the connection is closed, remove it from the map. This will + // be no-op if the connection isn't in the map, e.g. if CloseAll() + // is called. + closable.onClose = func() { + d.mu.Lock() + delete(d.conns, closable) + d.mu.Unlock() + } + + return closable, nil +} + +type closableConn struct { + onClose func() + net.Conn +} + +func (c *closableConn) Close() error { + go c.onClose() + return c.Conn.Close() +} diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go index 030b15a56..71d442a62 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -20,7 +20,7 @@ import ( "sync" "time" - "k8s.io/client-go/util/clock" + "k8s.io/apimachinery/pkg/util/clock" "k8s.io/client-go/util/integer" ) diff --git a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go index 881a2f57d..e671c044d 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go @@ -18,8 +18,9 @@ package flowcontrol import ( "sync" + "time" - "github.com/juju/ratelimit" + "golang.org/x/time/rate" ) type RateLimiter interface { @@ -30,17 +31,13 @@ type RateLimiter interface { Accept() // Stop stops the rate limiter, subsequent calls to CanAccept will return false Stop() - // Saturation returns a percentage number which describes how saturated - // this rate limiter is. - // Usually we use token bucket rate limiter. In that case, - // 1.0 means no tokens are available; 0.0 means we have a full bucket of tokens to use. - Saturation() float64 // QPS returns QPS of this rate limiter QPS() float32 } type tokenBucketRateLimiter struct { - limiter *ratelimit.Bucket + limiter *rate.Limiter + clock Clock qps float32 } @@ -50,26 +47,48 @@ type tokenBucketRateLimiter struct { // The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'. // The maximum number of tokens in the bucket is capped at 'burst'. func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter { - limiter := ratelimit.NewBucketWithRate(float64(qps), int64(burst)) + limiter := rate.NewLimiter(rate.Limit(qps), burst) + return newTokenBucketRateLimiter(limiter, realClock{}, qps) +} + +// An injectable, mockable clock interface. +type Clock interface { + Now() time.Time + Sleep(time.Duration) +} + +type realClock struct{} + +func (realClock) Now() time.Time { + return time.Now() +} +func (realClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// NewTokenBucketRateLimiterWithClock is identical to NewTokenBucketRateLimiter +// but allows an injectable clock, for testing. 
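For illustration only, a short sketch (not part of the patch) of the token-bucket limiter now backed by golang.org/x/time/rate; the QPS and burst values are arbitrary.

    package example

    import (
        "k8s.io/client-go/util/flowcontrol"
    )

    // doRequests invokes fn at most 5 times per second with a burst of 10,
    // blocking in Accept until a token is available.
    func doRequests(fn func(), n int) {
        limiter := flowcontrol.NewTokenBucketRateLimiter(5.0, 10)
        defer limiter.Stop()

        for i := 0; i < n; i++ {
            limiter.Accept() // blocks until the bucket has a token
            fn()
        }
    }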
+func NewTokenBucketRateLimiterWithClock(qps float32, burst int, c Clock) RateLimiter { + limiter := rate.NewLimiter(rate.Limit(qps), burst) + return newTokenBucketRateLimiter(limiter, c, qps) +} + +func newTokenBucketRateLimiter(limiter *rate.Limiter, c Clock, qps float32) RateLimiter { return &tokenBucketRateLimiter{ limiter: limiter, + clock: c, qps: qps, } } func (t *tokenBucketRateLimiter) TryAccept() bool { - return t.limiter.TakeAvailable(1) == 1 -} - -func (t *tokenBucketRateLimiter) Saturation() float64 { - capacity := t.limiter.Capacity() - avail := t.limiter.Available() - return float64(capacity-avail) / float64(capacity) + return t.limiter.AllowN(t.clock.Now(), 1) } // Accept will block until a token becomes available func (t *tokenBucketRateLimiter) Accept() { - t.limiter.Wait(1) + now := t.clock.Now() + t.clock.Sleep(t.limiter.ReserveN(now, 1).DelayFrom(now)) } func (t *tokenBucketRateLimiter) Stop() { @@ -89,10 +108,6 @@ func (t *fakeAlwaysRateLimiter) TryAccept() bool { return true } -func (t *fakeAlwaysRateLimiter) Saturation() float64 { - return 0 -} - func (t *fakeAlwaysRateLimiter) Stop() {} func (t *fakeAlwaysRateLimiter) Accept() {} @@ -115,10 +130,6 @@ func (t *fakeNeverRateLimiter) TryAccept() bool { return false } -func (t *fakeNeverRateLimiter) Saturation() float64 { - return 1 -} - func (t *fakeNeverRateLimiter) Stop() { t.wg.Done() } diff --git a/vendor/k8s.io/client-go/util/retry/OWNERS b/vendor/k8s.io/client-go/util/retry/OWNERS new file mode 100755 index 000000000..a4c1c2d4f --- /dev/null +++ b/vendor/k8s.io/client-go/util/retry/OWNERS @@ -0,0 +1,2 @@ +reviewers: +- caesarxuchao diff --git a/vendor/k8s.io/client-go/util/retry/util.go b/vendor/k8s.io/client-go/util/retry/util.go new file mode 100644 index 000000000..3ac0840ad --- /dev/null +++ b/vendor/k8s.io/client-go/util/retry/util.go @@ -0,0 +1,79 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package retry + +import ( + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/wait" +) + +// DefaultRetry is the recommended retry for a conflict where multiple clients +// are making changes to the same resource. +var DefaultRetry = wait.Backoff{ + Steps: 5, + Duration: 10 * time.Millisecond, + Factor: 1.0, + Jitter: 0.1, +} + +// DefaultBackoff is the recommended backoff for a conflict where a client +// may be attempting to make an unrelated modification to a resource under +// active management by one or more controllers. +var DefaultBackoff = wait.Backoff{ + Steps: 4, + Duration: 10 * time.Millisecond, + Factor: 5.0, + Jitter: 0.1, +} + +// RetryConflict executes the provided function repeatedly, retrying if the server returns a conflicting +// write. Callers should preserve previous executions if they wish to retry changes. It performs an +// exponential backoff. 
+// +// var pod *api.Pod +// err := RetryOnConflict(DefaultBackoff, func() (err error) { +// pod, err = c.Pods("mynamespace").UpdateStatus(podStatus) +// return +// }) +// if err != nil { +// // may be conflict if max retries were hit +// return err +// } +// ... +// +// TODO: Make Backoff an interface? +func RetryOnConflict(backoff wait.Backoff, fn func() error) error { + var lastConflictErr error + err := wait.ExponentialBackoff(backoff, func() (bool, error) { + err := fn() + switch { + case err == nil: + return true, nil + case errors.IsConflict(err): + lastConflictErr = err + return false, nil + default: + return false, err + } + }) + if err == wait.ErrWaitTimeout { + err = lastConflictErr + } + return err +} diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go index 35caed4fa..a5bed29e0 100644 --- a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go +++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go @@ -21,7 +21,7 @@ import ( "sync" "time" - "github.com/juju/ratelimit" + "golang.org/x/time/rate" ) type RateLimiter interface { @@ -40,19 +40,19 @@ func DefaultControllerRateLimiter() RateLimiter { return NewMaxOfRateLimiter( NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), // 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) - &BucketRateLimiter{Bucket: ratelimit.NewBucketWithRate(float64(10), int64(100))}, + &BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, ) } // BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API type BucketRateLimiter struct { - *ratelimit.Bucket + *rate.Limiter } var _ RateLimiter = &BucketRateLimiter{} func (r *BucketRateLimiter) When(item interface{}) time.Duration { - return r.Bucket.Take(1) + return r.Limiter.Reserve().Delay() } func (r *BucketRateLimiter) NumRequeues(item interface{}) int { diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index 593ad9ad4..a37177425 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -17,11 +17,11 @@ limitations under the License. package workqueue import ( - "sort" + "container/heap" "time" + "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/util/clock" ) // DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to @@ -43,13 +43,12 @@ func NewNamedDelayingQueue(name string) DelayingInterface { func newDelayingQueue(clock clock.Clock, name string) DelayingInterface { ret := &delayingType{ - Interface: NewNamed(name), - clock: clock, - heartbeat: clock.Tick(maxWait), - stopCh: make(chan struct{}), - waitingTimeByEntry: map[t]time.Time{}, - waitingForAddCh: make(chan waitFor, 1000), - metrics: newRetryMetrics(name), + Interface: NewNamed(name), + clock: clock, + heartbeat: clock.NewTicker(maxWait), + stopCh: make(chan struct{}), + waitingForAddCh: make(chan *waitFor, 1000), + metrics: newRetryMetrics(name), } go ret.waitingLoop() @@ -68,17 +67,10 @@ type delayingType struct { stopCh chan struct{} // heartbeat ensures we wait no more than maxWait before firing - // - // TODO: replace with Ticker (and add to clock) so this can be cleaned up. - // clock.Tick will leak. 
- heartbeat <-chan time.Time + heartbeat clock.Ticker - // waitingForAdd is an ordered slice of items to be added to the contained work queue - waitingForAdd []waitFor - // waitingTimeByEntry holds wait time by entry, so we can lookup pre-existing indexes - waitingTimeByEntry map[t]time.Time // waitingForAddCh is a buffered channel that feeds waitingForAdd - waitingForAddCh chan waitFor + waitingForAddCh chan *waitFor // metrics counts the number of retries metrics retryMetrics @@ -88,12 +80,62 @@ type delayingType struct { type waitFor struct { data t readyAt time.Time + // index in the priority queue (heap) + index int +} + +// waitForPriorityQueue implements a priority queue for waitFor items. +// +// waitForPriorityQueue implements heap.Interface. The item occurring next in +// time (i.e., the item with the smallest readyAt) is at the root (index 0). +// Peek returns this minimum item at index 0. Pop returns the minimum item after +// it has been removed from the queue and placed at index Len()-1 by +// container/heap. Push adds an item at index Len(), and container/heap +// percolates it into the correct location. +type waitForPriorityQueue []*waitFor + +func (pq waitForPriorityQueue) Len() int { + return len(pq) +} +func (pq waitForPriorityQueue) Less(i, j int) bool { + return pq[i].readyAt.Before(pq[j].readyAt) +} +func (pq waitForPriorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +// Push adds an item to the queue. Push should not be called directly; instead, +// use `heap.Push`. +func (pq *waitForPriorityQueue) Push(x interface{}) { + n := len(*pq) + item := x.(*waitFor) + item.index = n + *pq = append(*pq, item) +} + +// Pop removes an item from the queue. Pop should not be called directly; +// instead, use `heap.Pop`. +func (pq *waitForPriorityQueue) Pop() interface{} { + n := len(*pq) + item := (*pq)[n-1] + item.index = -1 + *pq = (*pq)[0:(n - 1)] + return item +} + +// Peek returns the item at the beginning of the queue, without removing the +// item or otherwise mutating the queue. It is safe to call directly. 
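For illustration only, a small sketch (not part of the patch) of the delaying queue that this priority queue backs; the key and delay are arbitrary. AddAfter funnels items through waitingForAddCh into the heap above, and they surface on the ordinary queue once their readyAt passes.

    package example

    import (
        "fmt"
        "time"

        "k8s.io/client-go/util/workqueue"
    )

    // requeueLater pushes a key onto a delaying queue and waits for it to
    // become ready before processing it.
    func requeueLater() {
        q := workqueue.NewDelayingQueue()
        defer q.ShutDown()

        q.AddAfter("pods/default/example", 500*time.Millisecond)

        item, shutdown := q.Get() // blocks until the delayed item is ready
        if shutdown {
            return
        }
        fmt.Println("got", item)
        q.Done(item)
    }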
+func (pq waitForPriorityQueue) Peek() interface{} { + return pq[0] } // ShutDown gives a way to shut off this queue func (q *delayingType) ShutDown() { q.Interface.ShutDown() close(q.stopCh) + q.heartbeat.Stop() } // AddAfter adds the given item to the work queue after the given delay @@ -114,7 +156,7 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { select { case <-q.stopCh: // unblock if ShutDown() is called - case q.waitingForAddCh <- waitFor{data: item, readyAt: q.clock.Now().Add(duration)}: + case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}: } } @@ -130,39 +172,42 @@ func (q *delayingType) waitingLoop() { // Make a placeholder channel to use when there are no items in our list never := make(<-chan time.Time) + waitingForQueue := &waitForPriorityQueue{} + heap.Init(waitingForQueue) + + waitingEntryByData := map[t]*waitFor{} + for { if q.Interface.ShuttingDown() { - // discard waiting entries - q.waitingForAdd = nil - q.waitingTimeByEntry = nil return } now := q.clock.Now() // Add ready entries - readyEntries := 0 - for _, entry := range q.waitingForAdd { + for waitingForQueue.Len() > 0 { + entry := waitingForQueue.Peek().(*waitFor) if entry.readyAt.After(now) { break } + + entry = heap.Pop(waitingForQueue).(*waitFor) q.Add(entry.data) - delete(q.waitingTimeByEntry, entry.data) - readyEntries++ + delete(waitingEntryByData, entry.data) } - q.waitingForAdd = q.waitingForAdd[readyEntries:] // Set up a wait for the first item's readyAt (if one exists) nextReadyAt := never - if len(q.waitingForAdd) > 0 { - nextReadyAt = q.clock.After(q.waitingForAdd[0].readyAt.Sub(now)) + if waitingForQueue.Len() > 0 { + entry := waitingForQueue.Peek().(*waitFor) + nextReadyAt = q.clock.After(entry.readyAt.Sub(now)) } select { case <-q.stopCh: return - case <-q.heartbeat: + case <-q.heartbeat.C(): // continue the loop, which will add ready items case <-nextReadyAt: @@ -170,7 +215,7 @@ func (q *delayingType) waitingLoop() { case waitEntry := <-q.waitingForAddCh: if waitEntry.readyAt.After(q.clock.Now()) { - q.waitingForAdd = insert(q.waitingForAdd, q.waitingTimeByEntry, waitEntry) + insert(waitingForQueue, waitingEntryByData, waitEntry) } else { q.Add(waitEntry.data) } @@ -180,7 +225,7 @@ func (q *delayingType) waitingLoop() { select { case waitEntry := <-q.waitingForAddCh: if waitEntry.readyAt.After(q.clock.Now()) { - q.waitingForAdd = insert(q.waitingForAdd, q.waitingTimeByEntry, waitEntry) + insert(waitingForQueue, waitingEntryByData, waitEntry) } else { q.Add(waitEntry.data) } @@ -192,55 +237,19 @@ func (q *delayingType) waitingLoop() { } } -// inserts the given entry into the sorted entries list -// same semantics as append()... the given slice may be modified, -// and the returned value should be used -// -// TODO: This should probably be converted to use container/heap to improve -// running time for a large number of items. 
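
This TODO is resolved by the change in this hunk: the sorted-slice insert and its O(n) bookkeeping below are removed in favor of the container/heap-backed waitForPriorityQueue introduced above, which keeps the next-ready entry at index 0 so Peek is O(1) while Push, Pop, and Fix are O(log n). A minimal stand-alone sketch of the same pattern follows (illustrative names only, not part of the vendored file; the real queue additionally records each item's heap index so heap.Fix can reprioritize an existing entry):

// Illustrative only; not part of the vendored workqueue package.
package main

import (
	"container/heap"
	"fmt"
	"time"
)

type item struct {
	data    string
	readyAt time.Time
}

// readyQueue is a min-heap keyed on readyAt: the entry that becomes
// ready first always sits at index 0.
type readyQueue []*item

func (q readyQueue) Len() int            { return len(q) }
func (q readyQueue) Less(i, j int) bool  { return q[i].readyAt.Before(q[j].readyAt) }
func (q readyQueue) Swap(i, j int)       { q[i], q[j] = q[j], q[i] }
func (q *readyQueue) Push(x interface{}) { *q = append(*q, x.(*item)) }
func (q *readyQueue) Pop() interface{} {
	old := *q
	n := len(old)
	it := old[n-1]
	*q = old[:n-1]
	return it
}

// Peek reads the earliest entry without removing it.
func (q readyQueue) Peek() *item { return q[0] }

func main() {
	q := &readyQueue{}
	heap.Init(q)
	now := time.Now()
	heap.Push(q, &item{data: "b", readyAt: now.Add(2 * time.Second)})
	heap.Push(q, &item{data: "a", readyAt: now.Add(1 * time.Second)})
	fmt.Println(q.Peek().data) // "a": the entry that becomes ready first
}
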
-func insert(entries []waitFor, knownEntries map[t]time.Time, entry waitFor) []waitFor { - // if the entry is already in our retry list and the existing time is before the new one, just skip it - existingTime, exists := knownEntries[entry.data] - if exists && existingTime.Before(entry.readyAt) { - return entries - } - - // if the entry exists and is scheduled for later, go ahead and remove the entry +// insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue +func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, entry *waitFor) { + // if the entry already exists, update the time only if it would cause the item to be queued sooner + existing, exists := knownEntries[entry.data] if exists { - if existingIndex := findEntryIndex(entries, existingTime, entry.data); existingIndex >= 0 && existingIndex < len(entries) { - entries = append(entries[:existingIndex], entries[existingIndex+1:]...) + if existing.readyAt.After(entry.readyAt) { + existing.readyAt = entry.readyAt + heap.Fix(q, existing.index) } + + return } - insertionIndex := sort.Search(len(entries), func(i int) bool { - return entry.readyAt.Before(entries[i].readyAt) - }) - - // grow by 1 - entries = append(entries, waitFor{}) - // shift items from the insertion point to the end - copy(entries[insertionIndex+1:], entries[insertionIndex:]) - // insert the record - entries[insertionIndex] = entry - - knownEntries[entry.data] = entry.readyAt - - return entries -} - -// findEntryIndex returns the index for an existing entry -func findEntryIndex(entries []waitFor, existingTime time.Time, data t) int { - index := sort.Search(len(entries), func(i int) bool { - return entries[i].readyAt.After(existingTime) || existingTime == entries[i].readyAt - }) - - // we know this is the earliest possible index, but there could be multiple with the same time - // iterate from here to find the dupe - for ; index < len(entries); index++ { - if entries[index].data == data { - break - } - } - - return index + heap.Push(q, entry) + knownEntries[entry.data] = entry } diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index 3e1a49fe2..dc9a7cc7b 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -29,7 +29,7 @@ type Interface interface { ShuttingDown() bool } -// New constructs a new workqueue (see the package comment). +// New constructs a new work queue (see the package comment). func New() *Type { return NewNamed("") } diff --git a/vendor/k8s.io/client-go/util/workqueue/timed_queue.go b/vendor/k8s.io/client-go/util/workqueue/timed_queue.go deleted file mode 100644 index 2ad90bfdf..000000000 --- a/vendor/k8s.io/client-go/util/workqueue/timed_queue.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package workqueue - -import "time" - -type TimedWorkQueue struct { - *Type -} - -type TimedWorkQueueItem struct { - StartTime time.Time - Object interface{} -} - -func NewTimedWorkQueue() *TimedWorkQueue { - return &TimedWorkQueue{New()} -} - -// Add adds the obj along with the current timestamp to the queue. -func (q TimedWorkQueue) Add(timedItem *TimedWorkQueueItem) { - q.Type.Add(timedItem) -} - -// Get gets the obj along with its timestamp from the queue. -func (q TimedWorkQueue) Get() (timedItem *TimedWorkQueueItem, shutdown bool) { - origin, shutdown := q.Type.Get() - if origin == nil { - return nil, shutdown - } - timedItem, _ = origin.(*TimedWorkQueueItem) - return timedItem, shutdown -} - -func (q TimedWorkQueue) Done(timedItem *TimedWorkQueueItem) error { - q.Type.Done(timedItem) - return nil -} diff --git a/vendor/k8s.io/kube-openapi/LICENSE b/vendor/k8s.io/kube-openapi/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go new file mode 100644 index 000000000..11ed8a6b7 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package proto is a collection of libraries for parsing and indexing the type definitions. +// The openapi spec contains the object model definitions and extensions metadata. +package proto diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go new file mode 100644 index 000000000..a57dcd363 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -0,0 +1,299 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" + + "github.com/googleapis/gnostic/OpenAPIv2" + "gopkg.in/yaml.v2" +) + +func newSchemaError(path *Path, format string, a ...interface{}) error { + err := fmt.Sprintf(format, a...) + if path.Len() == 0 { + return fmt.Errorf("SchemaError: %v", err) + } + return fmt.Errorf("SchemaError(%v): %v", path, err) +} + +// VendorExtensionToMap converts openapi VendorExtension to a map. 
+func VendorExtensionToMap(e []*openapi_v2.NamedAny) map[string]interface{} { + values := map[string]interface{}{} + + for _, na := range e { + if na.GetName() == "" || na.GetValue() == nil { + continue + } + if na.GetValue().GetYaml() == "" { + continue + } + var value interface{} + err := yaml.Unmarshal([]byte(na.GetValue().GetYaml()), &value) + if err != nil { + continue + } + + values[na.GetName()] = value + } + + return values +} + +// Definitions is an implementation of `Models`. It looks for +// models in an openapi Schema. +type Definitions struct { + models map[string]Schema +} + +var _ Models = &Definitions{} + +// NewOpenAPIData creates a new `Models` out of the openapi document. +func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) { + definitions := Definitions{ + models: map[string]Schema{}, + } + + // Save the list of all models first. This will allow us to + // validate that we don't have any dangling reference. + for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { + definitions.models[namedSchema.GetName()] = nil + } + + // Now, parse each model. We can validate that references exists. + for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { + path := NewPath(namedSchema.GetName()) + schema, err := definitions.ParseSchema(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + definitions.models[namedSchema.GetName()] = schema + } + + return &definitions, nil +} + +// We believe the schema is a reference, verify that and returns a new +// Schema +func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetProperties().GetAdditionalProperties()) > 0 { + return nil, newSchemaError(path, "unallowed embedded type definition") + } + if len(s.GetType().GetValue()) > 0 { + return nil, newSchemaError(path, "definition reference can't have a type") + } + + if !strings.HasPrefix(s.GetXRef(), "#/definitions/") { + return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef()) + } + reference := strings.TrimPrefix(s.GetXRef(), "#/definitions/") + if _, ok := d.models[reference]; !ok { + return nil, newSchemaError(path, "unknown model in reference: %q", reference) + } + return &Ref{ + BaseSchema: d.parseBaseSchema(s, path), + reference: reference, + definitions: d, + }, nil +} + +func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) BaseSchema { + return BaseSchema{ + Description: s.GetDescription(), + Extensions: VendorExtensionToMap(s.GetVendorExtension()), + Path: *path, + } +} + +// We believe the schema is a map, verify and return a new schema +func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { + return nil, newSchemaError(path, "invalid object type") + } + var sub Schema + if s.GetAdditionalProperties().GetSchema() == nil { + sub = &Arbitrary{ + BaseSchema: d.parseBaseSchema(s, path), + } + } else { + var err error + sub, err = d.ParseSchema(s.GetAdditionalProperties().GetSchema(), path) + if err != nil { + return nil, err + } + } + return &Map{ + BaseSchema: d.parseBaseSchema(s, path), + SubType: sub, + }, nil +} + +func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, error) { + var t string + if len(s.GetType().GetValue()) > 1 { + return nil, newSchemaError(path, "primitive can't have more than 1 type") + } + if len(s.GetType().GetValue()) == 1 { + t = s.GetType().GetValue()[0] + } + 
switch t { + case String: // do nothing + case Number: // do nothing + case Integer: // do nothing + case Boolean: // do nothing + default: + return nil, newSchemaError(path, "Unknown primitive type: %q", t) + } + return &Primitive{ + BaseSchema: d.parseBaseSchema(s, path), + Type: t, + Format: s.GetFormat(), + }, nil +} + +func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 1 { + return nil, newSchemaError(path, "array should have exactly one type") + } + if s.GetType().GetValue()[0] != array { + return nil, newSchemaError(path, `array should have type "array"`) + } + if len(s.GetItems().GetSchema()) != 1 { + return nil, newSchemaError(path, "array should have exactly one sub-item") + } + sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path) + if err != nil { + return nil, err + } + return &Array{ + BaseSchema: d.parseBaseSchema(s, path), + SubType: sub, + }, nil +} + +func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { + return nil, newSchemaError(path, "invalid object type") + } + if s.GetProperties() == nil { + return nil, newSchemaError(path, "object doesn't have properties") + } + + fields := map[string]Schema{} + + for _, namedSchema := range s.GetProperties().GetAdditionalProperties() { + var err error + path := path.FieldPath(namedSchema.GetName()) + fields[namedSchema.GetName()], err = d.ParseSchema(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + } + + return &Kind{ + BaseSchema: d.parseBaseSchema(s, path), + RequiredFields: s.GetRequired(), + Fields: fields, + }, nil +} + +func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, error) { + return &Arbitrary{ + BaseSchema: d.parseBaseSchema(s, path), + }, nil +} + +// ParseSchema creates a walkable Schema from an openapi schema. While +// this function is public, it doesn't leak through the interface. +func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { + if s.GetXRef() != "" { + return d.parseReference(s, path) + } + objectTypes := s.GetType().GetValue() + switch len(objectTypes) { + case 0: + // in the OpenAPI schema served by older k8s versions, object definitions created from structs did not include + // the type:object property (they only included the "properties" property), so we need to handle this case + if s.GetProperties() != nil { + return d.parseKind(s, path) + } else { + // Definition has no type and no properties. Treat it as an arbitrary value + // TODO: what if it has additionalProperties or patternProperties? + return d.parseArbitrary(s, path) + } + case 1: + t := objectTypes[0] + switch t { + case object: + if s.GetProperties() != nil { + return d.parseKind(s, path) + } else { + return d.parseMap(s, path) + } + case array: + return d.parseArray(s, path) + } + return d.parsePrimitive(s, path) + default: + // the OpenAPI generator never generates (nor it ever did in the past) OpenAPI type definitions with multiple types + return nil, newSchemaError(path, "definitions with multiple types aren't supported") + } +} + +// LookupModel is public through the interface of Models. It +// returns a visitable schema from the given model name. 
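
ParseSchema above dispatches every definition to exactly one of Kind, Map, Array, Primitive, Arbitrary, or Ref, and callers then walk the result through the SchemaVisitor interface declared in openapi.go later in this patch. A minimal consumer might look like the sketch below (illustrative only, not part of the vendored package; how the Models value is built from an openapi_v2.Document via NewOpenAPIData, and the model name passed in, are assumptions of the example):

// Illustrative only; not part of the vendored kube-openapi package.
package example

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/util/proto"
)

// fieldLister prints the top-level fields of whatever schema it visits.
type fieldLister struct{}

func (fieldLister) VisitKind(k *proto.Kind) {
	for _, name := range k.Keys() {
		fmt.Printf("%s (required=%v)\n", name, k.IsRequired(name))
	}
}
func (fieldLister) VisitArray(a *proto.Array)         { fmt.Println("array of", a.SubType.GetName()) }
func (fieldLister) VisitMap(m *proto.Map)             { fmt.Println("map of", m.SubType.GetName()) }
func (fieldLister) VisitPrimitive(p *proto.Primitive) { fmt.Println("primitive", p.GetName()) }
func (fieldLister) VisitReference(r proto.Reference)  { r.SubSchema().Accept(fieldLister{}) }

// listFields assumes models was built with proto.NewOpenAPIData and that
// name is a definition present in the document; both are example inputs.
func listFields(models proto.Models, name string) {
	if schema := models.LookupModel(name); schema != nil {
		schema.Accept(fieldLister{})
	}
}
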
+func (d *Definitions) LookupModel(model string) Schema { + return d.models[model] +} + +func (d *Definitions) ListModels() []string { + models := []string{} + + for model := range d.models { + models = append(models, model) + } + + sort.Strings(models) + return models +} + +type Ref struct { + BaseSchema + + reference string + definitions *Definitions +} + +var _ Reference = &Ref{} + +func (r *Ref) Reference() string { + return r.reference +} + +func (r *Ref) SubSchema() Schema { + return r.definitions.models[r.reference] +} + +func (r *Ref) Accept(v SchemaVisitor) { + v.VisitReference(r) +} + +func (r *Ref) GetName() string { + return fmt.Sprintf("Reference to %q", r.reference) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go new file mode 100644 index 000000000..f26b5ef88 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -0,0 +1,276 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" +) + +// Defines openapi types. +const ( + Integer = "integer" + Number = "number" + String = "string" + Boolean = "boolean" + + // These types are private as they should never leak, and are + // represented by actual structs. + array = "array" + object = "object" +) + +// Models interface describe a model provider. They can give you the +// schema for a specific model. +type Models interface { + LookupModel(string) Schema + ListModels() []string +} + +// SchemaVisitor is an interface that you need to implement if you want +// to "visit" an openapi schema. A dispatch on the Schema type will call +// the appropriate function based on its actual type: +// - Array is a list of one and only one given subtype +// - Map is a map of string to one and only one given subtype +// - Primitive can be string, integer, number and boolean. +// - Kind is an object with specific fields mapping to specific types. +// - Reference is a link to another definition. +type SchemaVisitor interface { + VisitArray(*Array) + VisitMap(*Map) + VisitPrimitive(*Primitive) + VisitKind(*Kind) + VisitReference(Reference) +} + +// SchemaVisitorArbitrary is an additional visitor interface which handles +// arbitrary types. For backwards compatibility, it's a separate interface +// which is checked for at runtime. +type SchemaVisitorArbitrary interface { + SchemaVisitor + VisitArbitrary(*Arbitrary) +} + +// Schema is the base definition of an openapi type. +type Schema interface { + // Giving a visitor here will let you visit the actual type. + Accept(SchemaVisitor) + + // Pretty print the name of the type. + GetName() string + // Describes how to access this field. + GetPath() *Path + // Describes the field. + GetDescription() string + // Returns type extensions. 
+ GetExtensions() map[string]interface{} +} + +// Path helps us keep track of type paths +type Path struct { + parent *Path + key string +} + +func NewPath(key string) Path { + return Path{key: key} +} + +func (p *Path) Get() []string { + if p == nil { + return []string{} + } + if p.key == "" { + return p.parent.Get() + } + return append(p.parent.Get(), p.key) +} + +func (p *Path) Len() int { + return len(p.Get()) +} + +func (p *Path) String() string { + return strings.Join(p.Get(), "") +} + +// ArrayPath appends an array index and creates a new path +func (p *Path) ArrayPath(i int) Path { + return Path{ + parent: p, + key: fmt.Sprintf("[%d]", i), + } +} + +// FieldPath appends a field name and creates a new path +func (p *Path) FieldPath(field string) Path { + return Path{ + parent: p, + key: fmt.Sprintf(".%s", field), + } +} + +// BaseSchema holds data used by each types of schema. +type BaseSchema struct { + Description string + Extensions map[string]interface{} + + Path Path +} + +func (b *BaseSchema) GetDescription() string { + return b.Description +} + +func (b *BaseSchema) GetExtensions() map[string]interface{} { + return b.Extensions +} + +func (b *BaseSchema) GetPath() *Path { + return &b.Path +} + +// Array must have all its element of the same `SubType`. +type Array struct { + BaseSchema + + SubType Schema +} + +var _ Schema = &Array{} + +func (a *Array) Accept(v SchemaVisitor) { + v.VisitArray(a) +} + +func (a *Array) GetName() string { + return fmt.Sprintf("Array of %s", a.SubType.GetName()) +} + +// Kind is a complex object. It can have multiple different +// subtypes for each field, as defined in the `Fields` field. Mandatory +// fields are listed in `RequiredFields`. The key of the object is +// always of type `string`. +type Kind struct { + BaseSchema + + // Lists names of required fields. + RequiredFields []string + // Maps field names to types. + Fields map[string]Schema +} + +var _ Schema = &Kind{} + +func (k *Kind) Accept(v SchemaVisitor) { + v.VisitKind(k) +} + +func (k *Kind) GetName() string { + properties := []string{} + for key := range k.Fields { + properties = append(properties, key) + } + return fmt.Sprintf("Kind(%v)", properties) +} + +// IsRequired returns true if `field` is a required field for this type. +func (k *Kind) IsRequired(field string) bool { + for _, f := range k.RequiredFields { + if f == field { + return true + } + } + return false +} + +// Keys returns a alphabetically sorted list of keys. +func (k *Kind) Keys() []string { + keys := make([]string, 0) + for key := range k.Fields { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +// Map is an object who values must all be of the same `SubType`. +// The key of the object is always of type `string`. +type Map struct { + BaseSchema + + SubType Schema +} + +var _ Schema = &Map{} + +func (m *Map) Accept(v SchemaVisitor) { + v.VisitMap(m) +} + +func (m *Map) GetName() string { + return fmt.Sprintf("Map of %s", m.SubType.GetName()) +} + +// Primitive is a literal. There can be multiple types of primitives, +// and this subtype can be visited through the `subType` field. +type Primitive struct { + BaseSchema + + // Type of a primitive must be one of: integer, number, string, boolean. 
+ Type string + Format string +} + +var _ Schema = &Primitive{} + +func (p *Primitive) Accept(v SchemaVisitor) { + v.VisitPrimitive(p) +} + +func (p *Primitive) GetName() string { + if p.Format == "" { + return p.Type + } + return fmt.Sprintf("%s (%s)", p.Type, p.Format) +} + +// Arbitrary is a value of any type (primitive, object or array) +type Arbitrary struct { + BaseSchema +} + +var _ Schema = &Arbitrary{} + +func (a *Arbitrary) Accept(v SchemaVisitor) { + if visitor, ok := v.(SchemaVisitorArbitrary); ok { + visitor.VisitArbitrary(a) + } +} + +func (a *Arbitrary) GetName() string { + return "Arbitrary value (primitive, object or array)" +} + +// Reference implementation depends on the type of document. +type Reference interface { + Schema + + Reference() string + SubSchema() Schema +} diff --git a/vendor/vendor.json b/vendor/vendor.json index a9bfbec4f..1297e3a31 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -445,28 +445,52 @@ "revisionTime": "2016-09-30T00:14:02Z" }, { - "checksumSHA1": "APDDi2ohrU7OkChQCekD9tSVUhs=", + "checksumSHA1": "7HCVg+tAuow1iTlDhu/cxPMSzpo=", "path": "github.com/golang/protobuf/jsonpb", - "revision": "84bc9597164f671c0130543778228928d6865c5c", - "revisionTime": "2017-06-08T03:40:07Z" + "revision": "9eb2c01ac278a5d89ce4b2be68fe4500955d8179", + "revisionTime": "2018-06-22T17:40:09Z" }, { - "checksumSHA1": "qlPUeFabwF4RKAOF1H+yBFU1Veg=", + "checksumSHA1": "JZV+pLo8Z/kIYExrZr1Zu9KSKyU=", "path": "github.com/golang/protobuf/proto", - "revision": "5a0f697c9ed9d68fef0116532c6e05cfeae00e55", - "revisionTime": "2017-06-01T23:02:30Z" + "revision": "9eb2c01ac278a5d89ce4b2be68fe4500955d8179", + "revisionTime": "2018-06-22T17:40:09Z" }, { - "checksumSHA1": "Z1gJ3PKzwBpOoPnTSEM5yd0zHYA=", + "checksumSHA1": "DA2cyOt1W92RTyXAqKQ4JWKGR8U=", "path": "github.com/golang/protobuf/protoc-gen-go/descriptor", - "revision": "5a0f697c9ed9d68fef0116532c6e05cfeae00e55", - "revisionTime": "2017-06-01T23:02:30Z" + "revision": "9eb2c01ac278a5d89ce4b2be68fe4500955d8179", + "revisionTime": "2018-06-22T17:40:09Z" }, { - "checksumSHA1": "lZFWy27Qo6+m/keDjNFYTxSmvZw=", + "checksumSHA1": "tkJPssYejSjuAwE2tdEnoEIj93Q=", + "path": "github.com/golang/protobuf/ptypes", + "revision": "9eb2c01ac278a5d89ce4b2be68fe4500955d8179", + "revisionTime": "2018-06-22T17:40:09Z" + }, + { + "checksumSHA1": "G0aiY+KmzFsQLTNzRAGRhJNSj7A=", "path": "github.com/golang/protobuf/ptypes/any", - "revision": "84bc9597164f671c0130543778228928d6865c5c", - "revisionTime": "2017-06-08T03:40:07Z" + "revision": "9eb2c01ac278a5d89ce4b2be68fe4500955d8179", + "revisionTime": "2018-06-22T17:40:09Z" + }, + { + "checksumSHA1": "kjVDCbK5/WiHqP1g4GMUxm75jos=", + "path": "github.com/golang/protobuf/ptypes/duration", + "revision": "9eb2c01ac278a5d89ce4b2be68fe4500955d8179", + "revisionTime": "2018-06-22T17:40:09Z" + }, + { + "checksumSHA1": "dWNJ8UVhDf9NqNOUDtgx2XwRwVg=", + "path": "github.com/golang/protobuf/ptypes/struct", + "revision": "9eb2c01ac278a5d89ce4b2be68fe4500955d8179", + "revisionTime": "2018-06-22T17:40:09Z" + }, + { + "checksumSHA1": "FdeygjOuyR2p5v9b0kNOtzfpjS4=", + "path": "github.com/golang/protobuf/ptypes/timestamp", + "revision": "9eb2c01ac278a5d89ce4b2be68fe4500955d8179", + "revisionTime": "2018-06-22T17:40:09Z" }, { "checksumSHA1": "W+E/2xXcE1GmJ0Qb784ald0Fn6I=", @@ -474,6 +498,12 @@ "revision": "d9eb7a3d35ec988b8585d4a0068e462c27d28380", "revisionTime": "2016-05-29T05:00:41Z" }, + { + "checksumSHA1": "PKVjAJ/e0p/jWtGpZycgoZBLdb4=", + "path": "github.com/google/btree", + "revision": 
"e89373fe6b4a7413d7acd6da1725b83ef713e6e4", + "revisionTime": "2018-01-24T18:54:31Z" + }, { "checksumSHA1": "/yFfUp3tGt6cK22UVzbq8SjPDCU=", "origin": "k8s.io/client-go/1.5/vendor/github.com/google/gofuzz", @@ -487,6 +517,24 @@ "revision": "8b03ce837f3401233d4e32de3a5f0ea3ff1c13c2", "revisionTime": "2018-06-05T15:39:48Z" }, + { + "checksumSHA1": "b7x/5HeWIHXwSRNED5Envt6tRjs=", + "path": "github.com/googleapis/gnostic/OpenAPIv2", + "revision": "48a0ecefe2e4190c7a2d63b477875854a2f993b3", + "revisionTime": "2018-05-20T01:50:35Z" + }, + { + "checksumSHA1": "IW1cHbutRLvkUTJ1QkqSjsu65bY=", + "path": "github.com/googleapis/gnostic/compiler", + "revision": "48a0ecefe2e4190c7a2d63b477875854a2f993b3", + "revisionTime": "2018-05-20T01:50:35Z" + }, + { + "checksumSHA1": "c2QLSomDNNOk7t6Psq4cDLUDX98=", + "path": "github.com/googleapis/gnostic/extensions", + "revision": "48a0ecefe2e4190c7a2d63b477875854a2f993b3", + "revisionTime": "2018-05-20T01:50:35Z" + }, { "checksumSHA1": "ndG2xmm2bas4/t+kIEUp2xIni8c=", "path": "github.com/gophercloud/gophercloud", @@ -571,6 +619,18 @@ "revision": "caf34a65f60295108141f62929245943bd00f237", "revisionTime": "2017-06-07T03:48:29Z" }, + { + "checksumSHA1": "T1uAtgdoQP82t1GaT1eZjhYmFmM=", + "path": "github.com/gregjones/httpcache", + "revision": "9cad4c3443a7200dd6400aef47183728de563a38", + "revisionTime": "2018-03-05T23:10:24Z" + }, + { + "checksumSHA1": "A+TX1jxqy7iWvcb9ZldoG1b5SsY=", + "path": "github.com/gregjones/httpcache/diskcache", + "revision": "9cad4c3443a7200dd6400aef47183728de563a38", + "revisionTime": "2018-03-05T23:10:24Z" + }, { "checksumSHA1": "DCgYY4AJZ2rJV2XpZ/Y00CDl+Jo=", "path": "github.com/grpc-ecosystem/grpc-gateway/runtime", @@ -613,6 +673,18 @@ "revision": "6bb64b370b90e7ef1fa532be9e591a81c3493e00", "revisionTime": "2016-05-03T14:34:40Z" }, + { + "checksumSHA1": "UquR8kc0nKU285HwLbkievlLQz4=", + "path": "github.com/hashicorp/golang-lru", + "revision": "0fb14efe8c47ae851c0034ed7a448854d3d34cf3", + "revisionTime": "2018-02-01T23:52:37Z" + }, + { + "checksumSHA1": "8Z637dcPkbR5HdLQQBp/9jTbx9Y=", + "path": "github.com/hashicorp/golang-lru/simplelru", + "revision": "0fb14efe8c47ae851c0034ed7a448854d3d34cf3", + "revisionTime": "2018-02-01T23:52:37Z" + }, { "checksumSHA1": "E3Xcanc9ouQwL+CZGOUyA/+giLg=", "path": "github.com/hashicorp/serf/coordinate", @@ -644,10 +716,10 @@ "revisionTime": "2016-08-03T19:07:31Z" }, { - "checksumSHA1": "TAUvj09/lPHXKRl6NXnptwY+WJI=", + "checksumSHA1": "NY84PNFBfNvxJgNEMVKUOJhcVsY=", "path": "github.com/json-iterator/go", - "revision": "e7c7f3b33712573affdcc7a107218e7926b9a05b", - "revisionTime": "2018-02-14T06:06:32Z" + "revision": "f2b4162afba35581b6d4a50d3b8f34e33c144682", + "revisionTime": "2018-06-12T20:28:35Z" }, { "checksumSHA1": "gA95N2LM2hEJLoqrTPaFsSWDJ2Y=", @@ -709,6 +781,18 @@ "revision": "3864e76763d94a6df2f9960b16a20a33da9f9a66", "revisionTime": "2018-05-23T09:45:22Z" }, + { + "checksumSHA1": "ZTcgWKWHsrX0RXYVXn5Xeb8Q0go=", + "path": "github.com/modern-go/concurrent", + "revision": "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94", + "revisionTime": "2018-03-06T01:26:44Z" + }, + { + "checksumSHA1": "ZMzoxY0Lv/LbDmyJXFjlMOyXZC4=", + "path": "github.com/modern-go/reflect2", + "revision": "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd", + "revisionTime": "2018-07-01T02:34:20Z" + }, { "checksumSHA1": "ZYfqG6bNE3cRlbsvpJBL0bF6DSc=", "path": "github.com/mwitkow/go-conntrack", @@ -751,6 +835,12 @@ "revision": "6edb48674bd9467b8e91fda004f2bd7202d60ce4", "revisionTime": "2017-02-06T22:16:52Z" }, + { + "checksumSHA1": 
"1DFgH7ZOfpUqg0Jsf2719jVgrew=", + "path": "github.com/peterbourgon/diskv", + "revision": "0646ccaebea1ed1539efcab30cae44019090093f", + "revisionTime": "2018-03-12T05:41:25Z" + }, { "checksumSHA1": "jhWbikPpDRvfqYPOvEbZUXzg7KM=", "origin": "github.com/cockroachdb/cockroach/vendor/github.com/petermattis/goid", @@ -941,6 +1031,12 @@ "revision": "94eea52f7b742c7cbe0b03b22f0c4c8631ece122", "revisionTime": "2017-11-28T01:43:40Z" }, + { + "checksumSHA1": "BGm8lKZmvJbf/YOJLeL1rw2WVjA=", + "path": "golang.org/x/crypto/ssh/terminal", + "revision": "a49355c7e3f8fe157a85be2f77e6e269a0f89602", + "revisionTime": "2018-06-20T09:14:27Z" + }, { "checksumSHA1": "NjyXtXsaf0ulRJn6HQSP1FqGL4A=", "path": "golang.org/x/net/bpf", @@ -1068,10 +1164,10 @@ "revisionTime": "2018-03-21T07:56:37Z" }, { - "checksumSHA1": "fpW2dhGFC6SrVzipJx7fjg2DIH8=", + "checksumSHA1": "zc2NI38L40/N4+pjd9P2ESz68/0=", "path": "golang.org/x/sys/windows", - "revision": "c200b10b5d5e122be351b67af224adc6128af5bf", - "revisionTime": "2016-10-22T18:22:21Z" + "revision": "7138fd3d9dc8335c567ca206f4333fb75eb05d56", + "revisionTime": "2018-06-27T13:57:12Z" }, { "checksumSHA1": "PjYlbMS0ttyZYlaevvjA/gV3g1c=", @@ -1380,826 +1476,1242 @@ "versionExact": "v2.2.0" }, { - "checksumSHA1": "yugSOlAEbiAH8bhs5RLNAAmR3Cc=", + "checksumSHA1": "dwwoN+THAC1dJELaF0DrXOMXR74=", + "path": "k8s.io/api/admissionregistration/v1alpha1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "8VSg48eO9NBecLLTVkYX85Zp3LY=", + "path": "k8s.io/api/admissionregistration/v1beta1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "U8E+RGFeHega1nzYmvR/1nWW1xA=", + "path": "k8s.io/api/apps/v1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "4nDncDbSdLq98ulRFY/PWAiuQFI=", + "path": "k8s.io/api/apps/v1beta1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "FX4dh0uNY5RF3SlPibdLbOzb55M=", + "path": "k8s.io/api/apps/v1beta2", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "sStxSsHYYOF/Ng2BghlkXRM4X8I=", + "path": "k8s.io/api/authentication/v1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "vlwMx95TM2Rs+8RMOrCDIUqi5fs=", + "path": "k8s.io/api/authentication/v1beta1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "MfbtRFxCyFct8Ld3KUdvCy474OA=", + "path": "k8s.io/api/authorization/v1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "z7uwHG5MT1hLDxrfMmgnAvW7Lh4=", + "path": 
"k8s.io/api/authorization/v1beta1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "F/9IiQujGa3jd00I44A/VCEGqpI=", + "path": "k8s.io/api/autoscaling/v1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "gh0PAIQgLHXKEyHErIa6gsgZVck=", + "path": "k8s.io/api/autoscaling/v2beta1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "obBQZzsZ73+XrOMGE9eZCtmyQy8=", + "path": "k8s.io/api/batch/v1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "5/n99A0YA7Mua6CWsA8+nOCbQBY=", + "path": "k8s.io/api/batch/v1beta1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "YSvVNradMcxbtAsV/lZ7so2epuo=", + "path": "k8s.io/api/batch/v2alpha1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "tQn8nKMN3Jz0S4DT8XRRnLNvFcY=", + "path": "k8s.io/api/certificates/v1beta1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "Lf5+f7kznebpHsu5h6+EbnDLnxM=", + "path": "k8s.io/api/core/v1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "h6g5DS23Tp60JxqxxGMmpzCvmGs=", + "path": "k8s.io/api/events/v1beta1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "JiG9bqYvibLrSmlPvvMWEfszvf4=", + "path": "k8s.io/api/extensions/v1beta1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "QwfUIpcDUmc+VxOm5QoZ2Z04M8k=", + "path": "k8s.io/api/networking/v1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "vrJtdnzz4Jm07d7xVUQKn4Ir5dg=", + "path": "k8s.io/api/policy/v1beta1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "wh74w4e3EbLfEt42C4o+lgCUY20=", + "path": "k8s.io/api/rbac/v1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "i/R4LHBt17/gIunG8Ag6i5sdgC4=", + "path": "k8s.io/api/rbac/v1alpha1", + "revision": 
"072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "CqdYVuIF4UjFJfbTLgcv/WAmnwk=", + "path": "k8s.io/api/rbac/v1beta1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "CF/x2MgKu45Ef2wuvioZCsOhET0=", + "path": "k8s.io/api/scheduling/v1alpha1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "byaZc4/ja52U1d8CXDy4bx9JaLM=", + "path": "k8s.io/api/scheduling/v1beta1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "O3uEUwGLKgjxDVfrc378/bKCc3Q=", + "path": "k8s.io/api/settings/v1alpha1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "ESckATJEFPchuhy508Ut19Z3V8E=", + "path": "k8s.io/api/storage/v1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "YsUdNVYxIl8SlKf+I2+vrJb1hdM=", + "path": "k8s.io/api/storage/v1alpha1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "fv3HsnQdyjeY/q4b0ClRm9bw2Ko=", + "path": "k8s.io/api/storage/v1beta1", + "revision": "072894a440bdee3a891dea811fe42902311cd2a3", + "revisionTime": "2018-06-28T04:08:59Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "eD3XRvVzKmBUr4mFMD9hAMMuLSE=", "path": "k8s.io/apimachinery/pkg/api/errors", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "jy7AVFN9kxRRc/+IcEBrW0/w6RY=", + "checksumSHA1": "f8wIbNQndGers0bKQdewuuaF6l4=", "path": "k8s.io/apimachinery/pkg/api/meta", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "0iZGAJ4tN12kkf6L9/HPbMMfiKY=", + "checksumSHA1": "8McjjV/wh20eIcYF7wtn9eknvsw=", "path": "k8s.io/apimachinery/pkg/api/resource", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "e/FuiIWG97eGJ+48yAZPw/THUlk=", - "path": "k8s.io/apimachinery/pkg/apimachinery", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "checksumSHA1": "i+fzIBZqazVvACbGXKi2CZlhHXg=", + "path": 
"k8s.io/apimachinery/pkg/apis/meta/internalversion", + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "Hd4ESY5hCC8x73iVZTB21vMd3nw=", - "path": "k8s.io/apimachinery/pkg/apimachinery/announced", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" - }, - { - "checksumSHA1": "BV1QVm2PziaEQEQalt5X3zCRW28=", - "path": "k8s.io/apimachinery/pkg/apimachinery/registered", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" - }, - { - "checksumSHA1": "WPFsKHs9NX4OGMVjKanWqeEhPl0=", + "checksumSHA1": "ER7IAk0VsKZYNy+/iNdIyRI98ug=", "path": "k8s.io/apimachinery/pkg/apis/meta/v1", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "vsWGhOqx/lokFtZOcV0drEqjy78=", + "checksumSHA1": "lqyM0x9dK8ld+UIco6ZIp+BwQmQ=", "path": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "TAFI3FU4ZjvIClZkPcIgc4AWcOQ=", + "checksumSHA1": "/vl3uCclzRnFqcALeXrAJivVkPQ=", + "path": "k8s.io/apimachinery/pkg/apis/meta/v1beta1", + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "5BuGgpCIBZ4o6tT0QjfZD99hbPQ=", "path": "k8s.io/apimachinery/pkg/conversion", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "xEo+z/ITZUVyycIHoPDhK0eMd9c=", + "checksumSHA1": "lZgVHIUDKFlQKJBYQVWqED5sDnE=", "path": "k8s.io/apimachinery/pkg/conversion/queryparams", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "qZzCF/6VlY8r5g8bviNnft1ea4s=", + "checksumSHA1": "zgYf1VhDcfCpouwRgF5Zodz+g6M=", "path": "k8s.io/apimachinery/pkg/fields", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "qWIpmMOqjEEV6KoP/hQg+ur/ogs=", + "checksumSHA1": "YigOuZWh1gZXOApMGqpyWp7fF/A=", "path": "k8s.io/apimachinery/pkg/labels", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": 
"9BqzTUa7RFscc25qyAQKLkAYHgY=", - "path": "k8s.io/apimachinery/pkg/openapi", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" - }, - { - "checksumSHA1": "mSWcGlciPb7Nmo6Nhy8ic5rqf70=", + "checksumSHA1": "a6oTX4dV6mluH9h5ynzDvj38858=", "path": "k8s.io/apimachinery/pkg/runtime", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "SHAUerkMKVWY30mfLu3Lca5Hx9U=", + "checksumSHA1": "u4uoo6zLkcKCnS3B9i82Y4wMBmo=", "path": "k8s.io/apimachinery/pkg/runtime/schema", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { "checksumSHA1": "zrzqafjVBMG7MbUDRkDjvb/tKjM=", "path": "k8s.io/apimachinery/pkg/runtime/serializer", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "h1WROKxiVBW5FH6InZr89w4rhUs=", + "checksumSHA1": "jpkekuS0xyd/I6OOxqISFR35WbA=", "path": "k8s.io/apimachinery/pkg/runtime/serializer/json", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { "checksumSHA1": "inmKlIskO3iMYraepPTfQPf6a+I=", "path": "k8s.io/apimachinery/pkg/runtime/serializer/protobuf", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { "checksumSHA1": "CxRm8Ny1sWZVlEw1WBfbOMtJP40=", "path": "k8s.io/apimachinery/pkg/runtime/serializer/recognizer", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { "checksumSHA1": "U768j7bVYHO5DwtNAHdIuBtpFMc=", "path": "k8s.io/apimachinery/pkg/runtime/serializer/streaming", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "9K+3sgiTaGZTnv8KXf8UjR8a5lQ=", + "checksumSHA1": "nVwU3gx4pfiMyHOUBgrxyQFaeIM=", "path": "k8s.io/apimachinery/pkg/runtime/serializer/versioning", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { "checksumSHA1": "p9Wv7xurZXAW0jYL/SLNPbiUjaA=", "path": "k8s.io/apimachinery/pkg/selection", - "revision": 
"75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "pPDILUNQnLdZq//lQfcbouI8zOs=", + "checksumSHA1": "SwEleXXE22RGlg0t7wirGDuisO4=", "path": "k8s.io/apimachinery/pkg/types", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "NtbgtroNspdoTC3jSyvEUfZ16dc=", + "checksumSHA1": "1ufnWmjIV06+kIedtpUTD1P6XNI=", + "path": "k8s.io/apimachinery/pkg/util/cache", + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "Gtwo9p42g21v46mUVvj/Qh8yUSE=", + "path": "k8s.io/apimachinery/pkg/util/clock", + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "IY8z33RHtfcmVLs7eyLpUCpZt7A=", "path": "k8s.io/apimachinery/pkg/util/diff", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "0yXRmDoh+ixnHcPgD2TVuQr96Wc=", + "checksumSHA1": "hHYW7OWXFMbRVotl2830pa4kO+Y=", "path": "k8s.io/apimachinery/pkg/util/errors", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { "checksumSHA1": "bQOeFTINeXcfMOAOPArjRma/1mk=", "path": "k8s.io/apimachinery/pkg/util/framer", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "1D6uxyKvuYcpf5dWSuAkju4V74U=", + "checksumSHA1": "9nShqMfXfw5Ct0Rr7P2QhQObVhw=", "path": "k8s.io/apimachinery/pkg/util/intstr", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "pHhdHm4qbXomRjEXMRgJiAI3LnQ=", + "checksumSHA1": "NaIzEbjlhEMikewzRXivi2r0xyM=", "path": "k8s.io/apimachinery/pkg/util/json", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "lgGrBbreZzae+kWnbZx0JRHlww0=", + "checksumSHA1": "rvTUvuVwU6NmzJiseUnl4KQ9498=", + "path": "k8s.io/apimachinery/pkg/util/mergepatch", + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + 
"version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "OrKcR7aVHsamDGWtb2Z0/kAVOtk=", "path": "k8s.io/apimachinery/pkg/util/net", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "CIMK51VUL/ytdt9K6aDwcoN2OWQ=", + "checksumSHA1": "/y9iJ+ZCp2LabYLPPyGroXmbufY=", "path": "k8s.io/apimachinery/pkg/util/rand", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "Om1Zl0Z1mqcAgbl7rL6U3lqw29U=", + "checksumSHA1": "keWEyQHGKGkDo2SARgphbGgN608=", "path": "k8s.io/apimachinery/pkg/util/runtime", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "326d8P0Tw6T7B4pyvBbr+6devTU=", + "checksumSHA1": "ozEwMzA48zwMyQ50SwNKSM852U4=", "path": "k8s.io/apimachinery/pkg/util/sets", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "7gNiplzDd213IdDzfBtH9ZLQIac=", + "checksumSHA1": "COpKHNN+0YT7fn4pColmA/OVrFQ=", + "path": "k8s.io/apimachinery/pkg/util/strategicpatch", + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "0zZltQ8NUw+PeVotck4dqCKT/JY=", "path": "k8s.io/apimachinery/pkg/util/validation", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "aN6xdUVpDdALXX7eXIpHkB2KeOI=", + "checksumSHA1": "bEa1fAicif72PvAU9r4GGylfVa0=", "path": "k8s.io/apimachinery/pkg/util/validation/field", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "wQmzvUriA0AIc5pd64EG+Zjz4IA=", + "checksumSHA1": "WVUAXkkBeN3m2LenBRySrdk97PE=", "path": "k8s.io/apimachinery/pkg/util/wait", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "BTh0mc+q8UHMqwS8vhILP1ORZO8=", + "checksumSHA1": "1bUNGHe9gSNhcdE2EKl+h+VPuDY=", "path": "k8s.io/apimachinery/pkg/util/yaml", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": 
"103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "jNSfgzj8hPVchQHSblfJ1U/YO2g=", + "checksumSHA1": "A8tJJJhCK3xynuOJMiSw4jz27wg=", "path": "k8s.io/apimachinery/pkg/version", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "tNFWlf1AlPjwt45gyZN3jmsIN1U=", + "checksumSHA1": "LmSsUNmqd9ipMsZvsC8a3ZNsodE=", "path": "k8s.io/apimachinery/pkg/watch", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "bce6lDX/UQHPd1qRfo9d9B2dweU=", + "checksumSHA1": "Z0LTjblQqzGRHcnruaPaC6kBIiQ=", + "path": "k8s.io/apimachinery/third_party/forked/golang/json", + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "9sFA+EjKrjpmK4OofQH0p0Rowfg=", "path": "k8s.io/apimachinery/third_party/forked/golang/reflect", - "revision": "75b8dd260ef0469d96d578705a87cffd0e09dab8", - "revisionTime": "2017-03-21T21:09:47Z" + "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", + "revisionTime": "2018-06-21T03:03:51Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "aZfBu8yGnljbJHjiz20WLiSMsfI=", + "checksumSHA1": "WMIZypTArr2Rj7YIOmbmp/+yZkY=", "path": "k8s.io/client-go/discovery", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "+jrs5ykzHiHVJf2t8csxCBAFdzc=", + "checksumSHA1": "OJC5GW5nqC/9wJkBfgxUhtJcK3Y=", "path": "k8s.io/client-go/discovery/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "s0L1FiqMM+WQke6bVnkIjtrjfwU=", + "checksumSHA1": "Uvb/9qGnLy/XSRWjpHlEfWYU+As=", "path": "k8s.io/client-go/kubernetes", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "7VkzfJqPrknsEQ+kDUv0zrjbCkg=", + "checksumSHA1": "HiSKJ3I/VmveJ8CaP3F+si3t0do=", "path": "k8s.io/client-go/kubernetes/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "rRh34tAEtq08Aae/YBTlLXV8Mbw=", + "checksumSHA1": "NpFjNGtZNV0o0Jo7oG7jmjIJ4W0=", "path": "k8s.io/client-go/kubernetes/scheme", - 
"revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "mPYUwNdFVuICD7UiiqkU73gSGfA=", + "checksumSHA1": "2sVEoijJZ8VDJRY+Q38NnxMQqzc=", + "path": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "ZgHl+SYk5mqDKc/qjGQgQ0FqnMs=", + "path": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "gpF24Wissl7J5PSi6h1FE+5cKt4=", + "path": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "VcxNmvWKizaYMd6HzFMN/XHxma4=", + "path": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "xVKYvZSUmsh5gdX4cKpqMucuKIs=", + "path": "k8s.io/client-go/kubernetes/typed/apps/v1", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "J0/1tTmAGFsZBdTaQzN0pVN0Pao=", + "path": "k8s.io/client-go/kubernetes/typed/apps/v1/fake", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "/u8hJMzPAhaRnYV3N/2kWeo6Gbc=", "path": "k8s.io/client-go/kubernetes/typed/apps/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "uIG5dilN0SvarvhtfLju8bmghrs=", + "checksumSHA1": "u4+WQ3qbxgQQi/d9idLoYH8Pw4w=", "path": "k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "oIuu1tM2ZCePxQW03oiUIS0cqm8=", + "checksumSHA1": "7nPY3WCV09+bSkK0fK1pkcYOP3Q=", + "path": "k8s.io/client-go/kubernetes/typed/apps/v1beta2", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "kDbF3yTLCTDIWaSiQe8QKnXLkUw=", + "path": "k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": 
"kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "z8RBQhqz0ef52GAYcGnHbdkRLQw=", "path": "k8s.io/client-go/kubernetes/typed/authentication/v1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "8LZmwjKC6W9CoEIwqUZmWYbmGYM=", + "checksumSHA1": "FQdMfDrQILCK3PEEQVwM+hM2Sno=", "path": "k8s.io/client-go/kubernetes/typed/authentication/v1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "gxfSKIFSwg0Q9lQ9Ru6VBdgS7nc=", + "checksumSHA1": "xNCsaw6UsrEtSjg+2rMYZueCBIM=", "path": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "mFYg73PoqoPp5tIBdqgDzQZlCvE=", + "checksumSHA1": "COsWrq4VbNfuFe+xPfSIA8vvmXs=", "path": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "gauXUMKCxFFQR0vYjMgWa0JEdFY=", + "checksumSHA1": "m8sWDYsVaRB+meitgU1dx+1P0ow=", "path": "k8s.io/client-go/kubernetes/typed/authorization/v1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "TzsZJ0tcWDjJJYQNsE1xBDDnWVo=", + "checksumSHA1": "WkB+L76pnmGC9E/N/pw5UMKWy8w=", "path": "k8s.io/client-go/kubernetes/typed/authorization/v1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "9k1t+56EdB1mQkBWGw23tiAPuNQ=", + "checksumSHA1": "XucNO6BHSXXNkSQAM1ZdiwDmksc=", "path": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "p/YpEYgQdbRURU2pevqa5pGdjzI=", + "checksumSHA1": "89z3RD2/yK6XDKRY8z6wC5hDbn0=", "path": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": 
"kubernetes-1.11.0" }, { - "checksumSHA1": "QJVvfabGlmrHydChSbkIYIKDMUY=", + "checksumSHA1": "5D9gl3fKkhnLSC17fkz9XIV9STc=", "path": "k8s.io/client-go/kubernetes/typed/autoscaling/v1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "YhBK7DmjKILddLWySOG3txerEQk=", + "checksumSHA1": "n+iIJ/VtbdfLiBY8GCIfa+Oy3NI=", "path": "k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "OlZ/WP9Lv6SQeQPn6S69cwv8opE=", - "path": "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "checksumSHA1": "iCPo6cOIT5Z9diNHHUcGyrMDZn8=", + "path": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "iarCmh9glRJFUqeJ1Hem0mkPg78=", - "path": "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "checksumSHA1": "Cs44IvmNBGghs1TtXfK840qhVNw=", + "path": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "bv+dr19WQcDrubXbIUv2toN4DWc=", + "checksumSHA1": "PxB/TF7pmos3op2z+LIJLjZO8jM=", "path": "k8s.io/client-go/kubernetes/typed/batch/v1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "daYvaF5xO2ixbNF0sm6DPgVLFR4=", + "checksumSHA1": "40K3NE6HDzWKk/a+2ZT10mu4r5k=", "path": "k8s.io/client-go/kubernetes/typed/batch/v1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "3rgXMCVpnVPMpqiwVLcZ6AUx9VA=", + "checksumSHA1": "UFKWPad/llvBljTbZqyekULpPKM=", + "path": "k8s.io/client-go/kubernetes/typed/batch/v1beta1", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "xwAzWNXPmWGTtCaUYj8usnvU+ps=", + "path": "k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "7FFOahu3KVI7fiWhA/k1yluvIJY=", "path": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1", - 
"revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "+uliLGbqpGJddbhT02XygodyyJU=", + "checksumSHA1": "zlvCwlKLZMLcDsdD3sdqQmzxZMY=", "path": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "wVHdBwUm3iiSr2FIOFDmveLc44c=", + "checksumSHA1": "ugmMQOQlWNAEOR+ncbnL+uxkjJ8=", "path": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "n9SF+tWS6E+N3XcQsTdjNCk8s8s=", + "checksumSHA1": "D41Z70J8gWfUyJoxlCMZzzvVBZk=", "path": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "Fr6enVUrMGiReymOpl7G5Jb4x6Y=", + "checksumSHA1": "0E6RtvR8px6fkUWtT5FYjVcowEI=", "path": "k8s.io/client-go/kubernetes/typed/core/v1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "Ra0Aft6z8eCcoo8Zo1rdjFB3h6w=", + "checksumSHA1": "U3kQTqGjstKRZIZyuZlqZ9rcPv8=", "path": "k8s.io/client-go/kubernetes/typed/core/v1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "3liVtBqf4n49k/E8ZHG8a5V57Gw=", + "checksumSHA1": "EG2zaZ66o181NTvDIA1pF+sR+mI=", + "path": "k8s.io/client-go/kubernetes/typed/events/v1beta1", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "qk79twOWQSohS4tcmq/v2b5BeNc=", + "path": "k8s.io/client-go/kubernetes/typed/events/v1beta1/fake", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "X5wptrr04pFFgjZBvNejIFKf3YU=", "path": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "x14WWLz+GU2JUbLPEZE2Tfh5b4s=", + 
"checksumSHA1": "GkBjc3tePgQVj1Aq4TbWSKcmJ/I=", "path": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "qXM2hNXC5tGX03uImzx4XSPUU1s=", + "checksumSHA1": "2BNRJgbx2OKqe4PnZrEo/LUI1gI=", + "path": "k8s.io/client-go/kubernetes/typed/networking/v1", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "EAQwsMbhLKR6g9yc8b66FXHDP6I=", + "path": "k8s.io/client-go/kubernetes/typed/networking/v1/fake", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "iv+leVP2ERDUBKVaD82rrt8x5EI=", "path": "k8s.io/client-go/kubernetes/typed/policy/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "JoKTxuysAth1eDvKT7DgIdKc4E0=", + "checksumSHA1": "MH6FxyA9Z5h/XoyI21lpEbVAnL4=", "path": "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "DXqxFLINH6dV1XE6nsvix66mUnc=", + "checksumSHA1": "t1v/kvkiHgsoRGB2SpS9wx7Kd1s=", + "path": "k8s.io/client-go/kubernetes/typed/rbac/v1", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "s1klULhKt+ZCj4vHwj99d3Fc4pw=", + "path": "k8s.io/client-go/kubernetes/typed/rbac/v1/fake", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "WBzIRDYpTWf5X0fl7Q9ZsYaayOI=", "path": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "MCuQLrhDu/4zVgDAaXk1tcxzROU=", + "checksumSHA1": "dz1K96PXoJxUlGjpIYCbHRw6qDc=", "path": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "dsF5arKiA1Mgfe5bx3EznJtI+XM=", + "checksumSHA1": "2Db2rdjkeF2FUjL3zcAT518HNUw=", "path": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": 
"2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "RXEoxZ9KePlXA3sQI6VWsmjjyT0=", + "checksumSHA1": "o5JMZoPym8c87/uEHZryhuLzCoI=", "path": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "I0RCWyWrT83nbEwvIRw3AErv/uU=", + "checksumSHA1": "E2UC+VmKSUekz96/0UX+fK0jOGg=", + "path": "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "tK02uMqdP/74hP99dQq69rmwQ2o=", + "path": "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "S9JHuu0Bwyc7RumKvhMJC49MtkU=", + "path": "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "nL6dR8jAzjs/1PPzS/KYYeZMwnQ=", + "path": "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "L7SxjL9g+rlgK8AEj4SocDiMzjU=", "path": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "hKYa0n7CB086ZXM2fm80ALv4rEY=", + "checksumSHA1": "psDBGAD3dpKFK/tAmBXExk1Li6w=", "path": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "FQb3kXWYLcURqpZp9J1g1vUDtqU=", + "checksumSHA1": "NG+izjfroQyI3r6Ar1IK47KQknU=", "path": "k8s.io/client-go/kubernetes/typed/storage/v1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "e90Qa6qCiuLK8zNHj4LtwhJIaHg=", + "checksumSHA1": "ceZKkz57Ij6cHfx13qX2J9MUMbg=", "path": "k8s.io/client-go/kubernetes/typed/storage/v1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + 
"versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "By0ty5ntYvanInEXAgnS9VKln2U=", + "checksumSHA1": "hLN5UXfDgw36zcIfpgo4WCZraCA=", + "path": "k8s.io/client-go/kubernetes/typed/storage/v1alpha1", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "Koig4n3Fs1OmIVAsXwKDdlGTSuE=", + "path": "k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "uhq68WTNsbCjKPqAwym7TPUx9Xg=", "path": "k8s.io/client-go/kubernetes/typed/storage/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "sOustVhVGdnkX6YhJFQmpIJTK0E=", + "checksumSHA1": "nGtzXxSCQV+6czFpj0PwHqwCW3U=", "path": "k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "NnHqF5QII9GH4l2bBCO4WAI9tPw=", - "path": "k8s.io/client-go/pkg/api", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "checksumSHA1": "RncCbxGWIN/JJvMtXrww2XUHKb8=", + "path": "k8s.io/client-go/pkg/apis/clientauthentication", + "revision": "82b3a5753636104067ccc0759f33c870d17686dd", + "revisionTime": "2018-06-30T03:14:19Z" }, { - "checksumSHA1": "8a+TkAQbGwIr7xn1mq4A/4W5Zi8=", - "path": "k8s.io/client-go/pkg/api/install", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "checksumSHA1": "qoi7qmT3tlLpy3WwdZSFdBX9fUE=", + "path": "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1", + "revision": "82b3a5753636104067ccc0759f33c870d17686dd", + "revisionTime": "2018-06-30T03:14:19Z" }, { - "checksumSHA1": "xR+RJV1Y0ApIxJc5HJ9ROPJzAUE=", - "path": "k8s.io/client-go/pkg/api/v1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "checksumSHA1": "XxLmPLR1YkfDTLqlXpONaw9vI5o=", + "path": "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1", + "revision": "82b3a5753636104067ccc0759f33c870d17686dd", + "revisionTime": "2018-06-30T03:14:19Z" }, { - "checksumSHA1": "LXlUYCUTS+vvHBunesoJIoQpjeg=", - "path": "k8s.io/client-go/pkg/apis/apps", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "pWRcTUwLPlUrxnuSf13Je9vUb5k=", - "path": "k8s.io/client-go/pkg/apis/apps/install", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "ey+jPC0jAsdJMRXytd/Kf5ud8+4=", - "path": "k8s.io/client-go/pkg/apis/apps/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "1Bgq5sNb7Lr/DpAQ18tSfdcxRc0=", - "path": "k8s.io/client-go/pkg/apis/authentication", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": 
"2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "tcR4IcZhoRhb2ESgint0e+ciCFg=", - "path": "k8s.io/client-go/pkg/apis/authentication/install", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "YKQVSsTKKDyORxuu032FmGwak+A=", - "path": "k8s.io/client-go/pkg/apis/authentication/v1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "1d8umgBD03+/B0dPkm8ibEfXnYY=", - "path": "k8s.io/client-go/pkg/apis/authentication/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "fROwttd7znBTHtVgI5fptVch/E0=", - "path": "k8s.io/client-go/pkg/apis/authorization", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "hy7/VvKfkGFFBmuvGQx0AkR6rf0=", - "path": "k8s.io/client-go/pkg/apis/authorization/install", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "5BUDpdzd0RJ386SDbQAEitJ7gqk=", - "path": "k8s.io/client-go/pkg/apis/authorization/v1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "zsRAXUsCpt0MEpKaMPyUMSQ4W6U=", - "path": "k8s.io/client-go/pkg/apis/authorization/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "RcNhGbiTRR9lgY5SuGeH/pnCTUo=", - "path": "k8s.io/client-go/pkg/apis/autoscaling", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "QonQuKQ/5PKrXJrrROhecb2sks4=", - "path": "k8s.io/client-go/pkg/apis/autoscaling/install", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "fqjBkYwQuCHRtqGRlsM7CCY/oJY=", - "path": "k8s.io/client-go/pkg/apis/autoscaling/v1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "yVDh7CTkmsXAmyPlm0WnWDbbBDo=", - "path": "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "mCfnnrJMt42LaBFP70iMAn1tglQ=", - "path": "k8s.io/client-go/pkg/apis/batch", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "nL13CbbwoobyU3L2XEuYte21BXQ=", - "path": "k8s.io/client-go/pkg/apis/batch/install", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "KPiDNUjzRvxOxWWGbsl/wkzsnts=", - "path": "k8s.io/client-go/pkg/apis/batch/v1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "kk+5pPULA7r+HVhORCY7xvnjLYA=", - "path": "k8s.io/client-go/pkg/apis/batch/v2alpha1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "UvjuR9+K7/J4egvLRCJlFOF86T8=", - "path": "k8s.io/client-go/pkg/apis/certificates", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "R617NaU9x7BC5qLCH8KayH4oVeo=", - "path": 
"k8s.io/client-go/pkg/apis/certificates/install", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "XnYE+PADIIf6LPbf/TB3vmyPVj4=", - "path": "k8s.io/client-go/pkg/apis/certificates/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "V5SktS1jIYIRcC/WZMuO8XXB0lg=", - "path": "k8s.io/client-go/pkg/apis/extensions", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "StZg3zfhODhOM7iSvZE1S4eS71M=", - "path": "k8s.io/client-go/pkg/apis/extensions/install", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "r03CaZpSonU1YhTMALq3a+9fePA=", - "path": "k8s.io/client-go/pkg/apis/extensions/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "uK2jynPvFbMEMWF7hr1GyJubgVQ=", - "path": "k8s.io/client-go/pkg/apis/policy", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "9RnFX43MNdGVGi1V/30t7k/HSME=", - "path": "k8s.io/client-go/pkg/apis/policy/install", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "t9ESUxOQm/+3iDK46b/HUgb4Hm8=", - "path": "k8s.io/client-go/pkg/apis/policy/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "O1jhGYkRjesZMcVtHFm1rRMRTJo=", - "path": "k8s.io/client-go/pkg/apis/rbac", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "OvZ8dIDwbOwQGiRP8JbyC5hjE4k=", - "path": "k8s.io/client-go/pkg/apis/rbac/install", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "lZNbMgiUj55f9OXhAYFJINPstow=", - "path": "k8s.io/client-go/pkg/apis/rbac/v1alpha1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "bQV+lTglPnO1dqeibaEaMBCrjLA=", - "path": "k8s.io/client-go/pkg/apis/rbac/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "Q1+vWNab1/wGZc6KYpwFSx7/QbM=", - "path": "k8s.io/client-go/pkg/apis/settings", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "npUnJMBJNwv9CPanReQ0X3rP8Z4=", - "path": "k8s.io/client-go/pkg/apis/settings/install", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "J54pr0/7r5xGm8gT55ZzZ5KyN7I=", - "path": "k8s.io/client-go/pkg/apis/settings/v1alpha1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "a0Q8SOgvkSxIuEu665UtgDtmpCU=", - "path": "k8s.io/client-go/pkg/apis/storage", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "m6lo9e5aXsfZNNc1KStRJ0Mw2cY=", - "path": "k8s.io/client-go/pkg/apis/storage/install", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": 
"2TAFlLYih7oRtZ8KbvMmIrziGXQ=", - "path": "k8s.io/client-go/pkg/apis/storage/v1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "MnArzVKB12JFUZDbCvaaLCBRpxU=", - "path": "k8s.io/client-go/pkg/apis/storage/v1beta1", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "/wssE6/+5UY8QrrbYnpkieVwp8Y=", - "path": "k8s.io/client-go/pkg/util", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "U4YjTHHqEs+YZFYdiV1exJyuwDM=", - "path": "k8s.io/client-go/pkg/util/parsers", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" - }, - { - "checksumSHA1": "ApqZCY/0FjkcTCymLF/hn1557rc=", + "checksumSHA1": "SKxW+zPUzKmMZDo5SO9rAHf4FcM=", "path": "k8s.io/client-go/pkg/version", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "WKKCrNPVWbJYm1im1DA9145z6xI=", + "checksumSHA1": "VKRcI8rC6j+eoR4xYigvb3msdR0=", + "path": "k8s.io/client-go/plugin/pkg/client/auth/exec", + "revision": "82b3a5753636104067ccc0759f33c870d17686dd", + "revisionTime": "2018-06-30T03:14:19Z" + }, + { + "checksumSHA1": "WAtl5P1bup5IS4is2kEc5WBN3YU=", "path": "k8s.io/client-go/rest", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { "checksumSHA1": "+wYCwQaVc1GvMWwOWc3wXd2GT5s=", "path": "k8s.io/client-go/rest/watch", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "+v4dKGC56bjDp8wDVc//oCT+5p0=", + "checksumSHA1": "I3bHgG6WAdMckRtd+KamQHlPRPY=", "path": "k8s.io/client-go/testing", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "ITSxtOSOLAw9NIIJDXhauEtzo+M=", + "checksumSHA1": "fgSEqgz8lHM2xcoq1sMVOudDFRA=", "path": "k8s.io/client-go/tools/cache", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "1jUQmUDroNs5XYhsQna4kNTzMik=", + "checksumSHA1": "ynHaVRHZ3FI66y573C05GL3s0t4=", "path": "k8s.io/client-go/tools/clientcmd/api", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { "checksumSHA1": "rRC9GCWXfyIXKHBCeKpw7exVybE=", "path": "k8s.io/client-go/tools/metrics", - 
"revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "X7AC6lZGlUJdhKLplcncvP7e8Jc=", + "checksumSHA1": "E8JeIq/Zl3X94M8aPZuvYI0S8gg=", + "path": "k8s.io/client-go/tools/pager", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "n2h6CjJR1o+JM2snIIsCdcMMDWs=", + "path": "k8s.io/client-go/tools/reference", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "xjOr+rKimhz7M8LySdtXK0xX7cs=", "path": "k8s.io/client-go/transport", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "y6BpqJVXsBeykiGljn95RvGONCg=", + "checksumSHA1": "lTGzV+HgAXM6vWKFfedFLlIl94E=", + "path": "k8s.io/client-go/util/buffer", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "rIxDMrsqcf1cjy3dgMaEZw/TvJs=", "path": "k8s.io/client-go/util/cert", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "kyu4NFcUmCTsUMpsPeyFrigorCE=", - "path": "k8s.io/client-go/util/clock", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "checksumSHA1": "KSB22yQOaax0P/RkHCDp80u+Jcs=", + "path": "k8s.io/client-go/util/connrotation", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "pCmdxpwnhYqoDSls5ehHtQFglm4=", + "checksumSHA1": "yXKT7cJNCv5vjwGKhpc/DUsQg+A=", "path": "k8s.io/client-go/util/flowcontrol", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { "checksumSHA1": "W6bJiNAwgNwNJC+y+TPtacgUGlY=", "path": "k8s.io/client-go/util/integer", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" }, { - "checksumSHA1": "cy88zyzH3VJWVaIb41DDBlv+mmU=", + "checksumSHA1": "VK8XBxAuEr0Vp9W96bH1niYlLvc=", + "path": "k8s.io/client-go/util/retry", + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "wgrkDRwF0RXxN7Z9YMDRVV+KeUU=", 
"path": "k8s.io/client-go/util/workqueue", - "revision": "3627aeb7d4f6ade38f995d2c923e459146493c7e", - "revisionTime": "2017-03-31T20:34:26Z" + "revision": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65", + "revisionTime": "2018-06-28T04:30:50Z", + "version": "kubernetes-1.11.0", + "versionExact": "kubernetes-1.11.0" + }, + { + "checksumSHA1": "F9uRePbZAiiZ1fGtsS0wb3FaxXM=", + "path": "k8s.io/kube-openapi/pkg/util/proto", + "revision": "d83b052f768a50a309c692a9c271da3f3276ff88", + "revisionTime": "2018-06-29T01:24:20Z" } ], "rootPath": "github.com/prometheus/prometheus" From b33f48920c007d8b8f227b1164e87d28f5bfd628 Mon Sep 17 00:00:00 2001 From: jojohappy Date: Tue, 3 Jul 2018 17:16:45 +0800 Subject: [PATCH 2/6] Fixed remove unused package & fetch golang.org/x/text Signed-off-by: jojohappy --- vendor/github.com/PuerkitoBio/purell/LICENSE | 12 - .../github.com/PuerkitoBio/purell/purell.go | 375 - vendor/github.com/PuerkitoBio/urlesc/LICENSE | 27 - .../github.com/PuerkitoBio/urlesc/urlesc.go | 180 - vendor/github.com/docker/distribution/LICENSE | 202 - .../docker/distribution/digest/digest.go | 139 - .../docker/distribution/digest/digester.go | 155 - .../docker/distribution/digest/doc.go | 42 - .../docker/distribution/digest/set.go | 245 - .../docker/distribution/digest/verifiers.go | 44 - .../distribution/reference/reference.go | 334 - .../docker/distribution/reference/regexp.go | 124 - .../github.com/emicklei/go-restful/CHANGES.md | 163 - vendor/github.com/emicklei/go-restful/LICENSE | 22 - vendor/github.com/emicklei/go-restful/Srcfile | 1 - .../emicklei/go-restful/compress.go | 123 - .../emicklei/go-restful/compressor_cache.go | 103 - .../emicklei/go-restful/compressor_pools.go | 91 - .../emicklei/go-restful/compressors.go | 53 - .../emicklei/go-restful/constants.go | 30 - .../emicklei/go-restful/container.go | 361 - .../emicklei/go-restful/cors_filter.go | 202 - .../github.com/emicklei/go-restful/curly.go | 162 - .../emicklei/go-restful/curly_route.go | 52 - vendor/github.com/emicklei/go-restful/doc.go | 196 - .../emicklei/go-restful/entity_accessors.go | 163 - .../github.com/emicklei/go-restful/filter.go | 26 - .../github.com/emicklei/go-restful/jsr311.go | 248 - .../github.com/emicklei/go-restful/log/log.go | 31 - .../github.com/emicklei/go-restful/logger.go | 32 - vendor/github.com/emicklei/go-restful/mime.go | 45 - .../emicklei/go-restful/options_filter.go | 26 - .../emicklei/go-restful/parameter.go | 114 - .../emicklei/go-restful/path_expression.go | 69 - .../github.com/emicklei/go-restful/request.go | 131 - .../emicklei/go-restful/response.go | 235 - .../github.com/emicklei/go-restful/route.go | 183 - .../emicklei/go-restful/route_builder.go | 240 - .../github.com/emicklei/go-restful/router.go | 18 - .../emicklei/go-restful/service_error.go | 23 - .../emicklei/go-restful/swagger/CHANGES.md | 43 - .../swagger/api_declaration_list.go | 64 - .../emicklei/go-restful/swagger/config.go | 38 - .../go-restful/swagger/model_builder.go | 449 - .../emicklei/go-restful/swagger/model_list.go | 86 - .../go-restful/swagger/model_property_ext.go | 66 - .../go-restful/swagger/model_property_list.go | 87 - .../go-restful/swagger/ordered_route_map.go | 36 - .../emicklei/go-restful/swagger/swagger.go | 185 - .../go-restful/swagger/swagger_builder.go | 21 - .../go-restful/swagger/swagger_webservice.go | 440 - .../emicklei/go-restful/web_service.go | 268 - .../go-restful/web_service_container.go | 39 - .../go-openapi/jsonpointer/CODE_OF_CONDUCT.md | 74 - .../github.com/go-openapi/jsonpointer/LICENSE | 202 - 
.../go-openapi/jsonpointer/pointer.go | 238 - .../jsonreference/CODE_OF_CONDUCT.md | 74 - .../go-openapi/jsonreference/LICENSE | 202 - .../go-openapi/jsonreference/reference.go | 156 - .../go-openapi/spec/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/spec/LICENSE | 202 - vendor/github.com/go-openapi/spec/bindata.go | 274 - .../go-openapi/spec/contact_info.go | 24 - vendor/github.com/go-openapi/spec/expander.go | 626 - .../go-openapi/spec/external_docs.go | 24 - vendor/github.com/go-openapi/spec/header.go | 165 - vendor/github.com/go-openapi/spec/info.go | 168 - vendor/github.com/go-openapi/spec/items.go | 199 - vendor/github.com/go-openapi/spec/license.go | 23 - .../github.com/go-openapi/spec/operation.go | 233 - .../github.com/go-openapi/spec/parameter.go | 299 - .../github.com/go-openapi/spec/path_item.go | 90 - vendor/github.com/go-openapi/spec/paths.go | 97 - vendor/github.com/go-openapi/spec/ref.go | 167 - vendor/github.com/go-openapi/spec/response.go | 113 - .../github.com/go-openapi/spec/responses.go | 122 - vendor/github.com/go-openapi/spec/schema.go | 628 - .../go-openapi/spec/security_scheme.go | 142 - vendor/github.com/go-openapi/spec/spec.go | 79 - vendor/github.com/go-openapi/spec/swagger.go | 317 - vendor/github.com/go-openapi/spec/tag.go | 73 - .../github.com/go-openapi/spec/xml_object.go | 68 - .../go-openapi/swag/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/swag/LICENSE | 202 - vendor/github.com/go-openapi/swag/convert.go | 188 - .../go-openapi/swag/convert_types.go | 595 - vendor/github.com/go-openapi/swag/json.go | 270 - vendor/github.com/go-openapi/swag/loading.go | 49 - vendor/github.com/go-openapi/swag/net.go | 24 - vendor/github.com/go-openapi/swag/path.go | 56 - vendor/github.com/go-openapi/swag/util.go | 318 - vendor/github.com/juju/ratelimit/LICENSE | 191 - vendor/github.com/juju/ratelimit/ratelimit.go | 245 - vendor/github.com/juju/ratelimit/reader.go | 51 - vendor/github.com/mailru/easyjson/LICENSE | 7 - .../github.com/mailru/easyjson/buffer/pool.go | 207 - .../mailru/easyjson/jlexer/error.go | 15 - .../mailru/easyjson/jlexer/lexer.go | 956 - .../mailru/easyjson/jwriter/writer.go | 273 - vendor/github.com/spf13/pflag/LICENSE | 28 - vendor/github.com/spf13/pflag/README.md | 296 - vendor/github.com/spf13/pflag/bool.go | 94 - vendor/github.com/spf13/pflag/bool_slice.go | 147 - vendor/github.com/spf13/pflag/count.go | 96 - vendor/github.com/spf13/pflag/duration.go | 86 - vendor/github.com/spf13/pflag/flag.go | 1128 - vendor/github.com/spf13/pflag/float32.go | 88 - vendor/github.com/spf13/pflag/float64.go | 84 - vendor/github.com/spf13/pflag/golangflag.go | 101 - vendor/github.com/spf13/pflag/int.go | 84 - vendor/github.com/spf13/pflag/int32.go | 88 - vendor/github.com/spf13/pflag/int64.go | 84 - vendor/github.com/spf13/pflag/int8.go | 88 - vendor/github.com/spf13/pflag/int_slice.go | 128 - vendor/github.com/spf13/pflag/ip.go | 94 - vendor/github.com/spf13/pflag/ip_slice.go | 148 - vendor/github.com/spf13/pflag/ipmask.go | 122 - vendor/github.com/spf13/pflag/ipnet.go | 98 - vendor/github.com/spf13/pflag/string.go | 80 - vendor/github.com/spf13/pflag/string_array.go | 103 - vendor/github.com/spf13/pflag/string_slice.go | 129 - vendor/github.com/spf13/pflag/uint.go | 88 - vendor/github.com/spf13/pflag/uint16.go | 88 - vendor/github.com/spf13/pflag/uint32.go | 88 - vendor/github.com/spf13/pflag/uint64.go | 88 - vendor/github.com/spf13/pflag/uint8.go | 88 - vendor/github.com/spf13/pflag/uint_slice.go | 126 - vendor/github.com/ugorji/go/LICENSE | 22 - 
vendor/github.com/ugorji/go/codec/0doc.go | 193 - vendor/github.com/ugorji/go/codec/binc.go | 918 - vendor/github.com/ugorji/go/codec/cbor.go | 584 - vendor/github.com/ugorji/go/codec/decode.go | 2015 - vendor/github.com/ugorji/go/codec/encode.go | 1405 - .../ugorji/go/codec/fast-path.generated.go | 38900 ---------------- .../ugorji/go/codec/fast-path.go.tmpl | 511 - .../ugorji/go/codec/fast-path.not.go | 32 - .../ugorji/go/codec/gen-dec-array.go.tmpl | 101 - .../ugorji/go/codec/gen-dec-map.go.tmpl | 58 - .../ugorji/go/codec/gen-helper.generated.go | 233 - .../ugorji/go/codec/gen-helper.go.tmpl | 364 - .../ugorji/go/codec/gen.generated.go | 171 - vendor/github.com/ugorji/go/codec/gen.go | 1920 - vendor/github.com/ugorji/go/codec/helper.go | 1129 - .../ugorji/go/codec/helper_internal.go | 242 - .../ugorji/go/codec/helper_not_unsafe.go | 20 - .../ugorji/go/codec/helper_unsafe.go | 45 - vendor/github.com/ugorji/go/codec/json.go | 1072 - vendor/github.com/ugorji/go/codec/msgpack.go | 844 - vendor/github.com/ugorji/go/codec/noop.go | 213 - vendor/github.com/ugorji/go/codec/prebuild.go | 3 - vendor/github.com/ugorji/go/codec/rpc.go | 180 - vendor/github.com/ugorji/go/codec/simple.go | 518 - vendor/github.com/ugorji/go/codec/test.py | 120 - vendor/github.com/ugorji/go/codec/time.go | 222 - vendor/golang.org/x/text/cases/cases.go | 129 - vendor/golang.org/x/text/cases/context.go | 281 - vendor/golang.org/x/text/cases/fold.go | 26 - vendor/golang.org/x/text/cases/gen.go | 831 - vendor/golang.org/x/text/cases/gen_trieval.go | 217 - vendor/golang.org/x/text/cases/info.go | 83 - vendor/golang.org/x/text/cases/map.go | 599 - vendor/golang.org/x/text/cases/tables.go | 2213 - vendor/golang.org/x/text/cases/trieval.go | 213 - vendor/golang.org/x/text/internal/tag/tag.go | 100 - vendor/golang.org/x/text/language/Makefile | 16 - vendor/golang.org/x/text/language/common.go | 16 - vendor/golang.org/x/text/language/coverage.go | 197 - .../golang.org/x/text/language/gen_common.go | 20 - .../golang.org/x/text/language/gen_index.go | 162 - vendor/golang.org/x/text/language/go1_1.go | 38 - vendor/golang.org/x/text/language/go1_2.go | 11 - vendor/golang.org/x/text/language/index.go | 762 - vendor/golang.org/x/text/language/language.go | 975 - vendor/golang.org/x/text/language/lookup.go | 396 - .../golang.org/x/text/language/maketables.go | 1635 - vendor/golang.org/x/text/language/match.go | 840 - vendor/golang.org/x/text/language/parse.go | 859 - vendor/golang.org/x/text/language/tables.go | 2791 -- vendor/golang.org/x/text/language/tags.go | 143 - vendor/golang.org/x/text/runes/cond.go | 126 - vendor/golang.org/x/text/runes/runes.go | 278 - .../golang.org/x/text/secure/precis/class.go | 36 - .../x/text/secure/precis/context.go | 139 - vendor/golang.org/x/text/secure/precis/doc.go | 14 - vendor/golang.org/x/text/secure/precis/gen.go | 310 - .../x/text/secure/precis/gen_trieval.go | 68 - .../x/text/secure/precis/nickname.go | 70 - .../x/text/secure/precis/options.go | 106 - .../x/text/secure/precis/profile.go | 330 - .../x/text/secure/precis/profiles.go | 56 - .../golang.org/x/text/secure/precis/tables.go | 3788 -- .../x/text/secure/precis/transformer.go | 32 - .../x/text/secure/precis/trieval.go | 64 - .../golang.org/x/text/transform/transform.go | 54 +- vendor/golang.org/x/text/unicode/bidi/bidi.go | 2 +- .../golang.org/x/text/unicode/bidi/bracket.go | 42 +- vendor/golang.org/x/text/unicode/bidi/core.go | 5 +- vendor/golang.org/x/text/unicode/bidi/gen.go | 133 - .../x/text/unicode/bidi/gen_ranges.go | 57 - 
.../x/text/unicode/bidi/gen_trieval.go | 64 - .../golang.org/x/text/unicode/bidi/tables.go | 2 +- .../golang.org/x/text/unicode/bidi/trieval.go | 2 +- .../x/text/unicode/norm/composition.go | 18 +- .../x/text/unicode/norm/forminfo.go | 49 +- .../golang.org/x/text/unicode/norm/input.go | 8 +- vendor/golang.org/x/text/unicode/norm/iter.go | 31 +- .../x/text/unicode/norm/maketables.go | 978 - .../x/text/unicode/norm/normalize.go | 57 +- .../x/text/unicode/norm/readwriter.go | 1 - .../golang.org/x/text/unicode/norm/tables.go | 1028 +- .../golang.org/x/text/unicode/norm/triegen.go | 117 - vendor/golang.org/x/text/width/gen.go | 115 - vendor/golang.org/x/text/width/gen_common.go | 96 - vendor/golang.org/x/text/width/gen_trieval.go | 34 - vendor/golang.org/x/text/width/kind_string.go | 16 - vendor/golang.org/x/text/width/tables.go | 1284 - vendor/golang.org/x/text/width/transform.go | 162 - vendor/golang.org/x/text/width/trieval.go | 30 - vendor/golang.org/x/text/width/width.go | 201 - .../k8s.io/apimachinery/pkg/util/rand/rand.go | 120 - vendor/vendor.json | 193 +- 221 files changed, 720 insertions(+), 94862 deletions(-) delete mode 100644 vendor/github.com/PuerkitoBio/purell/LICENSE delete mode 100644 vendor/github.com/PuerkitoBio/purell/purell.go delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/LICENSE delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/urlesc.go delete mode 100644 vendor/github.com/docker/distribution/LICENSE delete mode 100644 vendor/github.com/docker/distribution/digest/digest.go delete mode 100644 vendor/github.com/docker/distribution/digest/digester.go delete mode 100644 vendor/github.com/docker/distribution/digest/doc.go delete mode 100644 vendor/github.com/docker/distribution/digest/set.go delete mode 100644 vendor/github.com/docker/distribution/digest/verifiers.go delete mode 100644 vendor/github.com/docker/distribution/reference/reference.go delete mode 100644 vendor/github.com/docker/distribution/reference/regexp.go delete mode 100644 vendor/github.com/emicklei/go-restful/CHANGES.md delete mode 100644 vendor/github.com/emicklei/go-restful/LICENSE delete mode 100644 vendor/github.com/emicklei/go-restful/Srcfile delete mode 100644 vendor/github.com/emicklei/go-restful/compress.go delete mode 100644 vendor/github.com/emicklei/go-restful/compressor_cache.go delete mode 100644 vendor/github.com/emicklei/go-restful/compressor_pools.go delete mode 100644 vendor/github.com/emicklei/go-restful/compressors.go delete mode 100644 vendor/github.com/emicklei/go-restful/constants.go delete mode 100644 vendor/github.com/emicklei/go-restful/container.go delete mode 100644 vendor/github.com/emicklei/go-restful/cors_filter.go delete mode 100644 vendor/github.com/emicklei/go-restful/curly.go delete mode 100644 vendor/github.com/emicklei/go-restful/curly_route.go delete mode 100644 vendor/github.com/emicklei/go-restful/doc.go delete mode 100644 vendor/github.com/emicklei/go-restful/entity_accessors.go delete mode 100644 vendor/github.com/emicklei/go-restful/filter.go delete mode 100644 vendor/github.com/emicklei/go-restful/jsr311.go delete mode 100644 vendor/github.com/emicklei/go-restful/log/log.go delete mode 100644 vendor/github.com/emicklei/go-restful/logger.go delete mode 100644 vendor/github.com/emicklei/go-restful/mime.go delete mode 100644 vendor/github.com/emicklei/go-restful/options_filter.go delete mode 100644 vendor/github.com/emicklei/go-restful/parameter.go delete mode 100644 vendor/github.com/emicklei/go-restful/path_expression.go delete mode 100644 
vendor/github.com/emicklei/go-restful/request.go delete mode 100644 vendor/github.com/emicklei/go-restful/response.go delete mode 100644 vendor/github.com/emicklei/go-restful/route.go delete mode 100644 vendor/github.com/emicklei/go-restful/route_builder.go delete mode 100644 vendor/github.com/emicklei/go-restful/router.go delete mode 100644 vendor/github.com/emicklei/go-restful/service_error.go delete mode 100644 vendor/github.com/emicklei/go-restful/swagger/CHANGES.md delete mode 100644 vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go delete mode 100644 vendor/github.com/emicklei/go-restful/swagger/config.go delete mode 100644 vendor/github.com/emicklei/go-restful/swagger/model_builder.go delete mode 100644 vendor/github.com/emicklei/go-restful/swagger/model_list.go delete mode 100644 vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go delete mode 100644 vendor/github.com/emicklei/go-restful/swagger/model_property_list.go delete mode 100644 vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go delete mode 100644 vendor/github.com/emicklei/go-restful/swagger/swagger.go delete mode 100644 vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go delete mode 100644 vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go delete mode 100644 vendor/github.com/emicklei/go-restful/web_service.go delete mode 100644 vendor/github.com/emicklei/go-restful/web_service_container.go delete mode 100644 vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/go-openapi/jsonpointer/LICENSE delete mode 100644 vendor/github.com/go-openapi/jsonpointer/pointer.go delete mode 100644 vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/go-openapi/jsonreference/LICENSE delete mode 100644 vendor/github.com/go-openapi/jsonreference/reference.go delete mode 100644 vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/go-openapi/spec/LICENSE delete mode 100644 vendor/github.com/go-openapi/spec/bindata.go delete mode 100644 vendor/github.com/go-openapi/spec/contact_info.go delete mode 100644 vendor/github.com/go-openapi/spec/expander.go delete mode 100644 vendor/github.com/go-openapi/spec/external_docs.go delete mode 100644 vendor/github.com/go-openapi/spec/header.go delete mode 100644 vendor/github.com/go-openapi/spec/info.go delete mode 100644 vendor/github.com/go-openapi/spec/items.go delete mode 100644 vendor/github.com/go-openapi/spec/license.go delete mode 100644 vendor/github.com/go-openapi/spec/operation.go delete mode 100644 vendor/github.com/go-openapi/spec/parameter.go delete mode 100644 vendor/github.com/go-openapi/spec/path_item.go delete mode 100644 vendor/github.com/go-openapi/spec/paths.go delete mode 100644 vendor/github.com/go-openapi/spec/ref.go delete mode 100644 vendor/github.com/go-openapi/spec/response.go delete mode 100644 vendor/github.com/go-openapi/spec/responses.go delete mode 100644 vendor/github.com/go-openapi/spec/schema.go delete mode 100644 vendor/github.com/go-openapi/spec/security_scheme.go delete mode 100644 vendor/github.com/go-openapi/spec/spec.go delete mode 100644 vendor/github.com/go-openapi/spec/swagger.go delete mode 100644 vendor/github.com/go-openapi/spec/tag.go delete mode 100644 vendor/github.com/go-openapi/spec/xml_object.go delete mode 100644 vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/go-openapi/swag/LICENSE delete mode 100644 
vendor/github.com/go-openapi/swag/convert.go delete mode 100644 vendor/github.com/go-openapi/swag/convert_types.go delete mode 100644 vendor/github.com/go-openapi/swag/json.go delete mode 100644 vendor/github.com/go-openapi/swag/loading.go delete mode 100644 vendor/github.com/go-openapi/swag/net.go delete mode 100644 vendor/github.com/go-openapi/swag/path.go delete mode 100644 vendor/github.com/go-openapi/swag/util.go delete mode 100644 vendor/github.com/juju/ratelimit/LICENSE delete mode 100644 vendor/github.com/juju/ratelimit/ratelimit.go delete mode 100644 vendor/github.com/juju/ratelimit/reader.go delete mode 100644 vendor/github.com/mailru/easyjson/LICENSE delete mode 100644 vendor/github.com/mailru/easyjson/buffer/pool.go delete mode 100644 vendor/github.com/mailru/easyjson/jlexer/error.go delete mode 100644 vendor/github.com/mailru/easyjson/jlexer/lexer.go delete mode 100644 vendor/github.com/mailru/easyjson/jwriter/writer.go delete mode 100644 vendor/github.com/spf13/pflag/LICENSE delete mode 100644 vendor/github.com/spf13/pflag/README.md delete mode 100644 vendor/github.com/spf13/pflag/bool.go delete mode 100644 vendor/github.com/spf13/pflag/bool_slice.go delete mode 100644 vendor/github.com/spf13/pflag/count.go delete mode 100644 vendor/github.com/spf13/pflag/duration.go delete mode 100644 vendor/github.com/spf13/pflag/flag.go delete mode 100644 vendor/github.com/spf13/pflag/float32.go delete mode 100644 vendor/github.com/spf13/pflag/float64.go delete mode 100644 vendor/github.com/spf13/pflag/golangflag.go delete mode 100644 vendor/github.com/spf13/pflag/int.go delete mode 100644 vendor/github.com/spf13/pflag/int32.go delete mode 100644 vendor/github.com/spf13/pflag/int64.go delete mode 100644 vendor/github.com/spf13/pflag/int8.go delete mode 100644 vendor/github.com/spf13/pflag/int_slice.go delete mode 100644 vendor/github.com/spf13/pflag/ip.go delete mode 100644 vendor/github.com/spf13/pflag/ip_slice.go delete mode 100644 vendor/github.com/spf13/pflag/ipmask.go delete mode 100644 vendor/github.com/spf13/pflag/ipnet.go delete mode 100644 vendor/github.com/spf13/pflag/string.go delete mode 100644 vendor/github.com/spf13/pflag/string_array.go delete mode 100644 vendor/github.com/spf13/pflag/string_slice.go delete mode 100644 vendor/github.com/spf13/pflag/uint.go delete mode 100644 vendor/github.com/spf13/pflag/uint16.go delete mode 100644 vendor/github.com/spf13/pflag/uint32.go delete mode 100644 vendor/github.com/spf13/pflag/uint64.go delete mode 100644 vendor/github.com/spf13/pflag/uint8.go delete mode 100644 vendor/github.com/spf13/pflag/uint_slice.go delete mode 100644 vendor/github.com/ugorji/go/LICENSE delete mode 100644 vendor/github.com/ugorji/go/codec/0doc.go delete mode 100644 vendor/github.com/ugorji/go/codec/binc.go delete mode 100644 vendor/github.com/ugorji/go/codec/cbor.go delete mode 100644 vendor/github.com/ugorji/go/codec/decode.go delete mode 100644 vendor/github.com/ugorji/go/codec/encode.go delete mode 100644 vendor/github.com/ugorji/go/codec/fast-path.generated.go delete mode 100644 vendor/github.com/ugorji/go/codec/fast-path.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/fast-path.not.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/gen-helper.generated.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl delete mode 100644 
vendor/github.com/ugorji/go/codec/gen.generated.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_internal.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_not_unsafe.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_unsafe.go delete mode 100644 vendor/github.com/ugorji/go/codec/json.go delete mode 100644 vendor/github.com/ugorji/go/codec/msgpack.go delete mode 100644 vendor/github.com/ugorji/go/codec/noop.go delete mode 100644 vendor/github.com/ugorji/go/codec/prebuild.go delete mode 100644 vendor/github.com/ugorji/go/codec/rpc.go delete mode 100644 vendor/github.com/ugorji/go/codec/simple.go delete mode 100755 vendor/github.com/ugorji/go/codec/test.py delete mode 100644 vendor/github.com/ugorji/go/codec/time.go delete mode 100644 vendor/golang.org/x/text/cases/cases.go delete mode 100644 vendor/golang.org/x/text/cases/context.go delete mode 100644 vendor/golang.org/x/text/cases/fold.go delete mode 100644 vendor/golang.org/x/text/cases/gen.go delete mode 100644 vendor/golang.org/x/text/cases/gen_trieval.go delete mode 100644 vendor/golang.org/x/text/cases/info.go delete mode 100644 vendor/golang.org/x/text/cases/map.go delete mode 100644 vendor/golang.org/x/text/cases/tables.go delete mode 100644 vendor/golang.org/x/text/cases/trieval.go delete mode 100644 vendor/golang.org/x/text/internal/tag/tag.go delete mode 100644 vendor/golang.org/x/text/language/Makefile delete mode 100644 vendor/golang.org/x/text/language/common.go delete mode 100644 vendor/golang.org/x/text/language/coverage.go delete mode 100644 vendor/golang.org/x/text/language/gen_common.go delete mode 100644 vendor/golang.org/x/text/language/gen_index.go delete mode 100644 vendor/golang.org/x/text/language/go1_1.go delete mode 100644 vendor/golang.org/x/text/language/go1_2.go delete mode 100644 vendor/golang.org/x/text/language/index.go delete mode 100644 vendor/golang.org/x/text/language/language.go delete mode 100644 vendor/golang.org/x/text/language/lookup.go delete mode 100644 vendor/golang.org/x/text/language/maketables.go delete mode 100644 vendor/golang.org/x/text/language/match.go delete mode 100644 vendor/golang.org/x/text/language/parse.go delete mode 100644 vendor/golang.org/x/text/language/tables.go delete mode 100644 vendor/golang.org/x/text/language/tags.go delete mode 100644 vendor/golang.org/x/text/runes/cond.go delete mode 100644 vendor/golang.org/x/text/runes/runes.go delete mode 100644 vendor/golang.org/x/text/secure/precis/class.go delete mode 100644 vendor/golang.org/x/text/secure/precis/context.go delete mode 100644 vendor/golang.org/x/text/secure/precis/doc.go delete mode 100644 vendor/golang.org/x/text/secure/precis/gen.go delete mode 100644 vendor/golang.org/x/text/secure/precis/gen_trieval.go delete mode 100644 vendor/golang.org/x/text/secure/precis/nickname.go delete mode 100644 vendor/golang.org/x/text/secure/precis/options.go delete mode 100644 vendor/golang.org/x/text/secure/precis/profile.go delete mode 100644 vendor/golang.org/x/text/secure/precis/profiles.go delete mode 100644 vendor/golang.org/x/text/secure/precis/tables.go delete mode 100644 vendor/golang.org/x/text/secure/precis/transformer.go delete mode 100644 vendor/golang.org/x/text/secure/precis/trieval.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_ranges.go delete mode 100644 
vendor/golang.org/x/text/unicode/bidi/gen_trieval.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go delete mode 100644 vendor/golang.org/x/text/width/gen.go delete mode 100644 vendor/golang.org/x/text/width/gen_common.go delete mode 100644 vendor/golang.org/x/text/width/gen_trieval.go delete mode 100644 vendor/golang.org/x/text/width/kind_string.go delete mode 100644 vendor/golang.org/x/text/width/tables.go delete mode 100644 vendor/golang.org/x/text/width/transform.go delete mode 100644 vendor/golang.org/x/text/width/trieval.go delete mode 100644 vendor/golang.org/x/text/width/width.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/rand/rand.go diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE deleted file mode 100644 index 4b9986dea..000000000 --- a/vendor/github.com/PuerkitoBio/purell/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2012, Martin Angers -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go deleted file mode 100644 index b79da64b3..000000000 --- a/vendor/github.com/PuerkitoBio/purell/purell.go +++ /dev/null @@ -1,375 +0,0 @@ -/* -Package purell offers URL normalization as described on the wikipedia page: -http://en.wikipedia.org/wiki/URL_normalization -*/ -package purell - -import ( - "bytes" - "fmt" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/PuerkitoBio/urlesc" - "golang.org/x/net/idna" - "golang.org/x/text/secure/precis" - "golang.org/x/text/unicode/norm" -) - -// A set of normalization flags determines how a URL will -// be normalized. 
-type NormalizationFlags uint - -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". 
- - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) - -const ( - defaultHttpPort = ":80" - defaultHttpsPort = ":443" -) - -// Regular expressions used by the normalizations -var rxPort = regexp.MustCompile(`(:\d+)/?$`) -var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`) -var rxDupSlashes = regexp.MustCompile(`/{2,}`) -var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`) -var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) -var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`) -var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`) -var rxEmptyPort = regexp.MustCompile(`:+$`) - -// Map of flags to implementation function. -// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically -// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator. - -// Since maps have undefined traversing order, make a slice of ordered keys -var flagsOrder = []NormalizationFlags{ - FlagLowercaseScheme, - FlagLowercaseHost, - FlagRemoveDefaultPort, - FlagRemoveDirectoryIndex, - FlagRemoveDotSegments, - FlagRemoveFragment, - FlagForceHTTP, // Must be after remove default port (because https=443/http=80) - FlagRemoveDuplicateSlashes, - FlagRemoveWWW, - FlagAddWWW, - FlagSortQuery, - FlagDecodeDWORDHost, - FlagDecodeOctalHost, - FlagDecodeHexHost, - FlagRemoveUnnecessaryHostDots, - FlagRemoveEmptyPortSeparator, - FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last - FlagAddTrailingSlash, -} - -// ... 
and then the map, where order is unimportant -var flags = map[NormalizationFlags]func(*url.URL){ - FlagLowercaseScheme: lowercaseScheme, - FlagLowercaseHost: lowercaseHost, - FlagRemoveDefaultPort: removeDefaultPort, - FlagRemoveDirectoryIndex: removeDirectoryIndex, - FlagRemoveDotSegments: removeDotSegments, - FlagRemoveFragment: removeFragment, - FlagForceHTTP: forceHTTP, - FlagRemoveDuplicateSlashes: removeDuplicateSlashes, - FlagRemoveWWW: removeWWW, - FlagAddWWW: addWWW, - FlagSortQuery: sortQuery, - FlagDecodeDWORDHost: decodeDWORDHost, - FlagDecodeOctalHost: decodeOctalHost, - FlagDecodeHexHost: decodeHexHost, - FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, - FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator, - FlagRemoveTrailingSlash: removeTrailingSlash, - FlagAddTrailingSlash: addTrailingSlash, -} - -// MustNormalizeURLString returns the normalized string, and panics if an error occurs. -// It takes an URL string as input, as well as the normalization flags. -func MustNormalizeURLString(u string, f NormalizationFlags) string { - result, e := NormalizeURLString(u, f) - if e != nil { - panic(e) - } - return result -} - -// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object. -// It takes an URL string as input, as well as the normalization flags. -func NormalizeURLString(u string, f NormalizationFlags) (string, error) { - if parsed, e := url.Parse(u); e != nil { - return "", e - } else { - options := make([]precis.Option, 1, 3) - options[0] = precis.IgnoreCase - if f&FlagLowercaseHost == FlagLowercaseHost { - options = append(options, precis.FoldCase()) - } - options = append(options, precis.Norm(norm.NFC)) - profile := precis.NewFreeform(options...) - if parsed.Host, e = idna.ToASCII(profile.NewTransformer().String(parsed.Host)); e != nil { - return "", e - } - return NormalizeURL(parsed, f), nil - } - panic("Unreachable code.") -} - -// NormalizeURL returns the normalized string. -// It takes a parsed URL object as input, as well as the normalization flags. -func NormalizeURL(u *url.URL, f NormalizationFlags) string { - for _, k := range flagsOrder { - if f&k == k { - flags[k](u) - } - } - return urlesc.Escape(u) -} - -func lowercaseScheme(u *url.URL) { - if len(u.Scheme) > 0 { - u.Scheme = strings.ToLower(u.Scheme) - } -} - -func lowercaseHost(u *url.URL) { - if len(u.Host) > 0 { - u.Host = strings.ToLower(u.Host) - } -} - -func removeDefaultPort(u *url.URL) { - if len(u.Host) > 0 { - scheme := strings.ToLower(u.Scheme) - u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { - if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { - return "" - } - return val - }) - } -} - -func removeTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if strings.HasSuffix(u.Path, "/") { - u.Path = u.Path[:l-1] - } - } else if l = len(u.Host); l > 0 { - if strings.HasSuffix(u.Host, "/") { - u.Host = u.Host[:l-1] - } - } -} - -func addTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } else if l = len(u.Host); l > 0 { - if !strings.HasSuffix(u.Host, "/") { - u.Host += "/" - } - } -} - -func removeDotSegments(u *url.URL) { - if len(u.Path) > 0 { - var dotFree []string - var lastIsDot bool - - sections := strings.Split(u.Path, "/") - for _, s := range sections { - if s == ".." { - if len(dotFree) > 0 { - dotFree = dotFree[:len(dotFree)-1] - } - } else if s != "." 
{ - dotFree = append(dotFree, s) - } - lastIsDot = (s == "." || s == "..") - } - // Special case if host does not end with / and new path does not begin with / - u.Path = strings.Join(dotFree, "/") - if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - // Special case if the last segment was a dot, make sure the path ends with a slash - if lastIsDot && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } -} - -func removeDirectoryIndex(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1") - } -} - -func removeFragment(u *url.URL) { - u.Fragment = "" -} - -func forceHTTP(u *url.URL) { - if strings.ToLower(u.Scheme) == "https" { - u.Scheme = "http" - } -} - -func removeDuplicateSlashes(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") - } -} - -func removeWWW(u *url.URL) { - if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = u.Host[4:] - } -} - -func addWWW(u *url.URL) { - if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = "www." + u.Host - } -} - -func sortQuery(u *url.URL) { - q := u.Query() - - if len(q) > 0 { - arKeys := make([]string, len(q)) - i := 0 - for k, _ := range q { - arKeys[i] = k - i++ - } - sort.Strings(arKeys) - buf := new(bytes.Buffer) - for _, k := range arKeys { - sort.Strings(q[k]) - for _, v := range q[k] { - if buf.Len() > 0 { - buf.WriteRune('&') - } - buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) - } - } - - // Rebuild the raw query string - u.RawQuery = buf.String() - } -} - -func decodeDWORDHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 { - var parts [4]int64 - - dword, _ := strconv.ParseInt(matches[1], 10, 0) - for i, shift := range []uint{24, 16, 8, 0} { - parts[i] = dword >> shift & 0xFF - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2]) - } - } -} - -func decodeOctalHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 { - var parts [4]int64 - - for i := 1; i <= 4; i++ { - parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0) - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5]) - } - } -} - -func decodeHexHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 { - // Conversion is safe because of regex validation - parsed, _ := strconv.ParseInt(matches[1], 16, 0) - // Set host as DWORD (base 10) encoded host - u.Host = fmt.Sprintf("%d%s", parsed, matches[2]) - // The rest is the same as decoding a DWORD host - decodeDWORDHost(u) - } - } -} - -func removeUnncessaryHostDots(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 { - // Trim the leading and trailing dots - u.Host = strings.Trim(matches[1], ".") - if len(matches) > 2 { - u.Host += matches[2] - } - } - } -} - -func removeEmptyPortSeparator(u *url.URL) { - if len(u.Host) > 0 { - u.Host = rxEmptyPort.ReplaceAllString(u.Host, "") - } -} diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE deleted file mode 100644 index 744875676..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go deleted file mode 100644 index 1b8462459..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package urlesc implements query escaping as per RFC 3986. -// It contains some parts of the net/url package, modified so as to allow -// some reserved characters incorrectly escaped by net/url. -// See https://github.com/golang/go/issues/5684 -package urlesc - -import ( - "bytes" - "net/url" - "strings" -) - -type encoding int - -const ( - encodePath encoding = 1 + iota - encodeUserPassword - encodeQueryComponent - encodeFragment -) - -// Return true if the specified character should be escaped when -// appearing in a URL string, according to RFC 3986. -func shouldEscape(c byte, mode encoding) bool { - // §2.3 Unreserved characters (alphanum) - if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { - return false - } - - switch c { - case '-', '.', '_', '~': // §2.3 Unreserved characters (mark) - return false - - // §2.2 Reserved characters (reserved) - case ':', '/', '?', '#', '[', ']', '@', // gen-delims - '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims - // Different sections of the URL allow a few of - // the reserved characters to appear unescaped. - switch mode { - case encodePath: // §3.3 - // The RFC allows sub-delims and : @. - // '/', '[' and ']' can be used to assign meaning to individual path - // segments. This package only manipulates the path as a whole, - // so we allow those as well. That leaves only ? and # to escape. - return c == '?' || c == '#' - - case encodeUserPassword: // §3.2.1 - // The RFC allows : and sub-delims in - // userinfo. The parsing of userinfo treats ':' as special so we must escape - // all the gen-delims. - return c == ':' || c == '/' || c == '?' 
|| c == '#' || c == '[' || c == ']' || c == '@' - - case encodeQueryComponent: // §3.4 - // The RFC allows / and ?. - return c != '/' && c != '?' - - case encodeFragment: // §4.1 - // The RFC text is silent but the grammar allows - // everything, so escape nothing but # - return c == '#' - } - } - - // Everything else must be escaped. - return true -} - -// QueryEscape escapes the string so it can be safely placed -// inside a URL query. -func QueryEscape(s string) string { - return escape(s, encodeQueryComponent) -} - -func escape(s string, mode encoding) string { - spaceCount, hexCount := 0, 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEscape(c, mode) { - if c == ' ' && mode == encodeQueryComponent { - spaceCount++ - } else { - hexCount++ - } - } - } - - if spaceCount == 0 && hexCount == 0 { - return s - } - - t := make([]byte, len(s)+2*hexCount) - j := 0 - for i := 0; i < len(s); i++ { - switch c := s[i]; { - case c == ' ' && mode == encodeQueryComponent: - t[j] = '+' - j++ - case shouldEscape(c, mode): - t[j] = '%' - t[j+1] = "0123456789ABCDEF"[c>>4] - t[j+2] = "0123456789ABCDEF"[c&15] - j += 3 - default: - t[j] = s[i] - j++ - } - } - return string(t) -} - -var uiReplacer = strings.NewReplacer( - "%21", "!", - "%27", "'", - "%28", "(", - "%29", ")", - "%2A", "*", -) - -// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986. -func unescapeUserinfo(s string) string { - return uiReplacer.Replace(s) -} - -// Escape reassembles the URL into a valid URL string. -// The general form of the result is one of: -// -// scheme:opaque -// scheme://userinfo@host/path?query#fragment -// -// If u.Opaque is non-empty, String uses the first form; -// otherwise it uses the second form. -// -// In the second form, the following rules apply: -// - if u.Scheme is empty, scheme: is omitted. -// - if u.User is nil, userinfo@ is omitted. -// - if u.Host is empty, host/ is omitted. -// - if u.Scheme and u.Host are empty and u.User is nil, -// the entire scheme://userinfo@host/ is omitted. -// - if u.Host is non-empty and u.Path begins with a /, -// the form host/path does not add its own /. -// - if u.RawQuery is empty, ?query is omitted. -// - if u.Fragment is empty, #fragment is omitted. -func Escape(u *url.URL) string { - var buf bytes.Buffer - if u.Scheme != "" { - buf.WriteString(u.Scheme) - buf.WriteByte(':') - } - if u.Opaque != "" { - buf.WriteString(u.Opaque) - } else { - if u.Scheme != "" || u.Host != "" || u.User != nil { - buf.WriteString("//") - if ui := u.User; ui != nil { - buf.WriteString(unescapeUserinfo(ui.String())) - buf.WriteByte('@') - } - if h := u.Host; h != "" { - buf.WriteString(h) - } - } - if u.Path != "" && u.Path[0] != '/' && u.Host != "" { - buf.WriteByte('/') - } - buf.WriteString(escape(u.Path, encodePath)) - } - if u.RawQuery != "" { - buf.WriteByte('?') - buf.WriteString(u.RawQuery) - } - if u.Fragment != "" { - buf.WriteByte('#') - buf.WriteString(escape(u.Fragment, encodeFragment)) - } - return buf.String() -} diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE deleted file mode 100644 index e06d20818..000000000 --- a/vendor/github.com/docker/distribution/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
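As context for the purell and urlesc removals above, the following is a minimal, illustrative sketch of how those two deleted packages were typically consumed, based only on the exported identifiers visible in the removed files (purell.NormalizeURLString, the purell.Flags* constants, and urlesc.QueryEscape). The sample program, its input URL, and the output noted in the comments are assumptions for illustration, not part of this patch.

package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
	"github.com/PuerkitoBio/urlesc"
)

func main() {
	// Normalize a URL using the "usually safe, greedy" flag set defined in the
	// removed purell.go: lowercase the scheme and host, drop the default port,
	// resolve dot segments and strip the trailing slash.
	normalized, err := purell.NormalizeURLString(
		"HTTP://Example.COM:80/a/./b/../c/",
		purell.FlagsUsuallySafeGreedy,
	)
	if err != nil {
		// NormalizeURLString only fails when the input cannot be parsed as a URL.
		fmt.Println("normalize:", err)
		return
	}
	fmt.Println(normalized) // expected: http://example.com/a/c

	// QueryEscape from the removed urlesc.go escapes a string for use in a
	// query component per RFC 3986: spaces become '+', reserved characters
	// such as '&' are percent-encoded.
	fmt.Println(urlesc.QueryEscape("a b&c"))
}

Both calls rely solely on the flag ordering and escape rules shown in the deleted sources; any behavioural detail beyond that is an assumption.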
- diff --git a/vendor/github.com/docker/distribution/digest/digest.go b/vendor/github.com/docker/distribution/digest/digest.go deleted file mode 100644 index 31d821bba..000000000 --- a/vendor/github.com/docker/distribution/digest/digest.go +++ /dev/null @@ -1,139 +0,0 @@ -package digest - -import ( - "fmt" - "hash" - "io" - "regexp" - "strings" -) - -const ( - // DigestSha256EmptyTar is the canonical sha256 digest of empty data - DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -) - -// Digest allows simple protection of hex formatted digest strings, prefixed -// by their algorithm. Strings of type Digest have some guarantee of being in -// the correct format and it provides quick access to the components of a -// digest string. -// -// The following is an example of the contents of Digest types: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -// This allows to abstract the digest behind this type and work only in those -// terms. -type Digest string - -// NewDigest returns a Digest from alg and a hash.Hash object. -func NewDigest(alg Algorithm, h hash.Hash) Digest { - return NewDigestFromBytes(alg, h.Sum(nil)) -} - -// NewDigestFromBytes returns a new digest from the byte contents of p. -// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) -// functions. This is also useful for rebuilding digests from binary -// serializations. -func NewDigestFromBytes(alg Algorithm, p []byte) Digest { - return Digest(fmt.Sprintf("%s:%x", alg, p)) -} - -// NewDigestFromHex returns a Digest from alg and a the hex encoded digest. -func NewDigestFromHex(alg, hex string) Digest { - return Digest(fmt.Sprintf("%s:%s", alg, hex)) -} - -// DigestRegexp matches valid digest types. -var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) - -// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. -var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) - -var ( - // ErrDigestInvalidFormat returned when digest format invalid. - ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") - - // ErrDigestInvalidLength returned when digest has invalid length. - ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") - - // ErrDigestUnsupported returned when the digest algorithm is unsupported. - ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") -) - -// ParseDigest parses s and returns the validated digest object. An error will -// be returned if the format is invalid. -func ParseDigest(s string) (Digest, error) { - d := Digest(s) - - return d, d.Validate() -} - -// FromReader returns the most valid digest for the underlying content using -// the canonical digest algorithm. -func FromReader(rd io.Reader) (Digest, error) { - return Canonical.FromReader(rd) -} - -// FromBytes digests the input and returns a Digest. -func FromBytes(p []byte) Digest { - return Canonical.FromBytes(p) -} - -// Validate checks that the contents of d is a valid digest, returning an -// error if not. -func (d Digest) Validate() error { - s := string(d) - - if !DigestRegexpAnchored.MatchString(s) { - return ErrDigestInvalidFormat - } - - i := strings.Index(s, ":") - if i < 0 { - return ErrDigestInvalidFormat - } - - // case: "sha256:" with no hex. 
- if i+1 == len(s) { - return ErrDigestInvalidFormat - } - - switch algorithm := Algorithm(s[:i]); algorithm { - case SHA256, SHA384, SHA512: - if algorithm.Size()*2 != len(s[i+1:]) { - return ErrDigestInvalidLength - } - break - default: - return ErrDigestUnsupported - } - - return nil -} - -// Algorithm returns the algorithm portion of the digest. This will panic if -// the underlying digest is not in a valid format. -func (d Digest) Algorithm() Algorithm { - return Algorithm(d[:d.sepIndex()]) -} - -// Hex returns the hex digest portion of the digest. This will panic if the -// underlying digest is not in a valid format. -func (d Digest) Hex() string { - return string(d[d.sepIndex()+1:]) -} - -func (d Digest) String() string { - return string(d) -} - -func (d Digest) sepIndex() int { - i := strings.Index(string(d), ":") - - if i < 0 { - panic("could not find ':' in digest: " + d) - } - - return i -} diff --git a/vendor/github.com/docker/distribution/digest/digester.go b/vendor/github.com/docker/distribution/digest/digester.go deleted file mode 100644 index f3105a45b..000000000 --- a/vendor/github.com/docker/distribution/digest/digester.go +++ /dev/null @@ -1,155 +0,0 @@ -package digest - -import ( - "crypto" - "fmt" - "hash" - "io" -) - -// Algorithm identifies and implementation of a digester by an identifier. -// Note the that this defines both the hash algorithm used and the string -// encoding. -type Algorithm string - -// supported digest types -const ( - SHA256 Algorithm = "sha256" // sha256 with hex encoding - SHA384 Algorithm = "sha384" // sha384 with hex encoding - SHA512 Algorithm = "sha512" // sha512 with hex encoding - - // Canonical is the primary digest algorithm used with the distribution - // project. Other digests may be used but this one is the primary storage - // digest. - Canonical = SHA256 -) - -var ( - // TODO(stevvooe): Follow the pattern of the standard crypto package for - // registration of digests. Effectively, we are a registerable set and - // common symbol access. - - // algorithms maps values to hash.Hash implementations. Other algorithms - // may be available but they cannot be calculated by the digest package. - algorithms = map[Algorithm]crypto.Hash{ - SHA256: crypto.SHA256, - SHA384: crypto.SHA384, - SHA512: crypto.SHA512, - } -) - -// Available returns true if the digest type is available for use. If this -// returns false, New and Hash will return nil. -func (a Algorithm) Available() bool { - h, ok := algorithms[a] - if !ok { - return false - } - - // check availability of the hash, as well - return h.Available() -} - -func (a Algorithm) String() string { - return string(a) -} - -// Size returns number of bytes returned by the hash. -func (a Algorithm) Size() int { - h, ok := algorithms[a] - if !ok { - return 0 - } - return h.Size() -} - -// Set implemented to allow use of Algorithm as a command line flag. -func (a *Algorithm) Set(value string) error { - if value == "" { - *a = Canonical - } else { - // just do a type conversion, support is queried with Available. - *a = Algorithm(value) - } - - return nil -} - -// New returns a new digester for the specified algorithm. If the algorithm -// does not have a digester implementation, nil will be returned. This can be -// checked by calling Available before calling New. -func (a Algorithm) New() Digester { - return &digester{ - alg: a, - hash: a.Hash(), - } -} - -// Hash returns a new hash as used by the algorithm. If not available, the -// method will panic. Check Algorithm.Available() before calling. 
-func (a Algorithm) Hash() hash.Hash { - if !a.Available() { - // NOTE(stevvooe): A missing hash is usually a programming error that - // must be resolved at compile time. We don't import in the digest - // package to allow users to choose their hash implementation (such as - // when using stevvooe/resumable or a hardware accelerated package). - // - // Applications that may want to resolve the hash at runtime should - // call Algorithm.Available before call Algorithm.Hash(). - panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) - } - - return algorithms[a].New() -} - -// FromReader returns the digest of the reader using the algorithm. -func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { - digester := a.New() - - if _, err := io.Copy(digester.Hash(), rd); err != nil { - return "", err - } - - return digester.Digest(), nil -} - -// FromBytes digests the input and returns a Digest. -func (a Algorithm) FromBytes(p []byte) Digest { - digester := a.New() - - if _, err := digester.Hash().Write(p); err != nil { - // Writes to a Hash should never fail. None of the existing - // hash implementations in the stdlib or hashes vendored - // here can return errors from Write. Having a panic in this - // condition instead of having FromBytes return an error value - // avoids unnecessary error handling paths in all callers. - panic("write to hash function returned error: " + err.Error()) - } - - return digester.Digest() -} - -// TODO(stevvooe): Allow resolution of verifiers using the digest type and -// this registration system. - -// Digester calculates the digest of written data. Writes should go directly -// to the return value of Hash, while calling Digest will return the current -// value of the digest. -type Digester interface { - Hash() hash.Hash // provides direct access to underlying hash instance. - Digest() Digest -} - -// digester provides a simple digester definition that embeds a hasher. -type digester struct { - alg Algorithm - hash hash.Hash -} - -func (d *digester) Hash() hash.Hash { - return d.hash -} - -func (d *digester) Digest() Digest { - return NewDigest(d.alg, d.hash) -} diff --git a/vendor/github.com/docker/distribution/digest/doc.go b/vendor/github.com/docker/distribution/digest/doc.go deleted file mode 100644 index f64b0db32..000000000 --- a/vendor/github.com/docker/distribution/digest/doc.go +++ /dev/null @@ -1,42 +0,0 @@ -// Package digest provides a generalized type to opaquely represent message -// digests and their operations within the registry. The Digest type is -// designed to serve as a flexible identifier in a content-addressable system. -// More importantly, it provides tools and wrappers to work with -// hash.Hash-based digests with little effort. -// -// Basics -// -// The format of a digest is simply a string with two parts, dubbed the -// "algorithm" and the "digest", separated by a colon: -// -// : -// -// An example of a sha256 digest representation follows: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -// In this case, the string "sha256" is the algorithm and the hex bytes are -// the "digest". -// -// Because the Digest type is simply a string, once a valid Digest is -// obtained, comparisons are cheap, quick and simple to express with the -// standard equality operator. -// -// Verification -// -// The main benefit of using the Digest type is simple verification against a -// given digest. 
The Verifier interface, modeled after the stdlib hash.Hash -// interface, provides a common write sink for digest verification. After -// writing is complete, calling the Verifier.Verified method will indicate -// whether or not the stream of bytes matches the target digest. -// -// Missing Features -// -// In addition to the above, we intend to add the following features to this -// package: -// -// 1. A Digester type that supports write sink digest calculation. -// -// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. -// -package digest diff --git a/vendor/github.com/docker/distribution/digest/set.go b/vendor/github.com/docker/distribution/digest/set.go deleted file mode 100644 index 4b9313c1a..000000000 --- a/vendor/github.com/docker/distribution/digest/set.go +++ /dev/null @@ -1,245 +0,0 @@ -package digest - -import ( - "errors" - "sort" - "strings" - "sync" -) - -var ( - // ErrDigestNotFound is used when a matching digest - // could not be found in a set. - ErrDigestNotFound = errors.New("digest not found") - - // ErrDigestAmbiguous is used when multiple digests - // are found in a set. None of the matching digests - // should be considered valid matches. - ErrDigestAmbiguous = errors.New("ambiguous digest string") -) - -// Set is used to hold a unique set of digests which -// may be easily referenced by easily referenced by a string -// representation of the digest as well as short representation. -// The uniqueness of the short representation is based on other -// digests in the set. If digests are omitted from this set, -// collisions in a larger set may not be detected, therefore it -// is important to always do short representation lookups on -// the complete set of digests. To mitigate collisions, an -// appropriately long short code should be used. -type Set struct { - mutex sync.RWMutex - entries digestEntries -} - -// NewSet creates an empty set of digests -// which may have digests added. -func NewSet() *Set { - return &Set{ - entries: digestEntries{}, - } -} - -// checkShortMatch checks whether two digests match as either whole -// values or short values. This function does not test equality, -// rather whether the second value could match against the first -// value. -func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { - if len(hex) == len(shortHex) { - if hex != shortHex { - return false - } - if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - } else if !strings.HasPrefix(hex, shortHex) { - return false - } else if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - return true -} - -// Lookup looks for a digest matching the given string representation. -// If no digests could be found ErrDigestNotFound will be returned -// with an empty digest value. If multiple matches are found -// ErrDigestAmbiguous will be returned with an empty digest value. 
-func (dst *Set) Lookup(d string) (Digest, error) { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - if len(dst.entries) == 0 { - return "", ErrDigestNotFound - } - var ( - searchFunc func(int) bool - alg Algorithm - hex string - ) - dgst, err := ParseDigest(d) - if err == ErrDigestInvalidFormat { - hex = d - searchFunc = func(i int) bool { - return dst.entries[i].val >= d - } - } else { - hex = dgst.Hex() - alg = dgst.Algorithm() - searchFunc = func(i int) bool { - if dst.entries[i].val == hex { - return dst.entries[i].alg >= alg - } - return dst.entries[i].val >= hex - } - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { - return "", ErrDigestNotFound - } - if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { - return dst.entries[idx].digest, nil - } - if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { - return "", ErrDigestAmbiguous - } - - return dst.entries[idx].digest, nil -} - -// Add adds the given digest to the set. An error will be returned -// if the given digest is invalid. If the digest already exists in the -// set, this operation will be a no-op. -func (dst *Set) Add(d Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) { - dst.entries = append(dst.entries, entry) - return nil - } else if dst.entries[idx].digest == d { - return nil - } - - entries := append(dst.entries, nil) - copy(entries[idx+1:], entries[idx:len(entries)-1]) - entries[idx] = entry - dst.entries = entries - return nil -} - -// Remove removes the given digest from the set. An err will be -// returned if the given digest is invalid. If the digest does -// not exist in the set, this operation will be a no-op. -func (dst *Set) Remove(d Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - // Not found if idx is after or value at idx is not digest - if idx == len(dst.entries) || dst.entries[idx].digest != d { - return nil - } - - entries := dst.entries - copy(entries[idx:], entries[idx+1:]) - entries = entries[:len(entries)-1] - dst.entries = entries - - return nil -} - -// All returns all the digests in the set -func (dst *Set) All() []Digest { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - retValues := make([]Digest, len(dst.entries)) - for i := range dst.entries { - retValues[i] = dst.entries[i].digest - } - - return retValues -} - -// ShortCodeTable returns a map of Digest to unique short codes. The -// length represents the minimum value, the maximum length may be the -// entire value of digest if uniqueness cannot be achieved without the -// full value. This function will attempt to make short codes as short -// as possible to be unique. 
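The Set/Lookup/ShortCodeTable trio above exists to resolve short digest prefixes back to full digests. A hedged usage sketch against the package as it existed before this removal (import path taken from the file headers; error handling kept minimal):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	set := digest.NewSet()

	// Add full digests computed with the canonical (sha256) algorithm.
	for _, blob := range [][]byte{[]byte("foo"), []byte("bar")} {
		if err := set.Add(digest.Canonical.FromBytes(blob)); err != nil {
			panic(err)
		}
	}

	// ShortCodeTable yields the shortest unambiguous prefix per digest ...
	for full, short := range digest.ShortCodeTable(set, 6) {
		// ... and Lookup resolves a short form back to the full digest.
		resolved, err := set.Lookup(short)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %s (match: %v)\n", short, resolved, resolved == full)
	}
}
```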
-func ShortCodeTable(dst *Set, length int) map[Digest]string { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - m := make(map[Digest]string, len(dst.entries)) - l := length - resetIdx := 0 - for i := 0; i < len(dst.entries); i++ { - var short string - extended := true - for extended { - extended = false - if len(dst.entries[i].val) <= l { - short = dst.entries[i].digest.String() - } else { - short = dst.entries[i].val[:l] - for j := i + 1; j < len(dst.entries); j++ { - if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { - if j > resetIdx { - resetIdx = j - } - extended = true - } else { - break - } - } - if extended { - l++ - } - } - } - m[dst.entries[i].digest] = short - if i >= resetIdx { - l = length - } - } - return m -} - -type digestEntry struct { - alg Algorithm - val string - digest Digest -} - -type digestEntries []*digestEntry - -func (d digestEntries) Len() int { - return len(d) -} - -func (d digestEntries) Less(i, j int) bool { - if d[i].val != d[j].val { - return d[i].val < d[j].val - } - return d[i].alg < d[j].alg -} - -func (d digestEntries) Swap(i, j int) { - d[i], d[j] = d[j], d[i] -} diff --git a/vendor/github.com/docker/distribution/digest/verifiers.go b/vendor/github.com/docker/distribution/digest/verifiers.go deleted file mode 100644 index 9af3be134..000000000 --- a/vendor/github.com/docker/distribution/digest/verifiers.go +++ /dev/null @@ -1,44 +0,0 @@ -package digest - -import ( - "hash" - "io" -) - -// Verifier presents a general verification interface to be used with message -// digests and other byte stream verifications. Users instantiate a Verifier -// from one of the various methods, write the data under test to it then check -// the result with the Verified method. -type Verifier interface { - io.Writer - - // Verified will return true if the content written to Verifier matches - // the digest. - Verified() bool -} - -// NewDigestVerifier returns a verifier that compares the written bytes -// against a passed in digest. -func NewDigestVerifier(d Digest) (Verifier, error) { - if err := d.Validate(); err != nil { - return nil, err - } - - return hashVerifier{ - hash: d.Algorithm().Hash(), - digest: d, - }, nil -} - -type hashVerifier struct { - digest Digest - hash hash.Hash -} - -func (hv hashVerifier) Write(p []byte) (n int, err error) { - return hv.hash.Write(p) -} - -func (hv hashVerifier) Verified() bool { - return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) -} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go deleted file mode 100644 index bb09fa25d..000000000 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ /dev/null @@ -1,334 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [hostname '/'] component ['/' component]* -// hostname := hostcomponent ['.' 
hostcomponent]* [':' port-number] -// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -package reference - -import ( - "errors" - "fmt" - - "github.com/docker/distribution/digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. -func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. -func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. 
-type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// in which it can be referenced by -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name including a name with hostname and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -func SplitHostname(named Named) (string, string) { - name := named.Name() - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests. -func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - // TODO(dmcgowan): Provide more specific and helpful error - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - ref := reference{ - name: matches[1], - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.ParseDigest(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name, otherwise an error is -// returned. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. -func ParseNamed(s string) (Named, error) { - ref, err := Parse(s) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - if !anchoredNameRegexp.MatchString(name) { - return nil, ErrReferenceInvalidFormat - } - return repository(name), nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - return taggedReference{ - name: name.Name(), - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. 
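A short usage sketch for the parsing entry points above (Parse, SplitHostname and the Named/Tagged interfaces), again written against the package as it existed before removal; the sample image reference is illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	ref, err := reference.Parse("docker.io/library/redis:3.2")
	if err != nil {
		panic(err)
	}

	// Parse returns the most specific reference type; this input yields
	// something that is both Named and Tagged.
	if named, ok := ref.(reference.Named); ok {
		hostname, name := reference.SplitHostname(named)
		fmt.Println("hostname:", hostname) // docker.io
		fmt.Println("name:    ", name)     // library/redis
	}
	if tagged, ok := ref.(reference.Tagged); ok {
		fmt.Println("tag:", tagged.Tag()) // 3.2
	}
}
```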
-func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - return canonicalReference{ - name: name.Name(), - digest: digest, - }, nil -} - -func getBestReferenceType(ref reference) Reference { - if ref.name == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - name: ref.name, - digest: ref.digest, - } - } - return repository(ref.name) - } - if ref.digest == "" { - return taggedReference{ - name: ref.name, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - name string - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.name + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Name() string { - return r.name -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository string - -func (r repository) String() string { - return string(r) -} - -func (r repository) Name() string { - return string(r) -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return d.String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - name string - tag string -} - -func (t taggedReference) String() string { - return t.name + ":" + t.tag -} - -func (t taggedReference) Name() string { - return t.name -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - name string - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.name + "@" + c.digest.String() -} - -func (c canonicalReference) Name() string { - return c.name -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go deleted file mode 100644 index 9a7d366bc..000000000 --- a/vendor/github.com/docker/distribution/reference/regexp.go +++ /dev/null @@ -1,124 +0,0 @@ -package reference - -import "regexp" - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // hostnameComponentRegexp restricts the registry hostname component of a - // repository name to start with a component as defined by hostnameRegexp - // and followed by an optional port. - hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // hostnameRegexp defines the structure of potential hostname components - // that may be part of image names. 
This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. - hostnameRegexp = expression( - hostnameComponentRegexp, - optional(repeated(literal(`.`), hostnameComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the hostname and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(hostnameRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // hostname and trailing components. - anchoredNameRegexp = anchored( - optional(capture(hostnameRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. 
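ReferenceRegexp above is the composition of these helpers, anchored and capturing name, tag and digest in that order; a small sketch of the group layout Parse relies on (the registry host and repository name are made up, the hex digest is the sample value from doc.go above):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	m := reference.ReferenceRegexp.FindStringSubmatch(
		"registry.example.com:5000/app/api:v1@sha256:" +
			"7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc")
	if m == nil {
		panic("reference did not match")
	}
	fmt.Println("name:  ", m[1]) // registry.example.com:5000/app/api
	fmt.Println("tag:   ", m[2]) // v1
	fmt.Println("digest:", m[3]) // sha256:7173b809...
}
```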
-func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md deleted file mode 100644 index 070bca7cd..000000000 --- a/vendor/github.com/emicklei/go-restful/CHANGES.md +++ /dev/null @@ -1,163 +0,0 @@ -Change history of go-restful -= -2016-02-14 -- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response -- add constructors for custom entity accessors for xml and json - -2015-09-27 -- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency - -2015-09-25 -- fixed problem with changing Header after WriteHeader (issue 235) - -2015-09-14 -- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write) -- added support for custom EntityReaderWriters. - -2015-08-06 -- add support for reading entities from compressed request content -- use sync.Pool for compressors of http response and request body -- add Description to Parameter for documentation in Swagger UI - -2015-03-20 -- add configurable logging - -2015-03-18 -- if not specified, the Operation is derived from the Route function - -2015-03-17 -- expose Parameter creation functions -- make trace logger an interface -- fix OPTIONSFilter -- customize rendering of ServiceError -- JSR311 router now handles wildcards -- add Notes to Route - -2014-11-27 -- (api add) PrettyPrint per response. (as proposed in #167) - -2014-11-12 -- (api add) ApiVersion(.) for documentation in Swagger UI - -2014-11-10 -- (api change) struct fields tagged with "description" show up in Swagger UI - -2014-10-31 -- (api change) ReturnsError -> Returns -- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder -- fix swagger nested structs -- sort Swagger response messages by code - -2014-10-23 -- (api add) ReturnsError allows you to document Http codes in swagger -- fixed problem with greedy CurlyRouter -- (api add) Access-Control-Max-Age in CORS -- add tracing functionality (injectable) for debugging purposes -- support JSON parse 64bit int -- fix empty parameters for swagger -- WebServicesUrl is now optional for swagger -- fixed duplicate AccessControlAllowOrigin in CORS -- (api change) expose ServeMux in container -- (api add) added AllowedDomains in CORS -- (api add) ParameterNamed for detailed documentation - -2014-04-16 -- (api add) expose constructor of Request for testing. - -2014-06-27 -- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification). -- (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons). - -2014-07-03 -- (api add) CORS can be configured with a list of allowed domains - -2014-03-12 -- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter) - -2014-02-26 -- (api add) Request now provides information about the matched Route, see method SelectedRoutePath - -2014-02-17 -- (api change) renamed parameter constants (go-lint checks) - -2014-01-10 - - (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier - -2014-01-07 - - (api change) Write* methods in Response now return the error or nil. - - added example of serving HTML from a Go template. - - fixed comparing Allowed headers in CORS (is now case-insensitive) - -2013-11-13 - - (api add) Response knows how many bytes are written to the response body. 
- -2013-10-29 - - (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information. - -2013-10-04 - - (api add) Response knows what HTTP status has been written - - (api add) Request can have attributes (map of string->interface, also called request-scoped variables - -2013-09-12 - - (api change) Router interface simplified - - Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths - -2013-08-05 - - add OPTIONS support - - add CORS support - -2013-08-27 - - fixed some reported issues (see github) - - (api change) deprecated use of WriteError; use WriteErrorString instead - -2014-04-15 - - (fix) v1.0.1 tag: fix Issue 111: WriteErrorString - -2013-08-08 - - (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer. - - (api add) the swagger package has be extended to have a UI per container. - - if panic is detected then a small stack trace is printed (thanks to runner-mei) - - (api add) WriteErrorString to Response - -Important API changes: - - - (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead. - - (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead. - - -2013-07-06 - - - (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature. - -2013-06-19 - - - (improve) DoNotRecover option, moved request body closer, improved ReadEntity - -2013-06-03 - - - (api change) removed Dispatcher interface, hide PathExpression - - changed receiver names of type functions to be more idiomatic Go - -2013-06-02 - - - (optimize) Cache the RegExp compilation of Paths. - -2013-05-22 - - - (api add) Added support for request/response filter functions - -2013-05-18 - - - - (api add) Added feature to change the default Http Request Dispatch function (travis cline) - - (api change) Moved Swagger Webservice to swagger package (see example restful-user) - -[2012-11-14 .. 2013-05-18> - - - See https://github.com/emicklei/go-restful/commits - -2012-11-14 - - - Initial commit - - diff --git a/vendor/github.com/emicklei/go-restful/LICENSE b/vendor/github.com/emicklei/go-restful/LICENSE deleted file mode 100644 index ece7ec61e..000000000 --- a/vendor/github.com/emicklei/go-restful/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012,2013 Ernest Micklei - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/Srcfile b/vendor/github.com/emicklei/go-restful/Srcfile deleted file mode 100644 index 16fd18689..000000000 --- a/vendor/github.com/emicklei/go-restful/Srcfile +++ /dev/null @@ -1 +0,0 @@ -{"SkipDirs": ["examples"]} diff --git a/vendor/github.com/emicklei/go-restful/compress.go b/vendor/github.com/emicklei/go-restful/compress.go deleted file mode 100644 index 220b37712..000000000 --- a/vendor/github.com/emicklei/go-restful/compress.go +++ /dev/null @@ -1,123 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bufio" - "compress/gzip" - "compress/zlib" - "errors" - "io" - "net" - "net/http" - "strings" -) - -// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting. -var EnableContentEncoding = false - -// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib) -type CompressingResponseWriter struct { - writer http.ResponseWriter - compressor io.WriteCloser - encoding string -} - -// Header is part of http.ResponseWriter interface -func (c *CompressingResponseWriter) Header() http.Header { - return c.writer.Header() -} - -// WriteHeader is part of http.ResponseWriter interface -func (c *CompressingResponseWriter) WriteHeader(status int) { - c.writer.WriteHeader(status) -} - -// Write is part of http.ResponseWriter interface -// It is passed through the compressor -func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) { - if c.isCompressorClosed() { - return -1, errors.New("Compressing error: tried to write data using closed compressor") - } - return c.compressor.Write(bytes) -} - -// CloseNotify is part of http.CloseNotifier interface -func (c *CompressingResponseWriter) CloseNotify() <-chan bool { - return c.writer.(http.CloseNotifier).CloseNotify() -} - -// Close the underlying compressor -func (c *CompressingResponseWriter) Close() error { - if c.isCompressorClosed() { - return errors.New("Compressing error: tried to close already closed compressor") - } - - c.compressor.Close() - if ENCODING_GZIP == c.encoding { - currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer)) - } - if ENCODING_DEFLATE == c.encoding { - currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer)) - } - // gc hint needed? 
- c.compressor = nil - return nil -} - -func (c *CompressingResponseWriter) isCompressorClosed() bool { - return nil == c.compressor -} - -// Hijack implements the Hijacker interface -// This is especially useful when combining Container.EnabledContentEncoding -// in combination with websockets (for instance gorilla/websocket) -func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - hijacker, ok := c.writer.(http.Hijacker) - if !ok { - return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface") - } - return hijacker.Hijack() -} - -// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested. -func wantsCompressedResponse(httpRequest *http.Request) (bool, string) { - header := httpRequest.Header.Get(HEADER_AcceptEncoding) - gi := strings.Index(header, ENCODING_GZIP) - zi := strings.Index(header, ENCODING_DEFLATE) - // use in order of appearance - if gi == -1 { - return zi != -1, ENCODING_DEFLATE - } else if zi == -1 { - return gi != -1, ENCODING_GZIP - } else { - if gi < zi { - return true, ENCODING_GZIP - } - return true, ENCODING_DEFLATE - } -} - -// NewCompressingResponseWriter create a CompressingResponseWriter for a known encoding = {gzip,deflate} -func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) { - httpWriter.Header().Set(HEADER_ContentEncoding, encoding) - c := new(CompressingResponseWriter) - c.writer = httpWriter - var err error - if ENCODING_GZIP == encoding { - w := currentCompressorProvider.AcquireGzipWriter() - w.Reset(httpWriter) - c.compressor = w - c.encoding = ENCODING_GZIP - } else if ENCODING_DEFLATE == encoding { - w := currentCompressorProvider.AcquireZlibWriter() - w.Reset(httpWriter) - c.compressor = w - c.encoding = ENCODING_DEFLATE - } else { - return nil, errors.New("Unknown encoding:" + encoding) - } - return c, err -} diff --git a/vendor/github.com/emicklei/go-restful/compressor_cache.go b/vendor/github.com/emicklei/go-restful/compressor_cache.go deleted file mode 100644 index ee426010a..000000000 --- a/vendor/github.com/emicklei/go-restful/compressor_cache.go +++ /dev/null @@ -1,103 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "compress/gzip" - "compress/zlib" -) - -// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount -// of writers and readers (resources). -// If a new resource is acquired and all are in use, it will return a new unmanaged resource. -type BoundedCachedCompressors struct { - gzipWriters chan *gzip.Writer - gzipReaders chan *gzip.Reader - zlibWriters chan *zlib.Writer - writersCapacity int - readersCapacity int -} - -// NewBoundedCachedCompressors returns a new, with filled cache, BoundedCachedCompressors. 
-func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors { - b := &BoundedCachedCompressors{ - gzipWriters: make(chan *gzip.Writer, writersCapacity), - gzipReaders: make(chan *gzip.Reader, readersCapacity), - zlibWriters: make(chan *zlib.Writer, writersCapacity), - writersCapacity: writersCapacity, - readersCapacity: readersCapacity, - } - for ix := 0; ix < writersCapacity; ix++ { - b.gzipWriters <- newGzipWriter() - b.zlibWriters <- newZlibWriter() - } - for ix := 0; ix < readersCapacity; ix++ { - b.gzipReaders <- newGzipReader() - } - return b -} - -// AcquireGzipWriter returns an resettable *gzip.Writer. Needs to be released. -func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer { - var writer *gzip.Writer - select { - case writer, _ = <-b.gzipWriters: - default: - // return a new unmanaged one - writer = newGzipWriter() - } - return writer -} - -// ReleaseGzipWriter accepts a writer (does not have to be one that was cached) -// only when the cache has room for it. It will ignore it otherwise. -func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) { - // forget the unmanaged ones - if len(b.gzipWriters) < b.writersCapacity { - b.gzipWriters <- w - } -} - -// AcquireGzipReader returns a *gzip.Reader. Needs to be released. -func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader { - var reader *gzip.Reader - select { - case reader, _ = <-b.gzipReaders: - default: - // return a new unmanaged one - reader = newGzipReader() - } - return reader -} - -// ReleaseGzipReader accepts a reader (does not have to be one that was cached) -// only when the cache has room for it. It will ignore it otherwise. -func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) { - // forget the unmanaged ones - if len(b.gzipReaders) < b.readersCapacity { - b.gzipReaders <- r - } -} - -// AcquireZlibWriter returns an resettable *zlib.Writer. Needs to be released. -func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer { - var writer *zlib.Writer - select { - case writer, _ = <-b.zlibWriters: - default: - // return a new unmanaged one - writer = newZlibWriter() - } - return writer -} - -// ReleaseZlibWriter accepts a writer (does not have to be one that was cached) -// only when the cache has room for it. It will ignore it otherwise. -func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) { - // forget the unmanaged ones - if len(b.zlibWriters) < b.writersCapacity { - b.zlibWriters <- w - } -} diff --git a/vendor/github.com/emicklei/go-restful/compressor_pools.go b/vendor/github.com/emicklei/go-restful/compressor_pools.go deleted file mode 100644 index d866ce64b..000000000 --- a/vendor/github.com/emicklei/go-restful/compressor_pools.go +++ /dev/null @@ -1,91 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "compress/gzip" - "compress/zlib" - "sync" -) - -// SyncPoolCompessors is a CompressorProvider that use the standard sync.Pool. -type SyncPoolCompessors struct { - GzipWriterPool *sync.Pool - GzipReaderPool *sync.Pool - ZlibWriterPool *sync.Pool -} - -// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors. 
-func NewSyncPoolCompessors() *SyncPoolCompessors { - return &SyncPoolCompessors{ - GzipWriterPool: &sync.Pool{ - New: func() interface{} { return newGzipWriter() }, - }, - GzipReaderPool: &sync.Pool{ - New: func() interface{} { return newGzipReader() }, - }, - ZlibWriterPool: &sync.Pool{ - New: func() interface{} { return newZlibWriter() }, - }, - } -} - -func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer { - return s.GzipWriterPool.Get().(*gzip.Writer) -} - -func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) { - s.GzipWriterPool.Put(w) -} - -func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader { - return s.GzipReaderPool.Get().(*gzip.Reader) -} - -func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) { - s.GzipReaderPool.Put(r) -} - -func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer { - return s.ZlibWriterPool.Get().(*zlib.Writer) -} - -func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) { - s.ZlibWriterPool.Put(w) -} - -func newGzipWriter() *gzip.Writer { - // create with an empty bytes writer; it will be replaced before using the gzipWriter - writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) - if err != nil { - panic(err.Error()) - } - return writer -} - -func newGzipReader() *gzip.Reader { - // create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader - // we can safely use currentCompressProvider because it is set on package initialization. - w := currentCompressorProvider.AcquireGzipWriter() - defer currentCompressorProvider.ReleaseGzipWriter(w) - b := new(bytes.Buffer) - w.Reset(b) - w.Flush() - w.Close() - reader, err := gzip.NewReader(bytes.NewReader(b.Bytes())) - if err != nil { - panic(err.Error()) - } - return reader -} - -func newZlibWriter() *zlib.Writer { - writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) - if err != nil { - panic(err.Error()) - } - return writer -} diff --git a/vendor/github.com/emicklei/go-restful/compressors.go b/vendor/github.com/emicklei/go-restful/compressors.go deleted file mode 100644 index f028456e0..000000000 --- a/vendor/github.com/emicklei/go-restful/compressors.go +++ /dev/null @@ -1,53 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "compress/gzip" - "compress/zlib" -) - -type CompressorProvider interface { - // Returns a *gzip.Writer which needs to be released later. - // Before using it, call Reset(). - AcquireGzipWriter() *gzip.Writer - - // Releases an aqcuired *gzip.Writer. - ReleaseGzipWriter(w *gzip.Writer) - - // Returns a *gzip.Reader which needs to be released later. - AcquireGzipReader() *gzip.Reader - - // Releases an aqcuired *gzip.Reader. - ReleaseGzipReader(w *gzip.Reader) - - // Returns a *zlib.Writer which needs to be released later. - // Before using it, call Reset(). - AcquireZlibWriter() *zlib.Writer - - // Releases an aqcuired *zlib.Writer. - ReleaseZlibWriter(w *zlib.Writer) -} - -// DefaultCompressorProvider is the actual provider of compressors (zlib or gzip). -var currentCompressorProvider CompressorProvider - -func init() { - currentCompressorProvider = NewSyncPoolCompessors() -} - -// CurrentCompressorProvider returns the current CompressorProvider. -// It is initialized using a SyncPoolCompessors. 
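The sync.Pool provider above is the default; a sketch of swapping it for the bounded cache variant removed earlier in this patch (capacities are arbitrary example values; compression itself still has to be enabled per container):

```go
package main

import restful "github.com/emicklei/go-restful"

func main() {
	// Replace the default sync.Pool-backed provider with the bounded cache
	// implementation shown above; 16/16 are illustrative capacities.
	restful.SetCompressorProvider(restful.NewBoundedCachedCompressors(16, 16))

	// Content encoding remains opt-in on the container.
	restful.DefaultContainer.EnableContentEncoding(true)
}
```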
-func CurrentCompressorProvider() CompressorProvider { - return currentCompressorProvider -} - -// CompressorProvider sets the actual provider of compressors (zlib or gzip). -func SetCompressorProvider(p CompressorProvider) { - if p == nil { - panic("cannot set compressor provider to nil") - } - currentCompressorProvider = p -} diff --git a/vendor/github.com/emicklei/go-restful/constants.go b/vendor/github.com/emicklei/go-restful/constants.go deleted file mode 100644 index 203439c5e..000000000 --- a/vendor/github.com/emicklei/go-restful/constants.go +++ /dev/null @@ -1,30 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -const ( - MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces() - MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces() - MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default - - HEADER_Allow = "Allow" - HEADER_Accept = "Accept" - HEADER_Origin = "Origin" - HEADER_ContentType = "Content-Type" - HEADER_LastModified = "Last-Modified" - HEADER_AcceptEncoding = "Accept-Encoding" - HEADER_ContentEncoding = "Content-Encoding" - HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers" - HEADER_AccessControlRequestMethod = "Access-Control-Request-Method" - HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers" - HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods" - HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin" - HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials" - HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers" - HEADER_AccessControlMaxAge = "Access-Control-Max-Age" - - ENCODING_GZIP = "gzip" - ENCODING_DEFLATE = "deflate" -) diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/container.go deleted file mode 100644 index 4e53cccb9..000000000 --- a/vendor/github.com/emicklei/go-restful/container.go +++ /dev/null @@ -1,361 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "errors" - "fmt" - "net/http" - "os" - "runtime" - "strings" - "sync" - - "github.com/emicklei/go-restful/log" -) - -// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests. 
-// The requests are further dispatched to routes of WebServices using a RouteSelector -type Container struct { - webServicesLock sync.RWMutex - webServices []*WebService - ServeMux *http.ServeMux - isRegisteredOnRoot bool - containerFilters []FilterFunction - doNotRecover bool // default is false - recoverHandleFunc RecoverHandleFunction - serviceErrorHandleFunc ServiceErrorHandleFunction - router RouteSelector // default is a RouterJSR311, CurlyRouter is the faster alternative - contentEncodingEnabled bool // default is false -} - -// NewContainer creates a new Container using a new ServeMux and default router (RouterJSR311) -func NewContainer() *Container { - return &Container{ - webServices: []*WebService{}, - ServeMux: http.NewServeMux(), - isRegisteredOnRoot: false, - containerFilters: []FilterFunction{}, - doNotRecover: false, - recoverHandleFunc: logStackOnRecover, - serviceErrorHandleFunc: writeServiceError, - router: RouterJSR311{}, - contentEncodingEnabled: false} -} - -// RecoverHandleFunction declares functions that can be used to handle a panic situation. -// The first argument is what recover() returns. The second must be used to communicate an error response. -type RecoverHandleFunction func(interface{}, http.ResponseWriter) - -// RecoverHandler changes the default function (logStackOnRecover) to be called -// when a panic is detected. DoNotRecover must be have its default value (=false). -func (c *Container) RecoverHandler(handler RecoverHandleFunction) { - c.recoverHandleFunc = handler -} - -// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation. -// The first argument is the service error, the second is the request that resulted in the error and -// the third must be used to communicate an error response. -type ServiceErrorHandleFunction func(ServiceError, *Request, *Response) - -// ServiceErrorHandler changes the default function (writeServiceError) to be called -// when a ServiceError is detected. -func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) { - c.serviceErrorHandleFunc = handler -} - -// DoNotRecover controls whether panics will be caught to return HTTP 500. -// If set to true, Route functions are responsible for handling any error situation. -// Default value is false = recover from panics. This has performance implications. -func (c *Container) DoNotRecover(doNot bool) { - c.doNotRecover = doNot -} - -// Router changes the default Router (currently RouterJSR311) -func (c *Container) Router(aRouter RouteSelector) { - c.router = aRouter -} - -// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. -func (c *Container) EnableContentEncoding(enabled bool) { - c.contentEncodingEnabled = enabled -} - -// Add a WebService to the Container. It will detect duplicate root paths and exit in that case. 
-func (c *Container) Add(service *WebService) *Container { - c.webServicesLock.Lock() - defer c.webServicesLock.Unlock() - - // if rootPath was not set then lazy initialize it - if len(service.rootPath) == 0 { - service.Path("/") - } - - // cannot have duplicate root paths - for _, each := range c.webServices { - if each.RootPath() == service.RootPath() { - log.Printf("[restful] WebService with duplicate root path detected:['%v']", each) - os.Exit(1) - } - } - - // If not registered on root then add specific mapping - if !c.isRegisteredOnRoot { - c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux) - } - c.webServices = append(c.webServices, service) - return c -} - -// addHandler may set a new HandleFunc for the serveMux -// this function must run inside the critical region protected by the webServicesLock. -// returns true if the function was registered on root ("/") -func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool { - pattern := fixedPrefixPath(service.RootPath()) - // check if root path registration is needed - if "/" == pattern || "" == pattern { - serveMux.HandleFunc("/", c.dispatch) - return true - } - // detect if registration already exists - alreadyMapped := false - for _, each := range c.webServices { - if each.RootPath() == service.RootPath() { - alreadyMapped = true - break - } - } - if !alreadyMapped { - serveMux.HandleFunc(pattern, c.dispatch) - if !strings.HasSuffix(pattern, "/") { - serveMux.HandleFunc(pattern+"/", c.dispatch) - } - } - return false -} - -func (c *Container) Remove(ws *WebService) error { - if c.ServeMux == http.DefaultServeMux { - errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws) - log.Printf(errMsg) - return errors.New(errMsg) - } - c.webServicesLock.Lock() - defer c.webServicesLock.Unlock() - // build a new ServeMux and re-register all WebServices - newServeMux := http.NewServeMux() - newServices := []*WebService{} - newIsRegisteredOnRoot := false - for _, each := range c.webServices { - if each.rootPath != ws.rootPath { - // If not registered on root then add specific mapping - if !newIsRegisteredOnRoot { - newIsRegisteredOnRoot = c.addHandler(each, newServeMux) - } - newServices = append(newServices, each) - } - } - c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot - return nil -} - -// logStackOnRecover is the default RecoverHandleFunction and is called -// when DoNotRecover is false and the recoverHandleFunc is not set for the container. -// Default implementation logs the stacktrace and writes the stacktrace on the response. -// This may be a security issue as it exposes sourcecode information. -func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) { - var buffer bytes.Buffer - buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason)) - for i := 2; ; i += 1 { - _, file, line, ok := runtime.Caller(i) - if !ok { - break - } - buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line)) - } - log.Print(buffer.String()) - httpWriter.WriteHeader(http.StatusInternalServerError) - httpWriter.Write(buffer.Bytes()) -} - -// writeServiceError is the default ServiceErrorHandleFunction and is called -// when a ServiceError is returned during route selection. 
Default implementation -// calls resp.WriteErrorString(err.Code, err.Message) -func writeServiceError(err ServiceError, req *Request, resp *Response) { - resp.WriteErrorString(err.Code, err.Message) -} - -// Dispatch the incoming Http Request to a matching WebService. -func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { - writer := httpWriter - - // CompressingResponseWriter should be closed after all operations are done - defer func() { - if compressWriter, ok := writer.(*CompressingResponseWriter); ok { - compressWriter.Close() - } - }() - - // Instal panic recovery unless told otherwise - if !c.doNotRecover { // catch all for 500 response - defer func() { - if r := recover(); r != nil { - c.recoverHandleFunc(r, writer) - return - } - }() - } - // Install closing the request body (if any) - defer func() { - if nil != httpRequest.Body { - httpRequest.Body.Close() - } - }() - - // Detect if compression is needed - // assume without compression, test for override - if c.contentEncodingEnabled { - doCompress, encoding := wantsCompressedResponse(httpRequest) - if doCompress { - var err error - writer, err = NewCompressingResponseWriter(httpWriter, encoding) - if err != nil { - log.Print("[restful] unable to install compressor: ", err) - httpWriter.WriteHeader(http.StatusInternalServerError) - return - } - } - } - // Find best match Route ; err is non nil if no match was found - var webService *WebService - var route *Route - var err error - func() { - c.webServicesLock.RLock() - defer c.webServicesLock.RUnlock() - webService, route, err = c.router.SelectRoute( - c.webServices, - httpRequest) - }() - if err != nil { - // a non-200 response has already been written - // run container filters anyway ; they should not touch the response... - chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { - switch err.(type) { - case ServiceError: - ser := err.(ServiceError) - c.serviceErrorHandleFunc(ser, req, resp) - } - // TODO - }} - chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer)) - return - } - wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest) - // pass through filters (if any) - if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 { - // compose filter chain - allFilters := []FilterFunction{} - allFilters = append(allFilters, c.containerFilters...) - allFilters = append(allFilters, webService.filters...) - allFilters = append(allFilters, route.Filters...) - chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) { - // handle request by route after passing all filters - route.Function(wrappedRequest, wrappedResponse) - }} - chain.ProcessFilter(wrappedRequest, wrappedResponse) - } else { - // no filters, handle request by route - route.Function(wrappedRequest, wrappedResponse) - } -} - -// fixedPrefixPath returns the fixed part of the partspec ; it may include template vars {} -func fixedPrefixPath(pathspec string) string { - varBegin := strings.Index(pathspec, "{") - if -1 == varBegin { - return pathspec - } - return pathspec[:varBegin] -} - -// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server -func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) { - c.ServeMux.ServeHTTP(httpwriter, httpRequest) -} - -// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics. 
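A minimal sketch of the container wiring described above: a container-level filter plus a plain http.Handler mounted through HandleWithFilter so the filter chain still runs for it. The path, header name and port are illustrative; the filter signature follows the FilterFunction shape used throughout this package.

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	c := restful.NewContainer()

	// A container filter runs before every request dispatched by this container.
	c.Filter(func(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
		resp.AddHeader("X-Example", "filtered") // illustrative header
		chain.ProcessFilter(req, resp)
	})

	// Plain handlers can be mounted too; HandleWithFilter applies the
	// container filter chain around them.
	c.HandleWithFilter("/healthz", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))

	// Container implements http.Handler through ServeHTTP.
	log.Fatal(http.ListenAndServe(":8080", c))
}
```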
-func (c *Container) Handle(pattern string, handler http.Handler) { - c.ServeMux.Handle(pattern, handler) -} - -// HandleWithFilter registers the handler for the given pattern. -// Container's filter chain is applied for handler. -// If a handler already exists for pattern, HandleWithFilter panics. -func (c *Container) HandleWithFilter(pattern string, handler http.Handler) { - f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) { - if len(c.containerFilters) == 0 { - handler.ServeHTTP(httpResponse, httpRequest) - return - } - - chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { - handler.ServeHTTP(httpResponse, httpRequest) - }} - chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse)) - } - - c.Handle(pattern, http.HandlerFunc(f)) -} - -// Filter appends a container FilterFunction. These are called before dispatching -// a http.Request to a WebService from the container -func (c *Container) Filter(filter FilterFunction) { - c.containerFilters = append(c.containerFilters, filter) -} - -// RegisteredWebServices returns the collections of added WebServices -func (c *Container) RegisteredWebServices() []*WebService { - c.webServicesLock.RLock() - defer c.webServicesLock.RUnlock() - result := make([]*WebService, len(c.webServices)) - for ix := range c.webServices { - result[ix] = c.webServices[ix] - } - return result -} - -// computeAllowedMethods returns a list of HTTP methods that are valid for a Request -func (c *Container) computeAllowedMethods(req *Request) []string { - // Go through all RegisteredWebServices() and all its Routes to collect the options - methods := []string{} - requestPath := req.Request.URL.Path - for _, ws := range c.RegisteredWebServices() { - matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath) - if matches != nil { - finalMatch := matches[len(matches)-1] - for _, rt := range ws.Routes() { - matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch) - if matches != nil { - lastMatch := matches[len(matches)-1] - if lastMatch == "" || lastMatch == "/" { // do not include if value is neither empty nor ‘/’. - methods = append(methods, rt.Method) - } - } - } - } - } - // methods = append(methods, "OPTIONS") not sure about this - return methods -} - -// newBasicRequestResponse creates a pair of Request,Response from its http versions. -// It is basic because no parameter or (produces) content-type information is given. -func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) { - resp := NewResponse(httpWriter) - resp.requestAccept = httpRequest.Header.Get(HEADER_Accept) - return NewRequest(httpRequest), resp -} diff --git a/vendor/github.com/emicklei/go-restful/cors_filter.go b/vendor/github.com/emicklei/go-restful/cors_filter.go deleted file mode 100644 index 1efeef072..000000000 --- a/vendor/github.com/emicklei/go-restful/cors_filter.go +++ /dev/null @@ -1,202 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "regexp" - "strconv" - "strings" -) - -// CrossOriginResourceSharing is used to create a Container Filter that implements CORS. -// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page -// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from. 
-// -// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing -// http://enable-cors.org/server.html -// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request -type CrossOriginResourceSharing struct { - ExposeHeaders []string // list of Header names - AllowedHeaders []string // list of Header names - AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed. - AllowedMethods []string - MaxAge int // number of seconds before requiring new Options request - CookiesAllowed bool - Container *Container - - allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check. -} - -// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html -// and http://www.html5rocks.com/static/images/cors_server_flowchart.png -func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) { - origin := req.Request.Header.Get(HEADER_Origin) - if len(origin) == 0 { - if trace { - traceLogger.Print("no Http header Origin set") - } - chain.ProcessFilter(req, resp) - return - } - if !c.isOriginAllowed(origin) { // check whether this origin is allowed - if trace { - traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns) - } - chain.ProcessFilter(req, resp) - return - } - if req.Request.Method != "OPTIONS" { - c.doActualRequest(req, resp) - chain.ProcessFilter(req, resp) - return - } - if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" { - c.doPreflightRequest(req, resp) - } else { - c.doActualRequest(req, resp) - chain.ProcessFilter(req, resp) - return - } -} - -func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) { - c.setOptionsHeaders(req, resp) - // continue processing the response -} - -func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) { - if len(c.AllowedMethods) == 0 { - if c.Container == nil { - c.AllowedMethods = DefaultContainer.computeAllowedMethods(req) - } else { - c.AllowedMethods = c.Container.computeAllowedMethods(req) - } - } - - acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod) - if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) { - if trace { - traceLogger.Printf("Http header %s:%s is not in %v", - HEADER_AccessControlRequestMethod, - acrm, - c.AllowedMethods) - } - return - } - acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders) - if len(acrhs) > 0 { - for _, each := range strings.Split(acrhs, ",") { - if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) { - if trace { - traceLogger.Printf("Http header %s:%s is not in %v", - HEADER_AccessControlRequestHeaders, - acrhs, - c.AllowedHeaders) - } - return - } - } - } - resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ",")) - resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs) - c.setOptionsHeaders(req, resp) - - // return http 200 response, no body -} - -func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) { - c.checkAndSetExposeHeaders(resp) - c.setAllowOriginHeader(req, resp) - c.checkAndSetAllowCredentials(resp) - if c.MaxAge > 0 { - resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge)) - } -} - -func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool { - if len(origin) == 0 { - return 
false - } - if len(c.AllowedDomains) == 0 { - return true - } - - allowed := false - for _, domain := range c.AllowedDomains { - if domain == origin { - allowed = true - break - } - } - - if !allowed { - if len(c.allowedOriginPatterns) == 0 { - // compile allowed domains to allowed origin patterns - allowedOriginRegexps, err := compileRegexps(c.AllowedDomains) - if err != nil { - return false - } - c.allowedOriginPatterns = allowedOriginRegexps - } - - for _, pattern := range c.allowedOriginPatterns { - if allowed = pattern.MatchString(origin); allowed { - break - } - } - } - - return allowed -} - -func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) { - origin := req.Request.Header.Get(HEADER_Origin) - if c.isOriginAllowed(origin) { - resp.AddHeader(HEADER_AccessControlAllowOrigin, origin) - } -} - -func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) { - if len(c.ExposeHeaders) > 0 { - resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ",")) - } -} - -func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) { - if c.CookiesAllowed { - resp.AddHeader(HEADER_AccessControlAllowCredentials, "true") - } -} - -func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool { - for _, each := range allowedMethods { - if each == method { - return true - } - } - return false -} - -func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool { - for _, each := range c.AllowedHeaders { - if strings.ToLower(each) == strings.ToLower(header) { - return true - } - } - return false -} - -// Take a list of strings and compile them into a list of regular expressions. -func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { - regexps := []*regexp.Regexp{} - for _, regexpStr := range regexpStrings { - r, err := regexp.Compile(regexpStr) - if err != nil { - return regexps, err - } - regexps = append(regexps, r) - } - return regexps, nil -} diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/curly.go deleted file mode 100644 index 185300dbc..000000000 --- a/vendor/github.com/emicklei/go-restful/curly.go +++ /dev/null @@ -1,162 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "net/http" - "regexp" - "sort" - "strings" -) - -// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets. -type CurlyRouter struct{} - -// SelectRoute is part of the Router interface and returns the best match -// for the WebService and its Route for the given Request. 
-func (c CurlyRouter) SelectRoute( - webServices []*WebService, - httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) { - - requestTokens := tokenizePath(httpRequest.URL.Path) - - detectedService := c.detectWebService(requestTokens, webServices) - if detectedService == nil { - if trace { - traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path) - } - return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found") - } - candidateRoutes := c.selectRoutes(detectedService, requestTokens) - if len(candidateRoutes) == 0 { - if trace { - traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path) - } - return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found") - } - selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest) - if selectedRoute == nil { - return detectedService, nil, err - } - return detectedService, selectedRoute, nil -} - -// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. -func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { - candidates := sortableCurlyRoutes{} - for _, each := range ws.routes { - matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens) - if matches { - candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? - } - } - sort.Sort(sort.Reverse(candidates)) - return candidates -} - -// matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are. -func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) { - if len(routeTokens) < len(requestTokens) { - // proceed in matching only if last routeToken is wildcard - count := len(routeTokens) - if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") { - return false, 0, 0 - } - // proceed - } - for i, routeToken := range routeTokens { - if i == len(requestTokens) { - // reached end of request path - return false, 0, 0 - } - requestToken := requestTokens[i] - if strings.HasPrefix(routeToken, "{") { - paramCount++ - if colon := strings.Index(routeToken, ":"); colon != -1 { - // match by regex - matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken) - if !matchesToken { - return false, 0, 0 - } - if matchesRemainder { - break - } - } - } else { // no { prefix - if requestToken != routeToken { - return false, 0, 0 - } - staticCount++ - } - } - return true, paramCount, staticCount -} - -// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens -// format routeToken is {someVar:someExpression}, e.g. 
{zipcode:[\d][\d][\d][\d][A-Z][A-Z]} -func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) { - regPart := routeToken[colon+1 : len(routeToken)-1] - if regPart == "*" { - if trace { - traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken) - } - return true, true - } - matched, err := regexp.MatchString(regPart, requestToken) - return (matched && err == nil), false -} - -// detectRoute selectes from a list of Route the first match by inspecting both the Accept and Content-Type -// headers of the Request. See also RouterJSR311 in jsr311.go -func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) { - // tracing is done inside detectRoute - return RouterJSR311{}.detectRoute(candidateRoutes.routes(), httpRequest) -} - -// detectWebService returns the best matching webService given the list of path tokens. -// see also computeWebserviceScore -func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService { - var best *WebService - score := -1 - for _, each := range webServices { - matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens) - if matches && (eachScore > score) { - best = each - score = eachScore - } - } - return best -} - -// computeWebserviceScore returns whether tokens match and -// the weighted score of the longest matching consecutive tokens from the beginning. -func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) { - if len(tokens) > len(requestTokens) { - return false, 0 - } - score := 0 - for i := 0; i < len(tokens); i++ { - each := requestTokens[i] - other := tokens[i] - if len(each) == 0 && len(other) == 0 { - score++ - continue - } - if len(other) > 0 && strings.HasPrefix(other, "{") { - // no empty match - if len(each) == 0 { - return false, score - } - score += 1 - } else { - // not a parameter - if each != other { - return false, score - } - score += (len(tokens) - i) * 10 //fuzzy - } - } - return true, score -} diff --git a/vendor/github.com/emicklei/go-restful/curly_route.go b/vendor/github.com/emicklei/go-restful/curly_route.go deleted file mode 100644 index 296f94650..000000000 --- a/vendor/github.com/emicklei/go-restful/curly_route.go +++ /dev/null @@ -1,52 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// curlyRoute exits for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements. 
-type curlyRoute struct { - route Route - paramCount int - staticCount int -} - -type sortableCurlyRoutes []curlyRoute - -func (s *sortableCurlyRoutes) add(route curlyRoute) { - *s = append(*s, route) -} - -func (s sortableCurlyRoutes) routes() (routes []Route) { - for _, each := range s { - routes = append(routes, each.route) // TODO change return type - } - return routes -} - -func (s sortableCurlyRoutes) Len() int { - return len(s) -} -func (s sortableCurlyRoutes) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s sortableCurlyRoutes) Less(i, j int) bool { - ci := s[i] - cj := s[j] - - // primary key - if ci.staticCount < cj.staticCount { - return true - } - if ci.staticCount > cj.staticCount { - return false - } - // secundary key - if ci.paramCount < cj.paramCount { - return true - } - if ci.paramCount > cj.paramCount { - return false - } - return ci.route.Path < cj.route.Path -} diff --git a/vendor/github.com/emicklei/go-restful/doc.go b/vendor/github.com/emicklei/go-restful/doc.go deleted file mode 100644 index d40405bf7..000000000 --- a/vendor/github.com/emicklei/go-restful/doc.go +++ /dev/null @@ -1,196 +0,0 @@ -/* -Package restful, a lean package for creating REST-style WebServices without magic. - -WebServices and Routes - -A WebService has a collection of Route objects that dispatch incoming Http Requests to a function calls. -Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes. -WebServices must be added to a container (see below) in order to handler Http requests from a server. - -A Route is defined by a HTTP method, an URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept). -This package has the logic to find the best matching Route and if found, call its Function. - - ws := new(restful.WebService) - ws. - Path("/users"). - Consumes(restful.MIME_JSON, restful.MIME_XML). - Produces(restful.MIME_JSON, restful.MIME_XML) - - ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource - - ... - - // GET http://localhost:8080/users/1 - func (u UserResource) findUser(request *restful.Request, response *restful.Response) { - id := request.PathParameter("user-id") - ... - } - -The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response. - -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation. - -Regular expression matching Routes - -A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path. -For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters. -Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax) -This feature requires the use of a CurlyRouter. - -Containers - -A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests. -Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container. -The Default container of go-restful uses the http.DefaultServeMux. -You can create your own Container and create a new http.Server for that particular container. 
- - container := restful.NewContainer() - server := &http.Server{Addr: ":8081", Handler: container} - -Filters - -A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses. -You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc. -In the restful package there are three hooks into the request,response flow where filters can be added. -Each filter must define a FilterFunction: - - func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain) - -Use the following statement to pass the request,response pair to the next filter or RouteFunction - - chain.ProcessFilter(req, resp) - -Container Filters - -These are processed before any registered WebService. - - // install a (global) filter for the default container (processed before any webservice) - restful.Filter(globalLogging) - -WebService Filters - -These are processed before any Route of a WebService. - - // install a webservice filter (processed before any route) - ws.Filter(webserviceLogging).Filter(measureTime) - - -Route Filters - -These are processed before calling the function associated with the Route. - - // install 2 chained route filters (processed before calling findUser) - ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser)) - -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations. - -Response Encoding - -Two encodings are supported: gzip and deflate. To enable this for all responses: - - restful.DefaultContainer.EnableContentEncoding(true) - -If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding. -Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route. - -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go - -OPTIONS support - -By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request. - - Filter(OPTIONSFilter()) - -CORS - -By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests. - - cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer} - Filter(cors.Filter) - -Error Handling - -Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why. -For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation. - - 400: Bad Request - -If path or query parameters are not valid (content or type) then use http.StatusBadRequest. - - 404: Not Found - -Despite a valid URI, the resource requested may not be available - - 500: Internal Server Error - -If the application logic could not process the request (or write the response) then use http.StatusInternalServerError. - - 405: Method Not Allowed - -The request has a valid URL but the method (GET,PUT,POST,...) is not allowed. - - 406: Not Acceptable - -The request does not have or has an unknown Accept Header set for this operation. - - 415: Unsupported Media Type - -The request does not have or has an unknown Content-Type Header set for this operation. 
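To make the status-code guidance above concrete, here is a minimal sketch (not part of the original doc.go in this patch, and assuming the usual `restful` and `net/http` imports) of a handler that maps common failure cases onto these codes using the Response helpers defined later in this patch (WriteErrorString, WriteEntity). The UserResource/findUser names reuse the earlier example; the `u.users` map is a hypothetical stand-in for a real lookup.

    // Illustrative sketch only; assumes restful ("github.com/emicklei/go-restful") and "net/http" are imported.
    func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
        id := request.PathParameter("user-id")
        if id == "" {
            // 400: path or query parameters are not valid
            response.WriteErrorString(http.StatusBadRequest, "user-id is required")
            return
        }
        usr, found := u.users[id] // hypothetical lookup; not part of the original example
        if !found {
            // 404: the URI is valid but the requested resource is not available
            response.WriteErrorString(http.StatusNotFound, "user "+id+" could not be found")
            return
        }
        // 200 with the serialized entity, or 500 if the response cannot be written
        if err := response.WriteEntity(usr); err != nil {
            response.WriteErrorString(http.StatusInternalServerError, err.Error())
        }
    }

A WebService registered as in the WebServices and Routes section would dispatch GET /users/{user-id} to this function.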
- -ServiceError - -In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response. - -Performance options - -This package has several options that affect the performance of your service. It is important to understand them and how you can change it. - - restful.DefaultContainer.Router(CurlyRouter{}) - -The default router is the RouterJSR311 which is an implementation of its spec (http://jsr311.java.net/nonav/releases/1.1/spec/spec.html). -However, it uses regular expressions for all its routes which, depending on your usecase, may consume a significant amount of time. -The CurlyRouter implementation is more lightweight that also allows you to use wildcards and expressions, but only if needed. - - restful.DefaultContainer.DoNotRecover(true) - -DoNotRecover controls whether panics will be caught to return HTTP 500. -If set to true, Route functions are responsible for handling any error situation. -Default value is false; it will recover from panics. This has performance implications. - - restful.SetCacheReadEntity(false) - -SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable. -If you expect to read large amounts of payload data, and you do not use this feature, you should set it to false. - - restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20)) - -If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool. -Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation. - -Trouble shooting - -This package has the means to produce detail logging of the complete Http request matching process and filter invocation. -Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) instance such as: - - restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile)) - -Logging - -The restful.SetLogger() method allows you to override the logger used by the package. By default restful -uses the standard library `log` package and logs to stdout. Different logging packages are supported as -long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your -preferred package is simple. - -Resources - -[project]: https://github.com/emicklei/go-restful - -[examples]: https://github.com/emicklei/go-restful/blob/master/examples - -[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/ - -[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape - -(c) 2012-2015, http://ernestmicklei.com. MIT License -*/ -package restful diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/entity_accessors.go deleted file mode 100644 index 6ecf6c7f8..000000000 --- a/vendor/github.com/emicklei/go-restful/entity_accessors.go +++ /dev/null @@ -1,163 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "encoding/json" - "encoding/xml" - "strings" - "sync" -) - -// EntityReaderWriter can read and write values using an encoding such as JSON,XML. -type EntityReaderWriter interface { - // Read a serialized version of the value from the request. - // The Request may have a decompressing reader. Depends on Content-Encoding. 
- Read(req *Request, v interface{}) error - - // Write a serialized version of the value on the response. - // The Response may have a compressing writer. Depends on Accept-Encoding. - // status should be a valid Http Status code - Write(resp *Response, status int, v interface{}) error -} - -// entityAccessRegistry is a singleton -var entityAccessRegistry = &entityReaderWriters{ - protection: new(sync.RWMutex), - accessors: map[string]EntityReaderWriter{}, -} - -// entityReaderWriters associates MIME to an EntityReaderWriter -type entityReaderWriters struct { - protection *sync.RWMutex - accessors map[string]EntityReaderWriter -} - -func init() { - RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON)) - RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML)) -} - -// RegisterEntityAccessor add/overrides the ReaderWriter for encoding content with this MIME type. -func RegisterEntityAccessor(mime string, erw EntityReaderWriter) { - entityAccessRegistry.protection.Lock() - defer entityAccessRegistry.protection.Unlock() - entityAccessRegistry.accessors[mime] = erw -} - -// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content. -// This package is already initialized with such an accessor using the MIME_JSON contentType. -func NewEntityAccessorJSON(contentType string) EntityReaderWriter { - return entityJSONAccess{ContentType: contentType} -} - -// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content. -// This package is already initialized with such an accessor using the MIME_XML contentType. -func NewEntityAccessorXML(contentType string) EntityReaderWriter { - return entityXMLAccess{ContentType: contentType} -} - -// accessorAt returns the registered ReaderWriter for this MIME type. -func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) { - r.protection.RLock() - defer r.protection.RUnlock() - er, ok := r.accessors[mime] - if !ok { - // retry with reverse lookup - // more expensive but we are in an exceptional situation anyway - for k, v := range r.accessors { - if strings.Contains(mime, k) { - return v, true - } - } - } - return er, ok -} - -// entityXMLAccess is a EntityReaderWriter for XML encoding -type entityXMLAccess struct { - // This is used for setting the Content-Type header when writing - ContentType string -} - -// Read unmarshalls the value from XML -func (e entityXMLAccess) Read(req *Request, v interface{}) error { - return xml.NewDecoder(req.Request.Body).Decode(v) -} - -// Write marshalls the value to JSON and set the Content-Type Header. -func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error { - return writeXML(resp, status, e.ContentType, v) -} - -// writeXML marshalls the value to JSON and set the Content-Type Header. 
-func writeXML(resp *Response, status int, contentType string, v interface{}) error { - if v == nil { - resp.WriteHeader(status) - // do not write a nil representation - return nil - } - if resp.prettyPrint { - // pretty output must be created and written explicitly - output, err := xml.MarshalIndent(v, " ", " ") - if err != nil { - return err - } - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - _, err = resp.Write([]byte(xml.Header)) - if err != nil { - return err - } - _, err = resp.Write(output) - return err - } - // not-so-pretty - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - return xml.NewEncoder(resp).Encode(v) -} - -// entityJSONAccess is a EntityReaderWriter for JSON encoding -type entityJSONAccess struct { - // This is used for setting the Content-Type header when writing - ContentType string -} - -// Read unmarshalls the value from JSON -func (e entityJSONAccess) Read(req *Request, v interface{}) error { - decoder := json.NewDecoder(req.Request.Body) - decoder.UseNumber() - return decoder.Decode(v) -} - -// Write marshalls the value to JSON and set the Content-Type Header. -func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error { - return writeJSON(resp, status, e.ContentType, v) -} - -// write marshalls the value to JSON and set the Content-Type Header. -func writeJSON(resp *Response, status int, contentType string, v interface{}) error { - if v == nil { - resp.WriteHeader(status) - // do not write a nil representation - return nil - } - if resp.prettyPrint { - // pretty output must be created and written explicitly - output, err := json.MarshalIndent(v, " ", " ") - if err != nil { - return err - } - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - _, err = resp.Write(output) - return err - } - // not-so-pretty - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - return json.NewEncoder(resp).Encode(v) -} diff --git a/vendor/github.com/emicklei/go-restful/filter.go b/vendor/github.com/emicklei/go-restful/filter.go deleted file mode 100644 index 4b86656e1..000000000 --- a/vendor/github.com/emicklei/go-restful/filter.go +++ /dev/null @@ -1,26 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction. -type FilterChain struct { - Filters []FilterFunction // ordered list of FilterFunction - Index int // index into filters that is currently in progress - Target RouteFunction // function to call after passing all filters -} - -// ProcessFilter passes the request,response pair through the next of Filters. -// Each filter can decide to proceed to the next Filter or handle the Response itself. 
-func (f *FilterChain) ProcessFilter(request *Request, response *Response) { - if f.Index < len(f.Filters) { - f.Index++ - f.Filters[f.Index-1](request, response, f) - } else { - f.Target(request, response) - } -} - -// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction -type FilterFunction func(*Request, *Response, *FilterChain) diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/jsr311.go deleted file mode 100644 index 511444ac6..000000000 --- a/vendor/github.com/emicklei/go-restful/jsr311.go +++ /dev/null @@ -1,248 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "errors" - "fmt" - "net/http" - "sort" -) - -// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions) -// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html. -// RouterJSR311 implements the Router interface. -// Concept of locators is not implemented. -type RouterJSR311 struct{} - -// SelectRoute is part of the Router interface and returns the best match -// for the WebService and its Route for the given Request. -func (r RouterJSR311) SelectRoute( - webServices []*WebService, - httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) { - - // Identify the root resource class (WebService) - dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices) - if err != nil { - return nil, nil, NewError(http.StatusNotFound, "") - } - // Obtain the set of candidate methods (Routes) - routes := r.selectRoutes(dispatcher, finalMatch) - if len(routes) == 0 { - return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found") - } - - // Identify the method (Route) that will handle the request - route, ok := r.detectRoute(routes, httpRequest) - return dispatcher, route, ok -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 -func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { - // http method - methodOk := []Route{} - for _, each := range routes { - if httpRequest.Method == each.Method { - methodOk = append(methodOk, each) - } - } - if len(methodOk) == 0 { - if trace { - traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(routes), httpRequest.Method) - } - return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed") - } - inputMediaOk := methodOk - - // content-type - contentType := httpRequest.Header.Get(HEADER_ContentType) - inputMediaOk = []Route{} - for _, each := range methodOk { - if each.matchesContentType(contentType) { - inputMediaOk = append(inputMediaOk, each) - } - } - if len(inputMediaOk) == 0 { - if trace { - traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(methodOk), contentType) - } - return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") - } - - // accept - outputMediaOk := []Route{} - accept := httpRequest.Header.Get(HEADER_Accept) - if len(accept) == 0 { - accept = "*/*" - } - for _, each := range inputMediaOk { - if each.matchesAccept(accept) { - outputMediaOk = append(outputMediaOk, each) - } - } - if len(outputMediaOk) == 0 { - if trace { - traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", 
len(inputMediaOk), accept) - } - return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable") - } - // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil - return &outputMediaOk[0], nil -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 -// n/m > n/* > */* -func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route { - // TODO - return &routes[0] -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2) -func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route { - filtered := &sortableRouteCandidates{} - for _, each := range dispatcher.Routes() { - pathExpr := each.pathExpr - matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder) - if matches != nil { - lastMatch := matches[len(matches)-1] - if len(lastMatch) == 0 || lastMatch == "/" { // do not include if value is neither empty nor ‘/’. - filtered.candidates = append(filtered.candidates, - routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount}) - } - } - } - if len(filtered.candidates) == 0 { - if trace { - traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder) - } - return []Route{} - } - sort.Sort(sort.Reverse(filtered)) - - // select other routes from candidates whoes expression matches rmatch - matchingRoutes := []Route{filtered.candidates[0].route} - for c := 1; c < len(filtered.candidates); c++ { - each := filtered.candidates[c] - if each.route.pathExpr.Matcher.MatchString(pathRemainder) { - matchingRoutes = append(matchingRoutes, each.route) - } - } - return matchingRoutes -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1) -func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) { - filtered := &sortableDispatcherCandidates{} - for _, each := range dispatchers { - matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath) - if matches != nil { - filtered.candidates = append(filtered.candidates, - dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount}) - } - } - if len(filtered.candidates) == 0 { - if trace { - traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath) - } - return nil, "", errors.New("not found") - } - sort.Sort(sort.Reverse(filtered)) - return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil -} - -// Types and functions to support the sorting of Routes - -type routeCandidate struct { - route Route - matchesCount int // the number of capturing groups - literalCount int // the number of literal characters (means those not resulting from template variable substitution) - nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. 
not ‘([^ /]+?)’) -} - -func (r routeCandidate) expressionToMatch() string { - return r.route.pathExpr.Source -} - -func (r routeCandidate) String() string { - return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount) -} - -type sortableRouteCandidates struct { - candidates []routeCandidate -} - -func (rcs *sortableRouteCandidates) Len() int { - return len(rcs.candidates) -} -func (rcs *sortableRouteCandidates) Swap(i, j int) { - rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i] -} -func (rcs *sortableRouteCandidates) Less(i, j int) bool { - ci := rcs.candidates[i] - cj := rcs.candidates[j] - // primary key - if ci.literalCount < cj.literalCount { - return true - } - if ci.literalCount > cj.literalCount { - return false - } - // secundary key - if ci.matchesCount < cj.matchesCount { - return true - } - if ci.matchesCount > cj.matchesCount { - return false - } - // tertiary key - if ci.nonDefaultCount < cj.nonDefaultCount { - return true - } - if ci.nonDefaultCount > cj.nonDefaultCount { - return false - } - // quaternary key ("source" is interpreted as Path) - return ci.route.Path < cj.route.Path -} - -// Types and functions to support the sorting of Dispatchers - -type dispatcherCandidate struct { - dispatcher *WebService - finalMatch string - matchesCount int // the number of capturing groups - literalCount int // the number of literal characters (means those not resulting from template variable substitution) - nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’) -} -type sortableDispatcherCandidates struct { - candidates []dispatcherCandidate -} - -func (dc *sortableDispatcherCandidates) Len() int { - return len(dc.candidates) -} -func (dc *sortableDispatcherCandidates) Swap(i, j int) { - dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i] -} -func (dc *sortableDispatcherCandidates) Less(i, j int) bool { - ci := dc.candidates[i] - cj := dc.candidates[j] - // primary key - if ci.matchesCount < cj.matchesCount { - return true - } - if ci.matchesCount > cj.matchesCount { - return false - } - // secundary key - if ci.literalCount < cj.literalCount { - return true - } - if ci.literalCount > cj.literalCount { - return false - } - // tertiary key - return ci.nonDefaultCount < cj.nonDefaultCount -} diff --git a/vendor/github.com/emicklei/go-restful/log/log.go b/vendor/github.com/emicklei/go-restful/log/log.go deleted file mode 100644 index f70d89524..000000000 --- a/vendor/github.com/emicklei/go-restful/log/log.go +++ /dev/null @@ -1,31 +0,0 @@ -package log - -import ( - stdlog "log" - "os" -) - -// Logger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger -type StdLogger interface { - Print(v ...interface{}) - Printf(format string, v ...interface{}) -} - -var Logger StdLogger - -func init() { - // default Logger - SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile)) -} - -func SetLogger(customLogger StdLogger) { - Logger = customLogger -} - -func Print(v ...interface{}) { - Logger.Print(v...) -} - -func Printf(format string, v ...interface{}) { - Logger.Printf(format, v...) -} diff --git a/vendor/github.com/emicklei/go-restful/logger.go b/vendor/github.com/emicklei/go-restful/logger.go deleted file mode 100644 index 3f1c4db86..000000000 --- a/vendor/github.com/emicklei/go-restful/logger.go +++ /dev/null @@ -1,32 +0,0 @@ -package restful - -// Copyright 2014 Ernest Micklei. All rights reserved. 
-// Use of this source code is governed by a license -// that can be found in the LICENSE file. -import ( - "github.com/emicklei/go-restful/log" -) - -var trace bool = false -var traceLogger log.StdLogger - -func init() { - traceLogger = log.Logger // use the package logger by default -} - -// TraceLogger enables detailed logging of Http request matching and filter invocation. Default no logger is set. -// You may call EnableTracing() directly to enable trace logging to the package-wide logger. -func TraceLogger(logger log.StdLogger) { - traceLogger = logger - EnableTracing(logger != nil) -} - -// expose the setter for the global logger on the top-level package -func SetLogger(customLogger log.StdLogger) { - log.SetLogger(customLogger) -} - -// EnableTracing can be used to Trace logging on and off. -func EnableTracing(enabled bool) { - trace = enabled -} diff --git a/vendor/github.com/emicklei/go-restful/mime.go b/vendor/github.com/emicklei/go-restful/mime.go deleted file mode 100644 index d7ea2b615..000000000 --- a/vendor/github.com/emicklei/go-restful/mime.go +++ /dev/null @@ -1,45 +0,0 @@ -package restful - -import ( - "strconv" - "strings" -) - -type mime struct { - media string - quality float64 -} - -// insertMime adds a mime to a list and keeps it sorted by quality. -func insertMime(l []mime, e mime) []mime { - for i, each := range l { - // if current mime has lower quality then insert before - if e.quality > each.quality { - left := append([]mime{}, l[0:i]...) - return append(append(left, e), l[i:]...) - } - } - return append(l, e) -} - -// sortedMimes returns a list of mime sorted (desc) by its specified quality. -func sortedMimes(accept string) (sorted []mime) { - for _, each := range strings.Split(accept, ",") { - typeAndQuality := strings.Split(strings.Trim(each, " "), ";") - if len(typeAndQuality) == 1 { - sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0}) - } else { - // take factor - parts := strings.Split(typeAndQuality[1], "=") - if len(parts) == 2 { - f, err := strconv.ParseFloat(parts[1], 64) - if err != nil { - traceLogger.Printf("unable to parse quality in %s, %v", each, err) - } else { - sorted = insertMime(sorted, mime{typeAndQuality[0], f}) - } - } - } - } - return -} diff --git a/vendor/github.com/emicklei/go-restful/options_filter.go b/vendor/github.com/emicklei/go-restful/options_filter.go deleted file mode 100644 index 4514eadcf..000000000 --- a/vendor/github.com/emicklei/go-restful/options_filter.go +++ /dev/null @@ -1,26 +0,0 @@ -package restful - -import "strings" - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method -// and provides the response with a set of allowed methods for the request URL Path. -// As for any filter, you can also install it for a particular WebService within a Container. -// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). -func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) { - if "OPTIONS" != req.Request.Method { - chain.ProcessFilter(req, resp) - return - } - resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ",")) -} - -// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method -// and provides the response with a set of allowed methods for the request URL Path. 
-// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). -func OPTIONSFilter() FilterFunction { - return DefaultContainer.OPTIONSFilter -} diff --git a/vendor/github.com/emicklei/go-restful/parameter.go b/vendor/github.com/emicklei/go-restful/parameter.go deleted file mode 100644 index e11c8162a..000000000 --- a/vendor/github.com/emicklei/go-restful/parameter.go +++ /dev/null @@ -1,114 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -const ( - // PathParameterKind = indicator of Request parameter type "path" - PathParameterKind = iota - - // QueryParameterKind = indicator of Request parameter type "query" - QueryParameterKind - - // BodyParameterKind = indicator of Request parameter type "body" - BodyParameterKind - - // HeaderParameterKind = indicator of Request parameter type "header" - HeaderParameterKind - - // FormParameterKind = indicator of Request parameter type "form" - FormParameterKind -) - -// Parameter is for documententing the parameter used in a Http Request -// ParameterData kinds are Path,Query and Body -type Parameter struct { - data *ParameterData -} - -// ParameterData represents the state of a Parameter. -// It is made public to make it accessible to e.g. the Swagger package. -type ParameterData struct { - Name, Description, DataType, DataFormat string - Kind int - Required bool - AllowableValues map[string]string - AllowMultiple bool - DefaultValue string -} - -// Data returns the state of the Parameter -func (p *Parameter) Data() ParameterData { - return *p.data -} - -// Kind returns the parameter type indicator (see const for valid values) -func (p *Parameter) Kind() int { - return p.data.Kind -} - -func (p *Parameter) bePath() *Parameter { - p.data.Kind = PathParameterKind - return p -} -func (p *Parameter) beQuery() *Parameter { - p.data.Kind = QueryParameterKind - return p -} -func (p *Parameter) beBody() *Parameter { - p.data.Kind = BodyParameterKind - return p -} - -func (p *Parameter) beHeader() *Parameter { - p.data.Kind = HeaderParameterKind - return p -} - -func (p *Parameter) beForm() *Parameter { - p.data.Kind = FormParameterKind - return p -} - -// Required sets the required field and returns the receiver -func (p *Parameter) Required(required bool) *Parameter { - p.data.Required = required - return p -} - -// AllowMultiple sets the allowMultiple field and returns the receiver -func (p *Parameter) AllowMultiple(multiple bool) *Parameter { - p.data.AllowMultiple = multiple - return p -} - -// AllowableValues sets the allowableValues field and returns the receiver -func (p *Parameter) AllowableValues(values map[string]string) *Parameter { - p.data.AllowableValues = values - return p -} - -// DataType sets the dataType field and returns the receiver -func (p *Parameter) DataType(typeName string) *Parameter { - p.data.DataType = typeName - return p -} - -// DataFormat sets the dataFormat field for Swagger UI -func (p *Parameter) DataFormat(formatName string) *Parameter { - p.data.DataFormat = formatName - return p -} - -// DefaultValue sets the default value field and returns the receiver -func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter { - p.data.DefaultValue = stringRepresentation - return p -} - -// Description sets the description value field and returns the receiver -func (p *Parameter) Description(doc string) *Parameter { - p.data.Description = doc - return p -} diff 
--git a/vendor/github.com/emicklei/go-restful/path_expression.go b/vendor/github.com/emicklei/go-restful/path_expression.go deleted file mode 100644 index a921e6f22..000000000 --- a/vendor/github.com/emicklei/go-restful/path_expression.go +++ /dev/null @@ -1,69 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "fmt" - "regexp" - "strings" -) - -// PathExpression holds a compiled path expression (RegExp) needed to match against -// Http request paths and to extract path parameter values. -type pathExpression struct { - LiteralCount int // the number of literal characters (means those not resulting from template variable substitution) - VarCount int // the number of named parameters (enclosed by {}) in the path - Matcher *regexp.Regexp - Source string // Path as defined by the RouteBuilder - tokens []string -} - -// NewPathExpression creates a PathExpression from the input URL path. -// Returns an error if the path is invalid. -func newPathExpression(path string) (*pathExpression, error) { - expression, literalCount, varCount, tokens := templateToRegularExpression(path) - compiled, err := regexp.Compile(expression) - if err != nil { - return nil, err - } - return &pathExpression{literalCount, varCount, compiled, expression, tokens}, nil -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3 -func templateToRegularExpression(template string) (expression string, literalCount int, varCount int, tokens []string) { - var buffer bytes.Buffer - buffer.WriteString("^") - //tokens = strings.Split(template, "/") - tokens = tokenizePath(template) - for _, each := range tokens { - if each == "" { - continue - } - buffer.WriteString("/") - if strings.HasPrefix(each, "{") { - // check for regular expression in variable - colon := strings.Index(each, ":") - if colon != -1 { - // extract expression - paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1]) - if paramExpr == "*" { // special case - buffer.WriteString("(.*)") - } else { - buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache - } - } else { - // plain var - buffer.WriteString("([^/]+?)") - } - varCount += 1 - } else { - literalCount += len(each) - encoded := each // TODO URI encode - buffer.WriteString(regexp.QuoteMeta(encoded)) - } - } - return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varCount, tokens -} diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/request.go deleted file mode 100644 index 3e4234697..000000000 --- a/vendor/github.com/emicklei/go-restful/request.go +++ /dev/null @@ -1,131 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "compress/zlib" - "io/ioutil" - "net/http" -) - -var defaultRequestContentType string - -var doCacheReadEntityBytes = true - -// Request is a wrapper for a http Request that provides convenience methods -type Request struct { - Request *http.Request - bodyContent *[]byte // to cache the request body for multiple reads of ReadEntity - pathParameters map[string]string - attributes map[string]interface{} // for storing request-scoped values - selectedRoutePath string // root path + route path that matched the request, e.g. 
/meetings/{id}/attendees -} - -func NewRequest(httpRequest *http.Request) *Request { - return &Request{ - Request: httpRequest, - pathParameters: map[string]string{}, - attributes: map[string]interface{}{}, - } // empty parameters, attributes -} - -// If ContentType is missing or */* is given then fall back to this type, otherwise -// a "Unable to unmarshal content of type:" response is returned. -// Valid values are restful.MIME_JSON and restful.MIME_XML -// Example: -// restful.DefaultRequestContentType(restful.MIME_JSON) -func DefaultRequestContentType(mime string) { - defaultRequestContentType = mime -} - -// SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable. -// Default is true (due to backwardcompatibility). For better performance, you should set it to false if you don't need it. -func SetCacheReadEntity(doCache bool) { - doCacheReadEntityBytes = doCache -} - -// PathParameter accesses the Path parameter value by its name -func (r *Request) PathParameter(name string) string { - return r.pathParameters[name] -} - -// PathParameters accesses the Path parameter values -func (r *Request) PathParameters() map[string]string { - return r.pathParameters -} - -// QueryParameter returns the (first) Query parameter value by its name -func (r *Request) QueryParameter(name string) string { - return r.Request.FormValue(name) -} - -// BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error. -func (r *Request) BodyParameter(name string) (string, error) { - err := r.Request.ParseForm() - if err != nil { - return "", err - } - return r.Request.PostFormValue(name), nil -} - -// HeaderParameter returns the HTTP Header value of a Header name or empty if missing -func (r *Request) HeaderParameter(name string) string { - return r.Request.Header.Get(name) -} - -// ReadEntity checks the Accept header and reads the content into the entityPointer. -func (r *Request) ReadEntity(entityPointer interface{}) (err error) { - contentType := r.Request.Header.Get(HEADER_ContentType) - contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding) - - // OLD feature, cache the body for reads - if doCacheReadEntityBytes { - if r.bodyContent == nil { - data, err := ioutil.ReadAll(r.Request.Body) - if err != nil { - return err - } - r.bodyContent = &data - } - r.Request.Body = ioutil.NopCloser(bytes.NewReader(*r.bodyContent)) - } - - // check if the request body needs decompression - if ENCODING_GZIP == contentEncoding { - gzipReader := currentCompressorProvider.AcquireGzipReader() - defer currentCompressorProvider.ReleaseGzipReader(gzipReader) - gzipReader.Reset(r.Request.Body) - r.Request.Body = gzipReader - } else if ENCODING_DEFLATE == contentEncoding { - zlibReader, err := zlib.NewReader(r.Request.Body) - if err != nil { - return err - } - r.Request.Body = zlibReader - } - - // lookup the EntityReader - entityReader, ok := entityAccessRegistry.accessorAt(contentType) - if !ok { - return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType) - } - return entityReader.Read(r, entityPointer) -} - -// SetAttribute adds or replaces the attribute with the given value. -func (r *Request) SetAttribute(name string, value interface{}) { - r.attributes[name] = value -} - -// Attribute returns the value associated to the given name. Returns nil if absent. 
-func (r Request) Attribute(name string) interface{} { - return r.attributes[name] -} - -// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees -func (r Request) SelectedRoutePath() string { - return r.selectedRoutePath -} diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/response.go deleted file mode 100644 index 971cd0b42..000000000 --- a/vendor/github.com/emicklei/go-restful/response.go +++ /dev/null @@ -1,235 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "errors" - "net/http" -) - -// DEPRECATED, use DefaultResponseContentType(mime) -var DefaultResponseMimeType string - -//PrettyPrintResponses controls the indentation feature of XML and JSON serialization -var PrettyPrintResponses = true - -// Response is a wrapper on the actual http ResponseWriter -// It provides several convenience methods to prepare and write response content. -type Response struct { - http.ResponseWriter - requestAccept string // mime-type what the Http Request says it wants to receive - routeProduces []string // mime-types what the Route says it can produce - statusCode int // HTTP status code that has been written explicity (if zero then net/http has written 200) - contentLength int // number of bytes written for the response body - prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses. - err error // err property is kept when WriteError is called -} - -// Creates a new response based on a http ResponseWriter. -func NewResponse(httpWriter http.ResponseWriter) *Response { - return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types -} - -// If Accept header matching fails, fall back to this type. -// Valid values are restful.MIME_JSON and restful.MIME_XML -// Example: -// restful.DefaultResponseContentType(restful.MIME_JSON) -func DefaultResponseContentType(mime string) { - DefaultResponseMimeType = mime -} - -// InternalServerError writes the StatusInternalServerError header. -// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason) -func (r Response) InternalServerError() Response { - r.WriteHeader(http.StatusInternalServerError) - return r -} - -// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output. -func (r *Response) PrettyPrint(bePretty bool) { - r.prettyPrint = bePretty -} - -// AddHeader is a shortcut for .Header().Add(header,value) -func (r Response) AddHeader(header string, value string) Response { - r.Header().Add(header, value) - return r -} - -// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing. -func (r *Response) SetRequestAccepts(mime string) { - r.requestAccept = mime -} - -// EntityWriter returns the registered EntityWriter that the entity (requested resource) -// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say. -// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable. 
-func (r *Response) EntityWriter() (EntityReaderWriter, bool) { - sorted := sortedMimes(r.requestAccept) - for _, eachAccept := range sorted { - for _, eachProduce := range r.routeProduces { - if eachProduce == eachAccept.media { - if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok { - return w, true - } - } - } - if eachAccept.media == "*/*" { - for _, each := range r.routeProduces { - if w, ok := entityAccessRegistry.accessorAt(each); ok { - return w, true - } - } - } - } - // if requestAccept is empty - writer, ok := entityAccessRegistry.accessorAt(r.requestAccept) - if !ok { - // if not registered then fallback to the defaults (if set) - if DefaultResponseMimeType == MIME_JSON { - return entityAccessRegistry.accessorAt(MIME_JSON) - } - if DefaultResponseMimeType == MIME_XML { - return entityAccessRegistry.accessorAt(MIME_XML) - } - // Fallback to whatever the route says it can produce. - // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - for _, each := range r.routeProduces { - if w, ok := entityAccessRegistry.accessorAt(each); ok { - return w, true - } - } - if trace { - traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept) - } - } - return writer, ok -} - -// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200) -func (r *Response) WriteEntity(value interface{}) error { - return r.WriteHeaderAndEntity(http.StatusOK, value) -} - -// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters. -// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces. -// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header. -// If the value is nil then no response is send except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead. -// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written. -// Current implementation ignores any q-parameters in the Accept Header. -// Returns an error if the value could not be written on the response. -func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error { - writer, ok := r.EntityWriter() - if !ok { - r.WriteHeader(http.StatusNotAcceptable) - return nil - } - return writer.Write(r, status, value) -} - -// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value) -// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteAsXml(value interface{}) error { - return writeXML(r, http.StatusOK, MIME_XML, value) -} - -// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value) -// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteHeaderAndXml(status int, value interface{}) error { - return writeXML(r, status, MIME_XML, value) -} - -// WriteAsJson is a convenience method for writing a value in json. -// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. 
-func (r *Response) WriteAsJson(value interface{}) error { - return writeJSON(r, http.StatusOK, MIME_JSON, value) -} - -// WriteJson is a convenience method for writing a value in Json with a given Content-Type. -// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteJson(value interface{}, contentType string) error { - return writeJSON(r, http.StatusOK, contentType, value) -} - -// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type. -// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error { - return writeJSON(r, status, contentType, value) -} - -// WriteError write the http status and the error string on the response. -func (r *Response) WriteError(httpStatus int, err error) error { - r.err = err - return r.WriteErrorString(httpStatus, err.Error()) -} - -// WriteServiceError is a convenience method for a responding with a status and a ServiceError -func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error { - r.err = err - return r.WriteHeaderAndEntity(httpStatus, err) -} - -// WriteErrorString is a convenience method for an error status with the actual error -func (r *Response) WriteErrorString(httpStatus int, errorReason string) error { - if r.err == nil { - // if not called from WriteError - r.err = errors.New(errorReason) - } - r.WriteHeader(httpStatus) - if _, err := r.Write([]byte(errorReason)); err != nil { - return err - } - return nil -} - -// Flush implements http.Flusher interface, which sends any buffered data to the client. -func (r *Response) Flush() { - if f, ok := r.ResponseWriter.(http.Flusher); ok { - f.Flush() - } else if trace { - traceLogger.Printf("ResponseWriter %v doesn't support Flush", r) - } -} - -// WriteHeader is overridden to remember the Status Code that has been written. -// Changes to the Header of the response have no effect after this. -func (r *Response) WriteHeader(httpStatus int) { - r.statusCode = httpStatus - r.ResponseWriter.WriteHeader(httpStatus) -} - -// StatusCode returns the code that has been written using WriteHeader. -func (r Response) StatusCode() int { - if 0 == r.statusCode { - // no status code has been written yet; assume OK - return http.StatusOK - } - return r.statusCode -} - -// Write writes the data to the connection as part of an HTTP reply. -// Write is part of http.ResponseWriter interface. -func (r *Response) Write(bytes []byte) (int, error) { - written, err := r.ResponseWriter.Write(bytes) - r.contentLength += written - return written, err -} - -// ContentLength returns the number of bytes written for the response content. -// Note that this value is only correct if all data is written through the Response using its Write* methods. -// Data written directly using the underlying http.ResponseWriter is not accounted for. 
-func (r Response) ContentLength() int { - return r.contentLength -} - -// CloseNotify is part of http.CloseNotifier interface -func (r Response) CloseNotify() <-chan bool { - return r.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -// Error returns the err created by WriteError -func (r Response) Error() error { - return r.err -} diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/route.go deleted file mode 100644 index f54e8622e..000000000 --- a/vendor/github.com/emicklei/go-restful/route.go +++ /dev/null @@ -1,183 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "net/http" - "strings" -) - -// RouteFunction declares the signature of a function that can be bound to a Route. -type RouteFunction func(*Request, *Response) - -// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction. -type Route struct { - Method string - Produces []string - Consumes []string - Path string // webservice root path + described path - Function RouteFunction - Filters []FilterFunction - - // cached values for dispatching - relativePath string - pathParts []string - pathExpr *pathExpression // cached compilation of relativePath as RegExp - - // documentation - Doc string - Notes string - Operation string - ParameterDocs []*Parameter - ResponseErrors map[int]ResponseError - ReadSample, WriteSample interface{} // structs that model an example request or response payload -} - -// Initialize for Route -func (r *Route) postBuild() { - r.pathParts = tokenizePath(r.Path) -} - -// Create Request and Response from their http versions -func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) { - params := r.extractParameters(httpRequest.URL.Path) - wrappedRequest := NewRequest(httpRequest) - wrappedRequest.pathParameters = params - wrappedRequest.selectedRoutePath = r.Path - wrappedResponse := NewResponse(httpWriter) - wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept) - wrappedResponse.routeProduces = r.Produces - return wrappedRequest, wrappedResponse -} - -// dispatchWithFilters call the function after passing through its own filters -func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) { - if len(r.Filters) > 0 { - chain := FilterChain{Filters: r.Filters, Target: r.Function} - chain.ProcessFilter(wrappedRequest, wrappedResponse) - } else { - // unfiltered - r.Function(wrappedRequest, wrappedResponse) - } -} - -// Return whether the mimeType matches to what this Route can produce. -func (r Route) matchesAccept(mimeTypesWithQuality string) bool { - parts := strings.Split(mimeTypesWithQuality, ",") - for _, each := range parts { - var withoutQuality string - if strings.Contains(each, ";") { - withoutQuality = strings.Split(each, ";")[0] - } else { - withoutQuality = each - } - // trim before compare - withoutQuality = strings.Trim(withoutQuality, " ") - if withoutQuality == "*/*" { - return true - } - for _, producibleType := range r.Produces { - if producibleType == "*/*" || producibleType == withoutQuality { - return true - } - } - } - return false -} - -// Return whether this Route can consume content with a type specified by mimeTypes (can be empty). 
-func (r Route) matchesContentType(mimeTypes string) bool { - - if len(r.Consumes) == 0 { - // did not specify what it can consume ; any media type (“*/*”) is assumed - return true - } - - if len(mimeTypes) == 0 { - // idempotent methods with (most-likely or garanteed) empty content match missing Content-Type - m := r.Method - if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" { - return true - } - // proceed with default - mimeTypes = MIME_OCTET - } - - parts := strings.Split(mimeTypes, ",") - for _, each := range parts { - var contentType string - if strings.Contains(each, ";") { - contentType = strings.Split(each, ";")[0] - } else { - contentType = each - } - // trim before compare - contentType = strings.Trim(contentType, " ") - for _, consumeableType := range r.Consumes { - if consumeableType == "*/*" || consumeableType == contentType { - return true - } - } - } - return false -} - -// Extract the parameters from the request url path -func (r Route) extractParameters(urlPath string) map[string]string { - urlParts := tokenizePath(urlPath) - pathParameters := map[string]string{} - for i, key := range r.pathParts { - var value string - if i >= len(urlParts) { - value = "" - } else { - value = urlParts[i] - } - if strings.HasPrefix(key, "{") { // path-parameter - if colon := strings.Index(key, ":"); colon != -1 { - // extract by regex - regPart := key[colon+1 : len(key)-1] - keyPart := key[1:colon] - if regPart == "*" { - pathParameters[keyPart] = untokenizePath(i, urlParts) - break - } else { - pathParameters[keyPart] = value - } - } else { - // without enclosing {} - pathParameters[key[1:len(key)-1]] = value - } - } - } - return pathParameters -} - -// Untokenize back into an URL path using the slash separator -func untokenizePath(offset int, parts []string) string { - var buffer bytes.Buffer - for p := offset; p < len(parts); p++ { - buffer.WriteString(parts[p]) - // do not end - if p < len(parts)-1 { - buffer.WriteString("/") - } - } - return buffer.String() -} - -// Tokenize an URL path using the slash separator ; the result does not have empty tokens -func tokenizePath(path string) []string { - if "/" == path { - return []string{} - } - return strings.Split(strings.Trim(path, "/"), "/") -} - -// for debugging -func (r Route) String() string { - return r.Method + " " + r.Path -} diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/route_builder.go deleted file mode 100644 index 8bc1ab684..000000000 --- a/vendor/github.com/emicklei/go-restful/route_builder.go +++ /dev/null @@ -1,240 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "os" - "reflect" - "runtime" - "strings" - - "github.com/emicklei/go-restful/log" -) - -// RouteBuilder is a helper to construct Routes. -type RouteBuilder struct { - rootPath string - currentPath string - produces []string - consumes []string - httpMethod string // required - function RouteFunction // required - filters []FilterFunction - // documentation - doc string - notes string - operation string - readSample, writeSample interface{} - parameters []*Parameter - errorMap map[int]ResponseError -} - -// Do evaluates each argument with the RouteBuilder itself. -// This allows you to follow DRY principles without breaking the fluent programming style. 
-// Example: -// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) -// -// func Returns500(b *RouteBuilder) { -// b.Returns(500, "Internal Server Error", restful.ServiceError{}) -// } -func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder { - for _, each := range oneArgBlocks { - each(b) - } - return b -} - -// To bind the route to a function. -// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required. -func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder { - b.function = function - return b -} - -// Method specifies what HTTP method to match. Required. -func (b *RouteBuilder) Method(method string) *RouteBuilder { - b.httpMethod = method - return b -} - -// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header. -func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder { - b.produces = mimeTypes - return b -} - -// Consumes specifies what MIME types can be consumes ; the Accept Http header must matched any of these -func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder { - b.consumes = mimeTypes - return b -} - -// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/". -func (b *RouteBuilder) Path(subPath string) *RouteBuilder { - b.currentPath = subPath - return b -} - -// Doc tells what this route is all about. Optional. -func (b *RouteBuilder) Doc(documentation string) *RouteBuilder { - b.doc = documentation - return b -} - -// A verbose explanation of the operation behavior. Optional. -func (b *RouteBuilder) Notes(notes string) *RouteBuilder { - b.notes = notes - return b -} - -// Reads tells what resource type will be read from the request payload. Optional. -// A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type. -func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder { - b.readSample = sample - typeAsName := reflect.TypeOf(sample).String() - bodyParameter := &Parameter{&ParameterData{Name: "body"}} - bodyParameter.beBody() - bodyParameter.Required(true) - bodyParameter.DataType(typeAsName) - b.Param(bodyParameter) - return b -} - -// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not. -// Use this to modify or extend information for the Parameter (through its Data()). -func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) { - for _, each := range b.parameters { - if each.Data().Name == name { - return each - } - } - return p -} - -// Writes tells what resource type will be written as the response payload. Optional. -func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder { - b.writeSample = sample - return b -} - -// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates). -func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder { - if b.parameters == nil { - b.parameters = []*Parameter{} - } - b.parameters = append(b.parameters, parameter) - return b -} - -// Operation allows you to document what the actual method/function call is of the Route. -// Unless called, the operation name is derived from the RouteFunction set using To(..). -func (b *RouteBuilder) Operation(name string) *RouteBuilder { - b.operation = name - return b -} - -// ReturnsError is deprecated, use Returns instead. 
-func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder { - log.Print("ReturnsError is deprecated, use Returns instead.") - return b.Returns(code, message, model) -} - -// Returns allows you to document what responses (errors or regular) can be expected. -// The model parameter is optional ; either pass a struct instance or use nil if not applicable. -func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder { - err := ResponseError{ - Code: code, - Message: message, - Model: model, - } - // lazy init because there is no NewRouteBuilder (yet) - if b.errorMap == nil { - b.errorMap = map[int]ResponseError{} - } - b.errorMap[code] = err - return b -} - -type ResponseError struct { - Code int - Message string - Model interface{} -} - -func (b *RouteBuilder) servicePath(path string) *RouteBuilder { - b.rootPath = path - return b -} - -// Filter appends a FilterFunction to the end of filters for this Route to build. -func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder { - b.filters = append(b.filters, filter) - return b -} - -// If no specific Route path then set to rootPath -// If no specific Produces then set to rootProduces -// If no specific Consumes then set to rootConsumes -func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) { - if len(b.produces) == 0 { - b.produces = rootProduces - } - if len(b.consumes) == 0 { - b.consumes = rootConsumes - } -} - -// Build creates a new Route using the specification details collected by the RouteBuilder -func (b *RouteBuilder) Build() Route { - pathExpr, err := newPathExpression(b.currentPath) - if err != nil { - log.Printf("[restful] Invalid path:%s because:%v", b.currentPath, err) - os.Exit(1) - } - if b.function == nil { - log.Printf("[restful] No function specified for route:" + b.currentPath) - os.Exit(1) - } - operationName := b.operation - if len(operationName) == 0 && b.function != nil { - // extract from definition - operationName = nameOfFunction(b.function) - } - route := Route{ - Method: b.httpMethod, - Path: concatPath(b.rootPath, b.currentPath), - Produces: b.produces, - Consumes: b.consumes, - Function: b.function, - Filters: b.filters, - relativePath: b.currentPath, - pathExpr: pathExpr, - Doc: b.doc, - Notes: b.notes, - Operation: operationName, - ParameterDocs: b.parameters, - ResponseErrors: b.errorMap, - ReadSample: b.readSample, - WriteSample: b.writeSample} - route.postBuild() - return route -} - -func concatPath(path1, path2 string) string { - return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/") -} - -// nameOfFunction returns the short name of the function f for documentation. -// It uses a runtime feature for debugging ; its value may change for later Go versions. -func nameOfFunction(f interface{}) string { - fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer()) - tokenized := strings.Split(fun.Name(), ".") - last := tokenized[len(tokenized)-1] - last = strings.TrimSuffix(last, ")·fm") // < Go 1.5 - last = strings.TrimSuffix(last, ")-fm") // Go 1.5 - last = strings.TrimSuffix(last, "·fm") // < Go 1.5 - last = strings.TrimSuffix(last, "-fm") // Go 1.5 - return last -} diff --git a/vendor/github.com/emicklei/go-restful/router.go b/vendor/github.com/emicklei/go-restful/router.go deleted file mode 100644 index 9b32fb675..000000000 --- a/vendor/github.com/emicklei/go-restful/router.go +++ /dev/null @@ -1,18 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. 
-// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import "net/http" - -// A RouteSelector finds the best matching Route given the input HTTP Request -type RouteSelector interface { - - // SelectRoute finds a Route given the input HTTP Request and a list of WebServices. - // It returns a selected Route and its containing WebService or an error indicating - // a problem. - SelectRoute( - webServices []*WebService, - httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) -} diff --git a/vendor/github.com/emicklei/go-restful/service_error.go b/vendor/github.com/emicklei/go-restful/service_error.go deleted file mode 100644 index 62d1108bb..000000000 --- a/vendor/github.com/emicklei/go-restful/service_error.go +++ /dev/null @@ -1,23 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import "fmt" - -// ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request. -type ServiceError struct { - Code int - Message string -} - -// NewError returns a ServiceError using the code and reason -func NewError(code int, message string) ServiceError { - return ServiceError{Code: code, Message: message} -} - -// Error returns a text representation of the service error -func (s ServiceError) Error() string { - return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message) -} diff --git a/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md b/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md deleted file mode 100644 index 736f6f37c..000000000 --- a/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md +++ /dev/null @@ -1,43 +0,0 @@ -Change history of swagger -= -2015-10-16 -- add type override mechanism for swagger models (MR 254, nathanejohnson) -- replace uses of wildcard in generated apidocs (issue 251) - -2015-05-25 -- (api break) changed the type of Properties in Model -- (api break) changed the type of Models in ApiDeclaration -- (api break) changed the parameter type of PostBuildDeclarationMapFunc - -2015-04-09 -- add ModelBuildable interface for customization of Model - -2015-03-17 -- preserve order of Routes per WebService in Swagger listing -- fix use of $ref and type in Swagger models -- add api version to listing - -2014-11-14 -- operation parameters are now sorted using ordering path,query,form,header,body - -2014-11-12 -- respect omitempty tag value for embedded structs -- expose ApiVersion of WebService to Swagger ApiDeclaration - -2014-05-29 -- (api add) Ability to define custom http.Handler to serve swagger-ui static files - -2014-05-04 -- (fix) include model for array element type of response - -2014-01-03 -- (fix) do not add primitive type to the Api models - -2013-11-27 -- (fix) make Swagger work for WebServices with root ("/" or "") paths - -2013-10-29 -- (api add) package variable LogInfo to customize logging function - -2013-10-15 -- upgraded to spec version 1.2 (https://github.com/wordnik/swagger-core/wiki/1.2-transition) \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go b/vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go deleted file mode 100644 index 9f4c3690a..000000000 --- a/vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go +++ /dev/null @@ -1,64 +0,0 @@ -package swagger - -// Copyright 2015 Ernest 
Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "encoding/json" -) - -// ApiDeclarationList maintains an ordered list of ApiDeclaration. -type ApiDeclarationList struct { - List []ApiDeclaration -} - -// At returns the ApiDeclaration by its path unless absent, then ok is false -func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) { - for _, each := range l.List { - if each.ResourcePath == path { - return each, true - } - } - return a, false -} - -// Put adds or replaces a ApiDeclaration with this name -func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) { - // maybe replace existing - for i, each := range l.List { - if each.ResourcePath == path { - // replace - l.List[i] = a - return - } - } - // add - l.List = append(l.List, a) -} - -// Do enumerates all the properties, each with its assigned name -func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) { - for _, each := range l.List { - block(each.ResourcePath, each) - } -} - -// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty -func (l ApiDeclarationList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - buf.WriteString("{\n") - for i, each := range l.List { - buf.WriteString("\"") - buf.WriteString(each.ResourcePath) - buf.WriteString("\": ") - encoder.Encode(each) - if i < len(l.List)-1 { - buf.WriteString(",\n") - } - } - buf.WriteString("}") - return buf.Bytes(), nil -} diff --git a/vendor/github.com/emicklei/go-restful/swagger/config.go b/vendor/github.com/emicklei/go-restful/swagger/config.go deleted file mode 100644 index 510d6fc13..000000000 --- a/vendor/github.com/emicklei/go-restful/swagger/config.go +++ /dev/null @@ -1,38 +0,0 @@ -package swagger - -import ( - "net/http" - - "github.com/emicklei/go-restful" -) - -// PostBuildDeclarationMapFunc can be used to modify the api declaration map. -type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList) - -type MapSchemaFormatFunc func(typeName string) string - -type Config struct { - // url where the services are available, e.g. http://localhost:8080 - // if left empty then the basePath of Swagger is taken from the actual request - WebServicesUrl string - // path where the JSON api is avaiable , e.g. /apidocs - ApiPath string - // [optional] path where the swagger UI will be served, e.g. /swagger - SwaggerPath string - // [optional] location of folder containing Swagger HTML5 application index.html - SwaggerFilePath string - // api listing is constructed from this list of restful WebServices. - WebServices []*restful.WebService - // will serve all static content (scripts,pages,images) - StaticHandler http.Handler - // [optional] on default CORS (Cross-Origin-Resource-Sharing) is enabled. - DisableCORS bool - // Top-level API version. Is reflected in the resource listing. - ApiVersion string - // If set then call this handler after building the complete ApiDeclaration Map - PostBuildHandler PostBuildDeclarationMapFunc - // Swagger global info struct - Info Info - // [optional] If set, model builder should call this handler to get addition typename-to-swagger-format-field convertion. 
- SchemaFormatHandler MapSchemaFormatFunc -} diff --git a/vendor/github.com/emicklei/go-restful/swagger/model_builder.go b/vendor/github.com/emicklei/go-restful/swagger/model_builder.go deleted file mode 100644 index 398e83049..000000000 --- a/vendor/github.com/emicklei/go-restful/swagger/model_builder.go +++ /dev/null @@ -1,449 +0,0 @@ -package swagger - -import ( - "encoding/json" - "reflect" - "strings" -) - -// ModelBuildable is used for extending Structs that need more control over -// how the Model appears in the Swagger api declaration. -type ModelBuildable interface { - PostBuildModel(m *Model) *Model -} - -type modelBuilder struct { - Models *ModelList - Config *Config -} - -type documentable interface { - SwaggerDoc() map[string]string -} - -// Check if this structure has a method with signature func () SwaggerDoc() map[string]string -// If it exists, retrive the documentation and overwrite all struct tag descriptions -func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string { - if docable, ok := reflect.New(model).Elem().Interface().(documentable); ok { - return docable.SwaggerDoc() - } - return make(map[string]string) -} - -// addModelFrom creates and adds a Model to the builder and detects and calls -// the post build hook for customizations -func (b modelBuilder) addModelFrom(sample interface{}) { - if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil { - // allow customizations - if buildable, ok := sample.(ModelBuildable); ok { - modelOrNil = buildable.PostBuildModel(modelOrNil) - b.Models.Put(modelOrNil.Id, *modelOrNil) - } - } -} - -func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model { - modelName := b.keyFrom(st) - if nameOverride != "" { - modelName = nameOverride - } - // no models needed for primitive types - if b.isPrimitiveType(modelName) { - return nil - } - // golang encoding/json packages says array and slice values encode as - // JSON arrays, except that []byte encodes as a base64-encoded string. - // If we see a []byte here, treat it at as a primitive type (string) - // and deal with it in buildArrayTypeProperty. 
- if (st.Kind() == reflect.Slice || st.Kind() == reflect.Array) && - st.Elem().Kind() == reflect.Uint8 { - return nil - } - // see if we already have visited this model - if _, ok := b.Models.At(modelName); ok { - return nil - } - sm := Model{ - Id: modelName, - Required: []string{}, - Properties: ModelPropertyList{}} - - // reference the model before further initializing (enables recursive structs) - b.Models.Put(modelName, sm) - - // check for slice or array - if st.Kind() == reflect.Slice || st.Kind() == reflect.Array { - b.addModel(st.Elem(), "") - return &sm - } - // check for structure or primitive type - if st.Kind() != reflect.Struct { - return &sm - } - - fullDoc := getDocFromMethodSwaggerDoc2(st) - modelDescriptions := []string{} - - for i := 0; i < st.NumField(); i++ { - field := st.Field(i) - jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName) - if len(modelDescription) > 0 { - modelDescriptions = append(modelDescriptions, modelDescription) - } - - // add if not omitted - if len(jsonName) != 0 { - // update description - if fieldDoc, ok := fullDoc[jsonName]; ok { - prop.Description = fieldDoc - } - // update Required - if b.isPropertyRequired(field) { - sm.Required = append(sm.Required, jsonName) - } - sm.Properties.Put(jsonName, prop) - } - } - - // We always overwrite documentation if SwaggerDoc method exists - // "" is special for documenting the struct itself - if modelDoc, ok := fullDoc[""]; ok { - sm.Description = modelDoc - } else if len(modelDescriptions) != 0 { - sm.Description = strings.Join(modelDescriptions, "\n") - } - - // update model builder with completed model - b.Models.Put(modelName, sm) - - return &sm -} - -func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool { - required := true - if jsonTag := field.Tag.Get("json"); jsonTag != "" { - s := strings.Split(jsonTag, ",") - if len(s) > 1 && s[1] == "omitempty" { - return false - } - } - return required -} - -func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) { - jsonName = b.jsonNameOfField(field) - if len(jsonName) == 0 { - // empty name signals skip property - return "", "", prop - } - - if tag := field.Tag.Get("modelDescription"); tag != "" { - modelDescription = tag - } - - prop.setPropertyMetadata(field) - if prop.Type != nil { - return jsonName, modelDescription, prop - } - fieldType := field.Type - - // check if type is doing its own marshalling - marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem() - if fieldType.Implements(marshalerType) { - var pType = "string" - if prop.Type == nil { - prop.Type = &pType - } - if prop.Format == "" { - prop.Format = b.jsonSchemaFormat(fieldType.String()) - } - return jsonName, modelDescription, prop - } - - // check if annotation says it is a string - if jsonTag := field.Tag.Get("json"); jsonTag != "" { - s := strings.Split(jsonTag, ",") - if len(s) > 1 && s[1] == "string" { - stringt := "string" - prop.Type = &stringt - return jsonName, modelDescription, prop - } - } - - fieldKind := fieldType.Kind() - switch { - case fieldKind == reflect.Struct: - jsonName, prop := b.buildStructTypeProperty(field, jsonName, model) - return jsonName, modelDescription, prop - case fieldKind == reflect.Slice || fieldKind == reflect.Array: - jsonName, prop := b.buildArrayTypeProperty(field, jsonName, modelName) - return jsonName, modelDescription, prop - case fieldKind == reflect.Ptr: - jsonName, prop := 
b.buildPointerTypeProperty(field, jsonName, modelName) - return jsonName, modelDescription, prop - case fieldKind == reflect.String: - stringt := "string" - prop.Type = &stringt - return jsonName, modelDescription, prop - case fieldKind == reflect.Map: - // if it's a map, it's unstructured, and swagger 1.2 can't handle it - objectType := "object" - prop.Type = &objectType - return jsonName, modelDescription, prop - } - - if b.isPrimitiveType(fieldType.String()) { - mapped := b.jsonSchemaType(fieldType.String()) - prop.Type = &mapped - prop.Format = b.jsonSchemaFormat(fieldType.String()) - return jsonName, modelDescription, prop - } - modelType := fieldType.String() - prop.Ref = &modelType - - if fieldType.Name() == "" { // override type of anonymous structs - nestedTypeName := modelName + "." + jsonName - prop.Ref = &nestedTypeName - b.addModel(fieldType, nestedTypeName) - } - return jsonName, modelDescription, prop -} - -func hasNamedJSONTag(field reflect.StructField) bool { - parts := strings.Split(field.Tag.Get("json"), ",") - if len(parts) == 0 { - return false - } - for _, s := range parts[1:] { - if s == "inline" { - return false - } - } - return len(parts[0]) > 0 -} - -func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) { - prop.setPropertyMetadata(field) - // Check for type override in tag - if prop.Type != nil { - return jsonName, prop - } - fieldType := field.Type - // check for anonymous - if len(fieldType.Name()) == 0 { - // anonymous - anonType := model.Id + "." + jsonName - b.addModel(fieldType, anonType) - prop.Ref = &anonType - return jsonName, prop - } - - if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) { - // embedded struct - sub := modelBuilder{new(ModelList), b.Config} - sub.addModel(fieldType, "") - subKey := sub.keyFrom(fieldType) - // merge properties from sub - subModel, _ := sub.Models.At(subKey) - subModel.Properties.Do(func(k string, v ModelProperty) { - model.Properties.Put(k, v) - // if subModel says this property is required then include it - required := false - for _, each := range subModel.Required { - if k == each { - required = true - break - } - } - if required { - model.Required = append(model.Required, k) - } - }) - // add all new referenced models - sub.Models.Do(func(key string, sub Model) { - if key != subKey { - if _, ok := b.Models.At(key); !ok { - b.Models.Put(key, sub) - } - } - }) - // empty name signals skip property - return "", prop - } - // simple struct - b.addModel(fieldType, "") - var pType = fieldType.String() - prop.Ref = &pType - return jsonName, prop -} - -func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) { - // check for type override in tags - prop.setPropertyMetadata(field) - if prop.Type != nil { - return jsonName, prop - } - fieldType := field.Type - if fieldType.Elem().Kind() == reflect.Uint8 { - stringt := "string" - prop.Type = &stringt - return jsonName, prop - } - var pType = "array" - prop.Type = &pType - isPrimitive := b.isPrimitiveType(fieldType.Elem().Name()) - elemTypeName := b.getElementTypeName(modelName, jsonName, fieldType.Elem()) - prop.Items = new(Item) - if isPrimitive { - mapped := b.jsonSchemaType(elemTypeName) - prop.Items.Type = &mapped - } else { - prop.Items.Ref = &elemTypeName - } - // add|overwrite model for element type - if fieldType.Elem().Kind() == reflect.Ptr { - fieldType = fieldType.Elem() - 
} - if !isPrimitive { - b.addModel(fieldType.Elem(), elemTypeName) - } - return jsonName, prop -} - -func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) { - prop.setPropertyMetadata(field) - // Check for type override in tags - if prop.Type != nil { - return jsonName, prop - } - fieldType := field.Type - - // override type of pointer to list-likes - if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array { - var pType = "array" - prop.Type = &pType - isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name()) - elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem()) - if isPrimitive { - primName := b.jsonSchemaType(elemName) - prop.Items = &Item{Ref: &primName} - } else { - prop.Items = &Item{Ref: &elemName} - } - if !isPrimitive { - // add|overwrite model for element type - b.addModel(fieldType.Elem().Elem(), elemName) - } - } else { - // non-array, pointer type - var pType = b.jsonSchemaType(fieldType.String()[1:]) // no star, include pkg path - if b.isPrimitiveType(fieldType.String()[1:]) { - prop.Type = &pType - prop.Format = b.jsonSchemaFormat(fieldType.String()[1:]) - return jsonName, prop - } - prop.Ref = &pType - elemName := "" - if fieldType.Elem().Name() == "" { - elemName = modelName + "." + jsonName - prop.Ref = &elemName - } - b.addModel(fieldType.Elem(), elemName) - } - return jsonName, prop -} - -func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string { - if t.Kind() == reflect.Ptr { - return t.String()[1:] - } - if t.Name() == "" { - return modelName + "." + jsonName - } - return b.keyFrom(t) -} - -func (b modelBuilder) keyFrom(st reflect.Type) string { - key := st.String() - if len(st.Name()) == 0 { // unnamed type - // Swagger UI has special meaning for [ - key = strings.Replace(key, "[]", "||", -1) - } - return key -} - -// see also https://golang.org/ref/spec#Numeric_types -func (b modelBuilder) isPrimitiveType(modelName string) bool { - if len(modelName) == 0 { - return false - } - return strings.Contains("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time", modelName) -} - -// jsonNameOfField returns the name of the field as it should appear in JSON format -// An empty string indicates that this field is not part of the JSON representation -func (b modelBuilder) jsonNameOfField(field reflect.StructField) string { - if jsonTag := field.Tag.Get("json"); jsonTag != "" { - s := strings.Split(jsonTag, ",") - if s[0] == "-" { - // empty name signals skip property - return "" - } else if s[0] != "" { - return s[0] - } - } - return field.Name -} - -// see also http://json-schema.org/latest/json-schema-core.html#anchor8 -func (b modelBuilder) jsonSchemaType(modelName string) string { - schemaMap := map[string]string{ - "uint": "integer", - "uint8": "integer", - "uint16": "integer", - "uint32": "integer", - "uint64": "integer", - - "int": "integer", - "int8": "integer", - "int16": "integer", - "int32": "integer", - "int64": "integer", - - "byte": "integer", - "float64": "number", - "float32": "number", - "bool": "boolean", - "time.Time": "string", - } - mapped, ok := schemaMap[modelName] - if !ok { - return modelName // use as is (custom or struct) - } - return mapped -} - -func (b modelBuilder) jsonSchemaFormat(modelName string) string { - if b.Config != nil && b.Config.SchemaFormatHandler != nil { - if mapped := 
b.Config.SchemaFormatHandler(modelName); mapped != "" { - return mapped - } - } - schemaMap := map[string]string{ - "int": "int32", - "int32": "int32", - "int64": "int64", - "byte": "byte", - "uint": "integer", - "uint8": "byte", - "float64": "double", - "float32": "float", - "time.Time": "date-time", - "*time.Time": "date-time", - } - mapped, ok := schemaMap[modelName] - if !ok { - return "" // no format - } - return mapped -} diff --git a/vendor/github.com/emicklei/go-restful/swagger/model_list.go b/vendor/github.com/emicklei/go-restful/swagger/model_list.go deleted file mode 100644 index 9bb6cb678..000000000 --- a/vendor/github.com/emicklei/go-restful/swagger/model_list.go +++ /dev/null @@ -1,86 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "encoding/json" -) - -// NamedModel associates a name with a Model (not using its Id) -type NamedModel struct { - Name string - Model Model -} - -// ModelList encapsulates a list of NamedModel (association) -type ModelList struct { - List []NamedModel -} - -// Put adds or replaces a Model by its name -func (l *ModelList) Put(name string, model Model) { - for i, each := range l.List { - if each.Name == name { - // replace - l.List[i] = NamedModel{name, model} - return - } - } - // add - l.List = append(l.List, NamedModel{name, model}) -} - -// At returns a Model by its name, ok is false if absent -func (l *ModelList) At(name string) (m Model, ok bool) { - for _, each := range l.List { - if each.Name == name { - return each.Model, true - } - } - return m, false -} - -// Do enumerates all the models, each with its assigned name -func (l *ModelList) Do(block func(name string, value Model)) { - for _, each := range l.List { - block(each.Name, each.Model) - } -} - -// MarshalJSON writes the ModelList as if it was a map[string]Model -func (l ModelList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - buf.WriteString("{\n") - for i, each := range l.List { - buf.WriteString("\"") - buf.WriteString(each.Name) - buf.WriteString("\": ") - encoder.Encode(each.Model) - if i < len(l.List)-1 { - buf.WriteString(",\n") - } - } - buf.WriteString("}") - return buf.Bytes(), nil -} - -// UnmarshalJSON reads back a ModelList. This is an expensive operation. 
-func (l *ModelList) UnmarshalJSON(data []byte) error { - raw := map[string]interface{}{} - json.NewDecoder(bytes.NewReader(data)).Decode(&raw) - for k, v := range raw { - // produces JSON bytes for each value - data, err := json.Marshal(v) - if err != nil { - return err - } - var m Model - json.NewDecoder(bytes.NewReader(data)).Decode(&m) - l.Put(k, m) - } - return nil -} diff --git a/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go b/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go deleted file mode 100644 index 04fff2c57..000000000 --- a/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go +++ /dev/null @@ -1,66 +0,0 @@ -package swagger - -import ( - "reflect" - "strings" -) - -func (prop *ModelProperty) setDescription(field reflect.StructField) { - if tag := field.Tag.Get("description"); tag != "" { - prop.Description = tag - } -} - -func (prop *ModelProperty) setDefaultValue(field reflect.StructField) { - if tag := field.Tag.Get("default"); tag != "" { - prop.DefaultValue = Special(tag) - } -} - -func (prop *ModelProperty) setEnumValues(field reflect.StructField) { - // We use | to separate the enum values. This value is chosen - // since its unlikely to be useful in actual enumeration values. - if tag := field.Tag.Get("enum"); tag != "" { - prop.Enum = strings.Split(tag, "|") - } -} - -func (prop *ModelProperty) setMaximum(field reflect.StructField) { - if tag := field.Tag.Get("maximum"); tag != "" { - prop.Maximum = tag - } -} - -func (prop *ModelProperty) setType(field reflect.StructField) { - if tag := field.Tag.Get("type"); tag != "" { - prop.Type = &tag - } -} - -func (prop *ModelProperty) setMinimum(field reflect.StructField) { - if tag := field.Tag.Get("minimum"); tag != "" { - prop.Minimum = tag - } -} - -func (prop *ModelProperty) setUniqueItems(field reflect.StructField) { - tag := field.Tag.Get("unique") - switch tag { - case "true": - v := true - prop.UniqueItems = &v - case "false": - v := false - prop.UniqueItems = &v - } -} - -func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) { - prop.setDescription(field) - prop.setEnumValues(field) - prop.setMinimum(field) - prop.setMaximum(field) - prop.setUniqueItems(field) - prop.setDefaultValue(field) - prop.setType(field) -} diff --git a/vendor/github.com/emicklei/go-restful/swagger/model_property_list.go b/vendor/github.com/emicklei/go-restful/swagger/model_property_list.go deleted file mode 100644 index 3babb1944..000000000 --- a/vendor/github.com/emicklei/go-restful/swagger/model_property_list.go +++ /dev/null @@ -1,87 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -import ( - "bytes" - "encoding/json" -) - -// NamedModelProperty associates a name to a ModelProperty -type NamedModelProperty struct { - Name string - Property ModelProperty -} - -// ModelPropertyList encapsulates a list of NamedModelProperty (association) -type ModelPropertyList struct { - List []NamedModelProperty -} - -// At returns the ModelPropety by its name unless absent, then ok is false -func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) { - for _, each := range l.List { - if each.Name == name { - return each.Property, true - } - } - return p, false -} - -// Put adds or replaces a ModelProperty with this name -func (l *ModelPropertyList) Put(name string, prop ModelProperty) { - // maybe replace existing - for i, each := range l.List { - if each.Name == name { - // replace - l.List[i] = NamedModelProperty{Name: name, Property: prop} - return - } - } - // add - l.List = append(l.List, NamedModelProperty{Name: name, Property: prop}) -} - -// Do enumerates all the properties, each with its assigned name -func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) { - for _, each := range l.List { - block(each.Name, each.Property) - } -} - -// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty -func (l ModelPropertyList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - buf.WriteString("{\n") - for i, each := range l.List { - buf.WriteString("\"") - buf.WriteString(each.Name) - buf.WriteString("\": ") - encoder.Encode(each.Property) - if i < len(l.List)-1 { - buf.WriteString(",\n") - } - } - buf.WriteString("}") - return buf.Bytes(), nil -} - -// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation. -func (l *ModelPropertyList) UnmarshalJSON(data []byte) error { - raw := map[string]interface{}{} - json.NewDecoder(bytes.NewReader(data)).Decode(&raw) - for k, v := range raw { - // produces JSON bytes for each value - data, err := json.Marshal(v) - if err != nil { - return err - } - var m ModelProperty - json.NewDecoder(bytes.NewReader(data)).Decode(&m) - l.Put(k, m) - } - return nil -} diff --git a/vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go b/vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go deleted file mode 100644 index b33ccfbeb..000000000 --- a/vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go +++ /dev/null @@ -1,36 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -import "github.com/emicklei/go-restful" - -type orderedRouteMap struct { - elements map[string][]restful.Route - keys []string -} - -func newOrderedRouteMap() *orderedRouteMap { - return &orderedRouteMap{ - elements: map[string][]restful.Route{}, - keys: []string{}, - } -} - -func (o *orderedRouteMap) Add(key string, route restful.Route) { - routes, ok := o.elements[key] - if ok { - routes = append(routes, route) - o.elements[key] = routes - return - } - o.elements[key] = []restful.Route{route} - o.keys = append(o.keys, key) -} - -func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) { - for _, k := range o.keys { - block(k, o.elements[k]) - } -} diff --git a/vendor/github.com/emicklei/go-restful/swagger/swagger.go b/vendor/github.com/emicklei/go-restful/swagger/swagger.go deleted file mode 100644 index 9c40833e7..000000000 --- a/vendor/github.com/emicklei/go-restful/swagger/swagger.go +++ /dev/null @@ -1,185 +0,0 @@ -// Package swagger implements the structures of the Swagger -// https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md -package swagger - -const swaggerVersion = "1.2" - -// 4.3.3 Data Type Fields -type DataTypeFields struct { - Type *string `json:"type,omitempty"` // if Ref not used - Ref *string `json:"$ref,omitempty"` // if Type not used - Format string `json:"format,omitempty"` - DefaultValue Special `json:"defaultValue,omitempty"` - Enum []string `json:"enum,omitempty"` - Minimum string `json:"minimum,omitempty"` - Maximum string `json:"maximum,omitempty"` - Items *Item `json:"items,omitempty"` - UniqueItems *bool `json:"uniqueItems,omitempty"` -} - -type Special string - -// 4.3.4 Items Object -type Item struct { - Type *string `json:"type,omitempty"` - Ref *string `json:"$ref,omitempty"` - Format string `json:"format,omitempty"` -} - -// 5.1 Resource Listing -type ResourceListing struct { - SwaggerVersion string `json:"swaggerVersion"` // e.g 1.2 - Apis []Resource `json:"apis"` - ApiVersion string `json:"apiVersion"` - Info Info `json:"info"` - Authorizations []Authorization `json:"authorizations,omitempty"` -} - -// 5.1.2 Resource Object -type Resource struct { - Path string `json:"path"` // relative or absolute, must start with / - Description string `json:"description"` -} - -// 5.1.3 Info Object -type Info struct { - Title string `json:"title"` - Description string `json:"description"` - TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"` - Contact string `json:"contact,omitempty"` - License string `json:"license,omitempty"` - LicenseUrl string `json:"licenseUrl,omitempty"` -} - -// 5.1.5 -type Authorization struct { - Type string `json:"type"` - PassAs string `json:"passAs"` - Keyname string `json:"keyname"` - Scopes []Scope `json:"scopes"` - GrantTypes []GrantType `json:"grandTypes"` -} - -// 5.1.6, 5.2.11 -type Scope struct { - // Required. The name of the scope. - Scope string `json:"scope"` - // Recommended. A short description of the scope. - Description string `json:"description"` -} - -// 5.1.7 -type GrantType struct { - Implicit Implicit `json:"implicit"` - AuthorizationCode AuthorizationCode `json:"authorization_code"` -} - -// 5.1.8 Implicit Object -type Implicit struct { - // Required. The login endpoint definition. - loginEndpoint LoginEndpoint `json:"loginEndpoint"` - // An optional alternative name to standard "access_token" OAuth2 parameter. 
- TokenName string `json:"tokenName"` -} - -// 5.1.9 Authorization Code Object -type AuthorizationCode struct { - TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"` - TokenEndpoint TokenEndpoint `json:"tokenEndpoint"` -} - -// 5.1.10 Login Endpoint Object -type LoginEndpoint struct { - // Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format. - Url string `json:"url"` -} - -// 5.1.11 Token Request Endpoint Object -type TokenRequestEndpoint struct { - // Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format. - Url string `json:"url"` - // An optional alternative name to standard "client_id" OAuth2 parameter. - ClientIdName string `json:"clientIdName"` - // An optional alternative name to the standard "client_secret" OAuth2 parameter. - ClientSecretName string `json:"clientSecretName"` -} - -// 5.1.12 Token Endpoint Object -type TokenEndpoint struct { - // Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format. - Url string `json:"url"` - // An optional alternative name to standard "access_token" OAuth2 parameter. - TokenName string `json:"tokenName"` -} - -// 5.2 API Declaration -type ApiDeclaration struct { - SwaggerVersion string `json:"swaggerVersion"` - ApiVersion string `json:"apiVersion"` - BasePath string `json:"basePath"` - ResourcePath string `json:"resourcePath"` // must start with / - Info Info `json:"info"` - Apis []Api `json:"apis,omitempty"` - Models ModelList `json:"models,omitempty"` - Produces []string `json:"produces,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Authorizations []Authorization `json:"authorizations,omitempty"` -} - -// 5.2.2 API Object -type Api struct { - Path string `json:"path"` // relative or absolute, must start with / - Description string `json:"description"` - Operations []Operation `json:"operations,omitempty"` -} - -// 5.2.3 Operation Object -type Operation struct { - DataTypeFields - Method string `json:"method"` - Summary string `json:"summary,omitempty"` - Notes string `json:"notes,omitempty"` - Nickname string `json:"nickname"` - Authorizations []Authorization `json:"authorizations,omitempty"` - Parameters []Parameter `json:"parameters"` - ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional - Produces []string `json:"produces,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Deprecated string `json:"deprecated,omitempty"` -} - -// 5.2.4 Parameter Object -type Parameter struct { - DataTypeFields - ParamType string `json:"paramType"` // path,query,body,header,form - Name string `json:"name"` - Description string `json:"description"` - Required bool `json:"required"` - AllowMultiple bool `json:"allowMultiple"` -} - -// 5.2.5 Response Message Object -type ResponseMessage struct { - Code int `json:"code"` - Message string `json:"message"` - ResponseModel string `json:"responseModel,omitempty"` -} - -// 5.2.6, 5.2.7 Models Object -type Model struct { - Id string `json:"id"` - Description string `json:"description,omitempty"` - Required []string `json:"required,omitempty"` - Properties ModelPropertyList `json:"properties"` - SubTypes []string `json:"subTypes,omitempty"` - Discriminator string `json:"discriminator,omitempty"` -} - -// 5.2.8 Properties Object -type ModelProperty struct { - DataTypeFields - Description string `json:"description,omitempty"` -} - -// 5.2.10 -type 
Authorizations map[string]Authorization diff --git a/vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go b/vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go deleted file mode 100644 index 05a3c7e76..000000000 --- a/vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go +++ /dev/null @@ -1,21 +0,0 @@ -package swagger - -type SwaggerBuilder struct { - SwaggerService -} - -func NewSwaggerBuilder(config Config) *SwaggerBuilder { - return &SwaggerBuilder{*newSwaggerService(config)} -} - -func (sb SwaggerBuilder) ProduceListing() ResourceListing { - return sb.SwaggerService.produceListing() -} - -func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration { - return sb.SwaggerService.produceAllDeclarations() -} - -func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) { - return sb.SwaggerService.produceDeclarations(route) -} diff --git a/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go b/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go deleted file mode 100644 index 58dd62590..000000000 --- a/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go +++ /dev/null @@ -1,440 +0,0 @@ -package swagger - -import ( - "fmt" - - "github.com/emicklei/go-restful" - // "github.com/emicklei/hopwatch" - "net/http" - "reflect" - "sort" - "strings" - - "github.com/emicklei/go-restful/log" -) - -type SwaggerService struct { - config Config - apiDeclarationMap *ApiDeclarationList -} - -func newSwaggerService(config Config) *SwaggerService { - sws := &SwaggerService{ - config: config, - apiDeclarationMap: new(ApiDeclarationList)} - - // Build all ApiDeclarations - for _, each := range config.WebServices { - rootPath := each.RootPath() - // skip the api service itself - if rootPath != config.ApiPath { - if rootPath == "" || rootPath == "/" { - // use routes - for _, route := range each.Routes() { - entry := staticPathFromRoute(route) - _, exists := sws.apiDeclarationMap.At(entry) - if !exists { - sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry)) - } - } - } else { // use root path - sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath())) - } - } - } - - // if specified then call the PostBuilderHandler - if config.PostBuildHandler != nil { - config.PostBuildHandler(sws.apiDeclarationMap) - } - return sws -} - -// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf -var LogInfo = func(format string, v ...interface{}) { - // use the restful package-wide logger - log.Printf(format, v...) -} - -// InstallSwaggerService add the WebService that provides the API documentation of all services -// conform the Swagger documentation specifcation. (https://github.com/wordnik/swagger-core/wiki). -func InstallSwaggerService(aSwaggerConfig Config) { - RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer) -} - -// RegisterSwaggerService add the WebService that provides the API documentation of all services -// conform the Swagger documentation specifcation. (https://github.com/wordnik/swagger-core/wiki). 
-func RegisterSwaggerService(config Config, wsContainer *restful.Container) { - sws := newSwaggerService(config) - ws := new(restful.WebService) - ws.Path(config.ApiPath) - ws.Produces(restful.MIME_JSON) - if config.DisableCORS { - ws.Filter(enableCORS) - } - ws.Route(ws.GET("/").To(sws.getListing)) - ws.Route(ws.GET("/{a}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations)) - LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath) - wsContainer.Add(ws) - - // Check paths for UI serving - if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" { - swaggerPathSlash := config.SwaggerPath - // path must end with slash / - if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] { - LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)") - swaggerPathSlash += "/" - } - - LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath) - wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath)))) - - //if we define a custom static handler use it - } else if config.StaticHandler != nil && config.SwaggerPath != "" { - swaggerPathSlash := config.SwaggerPath - // path must end with slash / - if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] { - LogInfo("[restful/swagger] use corrected SwaggerFilePath ; must end with slash (/)") - swaggerPathSlash += "/" - - } - LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler) - wsContainer.Handle(swaggerPathSlash, config.StaticHandler) - - } else { - LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served") - } -} - -func staticPathFromRoute(r restful.Route) string { - static := r.Path - bracket := strings.Index(static, "{") - if bracket <= 1 { // result cannot be empty - return static - } - if bracket != -1 { - static = r.Path[:bracket] - } - if strings.HasSuffix(static, "/") { - return static[:len(static)-1] - } else { - return static - } -} - -func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { - if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" { - // prevent duplicate header - if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 { - resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin) - } - } - chain.ProcessFilter(req, resp) -} - -func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) { - listing := sws.produceListing() - resp.WriteAsJson(listing) -} - -func (sws SwaggerService) produceListing() ResourceListing { - listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion, Info: sws.config.Info} - sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) { - ref := Resource{Path: k} - if len(v.Apis) > 0 { // use description of first (could still be empty) - ref.Description = v.Apis[0].Description - } - listing.Apis = append(listing.Apis, ref) - }) - return listing -} - -func (sws SwaggerService) getDeclarations(req *restful.Request, 
resp *restful.Response) { - decl, ok := sws.produceDeclarations(composeRootPath(req)) - if !ok { - resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found") - return - } - // unless WebServicesUrl is given - if len(sws.config.WebServicesUrl) == 0 { - // update base path from the actual request - // TODO how to detect https? assume http for now - var host string - // X-Forwarded-Host or Host or Request.Host - hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific? - if !ok || len(hostvalues) == 0 { - forwarded, ok := req.Request.Header["Host"] // without reverse-proxy - if !ok || len(forwarded) == 0 { - // fallback to Host field - host = req.Request.Host - } else { - host = forwarded[0] - } - } else { - host = hostvalues[0] - } - // inspect Referer for the scheme (http vs https) - scheme := "http" - if referer := req.Request.Header["Referer"]; len(referer) > 0 { - if strings.HasPrefix(referer[0], "https") { - scheme = "https" - } - } - decl.BasePath = fmt.Sprintf("%s://%s", scheme, host) - } - resp.WriteAsJson(decl) -} - -func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration { - decls := map[string]ApiDeclaration{} - sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) { - decls[k] = v - }) - return decls -} - -func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) { - decl, ok := sws.apiDeclarationMap.At(route) - if !ok { - return nil, false - } - decl.BasePath = sws.config.WebServicesUrl - return &decl, true -} - -// composeDeclaration uses all routes and parameters to create a ApiDeclaration -func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration { - decl := ApiDeclaration{ - SwaggerVersion: swaggerVersion, - BasePath: sws.config.WebServicesUrl, - ResourcePath: pathPrefix, - Models: ModelList{}, - ApiVersion: ws.Version()} - - // collect any path parameters - rootParams := []Parameter{} - for _, param := range ws.PathParameters() { - rootParams = append(rootParams, asSwaggerParameter(param.Data())) - } - // aggregate by path - pathToRoutes := newOrderedRouteMap() - for _, other := range ws.Routes() { - if strings.HasPrefix(other.Path, pathPrefix) { - pathToRoutes.Add(other.Path, other) - } - } - pathToRoutes.Do(func(path string, routes []restful.Route) { - api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()} - voidString := "void" - for _, route := range routes { - operation := Operation{ - Method: route.Method, - Summary: route.Doc, - Notes: route.Notes, - // Type gets overwritten if there is a write sample - DataTypeFields: DataTypeFields{Type: &voidString}, - Parameters: []Parameter{}, - Nickname: route.Operation, - ResponseMessages: composeResponseMessages(route, &decl, &sws.config)} - - operation.Consumes = route.Consumes - operation.Produces = route.Produces - - // share root params if any - for _, swparam := range rootParams { - operation.Parameters = append(operation.Parameters, swparam) - } - // route specific params - for _, param := range route.ParameterDocs { - operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data())) - } - - sws.addModelsFromRouteTo(&operation, route, &decl) - api.Operations = append(api.Operations, operation) - } - decl.Apis = append(decl.Apis, api) - }) - return decl -} - -func withoutWildcard(path string) string { - if strings.HasSuffix(path, ":*}") { - return path[0:len(path)-3] + "}" - } - return path -} - -// composeResponseMessages takes the 
ResponseErrors (if any) and creates ResponseMessages from them. -func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *Config) (messages []ResponseMessage) { - if route.ResponseErrors == nil { - return messages - } - // sort by code - codes := sort.IntSlice{} - for code, _ := range route.ResponseErrors { - codes = append(codes, code) - } - codes.Sort() - for _, code := range codes { - each := route.ResponseErrors[code] - message := ResponseMessage{ - Code: code, - Message: each.Message, - } - if each.Model != nil { - st := reflect.TypeOf(each.Model) - isCollection, st := detectCollectionType(st) - modelName := modelBuilder{}.keyFrom(st) - if isCollection { - modelName = "array[" + modelName + "]" - } - modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "") - // reference the model - message.ResponseModel = modelName - } - messages = append(messages, message) - } - return -} - -// addModelsFromRoute takes any read or write sample from the Route and creates a Swagger model from it. -func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) { - if route.ReadSample != nil { - sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models) - } - if route.WriteSample != nil { - sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models) - } -} - -func detectCollectionType(st reflect.Type) (bool, reflect.Type) { - isCollection := false - if st.Kind() == reflect.Slice || st.Kind() == reflect.Array { - st = st.Elem() - isCollection = true - } else { - if st.Kind() == reflect.Ptr { - if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array { - st = st.Elem().Elem() - isCollection = true - } - } - } - return isCollection, st -} - -// addModelFromSample creates and adds (or overwrites) a Model from a sample resource -func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) { - if isResponse { - type_, items := asDataType(sample, &sws.config) - operation.Type = type_ - operation.Items = items - } - modelBuilder{Models: models, Config: &sws.config}.addModelFrom(sample) -} - -func asSwaggerParameter(param restful.ParameterData) Parameter { - return Parameter{ - DataTypeFields: DataTypeFields{ - Type: ¶m.DataType, - Format: asFormat(param.DataType, param.DataFormat), - DefaultValue: Special(param.DefaultValue), - }, - Name: param.Name, - Description: param.Description, - ParamType: asParamType(param.Kind), - - Required: param.Required} -} - -// Between 1..7 path parameters is supported -func composeRootPath(req *restful.Request) string { - path := "/" + req.PathParameter("a") - b := req.PathParameter("b") - if b == "" { - return path - } - path = path + "/" + b - c := req.PathParameter("c") - if c == "" { - return path - } - path = path + "/" + c - d := req.PathParameter("d") - if d == "" { - return path - } - path = path + "/" + d - e := req.PathParameter("e") - if e == "" { - return path - } - path = path + "/" + e - f := req.PathParameter("f") - if f == "" { - return path - } - path = path + "/" + f - g := req.PathParameter("g") - if g == "" { - return path - } - return path + "/" + g -} - -func asFormat(dataType string, dataFormat string) string { - if dataFormat != "" { - return dataFormat - } - return "" // TODO -} - -func asParamType(kind int) string { - switch { - case kind == restful.PathParameterKind: - return "path" - case kind == restful.QueryParameterKind: - return "query" - case kind == 
restful.BodyParameterKind: - return "body" - case kind == restful.HeaderParameterKind: - return "header" - case kind == restful.FormParameterKind: - return "form" - } - return "" -} - -func asDataType(any interface{}, config *Config) (*string, *Item) { - // If it's not a collection, return the suggested model name - st := reflect.TypeOf(any) - isCollection, st := detectCollectionType(st) - modelName := modelBuilder{}.keyFrom(st) - // if it's not a collection we are done - if !isCollection { - return &modelName, nil - } - - // XXX: This is not very elegant - // We create an Item object referring to the given model - models := ModelList{} - mb := modelBuilder{Models: &models, Config: config} - mb.addModelFrom(any) - - elemTypeName := mb.getElementTypeName(modelName, "", st) - item := new(Item) - if mb.isPrimitiveType(elemTypeName) { - mapped := mb.jsonSchemaType(elemTypeName) - item.Type = &mapped - } else { - item.Ref = &elemTypeName - } - tmp := "array" - return &tmp, item -} diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/web_service.go deleted file mode 100644 index 2a51004f8..000000000 --- a/vendor/github.com/emicklei/go-restful/web_service.go +++ /dev/null @@ -1,268 +0,0 @@ -package restful - -import ( - "errors" - "os" - "sync" - - "github.com/emicklei/go-restful/log" -) - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// WebService holds a collection of Route values that bind a Http Method + URL Path to a function. -type WebService struct { - rootPath string - pathExpr *pathExpression // cached compilation of rootPath as RegExp - routes []Route - produces []string - consumes []string - pathParameters []*Parameter - filters []FilterFunction - documentation string - apiVersion string - - dynamicRoutes bool - - // protects 'routes' if dynamic routes are enabled - routesLock sync.RWMutex -} - -func (w *WebService) SetDynamicRoutes(enable bool) { - w.dynamicRoutes = enable -} - -// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it. -func (w *WebService) compilePathExpression() { - compiled, err := newPathExpression(w.rootPath) - if err != nil { - log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err) - os.Exit(1) - } - w.pathExpr = compiled -} - -// ApiVersion sets the API version for documentation purposes. -func (w *WebService) ApiVersion(apiVersion string) *WebService { - w.apiVersion = apiVersion - return w -} - -// Version returns the API version for documentation purposes. -func (w *WebService) Version() string { return w.apiVersion } - -// Path specifies the root URL template path of the WebService. -// All Routes will be relative to this path. -func (w *WebService) Path(root string) *WebService { - w.rootPath = root - if len(w.rootPath) == 0 { - w.rootPath = "/" - } - w.compilePathExpression() - return w -} - -// Param adds a PathParameter to document parameters used in the root path. -func (w *WebService) Param(parameter *Parameter) *WebService { - if w.pathParameters == nil { - w.pathParameters = []*Parameter{} - } - w.pathParameters = append(w.pathParameters, parameter) - return w -} - -// PathParameter creates a new Parameter of kind Path for documentation purposes. -// It is initialized as required with string as its DataType. 
-func (w *WebService) PathParameter(name, description string) *Parameter { - return PathParameter(name, description) -} - -// PathParameter creates a new Parameter of kind Path for documentation purposes. -// It is initialized as required with string as its DataType. -func PathParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}} - p.bePath() - return p -} - -// QueryParameter creates a new Parameter of kind Query for documentation purposes. -// It is initialized as not required with string as its DataType. -func (w *WebService) QueryParameter(name, description string) *Parameter { - return QueryParameter(name, description) -} - -// QueryParameter creates a new Parameter of kind Query for documentation purposes. -// It is initialized as not required with string as its DataType. -func QueryParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} - p.beQuery() - return p -} - -// BodyParameter creates a new Parameter of kind Body for documentation purposes. -// It is initialized as required without a DataType. -func (w *WebService) BodyParameter(name, description string) *Parameter { - return BodyParameter(name, description) -} - -// BodyParameter creates a new Parameter of kind Body for documentation purposes. -// It is initialized as required without a DataType. -func BodyParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}} - p.beBody() - return p -} - -// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. -// It is initialized as not required with string as its DataType. -func (w *WebService) HeaderParameter(name, description string) *Parameter { - return HeaderParameter(name, description) -} - -// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. -// It is initialized as not required with string as its DataType. -func HeaderParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} - p.beHeader() - return p -} - -// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. -// It is initialized as required with string as its DataType. -func (w *WebService) FormParameter(name, description string) *Parameter { - return FormParameter(name, description) -} - -// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. -// It is initialized as required with string as its DataType. -func FormParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} - p.beForm() - return p -} - -// Route creates a new Route using the RouteBuilder and add to the ordered list of Routes. 
-func (w *WebService) Route(builder *RouteBuilder) *WebService { - w.routesLock.Lock() - defer w.routesLock.Unlock() - builder.copyDefaults(w.produces, w.consumes) - w.routes = append(w.routes, builder.Build()) - return w -} - -// RemoveRoute removes the specified route, looks for something that matches 'path' and 'method' -func (w *WebService) RemoveRoute(path, method string) error { - if !w.dynamicRoutes { - return errors.New("dynamic routes are not enabled.") - } - w.routesLock.Lock() - defer w.routesLock.Unlock() - newRoutes := make([]Route, (len(w.routes) - 1)) - current := 0 - for ix := range w.routes { - if w.routes[ix].Method == method && w.routes[ix].Path == path { - continue - } - newRoutes[current] = w.routes[ix] - current = current + 1 - } - w.routes = newRoutes - return nil -} - -// Method creates a new RouteBuilder and initialize its http method -func (w *WebService) Method(httpMethod string) *RouteBuilder { - return new(RouteBuilder).servicePath(w.rootPath).Method(httpMethod) -} - -// Produces specifies that this WebService can produce one or more MIME types. -// Http requests must have one of these values set for the Accept header. -func (w *WebService) Produces(contentTypes ...string) *WebService { - w.produces = contentTypes - return w -} - -// Consumes specifies that this WebService can consume one or more MIME types. -// Http requests must have one of these values set for the Content-Type header. -func (w *WebService) Consumes(accepts ...string) *WebService { - w.consumes = accepts - return w -} - -// Routes returns the Routes associated with this WebService -func (w *WebService) Routes() []Route { - if !w.dynamicRoutes { - return w.routes - } - // Make a copy of the array to prevent concurrency problems - w.routesLock.RLock() - defer w.routesLock.RUnlock() - result := make([]Route, len(w.routes)) - for ix := range w.routes { - result[ix] = w.routes[ix] - } - return result -} - -// RootPath returns the RootPath associated with this WebService. Default "/" -func (w *WebService) RootPath() string { - return w.rootPath -} - -// PathParameters return the path parameter names for (shared amoung its Routes) -func (w *WebService) PathParameters() []*Parameter { - return w.pathParameters -} - -// Filter adds a filter function to the chain of filters applicable to all its Routes -func (w *WebService) Filter(filter FilterFunction) *WebService { - w.filters = append(w.filters, filter) - return w -} - -// Doc is used to set the documentation of this service. -func (w *WebService) Doc(plainText string) *WebService { - w.documentation = plainText - return w -} - -// Documentation returns it. 
-func (w *WebService) Documentation() string { - return w.documentation -} - -/* - Convenience methods -*/ - -// HEAD is a shortcut for .Method("HEAD").Path(subPath) -func (w *WebService) HEAD(subPath string) *RouteBuilder { - return new(RouteBuilder).servicePath(w.rootPath).Method("HEAD").Path(subPath) -} - -// GET is a shortcut for .Method("GET").Path(subPath) -func (w *WebService) GET(subPath string) *RouteBuilder { - return new(RouteBuilder).servicePath(w.rootPath).Method("GET").Path(subPath) -} - -// POST is a shortcut for .Method("POST").Path(subPath) -func (w *WebService) POST(subPath string) *RouteBuilder { - return new(RouteBuilder).servicePath(w.rootPath).Method("POST").Path(subPath) -} - -// PUT is a shortcut for .Method("PUT").Path(subPath) -func (w *WebService) PUT(subPath string) *RouteBuilder { - return new(RouteBuilder).servicePath(w.rootPath).Method("PUT").Path(subPath) -} - -// PATCH is a shortcut for .Method("PATCH").Path(subPath) -func (w *WebService) PATCH(subPath string) *RouteBuilder { - return new(RouteBuilder).servicePath(w.rootPath).Method("PATCH").Path(subPath) -} - -// DELETE is a shortcut for .Method("DELETE").Path(subPath) -func (w *WebService) DELETE(subPath string) *RouteBuilder { - return new(RouteBuilder).servicePath(w.rootPath).Method("DELETE").Path(subPath) -} diff --git a/vendor/github.com/emicklei/go-restful/web_service_container.go b/vendor/github.com/emicklei/go-restful/web_service_container.go deleted file mode 100644 index c9d31b06c..000000000 --- a/vendor/github.com/emicklei/go-restful/web_service_container.go +++ /dev/null @@ -1,39 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "net/http" -) - -// DefaultContainer is a restful.Container that uses http.DefaultServeMux -var DefaultContainer *Container - -func init() { - DefaultContainer = NewContainer() - DefaultContainer.ServeMux = http.DefaultServeMux -} - -// If set the true then panics will not be caught to return HTTP 500. -// In that case, Route functions are responsible for handling any error situation. -// Default value is false = recover from panics. This has performance implications. -// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true) -var DoNotRecover = false - -// Add registers a new WebService add it to the DefaultContainer. -func Add(service *WebService) { - DefaultContainer.Add(service) -} - -// Filter appends a container FilterFunction from the DefaultContainer. -// These are called before dispatching a http.Request to a WebService. 
-func Filter(filter FilterFunction) { - DefaultContainer.Filter(filter) -} - -// RegisteredWebServices returns the collections of WebServices from the DefaultContainer -func RegisteredWebServices() []*WebService { - return DefaultContainer.RegisteredWebServices() -} diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. 
- -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonpointer/LICENSE b/vendor/github.com/go-openapi/jsonpointer/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go deleted file mode 100644 index 39dd012c2..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonpointer -// repository-desc An implementation of JSON Pointer - Go language -// -// description Main and unique file. 
-// -// created 25-02-2013 - -package jsonpointer - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/go-openapi/swag" -) - -const ( - emptyPointer = `` - pointerSeparator = `/` - - invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator -) - -var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() - -// JSONPointable is an interface for structs to implement when they need to customize the -// json pointer process -type JSONPointable interface { - JSONLookup(string) (interface{}, error) -} - -type implStruct struct { - mode string // "SET" or "GET" - - inDocument interface{} - - setInValue interface{} - - getOutNode interface{} - getOutKind reflect.Kind - outError error -} - -// New creates a new json pointer for the given string -func New(jsonPointerString string) (Pointer, error) { - - var p Pointer - err := p.parse(jsonPointerString) - return p, err - -} - -// Pointer the json pointer reprsentation -type Pointer struct { - referenceTokens []string -} - -// "Constructor", parses the given string JSON pointer -func (p *Pointer) parse(jsonPointerString string) error { - - var err error - - if jsonPointerString != emptyPointer { - if !strings.HasPrefix(jsonPointerString, pointerSeparator) { - err = errors.New(invalidStart) - } else { - referenceTokens := strings.Split(jsonPointerString, pointerSeparator) - for _, referenceToken := range referenceTokens[1:] { - p.referenceTokens = append(p.referenceTokens, referenceToken) - } - } - } - - return err -} - -// Get uses the pointer to retrieve a value from a JSON document -func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { - return p.get(document, swag.DefaultJSONNameProvider) -} - -// GetForToken gets a value for a json pointer token 1 level deep -func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { - return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) -} - -func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { - kind := reflect.Invalid - rValue := reflect.Indirect(reflect.ValueOf(node)) - kind = rValue.Kind() - switch kind { - - case reflect.Struct: - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) - if err != nil { - return nil, kind, err - } - return r, kind, nil - } - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return nil, kind, fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - return fld.Interface(), kind, nil - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - mv := rValue.MapIndex(kv) - if mv.IsValid() && !swag.IsZero(mv) { - return mv.Interface(), kind, nil - } - return nil, kind, fmt.Errorf("object has no key %q", decodedToken) - - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return nil, kind, err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) - } - - elem := rValue.Index(tokenIndex) - return elem.Interface(), kind, nil - - default: - return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken) - } - -} - -func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { - - if nameProvider == nil { - nameProvider = 
swag.DefaultJSONNameProvider - } - - kind := reflect.Invalid - - // Full document when empty - if len(p.referenceTokens) == 0 { - return node, kind, nil - } - - for _, token := range p.referenceTokens { - - decodedToken := Unescape(token) - - r, knd, err := getSingleImpl(node, decodedToken, nameProvider) - if err != nil { - return nil, knd, err - } - node, kind = r, knd - - } - - rValue := reflect.ValueOf(node) - kind = rValue.Kind() - - return node, kind, nil -} - -// DecodedTokens returns the decoded tokens -func (p *Pointer) DecodedTokens() []string { - result := make([]string, 0, len(p.referenceTokens)) - for _, t := range p.referenceTokens { - result = append(result, Unescape(t)) - } - return result -} - -// IsEmpty returns true if this is an empty json pointer -// this indicates that it points to the root document -func (p *Pointer) IsEmpty() bool { - return len(p.referenceTokens) == 0 -} - -// Pointer to string representation function -func (p *Pointer) String() string { - - if len(p.referenceTokens) == 0 { - return emptyPointer - } - - pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) - - return pointerString -} - -// Specific JSON pointer encoding here -// ~0 => ~ -// ~1 => / -// ... and vice versa - -const ( - encRefTok0 = `~0` - encRefTok1 = `~1` - decRefTok0 = `~` - decRefTok1 = `/` -) - -// Unescape unescapes a json pointer reference token string to the original representation -func Unescape(token string) string { - step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) - step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) - return step2 -} - -// Escape escapes a pointer reference token string -func Escape(token string) string { - step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) - step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) - return step2 -} diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonreference/LICENSE b/vendor/github.com/go-openapi/jsonreference/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/jsonreference/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go deleted file mode 100644 index 3bc0a6e26..000000000 --- a/vendor/github.com/go-openapi/jsonreference/reference.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonreference -// repository-desc An implementation of JSON Reference - Go language -// -// description Main and unique file. -// -// created 26-02-2013 - -package jsonreference - -import ( - "errors" - "net/url" - "strings" - - "github.com/PuerkitoBio/purell" - "github.com/go-openapi/jsonpointer" -) - -const ( - fragmentRune = `#` -) - -// New creates a new reference for the given string -func New(jsonReferenceString string) (Ref, error) { - - var r Ref - err := r.parse(jsonReferenceString) - return r, err - -} - -// MustCreateRef parses the ref string and panics when it's invalid. -// Use the New method for a version that returns an error -func MustCreateRef(ref string) Ref { - r, err := New(ref) - if err != nil { - panic(err) - } - return r -} - -// Ref represents a json reference object -type Ref struct { - referenceURL *url.URL - referencePointer jsonpointer.Pointer - - HasFullURL bool - HasURLPathOnly bool - HasFragmentOnly bool - HasFileScheme bool - HasFullFilePath bool -} - -// GetURL gets the URL for this reference -func (r *Ref) GetURL() *url.URL { - return r.referenceURL -} - -// GetPointer gets the json pointer for this reference -func (r *Ref) GetPointer() *jsonpointer.Pointer { - return &r.referencePointer -} - -// String returns the best version of the url for this reference -func (r *Ref) String() string { - - if r.referenceURL != nil { - return r.referenceURL.String() - } - - if r.HasFragmentOnly { - return fragmentRune + r.referencePointer.String() - } - - return r.referencePointer.String() -} - -// IsRoot returns true if this reference is a root document -func (r *Ref) IsRoot() bool { - return r.referenceURL != nil && - !r.IsCanonical() && - !r.HasURLPathOnly && - r.referenceURL.Fragment == "" -} - -// IsCanonical returns true when this pointer starts with http(s):// or file:// -func (r *Ref) IsCanonical() bool { - return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL) -} - -// "Constructor", parses the given string JSON reference -func (r *Ref) parse(jsonReferenceString string) error { - - parsed, err := url.Parse(jsonReferenceString) - if err != nil { - return err - } - - r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) - refURL := r.referenceURL - - if refURL.Scheme != "" && refURL.Host != "" { - r.HasFullURL = true - } else { - if refURL.Path != "" { - r.HasURLPathOnly = true - } else if refURL.RawQuery == "" && refURL.Fragment != "" { - r.HasFragmentOnly = true - } - } 
- - r.HasFileScheme = refURL.Scheme == "file" - r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/") - - // invalid json-pointer error means url has no json-pointer fragment. simply ignore error - r.referencePointer, _ = jsonpointer.New(refURL.Fragment) - - return nil -} - -// Inherits creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *Ref) Inherits(child Ref) (*Ref, error) { - childURL := child.GetURL() - parentURL := r.GetURL() - if childURL == nil { - return nil, errors.New("child url is nil") - } - if parentURL == nil { - return &child, nil - } - - ref, err := New(parentURL.ResolveReference(childURL).String()) - if err != nil { - return nil, err - } - return &ref, nil -} diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. 
All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/spec/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go deleted file mode 100644 index 294cbccf7..000000000 --- a/vendor/github.com/go-openapi/spec/bindata.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by go-bindata. -// sources: -// schemas/jsonschema-draft-04.json -// schemas/v2/schema.json -// DO NOT EDIT! 
- -package spec - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { - return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _jsonschemaDraft04JSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xcc\x57\x3b\x6f\xdb\x30\x10\xde\xfd\x2b\x04\xa5\x63\x52\xb9\x40\xa7\x6c\x45\xbb\x18\x68\xd1\x0c\xdd\x0c\x0f\xb4\x75\xb2\x19\x50\xa4\x42\x51\x85\x0d\x43\xff\xbd\xa4\xa8\x07\x29\x91\x92\x2d\xbb\x48\xb4\xc4\xe1\xbd\xbe\x3b\xde\x8b\xe7\x45\x20\xbf\x10\xc7\xe1\x73\x10\x1e\x84\xc8\x9e\xa3\xe8\x35\x67\xf4\x29\xdf\x1d\x20\x45\x9f\x19\xdf\x47\x31\x47\x89\x78\x5a\x7e\x8d\xf4\xd9\x43\xf8\xa8\x85\x3e\xe9\xff\x67\x48\xc6\x90\xef\x38\xce\x04\x66\x54\x49\x7f\x67\x1c\x02\xcd\x12\xa4\x20\x50\xad\xa2\xe3\x4e\x30\xc5\x8a\x39\x97\xdc\x1a\x71\x45\xd0\x6c\xdf\x38\x47\x27\x8b\x50\x11\xc5\x29\x03\xa5\x1c\x55\xe4\x47\x9b\x98\x62\xba\x12\x90\x2a\x7d\x5f\x7a\x24\x5c\x9f\x9f\xa5\x83\x1c\x12\xa5\xe2\x21\x0c\xca\x96\xa9\xec\xf8\xc3\x8c\xe5\x12\xd7\x5f\x58\x51\x01\x7b\xe0\x7e\x10\xb8\x66\x18\xc2\xc0\x69\x91\x4a\x8e\xe5\x25\xfa\x7f\x40\x82\x0a\x22\x96\x43\x3b\x88\x90\xdf\x0a\xea\xda\x82\x1d\x19\x91\x8b\xfa\x58\xa5\x21\xc5\x1c\x6b\x9d\x0a\x42\x50\x06\x1b\x27\x8c\x1c\xa7\x19\x81\x3f\xd2\x97\x7c\x68\x1a\x68\xe5\xc0\xba\x8d\x74\x10\x6e\x19\x23\x80\xa8\xfa\xd9\x3a\x1e\x84\xb4\x20\x44\xff\x4d\xb7\xfa\x84\x6d\x5f\x61\x27\xd4\xaf\x5c\x70\x4c\xf7\xa1\xcf\x7e\x45\x9d\x73\xcf\xc6\x65\x36\x7c\x8d\xa9\xf2\xf2\x94\x28\x28\x7e\x2b\xa0\xa1\x0a\x5e\x40\x07\x73\x61\x80\x6d\x6d\x34\x8e\xe9\xd3\x8c\xb3\x0c\xb8\xc0\xbd\xe8\xe9\xa2\xf3\x78\x53\xa3\xec\x01\x49\x18\x4f\x91\xba\xab\xb0\xe0\x38\x74\xc6\xaa\x2b\xca\x7b\x6b\x16\x58\x10\x98\xd4\xeb\x14\xb5\xeb\x7d\x96\x82\x26\x4b\xcf\xe6\x71\x2a\xcf\xb0\x4c\xcd\x2a\xf7\x3d\x6a\x9b\x74\xf3\x56\x5e\x8f\x02\xc7\x1d\x29\x72\x59\x28\xbf\x5a\x16\xfb\xc6\x4d\xfb\xe8\x58\xb3\x8c\x1b\x77\x0a\x77\x86\xa6\xb4\xb4\xf5\x64\x93\xbb\xa0\x24\x88\xe4\x1e\x84\xad\x13\x37\x21\x9c\xd2\x72\x0b\x42\x74\xfc\x09\x74\x2f\x0e\xbd\x9e\x3b\xd5\xbc\x2c\x1f\xaf\xd6\xd0\xb6\x52\xbb\xdf\x22\x21\x80\x4f\xe7\xa8\xb7\x78\xb8\xd4\x7d\x74\x07\x13\xc5\x71\x05\x05\x91\xa6\x91\xf4\x7b\x38\x3d\xe9\x1e\x6e\x1d\xab\xef\x3c\x0c\x74\xbf\x7d\xd5\x6c\xce\x89\xa5\xbe\x8d\xf7\x66\xce\xee\xd1\x86\x67\x80\x34\xad\x8f\xc3\xb3\xae\xc6\x1c\xe3\xb7\xc2\x96\xd9\xb4\x72\x0c\xf0\xab\x92\xe9\x5a\x05\xee\x5c\xb2\x87\xc6\x7f\xa9\x9b\x17\x6b\xb0\xcc\x75\x77\x96\x16\xb7\xcf\x1c\xde\x0a\xcc\x21\x1e\x53\x64\x0e\x73\x4f\x81\xbc\xb8\x07\xa6\xe6\xfa\x50\x55\xe2\x5b\x4d\xad\x4b\xb6\xb6\x81\x49\x77\xc7\xca\x68\x1a\x90\x67\xd7\x78\x3f\x3c\xba\xa3\x8e\xdd\xe8\x7b\xc0\x8a\x21\x03\x1a\x03\xdd\xdd\x11\xd1\x20\xd3\x46\
x72\x55\x7d\x93\x0d\xb3\xcf\x34\x52\x46\x03\xd9\x8d\x75\xe2\x0e\x42\xbd\xb9\xdf\xe9\xdd\x34\xb6\x24\x9b\x5b\xa4\x56\x3f\x6b\xac\xd8\x01\x30\x1e\x25\xce\x3a\x77\xc6\x73\xd4\xbd\x96\xc9\xf5\x06\xbc\xca\xf8\x44\xb0\x2e\x09\x5a\xf3\xf5\x3a\x94\x7b\xb7\xa8\x9f\x7f\x17\x8e\x58\x53\xb2\x0e\xfc\xf5\x92\x8c\xc2\x4c\x49\xca\x84\xe7\x7d\x5d\xb6\x2f\x7e\x4f\x79\xba\x96\xe6\x75\xb7\x87\x9b\x0d\xdc\xb5\xbd\xae\xbb\x85\xb8\x8e\x64\x67\xd1\xe8\x18\xe5\xe2\x5f\x00\x00\x00\xff\xff\x4e\x9b\x8d\xdf\x17\x11\x00\x00") - -func jsonschemaDraft04JSONBytes() ([]byte, error) { - return bindataRead( - _jsonschemaDraft04JSON, - "jsonschema-draft-04.json", - ) -} - -func jsonschemaDraft04JSON() (*asset, error) { - bytes, err := jsonschemaDraft04JSONBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4375, mode: os.FileMode(420), modTime: time.Unix(1441640690, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _v2SchemaJSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5d\xdf\x73\xdc\xb6\xf1\x7f\xcf\x5f\x81\xb9\x78\x46\xf6\x24\xd6\x39\xfe\x7e\x5f\xea\x97\x8c\x1a\x39\x89\x5a\xbb\xd2\xf8\x9c\xf6\xc1\x95\x67\x70\x24\x4e\x87\x84\x3f\x2e\x04\x29\xe9\xea\xea\x7f\xef\x02\xfc\x71\x04\x01\x90\x20\x89\x3b\x9d\x6d\x7a\xa6\x8d\x8e\x04\x16\x8b\xc5\x62\xf7\xb3\x0b\x10\xf8\xf4\x0d\x42\xb3\x94\xa6\x01\x99\xbd\x42\xb3\x33\xf4\xb7\xc5\xe5\x3f\xd0\xc2\x5b\x93\x10\xa3\x55\x9c\xa0\xc5\x1d\xbe\xb9\x21\x09\x7a\x79\xfa\x02\x9d\x5d\x5d\x9c\xce\xbe\xe7\x15\xa8\xcf\x4b\xaf\xd3\x74\xf3\x6a\x3e\x67\x79\x91\x53\x1a\xcf\x6f\x5f\xce\x99\xa8\x7b\xfa\x3b\x8b\xa3\x6f\xf3\xc2\x4f\xf2\x47\xb5\x1a\xfc\xe5\xf3\xa2\x60\x9c\xdc\xcc\xfd\x04\xaf\xd2\xe7\x2f\xfe\xbf\xa8\x5c\xd4\x4b\xb7\x1b\xc1\x54\xbc\xfc\x9d\x78\x69\xfe\x2c\x21\x7f\x66\x34\x21\xbc\xf9\x0f\xf0\x1b\x9e\x14\xad\x8b\xd7\x9c\xb3\x68\x15\x97\x7f\x6f\x70\xba\x66\x33\xf8\xfb\x5a\xd4\xc5\xbe\x4f\x53\x1a\x47\x38\xb8\x4a\xe2\x0d\x49\x52\x4a\x18\xd0\x59\xe1\x80\x11\x51\x00\xca\xa7\x24\x89\xa4\xb7\x9f\x72\x52\x1f\xef\x9f\x57\x3f\x78\x97\x12\xb2\xe2\xac\x7d\x3b\xf7\xc9\x8a\x46\x82\x2c\x9b\xdf\x92\xc8\x8f\x93\xd7\xf7\x29\x89\x18\x3c\x98\x89\xd2\x0f\xf0\xff\x0f\x39\x79\x0d\xdd\x92\xfb\x1a\xed\xb2\xdb\x2c\x4d\x68\x74\x53\xf4\x05\x9e\x93\x28\x0b\xab\x6e\x8b\x27\x30\x26\xb3\xe2\xd7\x75\x55\xcc\x27\xcc\x4b\xe8\x86\x73\xc4\xa9\xbc\x5f\x93\x6a\x0c\x6f\x49\xc2\xf9\x42\xf1\x0a\xa5\x6b\xca\x90\x1f\x7b\x59\x48\xa2\xf4\xb4\xe0\xb4\x2e\xc2\xce\xce\x8a\x52\x52\xbd\x75\xcc\x52\x9b\x8e\x14\x62\xe6\xaf\x3e\x7e\xf8\xf8\xe9\x61\x8e\x5e\xfd\x1b\xfe\x5d\x7f\xf7\xf4\xc7\x57\xf0\x97\xff\xdd\xb3\x1f\x9f\xcc\xda\xfa\xc3\x1b\x42\x4f\x23\x1c\x12\x04\x1a\x4a\x37\xcf\xf2\x1e\x11\xa1\xa0\xe8\xf5\x3d\x0e\x37\x01\x79\x85\x4e\x76\x8a\x79\x22\x73\xba\xc4\x8c\x5c\x81\x72\xf4\xe5\x76\xde\xca\x16\xa7\x8a\xb8\xce\xa1\x34\xd6\xb1\x33\xc7\x1b\x7a\xd2\x90\xb5\x50\xf8\x9a\x42\x18\xc5\x5d\x14\x7c\x43\x41\xc6\x12\x05\x0f\xde\x66\x0d\x12\x0d\xe6\xce\x50\x00\xd5\xb8\x90\xde\x5e\xbc\x7d\x8d\x78\x4f\x19\xc2\x9e\x47\x36\x29\xf1\xd1\x72\x5b\x31\xbb\xeb\x9e\x9e\x89\x90\xf8\x14\xbf\x87\xea\x2a\x1b\xa0\xdc\x7e\xe6\xf5\x67\xa3\x68\x1a\x79\x38\x42\x05\x8d\x51\x6c\x88\x29\xdf\x29\xcd\xca\x32\xec\x6a\xd6\x5e\x77\xd7\xaf\x17\x6e\xb4\x9f\x80\x5a\x82\xc2\x58\x31\x51\x94\x3d\x37\x51\x4b\x08\xdb\xc0\x43\x1b\xfd\x28\x8b\x1a\x69\x31\xe2\x65\x09\x4d\xb7\x16\xaa\x56\x96\xd4\xd6\x3f\xef\x23\x27\x5d\x25\x89\x6a\x8a\x6f\x98\x6e\x16\xe2\x24\xc1\xdb\x9d\x1e\xd0\x94\x84\xf5\x72\xc6\x06\x81\x5e\x69\x12\x1f\xaa\xda\x59\x44\xff\xcc\xc8\x45\x41\x23\x4d\x32\x22\xf1\x40\xee\xf9\x04\xc7\xc1\x79\xe
c\x59\x74\x49\x2a\xdd\xb0\xf0\x3a\x1d\x52\xcc\xa9\xc6\xad\xe9\x66\xcb\x2f\x24\x22\x09\x0e\x10\xaf\x9e\x84\x98\x3f\x46\x78\x19\x67\xa9\x66\xb6\x2a\x5e\x51\x3c\x2d\xcc\x7d\x55\xac\x72\xf4\x8a\xcf\xe8\xf2\x8c\xe5\xd4\x32\x78\x47\xf1\x5a\xf6\x90\x2d\x02\xd4\x7a\xc9\x52\x8e\xf2\xc0\x69\x3c\x66\xad\x1b\x8d\xd6\x0c\x06\x5c\x27\xdb\x33\x94\xab\x04\xc2\x91\x0f\x56\x87\x78\x14\x2c\xb7\x20\x5a\xf7\x24\x35\xce\xbe\x57\xa5\x3a\xa6\x75\x06\x20\x27\x4a\xa9\x57\x79\x64\x70\xed\x4b\x70\xd0\x9d\x8d\xcb\x94\x86\x33\x10\xc4\x11\x07\x04\xb5\xe7\x92\x0b\x5d\xac\xe3\x2c\x00\xcf\x40\x90\x4f\x57\x2b\x92\x00\x46\x40\xab\x24\x0e\x45\x09\x21\xa7\x53\x84\x7e\xa1\xe9\xaf\xd9\x12\xfd\x1c\xe0\xdb\x18\x74\x0f\xbd\xc5\xc9\x1f\x7e\x7c\x17\x21\x40\x16\x38\x08\xe2\x3b\xe2\x1b\x7a\x01\x6a\x14\xb2\xcb\xd5\x82\x24\xb7\xd4\x1b\x33\x8e\xdc\xeb\x0a\x62\x9c\x7b\x96\x93\x13\xa8\xb5\x5d\x8a\xe0\x32\x53\xec\xa5\x76\xea\x5a\x16\xd6\x52\x0a\xa0\x41\x30\xba\x76\x94\xca\xc2\xaa\xc2\x37\x1d\x7a\x83\x3b\x5b\x93\xf1\x53\x5e\x53\x32\x19\xa5\x34\x60\x60\x40\xd7\x24\x0d\xeb\x39\xfd\x0d\x73\x91\xc3\xb0\x91\x43\x48\x7d\x50\x30\xba\xda\x42\x59\x94\xa3\xba\x9c\xcb\x42\x12\x08\xda\x85\x80\x61\x0e\x91\x02\x8e\xe8\x7f\x44\xbf\x0c\x23\x9b\x25\xc1\x48\x5e\x7e\x7b\xf7\x06\x6d\x62\x0a\xfc\x00\x33\x05\x8e\xf3\x54\xb9\x9e\xca\x84\xf2\xe7\x9c\x06\xb8\x3b\x3d\x6b\x30\xe5\xe9\x58\xe6\x04\x0d\x04\xc3\x05\xde\x9e\x59\x49\xc9\xc0\x65\xce\x4c\x9b\xe5\x3d\x98\xb1\x97\x74\x5f\x9d\x4f\x46\xdd\xd7\xfb\x3c\xa1\x8d\x03\xfd\xdb\xfe\x14\xbc\xae\xd4\x45\x17\x05\xfc\x3d\x45\x17\xe9\x09\x43\x24\xf2\xe2\x2c\xc1\x37\x60\x44\x41\xe3\x32\xc6\xfd\x12\xba\x5c\x00\x28\x8e\x43\x18\x08\xba\x0c\xaa\x6a\x07\xd5\xfb\xaa\x4d\x2b\x5d\x3f\x16\x1d\x52\x42\x00\x4b\xeb\xf9\x8e\x04\x20\xeb\xdb\x3c\x84\x63\xa5\x0c\x68\xe4\xd3\x5b\xea\x67\x80\xc4\x80\x0d\x21\x21\x76\x8a\x40\x62\x5b\x14\x66\x10\xcd\x80\x8f\x4c\xca\x8a\x45\x95\x93\x32\xbc\x3c\x39\x55\xc2\xc8\x3d\x0a\xa3\xa6\x0e\x10\xa8\x5a\x11\xe3\x3d\xe5\xb0\xb8\x6d\x14\xdb\xe6\x8e\x4d\x00\x65\x92\xbe\x81\x6e\x27\xc2\x2f\x92\x49\x0a\x9f\x8d\xd1\xbc\x8c\x44\x72\x20\x04\x68\x92\xe7\xb4\xf2\xf6\x59\x81\x79\x96\x42\xcd\x61\xb0\x72\x72\x0c\xc6\x91\x3f\x29\x82\x69\xbf\x00\x86\x22\x1c\x95\x23\xe4\x86\xaa\x69\x22\xb8\x3d\xf6\xbd\x6a\xaf\x7f\xf7\x13\x02\x38\x97\x81\x9f\x15\x8e\x81\x09\x5c\x50\x0b\x56\xa5\x6e\xe9\x62\xc9\x3d\xf6\xaa\x6c\x6e\xbf\x9d\x32\x45\x79\x3d\x7b\x23\xfb\x8c\x06\x83\x6a\xac\x56\xb6\x5a\xe5\xda\xc4\xcb\x2e\x2f\xc6\xcd\xb9\xe2\xc4\x4c\xfe\xc9\x3e\x26\x70\xe1\x3a\x8e\xdd\xfa\x93\x3c\xdd\x36\x66\x88\x95\x04\x41\x48\x43\xf2\x3e\xa7\xd1\x99\x2e\xd4\xb8\xd6\x2a\xdb\x55\x42\x80\x5f\xdf\xbf\xbf\x42\x21\x40\x38\x70\xf9\x0d\x8b\xc2\xd9\xc0\x8d\xa1\xec\x09\x81\x76\x49\xa3\x81\x38\xe8\x88\xe2\x7c\x39\x3b\x24\x09\x43\xce\x10\x89\x57\x6a\x96\x48\x37\x54\xb5\x97\x0f\x52\x75\x43\x9a\xa8\x51\x70\x06\x0e\x22\xc4\xc9\x76\x54\xfc\xbd\x4c\x28\x81\x88\x35\xa7\x54\xaa\x45\x35\xf6\x8f\x16\xfc\x57\x1c\x7c\x3f\x22\xba\x37\x18\x5a\xf1\xce\x36\xa5\xd6\xa4\x59\x31\x76\xe1\xbb\x48\xfb\x14\x01\x27\xdd\xa5\x5c\xba\x64\xaf\x49\x6f\x1b\x84\xdb\x33\xc5\xdd\x22\x16\x4d\x9a\xbb\xc9\x96\x26\xf9\x3f\x88\xad\x82\x8e\x2b\xb6\xb4\x59\xf0\x16\x92\xbb\xf2\x66\x9a\xba\x5c\x78\x0b\x49\xc5\x0a\x36\x26\xb1\xb2\xee\xd2\x42\x4b\x59\x7b\x69\x52\xf3\x39\x0e\xf1\x70\x4a\x8c\xda\xb9\x8c\xe3\x80\xe0\xa8\xa9\x9e\x2b\x9c\x05\xa9\x84\xa6\x15\x46\xd5\xb4\x7d\x1b\xa7\x52\xea\x5e\xd0\x32\xc6\x48\x02\xf8\xbb\x02\x42\x47\xe4\x34\x0a\xc2\xbd\x81\xd0\x0d\xb1\xcc\x08\xee\x7c\xb4\x5e\xf9\x33\x47\x74\xe4\xe5\xd4\xe1\x84\x7c\x12\xc0\xdc\x72\x42\x2a\xde\x34\xa3\x81\xe1\xb4\xd6\x04\x2b\xd3\x65\x98\xa0\x70\x
ea\xad\x1d\x51\x72\x64\xb7\xb4\x93\x4e\xbb\x9a\x67\x9d\x9c\xc8\xeb\x56\x61\x2c\x4f\x29\x31\x61\xbb\x09\x05\x4b\x9e\xf0\x44\x04\x8e\xb6\xe8\x16\x07\xd4\xcf\x11\x26\x83\x60\x23\x83\x32\xb1\x2f\xc2\xa6\x93\xc2\xdc\xd4\xb3\x12\x21\x95\xa7\xec\x0f\x6e\x67\xfd\xd3\x0f\x2f\x9e\xff\xe5\xfa\xd3\xff\x3d\x3c\x7b\xf2\xdf\x8f\x4f\x8b\xf6\x9f\x3d\xe9\x67\xc1\xff\x89\x83\x8c\x18\xf2\x1c\x7b\x30\x2b\x51\x9c\x36\x40\xa8\x7e\x84\x2c\x65\xd4\x29\x25\x6d\x37\xfa\x77\x64\xd7\x95\x2e\xf5\xcb\xe5\x59\x53\xc1\x38\x22\x97\x2b\x29\x86\xe8\x31\x3a\xda\x81\xb1\xa8\xcf\xb7\x00\xbd\x23\x62\x6d\xc9\xd3\x2c\x89\x5c\x6b\x59\x1f\x1e\x14\xd5\xa7\x53\xd9\xc4\xbe\x23\xeb\x6a\xdb\x93\x54\x53\x95\x76\x53\x62\x2d\x52\x93\x93\x5f\x4a\x93\x3d\x28\xad\x68\x40\x16\x3a\x6a\xb5\x5f\xd7\x46\xbb\x6d\x6d\x21\xcb\xc2\x86\x48\x41\x89\xd5\x5b\x48\x55\xa5\x5b\x26\xef\x91\x61\x15\x49\x89\x55\xb9\x39\xcf\xa4\xe5\x4d\xcc\x5a\x9a\x77\x06\xf8\xf4\xd3\x4c\x90\xb4\x9e\x5f\xa9\x9c\x53\x91\x98\xd2\x85\x73\xca\x0e\x38\xf1\x54\x53\x92\x9b\x71\xb1\xa2\xde\x7c\x4a\xa3\x94\xdc\xa8\x8f\x75\xe8\x1c\x95\x29\x86\xce\x09\x51\xa5\xc4\x7a\x5b\x08\x5d\xc2\xc2\x04\x35\x12\x1a\x52\xbe\xca\xc0\xf2\x04\x85\x96\x9e\x17\x07\x01\x0c\x25\x54\xf8\x59\xcb\x93\x69\x85\xbb\x51\xcb\x80\x22\xcb\x60\xc5\x82\x64\x59\x58\x4b\x29\xc4\xf7\x34\xcc\x42\x3b\x4a\x65\x61\x83\x01\xf1\x82\x8c\x81\x50\xde\xf6\x21\xa9\xd4\xd2\x73\x09\xe5\xed\xb9\x2c\x0a\x77\x70\xd9\x87\xa4\x52\xcb\x24\xcb\x37\x24\xba\x49\x2d\xf1\xef\xae\xb8\xa9\xcf\xbd\xa8\x55\xc5\x4d\xb8\xbc\xd8\x39\x69\xb7\x14\x25\x0a\x9b\x7a\x79\x61\x3f\x55\xaa\xd2\xa6\x3e\xf6\xa1\x55\x96\xd6\xd2\x92\x33\x86\x16\xe4\xea\x15\xf4\xba\x12\x59\xeb\x47\x64\xd4\x09\x98\x79\x14\x3c\xe5\xa5\x12\x06\x1b\xfa\xb8\x2b\x6f\x98\xf9\xfd\x61\x90\xe2\x99\x1f\xc9\xe9\x36\x8b\xb7\xec\x4e\x85\xe0\xa9\x70\x54\x5b\x1e\x3a\x25\x62\x25\xfc\x0e\x82\x2b\x74\xff\x9c\x67\x3d\x45\x64\xd5\xbd\x6b\x86\xe7\x8d\x35\x65\x8c\xbb\x0f\x97\xb1\xbf\xbd\xaa\xd6\xf5\xc6\x6d\x7c\xa8\xbb\x16\x69\xdf\x9f\x8c\x1b\xaf\x8f\x31\x6d\xe3\x2a\xbb\x9d\xa7\xd6\x35\xc9\xed\x2a\x58\xe7\xcb\xf7\x94\xc7\xc5\x7c\x8f\x9b\xd8\x3d\x43\x21\x8a\x2e\xd0\x25\x2f\x9d\xb1\x71\xfb\xdb\x1c\xef\x18\xd9\x31\x6e\x40\x11\x63\x04\x76\xce\x09\x83\x95\x2b\x12\xc2\x41\xec\x61\xbd\xd0\x6c\xa0\x18\xd7\xe5\x6e\xbc\x54\x53\xe0\x3e\xb9\x52\x13\xdb\x77\x6b\x22\x12\x20\x71\x82\x20\x76\xcf\xbf\x6c\xa8\xd8\xe6\x83\x55\xb6\xc7\x4b\xe4\x09\x2c\x1c\x9c\x0e\xc8\xc4\x6a\xc3\x39\xbb\x38\x6d\xc4\xb6\x8a\x1c\xb7\x57\x16\x62\x91\x2d\x17\x4d\x46\x8e\x2d\xec\xe9\x9c\xeb\x9f\xa9\x06\x1c\xcf\x44\x93\x03\x3d\xfe\x4f\x3f\xd5\x26\xa3\x3a\xd4\xa8\x1e\x3e\x36\x35\x04\xa1\x86\x90\x75\x8a\x4d\xa7\xd8\x74\x8a\x4d\x5b\x7b\x3d\xc5\xa6\x5f\x68\x6c\xfa\x4d\xfd\xbf\x25\x4e\x02\xde\x93\xed\x04\x93\x26\x98\x54\x7b\x2a\x74\x62\x42\x49\xfb\x43\x49\x82\x99\xd7\xe1\x26\xdd\x36\x57\x15\xa5\x96\x6d\x76\xbf\xb4\xb1\x25\x9a\x61\x88\xc1\x94\xe2\x49\x19\x5c\x53\xdb\xe5\xb6\x60\x38\x0a\xb6\x5c\x6f\x45\xc2\x86\xaf\x8a\x73\xa6\x78\xce\x26\x33\x7d\x33\x31\x21\xbc\xa3\x44\x78\xff\x82\x01\x7c\xcb\xad\xfe\x04\xf5\xd0\x04\xf5\x26\xa8\x37\x41\x3d\xd4\x84\x7a\xdc\xe4\x9d\xe3\x14\x4f\x68\x6f\x42\x7b\xb5\xa7\xa5\x5a\x4c\x80\x6f\x02\x7c\x3a\xde\x3f\x0f\xc0\xd7\x78\xc8\xf7\x69\x4d\x20\x10\x4d\x20\x70\x02\x81\x5d\xbd\x9e\x40\xe0\xd7\x04\x02\xf9\x27\x2c\x9f\x27\x00\x34\x7d\xb6\x59\x3c\x2d\x1e\x75\x6f\x9f\x1c\x04\x18\xb5\x4e\x4d\xfa\xd6\xb1\xd6\xb4\xa8\xe1\x1c\x62\x1e\x39\x8c\xe4\x8a\x35\x41\xc8\x69\x65\xb5\xfa\xf7\x75\x40\xae\x09\x69\xa1\x09\x69\x4d\x48\x6b\x42\x5a\xa8\x89\xb4\xa2\x38\xfa\xeb\x21\x36\xa9\xea\x3f\x1e\x19\xf4\x75\x9a\x71\xd3\x9c\x4e\x74\x16\xf4\x5a\x32\x8e\x03\
x29\x9a\x96\xab\x07\x92\x33\xa0\x61\x65\x64\xaf\x1b\x18\x5a\x33\xa4\x83\x04\x2e\xef\x62\x1e\xd8\x09\x45\xd1\x3a\xd8\x57\x76\x64\xda\x7e\x4d\x7b\x06\x80\x2b\x47\x8c\x94\xd5\x8f\xe1\x04\x04\x83\xa5\x13\xd0\x73\x3a\x07\x3d\xc6\x4b\x09\x17\x5c\x7f\xe7\x3e\x1c\xb8\x68\x8f\x5d\x2d\xfb\x67\x79\xb4\xfb\x7c\xd7\x9d\xb9\x74\x1a\xad\x35\xbc\x1e\xd1\xa0\xe6\x2b\xd0\x5e\x70\x67\x50\x93\x6d\x98\xa8\xd3\x66\x0f\x68\xb1\xeb\x73\x8e\x0e\x20\x36\xa4\x45\x17\x68\x6d\x40\xbb\x4e\x20\xdd\x90\xfe\xba\xc0\x7d\xa3\xfa\x3b\x0a\x1c\xda\xb6\x2c\xf9\x97\x98\x89\x38\xe4\xa2\x88\x99\x86\x01\x49\x07\x2d\x9f\xe7\xf3\xe9\xc5\x20\xf0\x39\x40\xe6\xa3\x10\xea\x3e\x25\xbd\xef\x86\xdb\x05\x6d\x81\x81\x07\x08\xbb\x13\x28\x83\xc0\xcd\xc7\x28\x1c\x42\xea\x07\x69\xbd\x5d\xf4\xa6\xb4\xdf\x18\x06\x72\xaf\x7f\x26\x67\x3e\x6c\x23\x94\x21\x96\xcc\x18\xc6\x74\x7e\xfa\xce\xcb\x44\xdb\xc3\x1e\xa1\xd0\xcc\xa8\xca\x95\x6b\xbf\x9a\x99\xd3\x0a\x4d\x3c\xe8\x01\x95\x26\x15\x36\x06\x4f\xd5\x02\x28\x8b\x94\xd3\xa3\x89\x51\xb7\xd0\x29\x1b\xb5\x1f\x94\x97\xfa\xb3\xfd\x7a\x32\x28\x0f\xd6\xa8\xa1\xc3\x41\xa0\xa2\xb6\x96\xb3\x09\x4d\x9d\x33\x1d\x5a\x68\xdb\x29\x3d\x77\x86\x98\xc1\x1c\x21\xe5\x7d\xea\x9e\x7b\x7d\x38\xeb\x27\x50\x9f\x72\x6c\x0e\x62\xc2\x69\x9c\x0c\x89\x4e\x12\x88\xf9\x2f\xa3\xc0\x78\x30\xe3\xe0\x23\xd8\xee\x43\xe5\x84\x54\xbd\x0c\x78\x41\x03\x26\x74\x7f\x24\x62\xb1\x02\x50\x17\xe8\x2e\x66\xb4\xfe\xba\xb0\x76\x0c\xcc\x17\x1e\x46\xbb\x38\xb0\x64\x0a\x9d\x8f\x26\x74\x7e\x14\x14\xe4\x66\xe9\xca\x6e\x4b\xcf\xde\xcc\xd9\xf1\x1a\xa3\xe6\x6a\x97\x85\x45\x1a\x75\xb3\xc3\x74\xfe\xd0\xb4\x12\xd9\x45\x69\x5a\x89\x9c\x56\x22\xa7\x95\xc8\xc7\x5b\x89\x7c\x04\xc8\x28\xf9\x24\xdd\xb5\x89\x63\x2f\x29\x2c\x69\xbe\xcb\x31\x0c\xbf\x16\x62\xa6\xf4\xb7\xe3\xd2\x42\x1d\x8d\xe1\xfe\x52\x75\x8a\x4a\x0c\xab\x77\x16\x56\x37\x4c\x98\xce\xd5\x97\x25\x2d\x87\x59\xfb\xf1\xf8\x16\x5b\x9d\xb4\x5d\x10\xa7\xed\x6e\x70\xe7\x65\x75\x7a\x3b\x04\x4a\x40\xef\x87\xd4\x04\x95\x4f\xe8\x32\x53\x0f\x6f\x1e\x0d\x02\xef\x12\xbc\xd9\xb8\x3a\xae\xfc\x58\xe6\x2a\xbf\xfc\xd3\x95\x06\xf5\xb9\x5e\xcc\xb5\xb6\x8d\x3c\x77\xd6\x19\xc0\x3f\x96\x71\xed\xb8\x7a\x76\xb8\xad\xd3\x9d\xc5\x6b\x95\xed\x5a\x62\x46\xbd\xb3\x2c\x5d\xf3\x7b\x24\xf2\xcd\xa6\x0b\xe5\xe8\xfd\x46\x0a\xcc\x8a\x30\xde\xd0\xbf\x93\xad\x1b\x5a\x31\x06\x06\x5f\x5e\x40\x60\x46\x3d\x9a\xba\xa4\x79\x85\x19\xbb\x8b\x13\xdf\x25\xcd\xb3\x0d\xe7\xd3\xa1\x28\x0b\xb2\x9e\x47\x18\xfb\x29\xf6\x89\x96\x6a\xf5\xf7\xb5\x56\xf3\xda\xc6\x79\xbf\x96\xe6\x31\x4e\xd2\x15\xbd\x75\xb9\xf5\xf9\xf8\x4c\x49\x63\x7e\x1d\x60\x0c\x1b\x28\xa2\xb1\xfd\xed\xc0\x23\x9c\x77\xbf\x7b\x88\x87\x7a\xae\x7e\xbb\xf8\x5b\xcf\x36\x6b\xe4\x42\x9c\x1f\xe5\x71\x7c\xba\x69\xb0\xd7\x87\xd5\xd1\x55\x10\xdf\x49\x77\x1c\x00\x4f\x71\x52\xdc\x27\xfb\x5b\x9f\x7b\xe9\xdc\x68\x6c\x2e\x14\x8b\x24\x18\xe7\x7b\x74\x6b\xb4\x10\x7e\x77\x7b\xcc\x83\x5e\xdb\x5e\x84\x22\xfa\xb0\xc8\x6b\x68\xa9\x29\x52\xee\xd1\x13\x8b\xbb\x87\x3f\xff\x59\xa1\x20\x8e\xc7\x9d\x15\x69\xfc\x07\xf9\xf2\x67\xc3\xa6\x10\xfa\xa1\x67\x43\x25\xdd\x69\x16\xc8\xb3\x40\x87\x91\xa7\x89\x50\xb4\xbc\xc7\x89\x80\x77\x72\x9f\xe6\xc2\xb1\xcc\x05\x35\xb0\x3b\x32\xa4\xf4\xf5\x4d\x93\x6a\x48\xbe\x30\xfc\x34\x4d\x42\xa4\x9f\x84\x8b\xe6\x28\x3a\x58\x78\x90\xbb\x2c\xb7\x2a\xdf\x3d\xea\x70\x49\xa6\xba\xe2\x59\x91\x6f\xc7\x3a\x4c\xe3\x0a\xc0\x6e\x96\x34\x5f\x4f\xef\x68\xa0\x88\x10\x9f\xf8\x28\x8d\xc5\xd9\x37\x08\x17\xf7\xf9\xe5\xf7\xb4\x06\x81\xf6\xfa\x89\x92\x37\xd9\x88\x69\xba\x3e\x38\xdd\xa9\xdc\x3b\x2f\x89\xc8\x9a\x8c\xe1\xce\x37\x6d\x1a\xce\x7a\x1d\xac\x76\x37\xeb\x20\xe1\xa7\x09\x8e\x18\xf0\xc4\x2f\xff\x48\x63\x2f\x0e\xca\xef\xd8
\xc5\x75\xff\x6d\xe2\x34\xce\x7e\x9d\x79\x14\x1b\x92\x64\x0b\xc1\x9f\x30\xf9\xd1\x9d\xf2\xbb\x66\xf0\x7a\xcb\xa6\x65\x3b\x86\x89\x79\x95\xf5\x99\xc7\x6e\xa5\xab\x44\xe4\x9f\xa9\xfc\x73\x43\x37\xba\x8b\xc7\x77\x4b\x47\x82\x5c\x2b\x97\xbb\xb3\x7f\x0e\xc5\x6e\xed\x41\x58\x3f\x74\xc8\x8e\xff\xe6\xd6\x3e\x47\xdb\xfa\x4a\xf2\x7a\x27\xe1\x74\x2b\xdf\xae\xa9\xe6\x16\x1b\x67\xdb\xf7\x2a\x03\xae\xdb\x1b\xe0\xf2\x6b\xb7\xaa\x21\x65\x47\x8e\xb3\x2f\xdc\xca\x26\x5a\x76\xff\xb8\xff\xaa\xad\xea\x97\xb2\x87\xc7\xd9\x97\x6c\x6a\xbf\x9c\xb6\xa5\xdf\x50\x54\x1b\x2f\x65\xeb\x8f\xfb\x2f\x7a\x6a\x52\xdc\x6b\x6b\xf2\x17\x3c\x3b\xac\xd0\xdc\x90\xe4\xec\xcb\xb4\x9a\x18\x95\xbd\x93\xfb\x94\xe2\x3e\x1b\xd3\x0b\x51\xbf\xe7\xc9\xe9\x57\x67\xd5\x44\x88\xdc\x29\x7f\xd4\x54\x78\x19\x0c\x59\x20\x68\x7d\x54\x2a\x78\x52\xfc\xd5\xa8\x4d\x32\xbd\x3e\x2c\x97\x61\xfa\x37\xfc\x7f\x0f\xff\x0b\x00\x00\xff\xff\x31\x8b\xeb\xb6\x54\x9c\x00\x00") - -func v2SchemaJSONBytes() ([]byte, error) { - return bindataRead( - _v2SchemaJSON, - "v2/schema.json", - ) -} - -func v2SchemaJSON() (*asset, error) { - bytes, err := v2SchemaJSONBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "v2/schema.json", size: 40020, mode: os.FileMode(420), modTime: time.Unix(1446147817, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "jsonschema-draft-04.json": jsonschemaDraft04JSON, - "v2/schema.json": v2SchemaJSON, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. 
-func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} - -var _bintree = &bintree{nil, map[string]*bintree{ - "jsonschema-draft-04.json": &bintree{jsonschemaDraft04JSON, map[string]*bintree{}}, - "v2": &bintree{nil, map[string]*bintree{ - "schema.json": &bintree{v2SchemaJSON, map[string]*bintree{}}, - }}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) -} diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go deleted file mode 100644 index f285970aa..000000000 --- a/vendor/github.com/go-openapi/spec/contact_info.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// ContactInfo contact information for the exposed API. 
-// -// For more information: http://goo.gl/8us55a#contactObject -type ContactInfo struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` - Email string `json:"email,omitempty"` -} diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go deleted file mode 100644 index eb1490b05..000000000 --- a/vendor/github.com/go-openapi/spec/expander.go +++ /dev/null @@ -1,626 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "net/url" - "reflect" - "strings" - "sync" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// ResolutionCache a cache for resolving urls -type ResolutionCache interface { - Get(string) (interface{}, bool) - Set(string, interface{}) -} - -type simpleCache struct { - lock sync.Mutex - store map[string]interface{} -} - -var resCache = initResolutionCache() - -func initResolutionCache() ResolutionCache { - return &simpleCache{store: map[string]interface{}{ - "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), - "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), - }} -} - -func (s *simpleCache) Get(uri string) (interface{}, bool) { - s.lock.Lock() - v, ok := s.store[uri] - s.lock.Unlock() - return v, ok -} - -func (s *simpleCache) Set(uri string, data interface{}) { - s.lock.Lock() - s.store[uri] = data - s.lock.Unlock() -} - -// ResolveRef resolves a reference against a context root -func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { - resolver, err := defaultSchemaLoader(root, nil, nil) - if err != nil { - return nil, err - } - - result := new(Schema) - if err := resolver.Resolve(ref, result); err != nil { - return nil, err - } - return result, nil -} - -// ResolveParameter resolves a paramter reference against a context root -func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) { - resolver, err := defaultSchemaLoader(root, nil, nil) - if err != nil { - return nil, err - } - - result := new(Parameter) - if err := resolver.Resolve(&ref, result); err != nil { - return nil, err - } - return result, nil -} - -// ResolveResponse resolves response a reference against a context root -func ResolveResponse(root interface{}, ref Ref) (*Response, error) { - resolver, err := defaultSchemaLoader(root, nil, nil) - if err != nil { - return nil, err - } - - result := new(Response) - if err := resolver.Resolve(&ref, result); err != nil { - return nil, err - } - return result, nil -} - -type schemaLoader struct { - loadingRef *Ref - startingRef *Ref - currentRef *Ref - root interface{} - cache ResolutionCache - loadDoc func(string) (json.RawMessage, error) -} - -var idPtr, _ = jsonpointer.New("/id") -var schemaPtr, _ = jsonpointer.New("/$schema") -var refPtr, _ = jsonpointer.New("/$ref") - -func defaultSchemaLoader(root interface{}, ref *Ref, cache ResolutionCache) (*schemaLoader, error) { - if cache == nil { - cache = resCache 
- } - - var ptr *jsonpointer.Pointer - if ref != nil { - ptr = ref.GetPointer() - } - - currentRef := nextRef(root, ref, ptr) - - return &schemaLoader{ - root: root, - loadingRef: ref, - startingRef: ref, - cache: cache, - loadDoc: func(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil - }, - currentRef: currentRef, - }, nil -} - -func idFromNode(node interface{}) (*Ref, error) { - if idValue, _, err := idPtr.Get(node); err == nil { - if refStr, ok := idValue.(string); ok && refStr != "" { - idRef, err := NewRef(refStr) - if err != nil { - return nil, err - } - return &idRef, nil - } - } - return nil, nil -} - -func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref { - if startingRef == nil { - return nil - } - if ptr == nil { - return startingRef - } - - ret := startingRef - var idRef *Ref - node := startingNode - - for _, tok := range ptr.DecodedTokens() { - node, _, _ = jsonpointer.GetForToken(node, tok) - if node == nil { - break - } - - idRef, _ = idFromNode(node) - if idRef != nil { - nw, err := ret.Inherits(*idRef) - if err != nil { - break - } - ret = nw - } - - refRef, _, _ := refPtr.Get(node) - if refRef != nil { - rf, _ := NewRef(refRef.(string)) - nw, err := ret.Inherits(rf) - if err != nil { - break - } - ret = nw - } - - } - return ret -} - -func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error { - tgt := reflect.ValueOf(target) - if tgt.Kind() != reflect.Ptr { - return fmt.Errorf("resolve ref: target needs to be a pointer") - } - - oldRef := currentRef - if currentRef != nil { - var err error - currentRef, err = currentRef.Inherits(*nextRef(node, ref, currentRef.GetPointer())) - if err != nil { - return err - } - } - if currentRef == nil { - currentRef = ref - } - - refURL := currentRef.GetURL() - if refURL == nil { - return nil - } - if currentRef.IsRoot() { - nv := reflect.ValueOf(node) - reflect.Indirect(tgt).Set(reflect.Indirect(nv)) - return nil - } - - if strings.HasPrefix(refURL.String(), "#") { - res, _, err := ref.GetPointer().Get(node) - if err != nil { - res, _, err = ref.GetPointer().Get(r.root) - if err != nil { - return err - } - } - rv := reflect.Indirect(reflect.ValueOf(res)) - tgtType := reflect.Indirect(tgt).Type() - if rv.Type().AssignableTo(tgtType) { - reflect.Indirect(tgt).Set(reflect.Indirect(reflect.ValueOf(res))) - } else { - if err := swag.DynamicJSONToStruct(rv.Interface(), target); err != nil { - return err - } - } - - return nil - } - - if refURL.Scheme != "" && refURL.Host != "" { - // most definitely take the red pill - data, _, _, err := r.load(refURL) - if err != nil { - return err - } - - if ((oldRef == nil && currentRef != nil) || - (oldRef != nil && currentRef == nil) || - oldRef.String() != currentRef.String()) && - ((oldRef == nil && ref != nil) || - (oldRef != nil && ref == nil) || - (oldRef.String() != ref.String())) { - - return r.resolveRef(currentRef, ref, data, target) - } - - var res interface{} - if currentRef.String() != "" { - res, _, err = currentRef.GetPointer().Get(data) - if err != nil { - return err - } - } else { - res = data - } - - if err := swag.DynamicJSONToStruct(res, target); err != nil { - return err - } - - } - return nil -} - -func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { - toFetch := *refURL - toFetch.Fragment = "" - - data, fromCache := r.cache.Get(toFetch.String()) - if !fromCache { - b, err := 
r.loadDoc(toFetch.String()) - if err != nil { - return nil, url.URL{}, false, err - } - - if err := json.Unmarshal(b, &data); err != nil { - return nil, url.URL{}, false, err - } - r.cache.Set(toFetch.String(), data) - } - - return data, toFetch, fromCache, nil -} -func (r *schemaLoader) Resolve(ref *Ref, target interface{}) error { - if err := r.resolveRef(r.currentRef, ref, r.root, target); err != nil { - return err - } - - return nil -} - -type specExpander struct { - spec *Swagger - resolver *schemaLoader -} - -// ExpandSpec expands the references in a swagger spec -func ExpandSpec(spec *Swagger) error { - resolver, err := defaultSchemaLoader(spec, nil, nil) - if err != nil { - return err - } - - for key, defintition := range spec.Definitions { - var def *Schema - var err error - if def, err = expandSchema(defintition, []string{"#/definitions/" + key}, resolver); err != nil { - return err - } - spec.Definitions[key] = *def - } - - for key, parameter := range spec.Parameters { - if err := expandParameter(¶meter, resolver); err != nil { - return err - } - spec.Parameters[key] = parameter - } - - for key, response := range spec.Responses { - if err := expandResponse(&response, resolver); err != nil { - return err - } - spec.Responses[key] = response - } - - if spec.Paths != nil { - for key, path := range spec.Paths.Paths { - if err := expandPathItem(&path, resolver); err != nil { - return err - } - spec.Paths.Paths[key] = path - } - } - - return nil -} - -// ExpandSchema expands the refs in the schema object -func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { - - if schema == nil { - return nil - } - if root == nil { - root = schema - } - - nrr, _ := NewRef(schema.ID) - var rrr *Ref - if nrr.String() != "" { - switch root.(type) { - case *Schema: - rid, _ := NewRef(root.(*Schema).ID) - rrr, _ = rid.Inherits(nrr) - case *Swagger: - rid, _ := NewRef(root.(*Swagger).ID) - rrr, _ = rid.Inherits(nrr) - } - - } - - resolver, err := defaultSchemaLoader(root, rrr, cache) - if err != nil { - return err - } - - refs := []string{""} - if rrr != nil { - refs[0] = rrr.String() - } - var s *Schema - if s, err = expandSchema(*schema, refs, resolver); err != nil { - return nil - } - *schema = *s - return nil -} - -func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) { - if target.Items != nil { - if target.Items.Schema != nil { - t, err := expandSchema(*target.Items.Schema, parentRefs, resolver) - if err != nil { - return nil, err - } - *target.Items.Schema = *t - } - for i := range target.Items.Schemas { - t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver) - if err != nil { - return nil, err - } - target.Items.Schemas[i] = *t - } - } - return &target, nil -} - -func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (schema *Schema, err error) { - defer func() { - schema = &target - }() - if target.Ref.String() == "" && target.Ref.IsRoot() { - target = *resolver.root.(*Schema) - return - } - - // t is the new expanded schema - var t *Schema - for target.Ref.String() != "" { - // var newTarget Schema - pRefs := strings.Join(parentRefs, ",") - pRefs += "," - if strings.Contains(pRefs, target.Ref.String()+",") { - err = nil - return - } - - if err = resolver.Resolve(&target.Ref, &t); err != nil { - return - } - parentRefs = append(parentRefs, target.Ref.String()) - target = *t - } - - if t, err = expandItems(target, parentRefs, resolver); err != nil { - return - } - target = *t - - for i := 
range target.AllOf { - if t, err = expandSchema(target.AllOf[i], parentRefs, resolver); err != nil { - return - } - target.AllOf[i] = *t - } - for i := range target.AnyOf { - if t, err = expandSchema(target.AnyOf[i], parentRefs, resolver); err != nil { - return - } - target.AnyOf[i] = *t - } - for i := range target.OneOf { - if t, err = expandSchema(target.OneOf[i], parentRefs, resolver); err != nil { - return - } - target.OneOf[i] = *t - } - if target.Not != nil { - if t, err = expandSchema(*target.Not, parentRefs, resolver); err != nil { - return - } - *target.Not = *t - } - for k, _ := range target.Properties { - if t, err = expandSchema(target.Properties[k], parentRefs, resolver); err != nil { - return - } - target.Properties[k] = *t - } - if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { - if t, err = expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver); err != nil { - return - } - *target.AdditionalProperties.Schema = *t - } - for k, _ := range target.PatternProperties { - if t, err = expandSchema(target.PatternProperties[k], parentRefs, resolver); err != nil { - return - } - target.PatternProperties[k] = *t - } - for k, _ := range target.Dependencies { - if target.Dependencies[k].Schema != nil { - if t, err = expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver); err != nil { - return - } - *target.Dependencies[k].Schema = *t - } - } - if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { - if t, err = expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver); err != nil { - return - } - *target.AdditionalItems.Schema = *t - } - for k, _ := range target.Definitions { - if t, err = expandSchema(target.Definitions[k], parentRefs, resolver); err != nil { - return - } - target.Definitions[k] = *t - } - return -} - -func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error { - if pathItem == nil { - return nil - } - if pathItem.Ref.String() != "" { - if err := resolver.Resolve(&pathItem.Ref, &pathItem); err != nil { - return err - } - } - - for idx := range pathItem.Parameters { - if err := expandParameter(&(pathItem.Parameters[idx]), resolver); err != nil { - return err - } - } - if err := expandOperation(pathItem.Get, resolver); err != nil { - return err - } - if err := expandOperation(pathItem.Head, resolver); err != nil { - return err - } - if err := expandOperation(pathItem.Options, resolver); err != nil { - return err - } - if err := expandOperation(pathItem.Put, resolver); err != nil { - return err - } - if err := expandOperation(pathItem.Post, resolver); err != nil { - return err - } - if err := expandOperation(pathItem.Patch, resolver); err != nil { - return err - } - if err := expandOperation(pathItem.Delete, resolver); err != nil { - return err - } - return nil -} - -func expandOperation(op *Operation, resolver *schemaLoader) error { - if op == nil { - return nil - } - for i, param := range op.Parameters { - if err := expandParameter(¶m, resolver); err != nil { - return err - } - op.Parameters[i] = param - } - - if op.Responses != nil { - responses := op.Responses - if err := expandResponse(responses.Default, resolver); err != nil { - return err - } - for code, response := range responses.StatusCodeResponses { - if err := expandResponse(&response, resolver); err != nil { - return err - } - responses.StatusCodeResponses[code] = response - } - } - return nil -} - -func expandResponse(response *Response, resolver *schemaLoader) error { - if response == nil { - return nil - 
} - - if response.Ref.String() != "" { - if err := resolver.Resolve(&response.Ref, response); err != nil { - return err - } - } - - if response.Schema != nil { - parentRefs := []string{response.Schema.Ref.String()} - if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); err != nil { - return err - } - if s, err := expandSchema(*response.Schema, parentRefs, resolver); err != nil { - return err - } else { - *response.Schema = *s - } - } - return nil -} - -func expandParameter(parameter *Parameter, resolver *schemaLoader) error { - if parameter == nil { - return nil - } - if parameter.Ref.String() != "" { - if err := resolver.Resolve(¶meter.Ref, parameter); err != nil { - return err - } - } - if parameter.Schema != nil { - parentRefs := []string{parameter.Schema.Ref.String()} - if err := resolver.Resolve(¶meter.Schema.Ref, ¶meter.Schema); err != nil { - return err - } - if s, err := expandSchema(*parameter.Schema, parentRefs, resolver); err != nil { - return err - } else { - *parameter.Schema = *s - } - } - return nil -} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go deleted file mode 100644 index 88add91b2..000000000 --- a/vendor/github.com/go-openapi/spec/external_docs.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// ExternalDocumentation allows referencing an external resource for -// extended documentation. -// -// For more information: http://goo.gl/8us55a#externalDocumentationObject -type ExternalDocumentation struct { - Description string `json:"description,omitempty"` - URL string `json:"url,omitempty"` -} diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go deleted file mode 100644 index 758b84531..000000000 --- a/vendor/github.com/go-openapi/spec/header.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -type HeaderProps struct { - Description string `json:"description,omitempty"` -} - -// Header describes a header for a response of the API -// -// For more information: http://goo.gl/8us55a#headerObject -type Header struct { - CommonValidations - SimpleSchema - HeaderProps -} - -// ResponseHeader creates a new header instance for use in a response -func ResponseHeader() *Header { - return new(Header) -} - -// WithDescription sets the description on this response, allows for chaining -func (h *Header) WithDescription(description string) *Header { - h.Description = description - return h -} - -// Typed a fluent builder method for the type of parameter -func (h *Header) Typed(tpe, format string) *Header { - h.Type = tpe - h.Format = format - return h -} - -// CollectionOf a fluent builder method for an array item -func (h *Header) CollectionOf(items *Items, format string) *Header { - h.Type = "array" - h.Items = items - h.CollectionFormat = format - return h -} - -// WithDefault sets the default value on this item -func (h *Header) WithDefault(defaultValue interface{}) *Header { - h.Default = defaultValue - return h -} - -// WithMaxLength sets a max length value -func (h *Header) WithMaxLength(max int64) *Header { - h.MaxLength = &max - return h -} - -// WithMinLength sets a min length value -func (h *Header) WithMinLength(min int64) *Header { - h.MinLength = &min - return h -} - -// WithPattern sets a pattern value -func (h *Header) WithPattern(pattern string) *Header { - h.Pattern = pattern - return h -} - -// WithMultipleOf sets a multiple of value -func (h *Header) WithMultipleOf(number float64) *Header { - h.MultipleOf = &number - return h -} - -// WithMaximum sets a maximum number value -func (h *Header) WithMaximum(max float64, exclusive bool) *Header { - h.Maximum = &max - h.ExclusiveMaximum = exclusive - return h -} - -// WithMinimum sets a minimum number value -func (h *Header) WithMinimum(min float64, exclusive bool) *Header { - h.Minimum = &min - h.ExclusiveMinimum = exclusive - return h -} - -// WithEnum sets a the enum values (replace) -func (h *Header) WithEnum(values ...interface{}) *Header { - h.Enum = append([]interface{}{}, values...) 
- return h -} - -// WithMaxItems sets the max items -func (h *Header) WithMaxItems(size int64) *Header { - h.MaxItems = &size - return h -} - -// WithMinItems sets the min items -func (h *Header) WithMinItems(size int64) *Header { - h.MinItems = &size - return h -} - -// UniqueValues dictates that this array can only have unique items -func (h *Header) UniqueValues() *Header { - h.UniqueItems = true - return h -} - -// AllowDuplicates this array can have duplicates -func (h *Header) AllowDuplicates() *Header { - h.UniqueItems = false - return h -} - -// MarshalJSON marshal this to JSON -func (h Header) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(h.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(h.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(h.HeaderProps) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -// UnmarshalJSON marshal this from JSON -func (h *Header) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &h.CommonValidations); err != nil { - return err - } - if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { - return err - } - if err := json.Unmarshal(data, &h.HeaderProps); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go deleted file mode 100644 index fb8b7c4ac..000000000 --- a/vendor/github.com/go-openapi/spec/info.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// Extensions vendor specific extensions -type Extensions map[string]interface{} - -// Add adds a value to these extensions -func (e Extensions) Add(key string, value interface{}) { - realKey := strings.ToLower(key) - e[realKey] = value -} - -// GetString gets a string value from the extensions -func (e Extensions) GetString(key string) (string, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - str, ok := v.(string) - return str, ok - } - return "", false -} - -// GetBool gets a string value from the extensions -func (e Extensions) GetBool(key string) (bool, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - str, ok := v.(bool) - return str, ok - } - return false, false -} - -// GetStringSlice gets a string value from the extensions -func (e Extensions) GetStringSlice(key string) ([]string, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - arr, ok := v.([]interface{}) - if !ok { - return nil, false - } - var strs []string - for _, iface := range arr { - str, ok := iface.(string) - if !ok { - return nil, false - } - strs = append(strs, str) - } - return strs, ok - } - return nil, false -} - -// VendorExtensible composition block. 
-type VendorExtensible struct { - Extensions Extensions -} - -// AddExtension adds an extension to this extensible object -func (v *VendorExtensible) AddExtension(key string, value interface{}) { - if value == nil { - return - } - if v.Extensions == nil { - v.Extensions = make(map[string]interface{}) - } - v.Extensions.Add(key, value) -} - -// MarshalJSON marshals the extensions to json -func (v VendorExtensible) MarshalJSON() ([]byte, error) { - toser := make(map[string]interface{}) - for k, v := range v.Extensions { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - toser[k] = v - } - } - return json.Marshal(toser) -} - -// UnmarshalJSON for this extensible object -func (v *VendorExtensible) UnmarshalJSON(data []byte) error { - var d map[string]interface{} - if err := json.Unmarshal(data, &d); err != nil { - return err - } - for k, vv := range d { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - if v.Extensions == nil { - v.Extensions = map[string]interface{}{} - } - v.Extensions[k] = vv - } - } - return nil -} - -// InfoProps the properties for an info definition -type InfoProps struct { - Description string `json:"description,omitempty"` - Title string `json:"title,omitempty"` - TermsOfService string `json:"termsOfService,omitempty"` - Contact *ContactInfo `json:"contact,omitempty"` - License *License `json:"license,omitempty"` - Version string `json:"version,omitempty"` -} - -// Info object provides metadata about the API. -// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience. -// -// For more information: http://goo.gl/8us55a#infoObject -type Info struct { - VendorExtensible - InfoProps -} - -// JSONLookup look up a value by the json property name -func (i Info) JSONLookup(token string) (interface{}, error) { - if ex, ok := i.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(i.InfoProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (i Info) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(i.InfoProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(i.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (i *Info) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &i.InfoProps); err != nil { - return err - } - if err := json.Unmarshal(data, &i.VendorExtensible); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go deleted file mode 100644 index 4d57ea5ca..000000000 --- a/vendor/github.com/go-openapi/spec/items.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
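Info composes InfoProps with VendorExtensible, and only extension keys prefixed with "x-" survive marshalling. A minimal hypothetical sketch (title, version and extension key invented) of that behaviour:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	info := spec.Info{
		InfoProps: spec.InfoProps{
			Title:   "Pet Store",
			Version: "1.0.0",
		},
	}
	// AddExtension lower-cases the key; MarshalJSON keeps only "x-"-prefixed keys.
	info.AddExtension("x-audience", "internal")

	out, err := json.Marshal(info)
	if err != nil {
		panic(err)
	}
	// Prints roughly: {"title":"Pet Store","version":"1.0.0","x-audience":"internal"}
	fmt.Println(string(out))
}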
- -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -type SimpleSchema struct { - Type string `json:"type,omitempty"` - Format string `json:"format,omitempty"` - Items *Items `json:"items,omitempty"` - CollectionFormat string `json:"collectionFormat,omitempty"` - Default interface{} `json:"default,omitempty"` -} - -func (s *SimpleSchema) TypeName() string { - if s.Format != "" { - return s.Format - } - return s.Type -} - -func (s *SimpleSchema) ItemsTypeName() string { - if s.Items == nil { - return "" - } - return s.Items.TypeName() -} - -type CommonValidations struct { - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` -} - -// Items a limited subset of JSON-Schema's items object. -// It is used by parameter definitions that are not located in "body". -// -// For more information: http://goo.gl/8us55a#items-object- -type Items struct { - Refable - CommonValidations - SimpleSchema -} - -// NewItems creates a new instance of items -func NewItems() *Items { - return &Items{} -} - -// Typed a fluent builder method for the type of item -func (i *Items) Typed(tpe, format string) *Items { - i.Type = tpe - i.Format = format - return i -} - -// CollectionOf a fluent builder method for an array item -func (i *Items) CollectionOf(items *Items, format string) *Items { - i.Type = "array" - i.Items = items - i.CollectionFormat = format - return i -} - -// WithDefault sets the default value on this item -func (i *Items) WithDefault(defaultValue interface{}) *Items { - i.Default = defaultValue - return i -} - -// WithMaxLength sets a max length value -func (i *Items) WithMaxLength(max int64) *Items { - i.MaxLength = &max - return i -} - -// WithMinLength sets a min length value -func (i *Items) WithMinLength(min int64) *Items { - i.MinLength = &min - return i -} - -// WithPattern sets a pattern value -func (i *Items) WithPattern(pattern string) *Items { - i.Pattern = pattern - return i -} - -// WithMultipleOf sets a multiple of value -func (i *Items) WithMultipleOf(number float64) *Items { - i.MultipleOf = &number - return i -} - -// WithMaximum sets a maximum number value -func (i *Items) WithMaximum(max float64, exclusive bool) *Items { - i.Maximum = &max - i.ExclusiveMaximum = exclusive - return i -} - -// WithMinimum sets a minimum number value -func (i *Items) WithMinimum(min float64, exclusive bool) *Items { - i.Minimum = &min - i.ExclusiveMinimum = exclusive - return i -} - -// WithEnum sets a the enum values (replace) -func (i *Items) WithEnum(values ...interface{}) *Items { - i.Enum = append([]interface{}{}, values...) 
- return i -} - -// WithMaxItems sets the max items -func (i *Items) WithMaxItems(size int64) *Items { - i.MaxItems = &size - return i -} - -// WithMinItems sets the min items -func (i *Items) WithMinItems(size int64) *Items { - i.MinItems = &size - return i -} - -// UniqueValues dictates that this array can only have unique items -func (i *Items) UniqueValues() *Items { - i.UniqueItems = true - return i -} - -// AllowDuplicates this array can have duplicates -func (i *Items) AllowDuplicates() *Items { - i.UniqueItems = false - return i -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (i *Items) UnmarshalJSON(data []byte) error { - var validations CommonValidations - if err := json.Unmarshal(data, &validations); err != nil { - return err - } - var ref Refable - if err := json.Unmarshal(data, &ref); err != nil { - return err - } - var simpleSchema SimpleSchema - if err := json.Unmarshal(data, &simpleSchema); err != nil { - return err - } - i.Refable = ref - i.CommonValidations = validations - i.SimpleSchema = simpleSchema - return nil -} - -// MarshalJSON converts this items object to JSON -func (i Items) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(i.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(i.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(i.Refable) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b3, b1, b2), nil -} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go deleted file mode 100644 index f20961b4f..000000000 --- a/vendor/github.com/go-openapi/spec/license.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// License information for the exposed API. -// -// For more information: http://goo.gl/8us55a#licenseObject -type License struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` -} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go deleted file mode 100644 index de1db6f02..000000000 --- a/vendor/github.com/go-openapi/spec/operation.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
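Items is assembled with the fluent builders deleted above. A short hypothetical sketch (type, format and bounds invented) for a unique, CSV-formatted array of bounded strings:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// An array collected as comma-separated values, whose elements are strings
	// of at most 32 characters, with duplicates disallowed.
	items := spec.NewItems().
		CollectionOf(spec.NewItems().Typed("string", "").WithMaxLength(32), "csv").
		UniqueValues()

	out, err := json.MarshalIndent(items, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}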
- -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -type OperationProps struct { - Description string `json:"description,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss] - Tags []string `json:"tags,omitempty"` - Summary string `json:"summary,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` - ID string `json:"operationId,omitempty"` - Deprecated bool `json:"deprecated,omitempty"` - Security []map[string][]string `json:"security,omitempty"` - Parameters []Parameter `json:"parameters,omitempty"` - Responses *Responses `json:"responses,omitempty"` -} - -// Operation describes a single API operation on a path. -// -// For more information: http://goo.gl/8us55a#operationObject -type Operation struct { - VendorExtensible - OperationProps -} - -// SuccessResponse gets a success response model -func (o *Operation) SuccessResponse() (*Response, int, bool) { - if o.Responses == nil { - return nil, 0, false - } - - for k, v := range o.Responses.StatusCodeResponses { - if k/100 == 2 { - return &v, k, true - } - } - - return o.Responses.Default, 0, false -} - -// JSONLookup look up a value by the json property name -func (o Operation) JSONLookup(token string) (interface{}, error) { - if ex, ok := o.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(o.OperationProps, token) - return r, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (o *Operation) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &o.OperationProps); err != nil { - return err - } - if err := json.Unmarshal(data, &o.VendorExtensible); err != nil { - return err - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (o Operation) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(o.OperationProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(o.VendorExtensible) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} - -// NewOperation creates a new operation instance. -// It expects an ID as parameter but not passing an ID is also valid. -func NewOperation(id string) *Operation { - op := new(Operation) - op.ID = id - return op -} - -// WithID sets the ID property on this operation, allows for chaining. -func (o *Operation) WithID(id string) *Operation { - o.ID = id - return o -} - -// WithDescription sets the description on this operation, allows for chaining -func (o *Operation) WithDescription(description string) *Operation { - o.Description = description - return o -} - -// WithSummary sets the summary on this operation, allows for chaining -func (o *Operation) WithSummary(summary string) *Operation { - o.Summary = summary - return o -} - -// WithExternalDocs sets/removes the external docs for/from this operation. -// When you pass empty strings as params the external documents will be removed. -// When you pass non-empty string as one value then those values will be used on the external docs object. -// So when you pass a non-empty description, you should also pass the url and vice versa. 
-func (o *Operation) WithExternalDocs(description, url string) *Operation { - if description == "" && url == "" { - o.ExternalDocs = nil - return o - } - - if o.ExternalDocs == nil { - o.ExternalDocs = &ExternalDocumentation{} - } - o.ExternalDocs.Description = description - o.ExternalDocs.URL = url - return o -} - -// Deprecate marks the operation as deprecated -func (o *Operation) Deprecate() *Operation { - o.Deprecated = true - return o -} - -// Undeprecate marks the operation as not deprected -func (o *Operation) Undeprecate() *Operation { - o.Deprecated = false - return o -} - -// WithConsumes adds media types for incoming body values -func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { - o.Consumes = append(o.Consumes, mediaTypes...) - return o -} - -// WithProduces adds media types for outgoing body values -func (o *Operation) WithProduces(mediaTypes ...string) *Operation { - o.Produces = append(o.Produces, mediaTypes...) - return o -} - -// WithTags adds tags for this operation -func (o *Operation) WithTags(tags ...string) *Operation { - o.Tags = append(o.Tags, tags...) - return o -} - -// AddParam adds a parameter to this operation, when a parameter for that location -// and with that name already exists it will be replaced -func (o *Operation) AddParam(param *Parameter) *Operation { - if param == nil { - return o - } - - for i, p := range o.Parameters { - if p.Name == param.Name && p.In == param.In { - params := append(o.Parameters[:i], *param) - params = append(params, o.Parameters[i+1:]...) - o.Parameters = params - return o - } - } - - o.Parameters = append(o.Parameters, *param) - return o -} - -// RemoveParam removes a parameter from the operation -func (o *Operation) RemoveParam(name, in string) *Operation { - for i, p := range o.Parameters { - if p.Name == name && p.In == name { - o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) - return o - } - } - return o -} - -// SecuredWith adds a security scope to this operation. -func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { - o.Security = append(o.Security, map[string][]string{name: scopes}) - return o -} - -// WithDefaultResponse adds a default response to the operation. -// Passing a nil value will remove the response -func (o *Operation) WithDefaultResponse(response *Response) *Operation { - return o.RespondsWith(0, response) -} - -// RespondsWith adds a status code response to the operation. -// When the code is 0 the value of the response will be used as default response value. -// When the value of the response is nil it will be removed from the operation -func (o *Operation) RespondsWith(code int, response *Response) *Operation { - if o.Responses == nil { - o.Responses = new(Responses) - } - if code == 0 { - o.Responses.Default = response - return o - } - if response == nil { - delete(o.Responses.StatusCodeResponses, code) - return o - } - if o.Responses.StatusCodeResponses == nil { - o.Responses.StatusCodeResponses = make(map[int]Response) - } - o.Responses.StatusCodeResponses[code] = *response - return o -} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go deleted file mode 100644 index 8fb66d12a..000000000 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
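The Operation builders above chain into a complete operation definition, including parameters, security requirements and responses. A hypothetical sketch (operation id, tag and status codes invented) that relies only on methods shown in this patch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	op := spec.NewOperation("listPets").
		WithSummary("List all pets").
		WithTags("pets").
		WithProduces("application/json").
		// Optional query parameter controlling page size.
		AddParam(spec.QueryParam("limit").Typed("integer", "int32").AsOptional()).
		SecuredWith("api_key").
		RespondsWith(200, spec.NewResponse().WithDescription("a paged array of pets")).
		WithDefaultResponse(spec.NewResponse().WithDescription("unexpected error"))

	out, err := json.MarshalIndent(op, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}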
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// QueryParam creates a query parameter -func QueryParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} -} - -// HeaderParam creates a header parameter, this is always required by default -func HeaderParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} -} - -// PathParam creates a path parameter, this is always required -func PathParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} -} - -// BodyParam creates a body parameter -func BodyParam(name string, schema *Schema) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, SimpleSchema: SimpleSchema{Type: "object"}} -} - -// FormDataParam creates a body parameter -func FormDataParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} -} - -// FileParam creates a body parameter -func FileParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, SimpleSchema: SimpleSchema{Type: "file"}} -} - -// SimpleArrayParam creates a param for a simple array (string, int, date etc) -func SimpleArrayParam(name, tpe, fmt string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name}, SimpleSchema: SimpleSchema{Type: "array", CollectionFormat: "csv", Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}} -} - -// ParamRef creates a parameter that's a json reference -func ParamRef(uri string) *Parameter { - p := new(Parameter) - p.Ref = MustCreateRef(uri) - return p -} - -type ParamProps struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - In string `json:"in,omitempty"` - Required bool `json:"required,omitempty"` - Schema *Schema `json:"schema,omitempty"` // when in == "body" - AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` // when in == "query" || "formData" -} - -// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). -// -// There are five possible parameter types. -// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, the path parameter is `itemId`. -// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. -// * Header - Custom headers that are expected as part of the request. -// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be *one* body parameter. The name of the body parameter has no effect on the parameter itself and is used for documentation purposes only. 
Since Form parameters are also in the payload, body and form parameters cannot exist together for the same operation. -// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or `multipart/form-data` are used as the content type of the request (in Swagger's definition, the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be declared together with a body parameter for the same operation. Form parameters have a different format based on the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4): -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple parameters that are being transferred. -// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is `submit-name`. This type of form parameters is more commonly used for file transfers. -// -// For more information: http://goo.gl/8us55a#parameterObject -type Parameter struct { - Refable - CommonValidations - SimpleSchema - VendorExtensible - ParamProps -} - -// JSONLookup look up a value by the json property name -func (p Parameter) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - if token == "$ref" { - return &p.Ref, nil - } - r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) - if err != nil { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) - if err != nil { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.ParamProps, token) - return r, err -} - -// WithDescription a fluent builder method for the description of the parameter -func (p *Parameter) WithDescription(description string) *Parameter { - p.Description = description - return p -} - -// Named a fluent builder method to override the name of the parameter -func (p *Parameter) Named(name string) *Parameter { - p.Name = name - return p -} - -// WithLocation a fluent builder method to override the location of the parameter -func (p *Parameter) WithLocation(in string) *Parameter { - p.In = in - return p -} - -// Typed a fluent builder method for the type of the parameter value -func (p *Parameter) Typed(tpe, format string) *Parameter { - p.Type = tpe - p.Format = format - return p -} - -// CollectionOf a fluent builder method for an array parameter -func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { - p.Type = "array" - p.Items = items - p.CollectionFormat = format - return p -} - -// WithDefault sets the default value on this parameter -func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { - p.AsOptional() // with default implies optional - p.Default = defaultValue - return p -} - -// AllowsEmptyValues flags this parameter as being ok with empty values -func (p *Parameter) AllowsEmptyValues() *Parameter { - p.AllowEmptyValue = true - return p -} - -// NoEmptyValues flags this parameter as not liking empty values -func (p *Parameter) NoEmptyValues() *Parameter { - p.AllowEmptyValue = false 
- return p -} - -// AsOptional flags this parameter as optional -func (p *Parameter) AsOptional() *Parameter { - p.Required = false - return p -} - -// AsRequired flags this parameter as required -func (p *Parameter) AsRequired() *Parameter { - if p.Default != nil { // with a default required makes no sense - return p - } - p.Required = true - return p -} - -// WithMaxLength sets a max length value -func (p *Parameter) WithMaxLength(max int64) *Parameter { - p.MaxLength = &max - return p -} - -// WithMinLength sets a min length value -func (p *Parameter) WithMinLength(min int64) *Parameter { - p.MinLength = &min - return p -} - -// WithPattern sets a pattern value -func (p *Parameter) WithPattern(pattern string) *Parameter { - p.Pattern = pattern - return p -} - -// WithMultipleOf sets a multiple of value -func (p *Parameter) WithMultipleOf(number float64) *Parameter { - p.MultipleOf = &number - return p -} - -// WithMaximum sets a maximum number value -func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { - p.Maximum = &max - p.ExclusiveMaximum = exclusive - return p -} - -// WithMinimum sets a minimum number value -func (p *Parameter) WithMinimum(min float64, exclusive bool) *Parameter { - p.Minimum = &min - p.ExclusiveMinimum = exclusive - return p -} - -// WithEnum sets a the enum values (replace) -func (p *Parameter) WithEnum(values ...interface{}) *Parameter { - p.Enum = append([]interface{}{}, values...) - return p -} - -// WithMaxItems sets the max items -func (p *Parameter) WithMaxItems(size int64) *Parameter { - p.MaxItems = &size - return p -} - -// WithMinItems sets the min items -func (p *Parameter) WithMinItems(size int64) *Parameter { - p.MinItems = &size - return p -} - -// UniqueValues dictates that this array can only have unique items -func (p *Parameter) UniqueValues() *Parameter { - p.UniqueItems = true - return p -} - -// AllowDuplicates this array can have duplicates -func (p *Parameter) AllowDuplicates() *Parameter { - p.UniqueItems = false - return p -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Parameter) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.CommonValidations); err != nil { - return err - } - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - if err := json.Unmarshal(data, &p.ParamProps); err != nil { - return err - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (p Parameter) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(p.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - b5, err := json.Marshal(p.ParamProps) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b3, b1, b2, b4, b5), nil -} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go deleted file mode 100644 index 9ab3ec538..000000000 --- a/vendor/github.com/go-openapi/spec/path_item.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in 
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// pathItemProps the path item specific properties -type PathItemProps struct { - Get *Operation `json:"get,omitempty"` - Put *Operation `json:"put,omitempty"` - Post *Operation `json:"post,omitempty"` - Delete *Operation `json:"delete,omitempty"` - Options *Operation `json:"options,omitempty"` - Head *Operation `json:"head,omitempty"` - Patch *Operation `json:"patch,omitempty"` - Parameters []Parameter `json:"parameters,omitempty"` -} - -// PathItem describes the operations available on a single path. -// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). -// The path itself is still exposed to the documentation viewer but they will -// not know which operations and parameters are available. -// -// For more information: http://goo.gl/8us55a#pathItemObject -type PathItem struct { - Refable - VendorExtensible - PathItemProps -} - -// JSONLookup look up a value by the json property name -func (p PathItem) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - if token == "$ref" { - return &p.Ref, nil - } - r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) - return r, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *PathItem) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - if err := json.Unmarshal(data, &p.PathItemProps); err != nil { - return err - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (p PathItem) MarshalJSON() ([]byte, error) { - b3, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - b5, err := json.Marshal(p.PathItemProps) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b3, b4, b5) - return concated, nil -} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go deleted file mode 100644 index 9dc82a290..000000000 --- a/vendor/github.com/go-openapi/spec/paths.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
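PathItem groups the operations and shared parameters of a single route; path-level parameters are commonly expressed as JSON references. A minimal hypothetical sketch (route, parameter ref and operation invented):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	pi := spec.PathItem{
		PathItemProps: spec.PathItemProps{
			// A parameter shared by every operation on the route, given as a $ref.
			Parameters: []spec.Parameter{*spec.ParamRef("#/parameters/petId")},
			Get:        spec.NewOperation("getPetById").WithSummary("Info for a specific pet"),
		},
	}

	out, err := json.MarshalIndent(pi, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}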
- -package spec - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/go-openapi/swag" -) - -// Paths holds the relative paths to the individual endpoints. -// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order -// to construct the full URL. -// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). -// -// For more information: http://goo.gl/8us55a#pathsObject -type Paths struct { - VendorExtensible - Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" -} - -// JSONLookup look up a value by the json property name -func (p Paths) JSONLookup(token string) (interface{}, error) { - if pi, ok := p.Paths[token]; ok { - return &pi, nil - } - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - return nil, fmt.Errorf("object has no field %q", token) -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Paths) UnmarshalJSON(data []byte) error { - var res map[string]json.RawMessage - if err := json.Unmarshal(data, &res); err != nil { - return err - } - for k, v := range res { - if strings.HasPrefix(strings.ToLower(k), "x-") { - if p.Extensions == nil { - p.Extensions = make(map[string]interface{}) - } - var d interface{} - if err := json.Unmarshal(v, &d); err != nil { - return err - } - p.Extensions[k] = d - } - if strings.HasPrefix(k, "/") { - if p.Paths == nil { - p.Paths = make(map[string]PathItem) - } - var pi PathItem - if err := json.Unmarshal(v, &pi); err != nil { - return err - } - p.Paths[k] = pi - } - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (p Paths) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - - pths := make(map[string]PathItem) - for k, v := range p.Paths { - if strings.HasPrefix(k, "/") { - pths[k] = v - } - } - b2, err := json.Marshal(pths) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go deleted file mode 100644 index 68631df8b..000000000 --- a/vendor/github.com/go-openapi/spec/ref.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
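Paths uses a custom (un)marshaller: keys starting with "/" become path items, keys starting with "x-" become vendor extensions, and anything else is dropped. A hypothetical round-trip sketch (document content invented) of that rule:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	doc := []byte(`{
		"/pets": {"get": {"operationId": "listPets"}},
		"x-generator": "example",
		"ignored": true
	}`)

	var paths spec.Paths
	if err := json.Unmarshal(doc, &paths); err != nil {
		panic(err)
	}

	// Only the "/"-prefixed key became a PathItem; "x-generator" went to
	// Extensions, and "ignored" was silently discarded.
	fmt.Println(len(paths.Paths), paths.Extensions["x-generator"])

	out, err := json.Marshal(paths)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}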
- -package spec - -import ( - "encoding/json" - "net/http" - "os" - "path/filepath" - - "github.com/go-openapi/jsonreference" -) - -// Refable is a struct for things that accept a $ref property -type Refable struct { - Ref Ref -} - -// MarshalJSON marshals the ref to json -func (r Refable) MarshalJSON() ([]byte, error) { - return r.Ref.MarshalJSON() -} - -// UnmarshalJSON unmarshalss the ref from json -func (r *Refable) UnmarshalJSON(d []byte) error { - return json.Unmarshal(d, &r.Ref) -} - -// Ref represents a json reference that is potentially resolved -type Ref struct { - jsonreference.Ref -} - -// RemoteURI gets the remote uri part of the ref -func (r *Ref) RemoteURI() string { - if r.String() == "" { - return r.String() - } - - u := *r.GetURL() - u.Fragment = "" - return u.String() -} - -// IsValidURI returns true when the url the ref points to can be found -func (r *Ref) IsValidURI() bool { - if r.String() == "" { - return true - } - - v := r.RemoteURI() - if v == "" { - return true - } - - if r.HasFullURL { - rr, err := http.Get(v) - if err != nil { - return false - } - - return rr.StatusCode/100 == 2 - } - - if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) { - return false - } - - // check for local file - pth := v - if r.HasURLPathOnly { - p, e := filepath.Abs(pth) - if e != nil { - return false - } - pth = p - } - - fi, err := os.Stat(pth) - if err != nil { - return false - } - - return !fi.IsDir() -} - -// Inherits creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *Ref) Inherits(child Ref) (*Ref, error) { - ref, err := r.Ref.Inherits(child.Ref) - if err != nil { - return nil, err - } - return &Ref{Ref: *ref}, nil -} - -// NewRef creates a new instance of a ref object -// returns an error when the reference uri is an invalid uri -func NewRef(refURI string) (Ref, error) { - ref, err := jsonreference.New(refURI) - if err != nil { - return Ref{}, err - } - return Ref{Ref: ref}, nil -} - -// MustCreateRef creates a ref object but -func MustCreateRef(refURI string) Ref { - return Ref{Ref: jsonreference.MustCreateRef(refURI)} -} - -// // NewResolvedRef creates a resolved ref -// func NewResolvedRef(refURI string, data interface{}) Ref { -// return Ref{ -// Ref: jsonreference.MustCreateRef(refURI), -// Resolved: data, -// } -// } - -// MarshalJSON marshals this ref into a JSON object -func (r Ref) MarshalJSON() ([]byte, error) { - str := r.String() - if str == "" { - if r.IsRoot() { - return []byte(`{"$ref":"#"}`), nil - } - return []byte("{}"), nil - } - v := map[string]interface{}{"$ref": str} - return json.Marshal(v) -} - -// UnmarshalJSON unmarshals this ref from a JSON object -func (r *Ref) UnmarshalJSON(d []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(d, &v); err != nil { - return err - } - - if v == nil { - return nil - } - - if vv, ok := v["$ref"]; ok { - if str, ok := vv.(string); ok { - ref, err := jsonreference.New(str) - if err != nil { - return err - } - *r = Ref{Ref: ref} - } - } - - return nil -} diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go deleted file mode 100644 index 308cc8478..000000000 --- a/vendor/github.com/go-openapi/spec/response.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
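Ref wraps a jsonreference and serializes as a single-key {"$ref": ...} object. A small hypothetical sketch (the reference URI is invented):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	ref, err := spec.NewRef("#/definitions/Pet")
	if err != nil {
		panic(err)
	}

	out, err := json.Marshal(ref)
	if err != nil {
		panic(err)
	}
	// Prints: {"$ref":"#/definitions/Pet"}
	fmt.Println(string(out))

	var parsed spec.Ref
	if err := json.Unmarshal(out, &parsed); err != nil {
		panic(err)
	}
	// Prints: #/definitions/Pet
	fmt.Println(parsed.String())
}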
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -// ResponseProps properties specific to a response -type ResponseProps struct { - Description string `json:"description,omitempty"` - Schema *Schema `json:"schema,omitempty"` - Headers map[string]Header `json:"headers,omitempty"` - Examples map[string]interface{} `json:"examples,omitempty"` -} - -// Response describes a single response from an API Operation. -// -// For more information: http://goo.gl/8us55a#responseObject -type Response struct { - Refable - ResponseProps -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (r *Response) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponseProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.Refable); err != nil { - return err - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (r Response) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.ResponseProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.Refable) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// NewResponse creates a new response instance -func NewResponse() *Response { - return new(Response) -} - -// ResponseRef creates a response as a json reference -func ResponseRef(url string) *Response { - resp := NewResponse() - resp.Ref = MustCreateRef(url) - return resp -} - -// WithDescription sets the description on this response, allows for chaining -func (r *Response) WithDescription(description string) *Response { - r.Description = description - return r -} - -// WithSchema sets the schema on this response, allows for chaining. -// Passing a nil argument removes the schema from this response -func (r *Response) WithSchema(schema *Schema) *Response { - r.Schema = schema - return r -} - -// AddHeader adds a header to this response -func (r *Response) AddHeader(name string, header *Header) *Response { - if header == nil { - return r.RemoveHeader(name) - } - if r.Headers == nil { - r.Headers = make(map[string]Header) - } - r.Headers[name] = *header - return r -} - -// RemoveHeader removes a header from this response -func (r *Response) RemoveHeader(name string) *Response { - delete(r.Headers, name) - return r -} - -// AddExample adds an example to this response -func (r *Response) AddExample(mediaType string, example interface{}) *Response { - if r.Examples == nil { - r.Examples = make(map[string]interface{}) - } - r.Examples[mediaType] = example - return r -} diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go deleted file mode 100644 index ea071ca63..000000000 --- a/vendor/github.com/go-openapi/spec/responses.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
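Response pairs a description with an optional schema, headers and examples, again via fluent builders. A hypothetical sketch (header name and schema ref invented) using only builders shown in this patch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	resp := spec.NewResponse().
		WithDescription("a single pet").
		WithSchema(spec.RefSchema("#/definitions/Pet")).
		AddHeader("X-Rate-Limit", spec.ResponseHeader().Typed("integer", "int32"))

	out, err := json.MarshalIndent(resp, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}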
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - - "github.com/go-openapi/swag" -) - -// Responses is a container for the expected responses of an operation. -// The container maps a HTTP response code to the expected response. -// It is not expected from the documentation to necessarily cover all possible HTTP response codes, -// since they may not be known in advance. However, it is expected from the documentation to cover -// a successful operation response and any known errors. -// -// The `default` can be used a default response object for all HTTP codes that are not covered -// individually by the specification. -// -// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response -// for a successful operation call. -// -// For more information: http://goo.gl/8us55a#responsesObject -type Responses struct { - VendorExtensible - ResponsesProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (r Responses) JSONLookup(token string) (interface{}, error) { - if token == "default" { - return r.Default, nil - } - if ex, ok := r.Extensions[token]; ok { - return &ex, nil - } - if i, err := strconv.Atoi(token); err == nil { - if scr, ok := r.StatusCodeResponses[i]; ok { - return &scr, nil - } - } - return nil, fmt.Errorf("object has no field %q", token) -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (r *Responses) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } - if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { - r.ResponsesProps = ResponsesProps{} - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (r Responses) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.ResponsesProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} - -type ResponsesProps struct { - Default *Response - StatusCodeResponses map[int]Response -} - -func (r ResponsesProps) MarshalJSON() ([]byte, error) { - toser := map[string]Response{} - if r.Default != nil { - toser["default"] = *r.Default - } - for k, v := range r.StatusCodeResponses { - toser[strconv.Itoa(k)] = v - } - return json.Marshal(toser) -} - -func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]Response - if err := json.Unmarshal(data, &res); err != nil { - return nil - } - if v, ok := res["default"]; ok { - r.Default = &v - delete(res, "default") - } - for k, v := range res { - if nk, err := strconv.Atoi(k); err == nil { - if r.StatusCodeResponses == nil { - r.StatusCodeResponses = map[int]Response{} - } - r.StatusCodeResponses[nk] = v - } - } - return nil -} diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go deleted file mode 100644 index eb88f005c..000000000 --- 
a/vendor/github.com/go-openapi/spec/schema.go +++ /dev/null @@ -1,628 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// BooleanProperty creates a boolean property -func BooleanProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} -} - -// BoolProperty creates a boolean property -func BoolProperty() *Schema { return BooleanProperty() } - -// StringProperty creates a string property -func StringProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// CharProperty creates a string property -func CharProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// Float64Property creates a float64/double property -func Float64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} -} - -// Float32Property creates a float32/float property -func Float32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} -} - -// Int8Property creates an int8 property -func Int8Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} -} - -// Int16Property creates an int16 property -func Int16Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} -} - -// Int32Property creates an int32 property -func Int32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} -} - -// Int64Property creates an int64 property -func Int64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} -} - -// StrFmtProperty creates a property for the named string format -func StrFmtProperty(format string) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} -} - -// DateProperty creates a date property -func DateProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} -} - -// DateTimeProperty creates a date time property -func DateTimeProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} -} - -// MapProperty creates a map property -func MapProperty(property *Schema) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} -} - -// RefProperty creates a ref property -func RefProperty(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// RefSchema creates a ref property -func RefSchema(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// ArrayProperty creates an array property -func 
ArrayProperty(items *Schema) *Schema { - if items == nil { - return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} - } - return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} -} - -// ComposedSchema creates a schema with allOf -func ComposedSchema(schemas ...Schema) *Schema { - s := new(Schema) - s.AllOf = schemas - return s -} - -// SchemaURL represents a schema url -type SchemaURL string - -// MarshalJSON marshal this to JSON -func (r SchemaURL) MarshalJSON() ([]byte, error) { - if r == "" { - return []byte("{}"), nil - } - v := map[string]interface{}{"$schema": string(r)} - return json.Marshal(v) -} - -// UnmarshalJSON unmarshal this from JSON -func (r *SchemaURL) UnmarshalJSON(data []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(data, &v); err != nil { - return err - } - if v == nil { - return nil - } - if vv, ok := v["$schema"]; ok { - if str, ok := vv.(string); ok { - u, err := url.Parse(str) - if err != nil { - return err - } - - *r = SchemaURL(u.String()) - } - } - return nil -} - -// type ExtraSchemaProps map[string]interface{} - -// // JSONSchema represents a structure that is a json schema draft 04 -// type JSONSchema struct { -// SchemaProps -// ExtraSchemaProps -// } - -// // MarshalJSON marshal this to JSON -// func (s JSONSchema) MarshalJSON() ([]byte, error) { -// b1, err := json.Marshal(s.SchemaProps) -// if err != nil { -// return nil, err -// } -// b2, err := s.Ref.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b3, err := s.Schema.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b4, err := json.Marshal(s.ExtraSchemaProps) -// if err != nil { -// return nil, err -// } -// return swag.ConcatJSON(b1, b2, b3, b4), nil -// } - -// // UnmarshalJSON marshal this from JSON -// func (s *JSONSchema) UnmarshalJSON(data []byte) error { -// var sch JSONSchema -// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Ref); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Schema); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { -// return err -// } -// *s = sch -// return nil -// } - -type SchemaProps struct { - ID string `json:"id,omitempty"` - Ref Ref `json:"-,omitempty"` - Schema SchemaURL `json:"-,omitempty"` - Description string `json:"description,omitempty"` - Type StringOrArray `json:"type,omitempty"` - Format string `json:"format,omitempty"` - Title string `json:"title,omitempty"` - Default interface{} `json:"default,omitempty"` - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` - MaxProperties *int64 `json:"maxProperties,omitempty"` - MinProperties *int64 `json:"minProperties,omitempty"` - Required []string `json:"required,omitempty"` - Items *SchemaOrArray `json:"items,omitempty"` - AllOf []Schema `json:"allOf,omitempty"` - OneOf []Schema `json:"oneOf,omitempty"` - AnyOf 
[]Schema `json:"anyOf,omitempty"` - Not *Schema `json:"not,omitempty"` - Properties map[string]Schema `json:"properties,omitempty"` - AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` - PatternProperties map[string]Schema `json:"patternProperties,omitempty"` - Dependencies Dependencies `json:"dependencies,omitempty"` - AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` - Definitions Definitions `json:"definitions,omitempty"` -} - -type SwaggerSchemaProps struct { - Discriminator string `json:"discriminator,omitempty"` - ReadOnly bool `json:"readOnly,omitempty"` - XML *XMLObject `json:"xml,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` - Example interface{} `json:"example,omitempty"` -} - -// Schema the schema object allows the definition of input and output data types. -// These types can be objects, but also primitives and arrays. -// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) -// and uses a predefined subset of it. -// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. -// -// For more information: http://goo.gl/8us55a#schemaObject -type Schema struct { - VendorExtensible - SchemaProps - SwaggerSchemaProps - ExtraProps map[string]interface{} `json:"-"` -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s Schema) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - - if ex, ok := s.ExtraProps[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) - if r != nil || err != nil { - return r, err - } - r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) - return r, err -} - -// WithID sets the id for this schema, allows for chaining -func (s *Schema) WithID(id string) *Schema { - s.ID = id - return s -} - -// WithTitle sets the title for this schema, allows for chaining -func (s *Schema) WithTitle(title string) *Schema { - s.Title = title - return s -} - -// WithDescription sets the description for this schema, allows for chaining -func (s *Schema) WithDescription(description string) *Schema { - s.Description = description - return s -} - -// WithProperties sets the properties for this schema -func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { - s.Properties = schemas - return s -} - -// SetProperty sets a property on this schema -func (s *Schema) SetProperty(name string, schema Schema) *Schema { - if s.Properties == nil { - s.Properties = make(map[string]Schema) - } - s.Properties[name] = schema - return s -} - -// WithAllOf sets the all of property -func (s *Schema) WithAllOf(schemas ...Schema) *Schema { - s.AllOf = schemas - return s -} - -// WithMaxProperties sets the max number of properties an object can have -func (s *Schema) WithMaxProperties(max int64) *Schema { - s.MaxProperties = &max - return s -} - -// WithMinProperties sets the min number of properties an object must have -func (s *Schema) WithMinProperties(min int64) *Schema { - s.MinProperties = &min - return s -} - -// Typed sets the type of this schema for a single value item -func (s *Schema) Typed(tpe, format string) *Schema { - s.Type = []string{tpe} - s.Format = format - return s -} - -// AddType adds a type with potential format to the types for this schema -func (s *Schema) AddType(tpe, format string) *Schema { - s.Type = append(s.Type, tpe) - if format != "" { - s.Format = 
format - } - return s -} - -// CollectionOf a fluent builder method for an array parameter -func (s *Schema) CollectionOf(items Schema) *Schema { - s.Type = []string{"array"} - s.Items = &SchemaOrArray{Schema: &items} - return s -} - -// WithDefault sets the default value on this parameter -func (s *Schema) WithDefault(defaultValue interface{}) *Schema { - s.Default = defaultValue - return s -} - -// WithRequired flags this parameter as required -func (s *Schema) WithRequired(items ...string) *Schema { - s.Required = items - return s -} - -// AddRequired adds field names to the required properties array -func (s *Schema) AddRequired(items ...string) *Schema { - s.Required = append(s.Required, items...) - return s -} - -// WithMaxLength sets a max length value -func (s *Schema) WithMaxLength(max int64) *Schema { - s.MaxLength = &max - return s -} - -// WithMinLength sets a min length value -func (s *Schema) WithMinLength(min int64) *Schema { - s.MinLength = &min - return s -} - -// WithPattern sets a pattern value -func (s *Schema) WithPattern(pattern string) *Schema { - s.Pattern = pattern - return s -} - -// WithMultipleOf sets a multiple of value -func (s *Schema) WithMultipleOf(number float64) *Schema { - s.MultipleOf = &number - return s -} - -// WithMaximum sets a maximum number value -func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema { - s.Maximum = &max - s.ExclusiveMaximum = exclusive - return s -} - -// WithMinimum sets a minimum number value -func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema { - s.Minimum = &min - s.ExclusiveMinimum = exclusive - return s -} - -// WithEnum sets a the enum values (replace) -func (s *Schema) WithEnum(values ...interface{}) *Schema { - s.Enum = append([]interface{}{}, values...) - return s -} - -// WithMaxItems sets the max items -func (s *Schema) WithMaxItems(size int64) *Schema { - s.MaxItems = &size - return s -} - -// WithMinItems sets the min items -func (s *Schema) WithMinItems(size int64) *Schema { - s.MinItems = &size - return s -} - -// UniqueValues dictates that this array can only have unique items -func (s *Schema) UniqueValues() *Schema { - s.UniqueItems = true - return s -} - -// AllowDuplicates this array can have duplicates -func (s *Schema) AllowDuplicates() *Schema { - s.UniqueItems = false - return s -} - -// AddToAllOf adds a schema to the allOf property -func (s *Schema) AddToAllOf(schemas ...Schema) *Schema { - s.AllOf = append(s.AllOf, schemas...) - return s -} - -// WithDiscriminator sets the name of the discriminator field -func (s *Schema) WithDiscriminator(discriminator string) *Schema { - s.Discriminator = discriminator - return s -} - -// AsReadOnly flags this schema as readonly -func (s *Schema) AsReadOnly() *Schema { - s.ReadOnly = true - return s -} - -// AsWritable flags this schema as writeable (not read-only) -func (s *Schema) AsWritable() *Schema { - s.ReadOnly = false - return s -} - -// WithExample sets the example for this schema -func (s *Schema) WithExample(example interface{}) *Schema { - s.Example = example - return s -} - -// WithExternalDocs sets/removes the external docs for/from this schema. -// When you pass empty strings as params the external documents will be removed. -// When you pass non-empty string as one value then those values will be used on the external docs object. -// So when you pass a non-empty description, you should also pass the url and vice versa. 
-func (s *Schema) WithExternalDocs(description, url string) *Schema { - if description == "" && url == "" { - s.ExternalDocs = nil - return s - } - - if s.ExternalDocs == nil { - s.ExternalDocs = &ExternalDocumentation{} - } - s.ExternalDocs.Description = description - s.ExternalDocs.URL = url - return s -} - -// WithXMLName sets the xml name for the object -func (s *Schema) WithXMLName(name string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Name = name - return s -} - -// WithXMLNamespace sets the xml namespace for the object -func (s *Schema) WithXMLNamespace(namespace string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Namespace = namespace - return s -} - -// WithXMLPrefix sets the xml prefix for the object -func (s *Schema) WithXMLPrefix(prefix string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Prefix = prefix - return s -} - -// AsXMLAttribute flags this object as xml attribute -func (s *Schema) AsXMLAttribute() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Attribute = true - return s -} - -// AsXMLElement flags this object as an xml node -func (s *Schema) AsXMLElement() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Attribute = false - return s -} - -// AsWrappedXML flags this object as wrapped, this is mostly useful for array types -func (s *Schema) AsWrappedXML() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Wrapped = true - return s -} - -// AsUnwrappedXML flags this object as an xml node -func (s *Schema) AsUnwrappedXML() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Wrapped = false - return s -} - -// MarshalJSON marshal this to JSON -func (s Schema) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SchemaProps) - if err != nil { - return nil, fmt.Errorf("schema props %v", err) - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, fmt.Errorf("vendor props %v", err) - } - b3, err := s.Ref.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("ref prop %v", err) - } - b4, err := s.Schema.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("schema prop %v", err) - } - b5, err := json.Marshal(s.SwaggerSchemaProps) - if err != nil { - return nil, fmt.Errorf("common validations %v", err) - } - var b6 []byte - if s.ExtraProps != nil { - jj, err := json.Marshal(s.ExtraProps) - if err != nil { - return nil, fmt.Errorf("extra props %v", err) - } - b6 = jj - } - return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *Schema) UnmarshalJSON(data []byte) error { - var sch Schema - if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { - return err - } - if err := json.Unmarshal(data, &sch.Ref); err != nil { - return err - } - if err := json.Unmarshal(data, &sch.Schema); err != nil { - return err - } - if err := json.Unmarshal(data, &sch.SwaggerSchemaProps); err != nil { - return err - } - - var d map[string]interface{} - if err := json.Unmarshal(data, &d); err != nil { - return err - } - - delete(d, "$ref") - delete(d, "$schema") - for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { - delete(d, pn) - } - - for k, vv := range d { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - if sch.Extensions == nil { - sch.Extensions = map[string]interface{}{} - } - sch.Extensions[k] = vv - continue - } - if sch.ExtraProps == nil { - sch.ExtraProps = map[string]interface{}{} - } - sch.ExtraProps[k] = vv - } - - *s = sch - 
- return nil -} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go deleted file mode 100644 index 22d4f10af..000000000 --- a/vendor/github.com/go-openapi/spec/security_scheme.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -const ( - basic = "basic" - apiKey = "apiKey" - oauth2 = "oauth2" - implicit = "implicit" - password = "password" - application = "application" - accessCode = "accessCode" -) - -// BasicAuth creates a basic auth security scheme -func BasicAuth() *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} -} - -// APIKeyAuth creates an api key auth security scheme -func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} -} - -// OAuth2Implicit creates an implicit flow oauth2 security scheme -func OAuth2Implicit(authorizationURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: implicit, - AuthorizationURL: authorizationURL, - }} -} - -// OAuth2Password creates a password flow oauth2 security scheme -func OAuth2Password(tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: password, - TokenURL: tokenURL, - }} -} - -// OAuth2Application creates an application flow oauth2 security scheme -func OAuth2Application(tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: application, - TokenURL: tokenURL, - }} -} - -// OAuth2AccessToken creates an access token flow oauth2 security scheme -func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: accessCode, - AuthorizationURL: authorizationURL, - TokenURL: tokenURL, - }} -} - -type SecuritySchemeProps struct { - Description string `json:"description,omitempty"` - Type string `json:"type"` - Name string `json:"name,omitempty"` // api key - In string `json:"in,omitempty"` // api key - Flow string `json:"flow,omitempty"` // oauth2 - AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 - TokenURL string `json:"tokenUrl,omitempty"` // oauth2 - Scopes map[string]string `json:"scopes,omitempty"` // oauth2 -} - -// AddScope adds a scope to this security scheme -func (s *SecuritySchemeProps) AddScope(scope, description string) { - if s.Scopes == nil { - s.Scopes = make(map[string]string) - } - s.Scopes[scope] = description -} - -// SecurityScheme allows the definition of a security scheme that can be used by the operations. 
-// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) -// and OAuth2's common flows (implicit, password, application and access code). -// -// For more information: http://goo.gl/8us55a#securitySchemeObject -type SecurityScheme struct { - VendorExtensible - SecuritySchemeProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SecurityScheme) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (s SecurityScheme) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SecuritySchemeProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *SecurityScheme) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { - return err - } - if err := json.Unmarshal(data, &s.VendorExtensible); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go deleted file mode 100644 index cc2ae56b2..000000000 --- a/vendor/github.com/go-openapi/spec/spec.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import "encoding/json" - -//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... 
-//go:generate perl -pi -e s,Json,JSON,g bindata.go - -const ( - // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs - SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" - // JSONSchemaURL the url for the json schema schema - JSONSchemaURL = "http://json-schema.org/draft-04/schema#" -) - -var ( - jsonSchema = MustLoadJSONSchemaDraft04() - swaggerSchema = MustLoadSwagger20Schema() -) - -// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error -func MustLoadJSONSchemaDraft04() *Schema { - d, e := JSONSchemaDraft04() - if e != nil { - panic(e) - } - return d -} - -// JSONSchemaDraft04 loads the json schema document for json shema draft04 -func JSONSchemaDraft04() (*Schema, error) { - b, err := Asset("jsonschema-draft-04.json") - if err != nil { - return nil, err - } - - schema := new(Schema) - if err := json.Unmarshal(b, schema); err != nil { - return nil, err - } - return schema, nil -} - -// MustLoadSwagger20Schema panics when Swagger20Schema returns an error -func MustLoadSwagger20Schema() *Schema { - d, e := Swagger20Schema() - if e != nil { - panic(e) - } - return d -} - -// Swagger20Schema loads the swagger 2.0 schema from the embedded assets -func Swagger20Schema() (*Schema, error) { - - b, err := Asset("v2/schema.json") - if err != nil { - return nil, err - } - - schema := new(Schema) - if err := json.Unmarshal(b, schema); err != nil { - return nil, err - } - return schema, nil -} diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go deleted file mode 100644 index ff3ef875e..000000000 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "strconv" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// Swagger this is the root document object for the API specification. -// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) together into one document. 
-// -// For more information: http://goo.gl/8us55a#swagger-object- -type Swagger struct { - VendorExtensible - SwaggerProps -} - -// JSONLookup look up a value by the json property name -func (s Swagger) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) - return r, err -} - -// MarshalJSON marshals this swagger structure to json -func (s Swagger) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SwaggerProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON unmarshals a swagger spec from json -func (s *Swagger) UnmarshalJSON(data []byte) error { - var sw Swagger - if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { - return err - } - if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { - return err - } - *s = sw - return nil -} - -type SwaggerProps struct { - ID string `json:"id,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss] - Swagger string `json:"swagger,omitempty"` - Info *Info `json:"info,omitempty"` - Host string `json:"host,omitempty"` - BasePath string `json:"basePath,omitempty"` // must start with a leading "/" - Paths *Paths `json:"paths"` // required - Definitions Definitions `json:"definitions"` - Parameters map[string]Parameter `json:"parameters,omitempty"` - Responses map[string]Response `json:"responses,omitempty"` - SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` - Security []map[string][]string `json:"security,omitempty"` - Tags []Tag `json:"tags,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -// Dependencies represent a dependencies property -type Dependencies map[string]SchemaOrStringArray - -// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property -type SchemaOrBool struct { - Allows bool - Schema *Schema -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { - if token == "allows" { - return s.Allows, nil - } - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -var jsTrue = []byte("true") -var jsFalse = []byte("false") - -// MarshalJSON convert this object to JSON -func (s SchemaOrBool) MarshalJSON() ([]byte, error) { - if s.Schema != nil { - return json.Marshal(s.Schema) - } - - if s.Schema == nil && !s.Allows { - return jsFalse, nil - } - return jsTrue, nil -} - -// UnmarshalJSON converts this bool or schema object from a JSON structure -func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { - var nw SchemaOrBool - if len(data) >= 4 { - if data[0] == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') - } - *s = nw - return nil -} - -// SchemaOrStringArray represents a schema or a string array -type SchemaOrStringArray struct { - Schema *Schema - Property []string -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, 
error) { - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { - if len(s.Property) > 0 { - return json.Marshal(s.Property) - } - if s.Schema != nil { - return json.Marshal(s.Schema) - } - return nil, nil -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - } - var nw SchemaOrStringArray - if first == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Property); err != nil { - return err - } - } - *s = nw - return nil -} - -// Definitions contains the models explicitly defined in this spec -// An object to hold data types that can be consumed and produced by operations. -// These data types can be primitives, arrays or models. -// -// For more information: http://goo.gl/8us55a#definitionsObject -type Definitions map[string]Schema - -// SecurityDefinitions a declaration of the security schemes available to be used in the specification. -// This does not enforce the security schemes on the operations and only serves to provide -// the relevant details for each scheme. -// -// For more information: http://goo.gl/8us55a#securityDefinitionsObject -type SecurityDefinitions map[string]*SecurityScheme - -// StringOrArray represents a value that can either be a string -// or an array of strings. Mainly here for serialization purposes -type StringOrArray []string - -// Contains returns true when the value is contained in the slice -func (s StringOrArray) Contains(value string) bool { - for _, str := range s { - if str == value { - return true - } - } - return false -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { - if _, err := strconv.Atoi(token); err == nil { - r, _, err := jsonpointer.GetForToken(s.Schemas, token) - return r, err - } - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string -func (s *StringOrArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - } - - if first == '[' { - var parsed []string - if err := json.Unmarshal(data, &parsed); err != nil { - return err - } - *s = StringOrArray(parsed) - return nil - } - - var single interface{} - if err := json.Unmarshal(data, &single); err != nil { - return err - } - if single == nil { - return nil - } - switch single.(type) { - case string: - *s = StringOrArray([]string{single.(string)}) - return nil - default: - return fmt.Errorf("only string or array is allowed, not %T", single) - } -} - -// MarshalJSON converts this string or array to a JSON array or JSON string -func (s StringOrArray) MarshalJSON() ([]byte, error) { - if len(s) == 1 { - return json.Marshal([]string(s)[0]) - } - return json.Marshal([]string(s)) -} - -// SchemaOrArray represents a value that can either be a Schema -// or an array of Schema. 
Mainly here for serialization purposes -type SchemaOrArray struct { - Schema *Schema - Schemas []Schema -} - -// Len returns the number of schemas in this property -func (s SchemaOrArray) Len() int { - if s.Schema != nil { - return 1 - } - return len(s.Schemas) -} - -// ContainsType returns true when one of the schemas is of the specified type -func (s *SchemaOrArray) ContainsType(name string) bool { - if s.Schema != nil { - return s.Schema.Type != nil && s.Schema.Type.Contains(name) - } - return false -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrArray) MarshalJSON() ([]byte, error) { - if len(s.Schemas) > 0 { - return json.Marshal(s.Schemas) - } - return json.Marshal(s.Schema) -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { - var nw SchemaOrArray - var first byte - if len(data) > 1 { - first = data[0] - } - if first == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Schemas); err != nil { - return err - } - } - *s = nw - return nil -} - -// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go deleted file mode 100644 index 97f555840..000000000 --- a/vendor/github.com/go-openapi/spec/tag.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -type TagProps struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -// NewTag creates a new tag -func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { - return Tag{TagProps: TagProps{description, name, externalDocs}} -} - -// Tag allows adding meta data to a single tag that is used by the [Operation Object](http://goo.gl/8us55a#operationObject). -// It is not mandatory to have a Tag Object per tag used there. 
-// -// For more information: http://goo.gl/8us55a#tagObject -type Tag struct { - VendorExtensible - TagProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (t Tag) JSONLookup(token string) (interface{}, error) { - if ex, ok := t.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(t.TagProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (t Tag) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(t.TagProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(t.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (t *Tag) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &t.TagProps); err != nil { - return err - } - return json.Unmarshal(data, &t.VendorExtensible) -} diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go deleted file mode 100644 index 945a46703..000000000 --- a/vendor/github.com/go-openapi/spec/xml_object.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// XMLObject a metadata object that allows for more fine-tuned XML model definitions. 
-// -// For more information: http://goo.gl/8us55a#xmlObject -type XMLObject struct { - Name string `json:"name,omitempty"` - Namespace string `json:"namespace,omitempty"` - Prefix string `json:"prefix,omitempty"` - Attribute bool `json:"attribute,omitempty"` - Wrapped bool `json:"wrapped,omitempty"` -} - -// WithName sets the xml name for the object -func (x *XMLObject) WithName(name string) *XMLObject { - x.Name = name - return x -} - -// WithNamespace sets the xml namespace for the object -func (x *XMLObject) WithNamespace(namespace string) *XMLObject { - x.Namespace = namespace - return x -} - -// WithPrefix sets the xml prefix for the object -func (x *XMLObject) WithPrefix(prefix string) *XMLObject { - x.Prefix = prefix - return x -} - -// AsAttribute flags this object as xml attribute -func (x *XMLObject) AsAttribute() *XMLObject { - x.Attribute = true - return x -} - -// AsElement flags this object as an xml node -func (x *XMLObject) AsElement() *XMLObject { - x.Attribute = false - return x -} - -// AsWrapped flags this object as wrapped, this is mostly useful for array types -func (x *XMLObject) AsWrapped() *XMLObject { - x.Wrapped = true - return x -} - -// AsUnwrapped flags this object as an xml node -func (x *XMLObject) AsUnwrapped() *XMLObject { - x.Wrapped = false - return x -} diff --git a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. 
- -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/swag/LICENSE b/vendor/github.com/go-openapi/swag/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/swag/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go deleted file mode 100644 index 28d912410..000000000 --- a/vendor/github.com/go-openapi/swag/convert.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package swag - -import ( - "math" - "strconv" - "strings" -) - -// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER -const ( - maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 - minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 -) - -// IsFloat64AJSONInteger allow for integers [-2^53, 2^53-1] inclusive -func IsFloat64AJSONInteger(f float64) bool { - if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat { - return false - } - - return f == float64(int64(f)) || f == float64(uint64(f)) -} - -var evaluatesAsTrue = map[string]struct{}{ - "true": struct{}{}, - "1": struct{}{}, - "yes": struct{}{}, - "ok": struct{}{}, - "y": struct{}{}, - "on": struct{}{}, - "selected": struct{}{}, - "checked": struct{}{}, - "t": struct{}{}, - "enabled": struct{}{}, -} - -// ConvertBool turn a string into a boolean -func ConvertBool(str string) (bool, error) { - _, ok := evaluatesAsTrue[strings.ToLower(str)] - return ok, nil -} - -// ConvertFloat32 turn a string into a float32 -func ConvertFloat32(str string) (float32, error) { - f, err := strconv.ParseFloat(str, 32) - if err != nil { - return 0, err - } - return float32(f), nil -} - -// ConvertFloat64 turn a string into a float64 -func ConvertFloat64(str string) (float64, error) { - return strconv.ParseFloat(str, 64) -} - -// ConvertInt8 turn a string into int8 boolean -func ConvertInt8(str string) (int8, error) { - i, err := strconv.ParseInt(str, 10, 8) - if err != nil { - return 0, err - } - return int8(i), nil -} - -// ConvertInt16 turn a string into a int16 -func ConvertInt16(str string) (int16, error) { - i, err := strconv.ParseInt(str, 10, 16) - if err != nil { - return 0, err - } - return int16(i), nil -} - -// ConvertInt32 turn a string into a int32 -func ConvertInt32(str string) (int32, error) { - i, err := strconv.ParseInt(str, 10, 32) - if err != nil { - return 0, err - } - return int32(i), nil -} - -// ConvertInt64 turn a string into a int64 -func ConvertInt64(str string) (int64, error) { - return strconv.ParseInt(str, 10, 64) -} - -// ConvertUint8 turn a string into a uint8 -func ConvertUint8(str string) (uint8, error) { - i, err := strconv.ParseUint(str, 10, 8) - if err != nil { - return 0, err - } - return uint8(i), nil -} - -// ConvertUint16 turn a string into a uint16 -func ConvertUint16(str string) (uint16, error) { - i, err := strconv.ParseUint(str, 10, 16) - if err != nil { - return 0, err - } - return uint16(i), nil -} - -// ConvertUint32 turn a string into a uint32 -func ConvertUint32(str string) (uint32, error) { - i, err := strconv.ParseUint(str, 10, 32) - if err != nil { - return 0, err - } - return uint32(i), nil -} - -// ConvertUint64 turn a string into a uint64 -func ConvertUint64(str string) (uint64, error) { - return strconv.ParseUint(str, 10, 64) -} - -// FormatBool turns a boolean into a string -func FormatBool(value bool) string { - return strconv.FormatBool(value) -} - -// FormatFloat32 turns a float32 into a string -func FormatFloat32(value float32) string { - return strconv.FormatFloat(float64(value), 'f', -1, 32) -} - -// FormatFloat64 turns a float64 into a string -func FormatFloat64(value float64) string { - return strconv.FormatFloat(value, 'f', -1, 64) -} - -// FormatInt8 turns an int8 into a string -func FormatInt8(value int8) string { - return strconv.FormatInt(int64(value), 10) -} - -// FormatInt16 turns an int16 into a string -func FormatInt16(value int16) string { - return strconv.FormatInt(int64(value), 10) -} - -// FormatInt32 turns an int32 
into a string -func FormatInt32(value int32) string { - return strconv.FormatInt(int64(value), 10) -} - -// FormatInt64 turns an int64 into a string -func FormatInt64(value int64) string { - return strconv.FormatInt(value, 10) -} - -// FormatUint8 turns an uint8 into a string -func FormatUint8(value uint8) string { - return strconv.FormatUint(uint64(value), 10) -} - -// FormatUint16 turns an uint16 into a string -func FormatUint16(value uint16) string { - return strconv.FormatUint(uint64(value), 10) -} - -// FormatUint32 turns an uint32 into a string -func FormatUint32(value uint32) string { - return strconv.FormatUint(uint64(value), 10) -} - -// FormatUint64 turns an uint64 into a string -func FormatUint64(value uint64) string { - return strconv.FormatUint(value, 10) -} diff --git a/vendor/github.com/go-openapi/swag/convert_types.go b/vendor/github.com/go-openapi/swag/convert_types.go deleted file mode 100644 index c95e4e78b..000000000 --- a/vendor/github.com/go-openapi/swag/convert_types.go +++ /dev/null @@ -1,595 +0,0 @@ -package swag - -import "time" - -// This file was taken from the aws go sdk - -// String returns a pointer to of the string value passed in. -func String(v string) *string { - return &v -} - -// StringValue returns the value of the string pointer passed in or -// "" if the pointer is nil. -func StringValue(v *string) string { - if v != nil { - return *v - } - return "" -} - -// StringSlice converts a slice of string values into a slice of -// string pointers -func StringSlice(src []string) []*string { - dst := make([]*string, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// StringValueSlice converts a slice of string pointers into a slice of -// string values -func StringValueSlice(src []*string) []string { - dst := make([]string, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// StringMap converts a string map of string values into a string -// map of string pointers -func StringMap(src map[string]string) map[string]*string { - dst := make(map[string]*string) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// StringValueMap converts a string map of string pointers into a string -// map of string values -func StringValueMap(src map[string]*string) map[string]string { - dst := make(map[string]string) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Bool returns a pointer to of the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of the bool pointer passed in or -// false if the pointer is nil. 
-func BoolValue(v *bool) bool { - if v != nil { - return *v - } - return false -} - -// BoolSlice converts a slice of bool values into a slice of -// bool pointers -func BoolSlice(src []bool) []*bool { - dst := make([]*bool, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// BoolValueSlice converts a slice of bool pointers into a slice of -// bool values -func BoolValueSlice(src []*bool) []bool { - dst := make([]bool, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// BoolMap converts a string map of bool values into a string -// map of bool pointers -func BoolMap(src map[string]bool) map[string]*bool { - dst := make(map[string]*bool) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// BoolValueMap converts a string map of bool pointers into a string -// map of bool values -func BoolValueMap(src map[string]*bool) map[string]bool { - dst := make(map[string]bool) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int returns a pointer to of the int value passed in. -func Int(v int) *int { - return &v -} - -// IntValue returns the value of the int pointer passed in or -// 0 if the pointer is nil. -func IntValue(v *int) int { - if v != nil { - return *v - } - return 0 -} - -// IntSlice converts a slice of int values into a slice of -// int pointers -func IntSlice(src []int) []*int { - dst := make([]*int, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// IntValueSlice converts a slice of int pointers into a slice of -// int values -func IntValueSlice(src []*int) []int { - dst := make([]int, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// IntMap converts a string map of int values into a string -// map of int pointers -func IntMap(src map[string]int) map[string]*int { - dst := make(map[string]*int) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// IntValueMap converts a string map of int pointers into a string -// map of int values -func IntValueMap(src map[string]*int) map[string]int { - dst := make(map[string]int) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int32 returns a pointer to of the int64 value passed in. -func Int32(v int32) *int32 { - return &v -} - -// Int32Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. 
-func Int32Value(v *int32) int32 { - if v != nil { - return *v - } - return 0 -} - -// Int32Slice converts a slice of int64 values into a slice of -// int32 pointers -func Int32Slice(src []int32) []*int32 { - dst := make([]*int32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int32ValueSlice converts a slice of int32 pointers into a slice of -// int32 values -func Int32ValueSlice(src []*int32) []int32 { - dst := make([]int32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int32Map converts a string map of int32 values into a string -// map of int32 pointers -func Int32Map(src map[string]int32) map[string]*int32 { - dst := make(map[string]*int32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int32ValueMap converts a string map of int32 pointers into a string -// map of int32 values -func Int32ValueMap(src map[string]*int32) map[string]int32 { - dst := make(map[string]int32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int64 returns a pointer to of the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. -func Int64Value(v *int64) int64 { - if v != nil { - return *v - } - return 0 -} - -// Int64Slice converts a slice of int64 values into a slice of -// int64 pointers -func Int64Slice(src []int64) []*int64 { - dst := make([]*int64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int64ValueSlice converts a slice of int64 pointers into a slice of -// int64 values -func Int64ValueSlice(src []*int64) []int64 { - dst := make([]int64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int64Map converts a string map of int64 values into a string -// map of int64 pointers -func Int64Map(src map[string]int64) map[string]*int64 { - dst := make(map[string]*int64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int64ValueMap converts a string map of int64 pointers into a string -// map of int64 values -func Int64ValueMap(src map[string]*int64) map[string]int64 { - dst := make(map[string]int64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint returns a pouinter to of the uint value passed in. -func Uint(v uint) *uint { - return &v -} - -// UintValue returns the value of the uint pouinter passed in or -// 0 if the pouinter is nil. 
-func UintValue(v *uint) uint { - if v != nil { - return *v - } - return 0 -} - -// UintSlice converts a slice of uint values uinto a slice of -// uint pouinters -func UintSlice(src []uint) []*uint { - dst := make([]*uint, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// UintValueSlice converts a slice of uint pouinters uinto a slice of -// uint values -func UintValueSlice(src []*uint) []uint { - dst := make([]uint, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// UintMap converts a string map of uint values uinto a string -// map of uint pouinters -func UintMap(src map[string]uint) map[string]*uint { - dst := make(map[string]*uint) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// UintValueMap converts a string map of uint pouinters uinto a string -// map of uint values -func UintValueMap(src map[string]*uint) map[string]uint { - dst := make(map[string]uint) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint32 returns a pouinter to of the uint64 value passed in. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint32Value returns the value of the uint64 pouinter passed in or -// 0 if the pouinter is nil. -func Uint32Value(v *uint32) uint32 { - if v != nil { - return *v - } - return 0 -} - -// Uint32Slice converts a slice of uint64 values uinto a slice of -// uint32 pouinters -func Uint32Slice(src []uint32) []*uint32 { - dst := make([]*uint32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint32ValueSlice converts a slice of uint32 pouinters uinto a slice of -// uint32 values -func Uint32ValueSlice(src []*uint32) []uint32 { - dst := make([]uint32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint32Map converts a string map of uint32 values uinto a string -// map of uint32 pouinters -func Uint32Map(src map[string]uint32) map[string]*uint32 { - dst := make(map[string]*uint32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint32ValueMap converts a string map of uint32 pouinters uinto a string -// map of uint32 values -func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { - dst := make(map[string]uint32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint64 returns a pouinter to of the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return &v -} - -// Uint64Value returns the value of the uint64 pouinter passed in or -// 0 if the pouinter is nil. 
-func Uint64Value(v *uint64) uint64 { - if v != nil { - return *v - } - return 0 -} - -// Uint64Slice converts a slice of uint64 values uinto a slice of -// uint64 pouinters -func Uint64Slice(src []uint64) []*uint64 { - dst := make([]*uint64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint64ValueSlice converts a slice of uint64 pouinters uinto a slice of -// uint64 values -func Uint64ValueSlice(src []*uint64) []uint64 { - dst := make([]uint64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint64Map converts a string map of uint64 values uinto a string -// map of uint64 pouinters -func Uint64Map(src map[string]uint64) map[string]*uint64 { - dst := make(map[string]*uint64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint64ValueMap converts a string map of uint64 pouinters uinto a string -// map of uint64 values -func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { - dst := make(map[string]uint64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float64 returns a pointer to of the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float64 pointer passed in or -// 0 if the pointer is nil. -func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to of the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil. 
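For reference, the map and slice variants of these removed pointer/value helpers deliberately copy each element (v := val) before taking its address, so every entry in the result points at distinct storage rather than at a shared loop variable. A minimal standalone sketch of that pattern; it mirrors the removed Int64Map helper instead of importing the vendored package:

package main

import "fmt"

// Int64Map mirrors the deleted helper: it copies each value into a new
// variable before taking its address, so every map entry gets its own
// pointer rather than the address of the loop variable.
func Int64Map(src map[string]int64) map[string]*int64 {
	dst := make(map[string]*int64, len(src))
	for k, val := range src {
		v := val
		dst[k] = &v
	}
	return dst
}

func main() {
	in := map[string]int64{"a": 1, "b": 2}
	out := Int64Map(in)
	fmt.Println(*out["a"], *out["b"]) // 1 2
}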
-func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go deleted file mode 100644 index 6e9ec20fc..000000000 --- a/vendor/github.com/go-openapi/swag/json.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "bytes" - "encoding/json" - "reflect" - "strings" - "sync" - - "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" -) - -// DefaultJSONNameProvider the default cache for types -var DefaultJSONNameProvider = NewNameProvider() - -const comma = byte(',') - -var closers = map[byte]byte{ - '{': '}', - '[': ']', -} - -type ejMarshaler interface { - MarshalEasyJSON(w *jwriter.Writer) -} - -type ejUnmarshaler interface { - UnmarshalEasyJSON(w *jlexer.Lexer) -} - -// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaller -// so it takes the fastest option available. 
-func WriteJSON(data interface{}) ([]byte, error) { - if d, ok := data.(ejMarshaler); ok { - jw := new(jwriter.Writer) - d.MarshalEasyJSON(jw) - return jw.BuildBytes() - } - if d, ok := data.(json.Marshaler); ok { - return d.MarshalJSON() - } - return json.Marshal(data) -} - -// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaller -// so it takes the fastes option available -func ReadJSON(data []byte, value interface{}) error { - if d, ok := value.(ejUnmarshaler); ok { - jl := &jlexer.Lexer{Data: data} - d.UnmarshalEasyJSON(jl) - return jl.Error() - } - if d, ok := value.(json.Unmarshaler); ok { - return d.UnmarshalJSON(data) - } - return json.Unmarshal(data, value) -} - -// DynamicJSONToStruct converts an untyped json structure into a struct -func DynamicJSONToStruct(data interface{}, target interface{}) error { - // TODO: convert straight to a json typed map (mergo + iterate?) - b, err := WriteJSON(data) - if err != nil { - return err - } - if err := ReadJSON(b, target); err != nil { - return err - } - return nil -} - -// ConcatJSON concatenates multiple json objects efficiently -func ConcatJSON(blobs ...[]byte) []byte { - if len(blobs) == 0 { - return nil - } - if len(blobs) == 1 { - return blobs[0] - } - - last := len(blobs) - 1 - var opening, closing byte - a := 0 - idx := 0 - buf := bytes.NewBuffer(nil) - - for i, b := range blobs { - if len(b) > 0 && opening == 0 { // is this an array or an object? - opening, closing = b[0], closers[b[0]] - } - - if opening != '{' && opening != '[' { - continue // don't know how to concatenate non container objects - } - - if len(b) < 3 { // yep empty but also the last one, so closing this thing - if i == last && a > 0 { - buf.WriteByte(closing) - } - continue - } - - idx = 0 - if a > 0 { // we need to join with a comma for everything beyond the first non-empty item - buf.WriteByte(comma) - idx = 1 // this is not the first or the last so we want to drop the leading bracket - } - - if i != last { // not the last one, strip brackets - buf.Write(b[idx : len(b)-1]) - } else { // last one, strip only the leading bracket - buf.Write(b[idx:]) - } - a++ - } - // somehow it ended up being empty, so provide a default value - if buf.Len() == 0 { - buf.WriteByte(opening) - buf.WriteByte(closing) - } - return buf.Bytes() -} - -// ToDynamicJSON turns an object into a properly JSON typed structure -func ToDynamicJSON(data interface{}) interface{} { - // TODO: convert straight to a json typed map (mergo + iterate?) - b, _ := json.Marshal(data) - var res interface{} - json.Unmarshal(b, &res) - return res -} - -// FromDynamicJSON turns an object into a properly JSON typed structure -func FromDynamicJSON(data, target interface{}) error { - b, _ := json.Marshal(data) - return json.Unmarshal(b, target) -} - -// NameProvider represents an object capabale of translating from go property names -// to json property names -// This type is thread-safe. 
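For reference, ConcatJSON above splices several JSON objects (or arrays) into a single container by stripping the surrounding brackets and joining the bodies with commas; empty blobs are skipped. A usage sketch, assuming the vendored import path github.com/go-openapi/swag taken from the diff header:

package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	merged := swag.ConcatJSON(
		[]byte(`{"name":"pet"}`),
		[]byte(`{}`), // empty objects contribute nothing
		[]byte(`{"required":true}`),
	)
	fmt.Println(string(merged)) // {"name":"pet","required":true}
}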
-type NameProvider struct { - lock *sync.Mutex - index map[reflect.Type]nameIndex -} - -type nameIndex struct { - jsonNames map[string]string - goNames map[string]string -} - -// NewNameProvider creates a new name provider -func NewNameProvider() *NameProvider { - return &NameProvider{ - lock: &sync.Mutex{}, - index: make(map[reflect.Type]nameIndex), - } -} - -func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) { - for i := 0; i < tpe.NumField(); i++ { - targetDes := tpe.Field(i) - - if targetDes.PkgPath != "" { // unexported - continue - } - - if targetDes.Anonymous { // walk embedded structures tree down first - buildnameIndex(targetDes.Type, idx, reverseIdx) - continue - } - - if tag := targetDes.Tag.Get("json"); tag != "" { - - parts := strings.Split(tag, ",") - if len(parts) == 0 { - continue - } - - nm := parts[0] - if nm == "-" { - continue - } - if nm == "" { // empty string means we want to use the Go name - nm = targetDes.Name - } - - idx[nm] = targetDes.Name - reverseIdx[targetDes.Name] = nm - } - } -} - -func newNameIndex(tpe reflect.Type) nameIndex { - var idx = make(map[string]string, tpe.NumField()) - var reverseIdx = make(map[string]string, tpe.NumField()) - - buildnameIndex(tpe, idx, reverseIdx) - return nameIndex{jsonNames: idx, goNames: reverseIdx} -} - -// GetJSONNames gets all the json property names for a type -func (n *NameProvider) GetJSONNames(subject interface{}) []string { - tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() - names, ok := n.index[tpe] - if !ok { - names = n.makeNameIndex(tpe) - } - - var res []string - for k := range names.jsonNames { - res = append(res, k) - } - return res -} - -// GetJSONName gets the json name for a go property name -func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) { - tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() - return n.GetJSONNameForType(tpe, name) -} - -// GetJSONNameForType gets the json name for a go property name on a given type -func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) { - names, ok := n.index[tpe] - if !ok { - names = n.makeNameIndex(tpe) - } - nme, ok := names.goNames[name] - return nme, ok -} - -func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex { - n.lock.Lock() - defer n.lock.Unlock() - names := newNameIndex(tpe) - n.index[tpe] = names - return names -} - -// GetGoName gets the go name for a json property name -func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) { - tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() - return n.GetGoNameForType(tpe, name) -} - -// GetGoNameForType gets the go name for a given type for a json property name -func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) { - names, ok := n.index[tpe] - if !ok { - names = n.makeNameIndex(tpe) - } - nme, ok := names.jsonNames[name] - return nme, ok -} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go deleted file mode 100644 index 6dbc31330..000000000 --- a/vendor/github.com/go-openapi/swag/loading.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "fmt" - "io/ioutil" - "net/http" - "strings" -) - -// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in -func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes)(path) -} - -// LoadStrategy returns a loader function for a given path or uri -func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { - if strings.HasPrefix(path, "http") { - return remote - } - return local -} - -func loadHTTPBytes(path string) ([]byte, error) { - resp, err := http.Get(path) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) - } - - return ioutil.ReadAll(resp.Body) -} diff --git a/vendor/github.com/go-openapi/swag/net.go b/vendor/github.com/go-openapi/swag/net.go deleted file mode 100644 index 8323fa37b..000000000 --- a/vendor/github.com/go-openapi/swag/net.go +++ /dev/null @@ -1,24 +0,0 @@ -package swag - -import ( - "net" - "strconv" -) - -// SplitHostPort splits a network address into a host and a port. -// The port is -1 when there is no port to be found -func SplitHostPort(addr string) (host string, port int, err error) { - h, p, err := net.SplitHostPort(addr) - if err != nil { - return "", -1, err - } - if p == "" { - return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr} - } - - pi, err := strconv.Atoi(p) - if err != nil { - return "", -1, err - } - return h, pi, nil -} diff --git a/vendor/github.com/go-openapi/swag/path.go b/vendor/github.com/go-openapi/swag/path.go deleted file mode 100644 index 273e9fbed..000000000 --- a/vendor/github.com/go-openapi/swag/path.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
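For reference, LoadStrategy above dispatches purely on the path prefix: anything starting with "http" is handed to the remote loader, everything else to the local one. A sketch under that assumption; the ./swagger.json path and the stubbed remote loader are illustrative only:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/go-openapi/swag"
)

func main() {
	// Pick a loader based on the path prefix; here remote loading is stubbed out.
	load := swag.LoadStrategy("./swagger.json", ioutil.ReadFile, func(p string) ([]byte, error) {
		return nil, fmt.Errorf("remote loading disabled in this sketch: %s", p)
	})

	doc, err := load("./swagger.json")
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Println("loaded", len(doc), "bytes")

	// SplitHostPort (from the removed net.go) returns the port as an int.
	host, port, err := swag.SplitHostPort("localhost:9090")
	fmt.Println(host, port, err) // localhost 9090 <nil>
}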
- -package swag - -import ( - "os" - "path/filepath" - "runtime" - "strings" -) - -const ( - // GOPATHKey represents the env key for gopath - GOPATHKey = "GOPATH" -) - -// FindInSearchPath finds a package in a provided lists of paths -func FindInSearchPath(searchPath, pkg string) string { - pathsList := filepath.SplitList(searchPath) - for _, path := range pathsList { - if evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", pkg)); err == nil { - if _, err := os.Stat(evaluatedPath); err == nil { - return evaluatedPath - } - } - } - return "" -} - -// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT -func FindInGoSearchPath(pkg string) string { - return FindInSearchPath(FullGoSearchPath(), pkg) -} - -// FullGoSearchPath gets the search paths for finding packages -func FullGoSearchPath() string { - allPaths := os.Getenv(GOPATHKey) - if allPaths != "" { - allPaths = strings.Join([]string{allPaths, runtime.GOROOT()}, ":") - } else { - allPaths = runtime.GOROOT() - } - return allPaths -} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go deleted file mode 100644 index d8b54ee61..000000000 --- a/vendor/github.com/go-openapi/swag/util.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
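For reference, FindInSearchPath above probes each entry of a GOPATH-style list under src/<pkg>, resolving symlinks, and returns the first directory that exists. A small sketch, assuming github.com/go-openapi/swag itself as the package being looked up:

package main

import (
	"fmt"
	"os"

	"github.com/go-openapi/swag"
)

func main() {
	// Returns "" when the package cannot be found in any GOPATH entry.
	if dir := swag.FindInSearchPath(os.Getenv("GOPATH"), "github.com/go-openapi/swag"); dir != "" {
		fmt.Println("found package at", dir)
	} else {
		fmt.Println("package not found in GOPATH")
	}
}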
- -package swag - -import ( - "math" - "reflect" - "regexp" - "sort" - "strings" -) - -// Taken from https://github.com/golang/lint/blob/1fab560e16097e5b69afb66eb93aab843ef77845/lint.go#L663-L698 -var commonInitialisms = map[string]bool{ - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "JSON": true, - "LHS": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UUID": true, - "UID": true, - "UI": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XSRF": true, - "XSS": true, -} -var initialisms []string - -func init() { - for k := range commonInitialisms { - initialisms = append(initialisms, k) - } - sort.Sort(sort.Reverse(byLength(initialisms))) -} - -// JoinByFormat joins a string array by a known format: -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) -func JoinByFormat(data []string, format string) []string { - if len(data) == 0 { - return data - } - var sep string - switch format { - case "ssv": - sep = " " - case "tsv": - sep = "\t" - case "pipes": - sep = "|" - case "multi": - return data - default: - sep = "," - } - return []string{strings.Join(data, sep)} -} - -// SplitByFormat splits a string by a known format: -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) -func SplitByFormat(data, format string) []string { - if data == "" { - return nil - } - var sep string - switch format { - case "ssv": - sep = " " - case "tsv": - sep = "\t" - case "pipes": - sep = "|" - case "multi": - return nil - default: - sep = "," - } - var result []string - for _, s := range strings.Split(data, sep) { - if ts := strings.TrimSpace(s); ts != "" { - result = append(result, ts) - } - } - return result -} - -type byLength []string - -func (s byLength) Len() int { - return len(s) -} -func (s byLength) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byLength) Less(i, j int) bool { - return len(s[i]) < len(s[j]) -} - -// Prepares strings by splitting by caps, spaces, dashes, and underscore -func split(str string) (words []string) { - repl := strings.NewReplacer( - "@", "At ", - "&", "And ", - "|", "Pipe ", - "$", "Dollar ", - "!", "Bang ", - "-", " ", - "_", " ", - ) - - rex1 := regexp.MustCompile(`(\p{Lu})`) - rex2 := regexp.MustCompile(`(\pL|\pM|\pN|\p{Pc})+`) - - str = trim(str) - - // Convert dash and underscore to spaces - str = repl.Replace(str) - - // Split when uppercase is found (needed for Snake) - str = rex1.ReplaceAllString(str, " $1") - // check if consecutive single char things make up an initialism - - for _, k := range initialisms { - str = strings.Replace(str, rex1.ReplaceAllString(k, " $1"), " "+k, -1) - } - // Get the final list of words - words = rex2.FindAllString(str, -1) - - return -} - -// Removes leading whitespaces -func trim(str string) string { - return strings.Trim(str, " ") -} - -// Shortcut to strings.ToUpper() -func upper(str string) string { - return strings.ToUpper(trim(str)) -} - -// Shortcut to strings.ToLower() -func lower(str string) string { - return strings.ToLower(trim(str)) -} - -// ToFileName lowercases and underscores a go type name -func ToFileName(name string) string { - var out []string - for _, 
w := range split(name) { - out = append(out, lower(w)) - } - return strings.Join(out, "_") -} - -// ToCommandName lowercases and underscores a go type name -func ToCommandName(name string) string { - var out []string - for _, w := range split(name) { - out = append(out, lower(w)) - } - return strings.Join(out, "-") -} - -// ToHumanNameLower represents a code name as a human series of words -func ToHumanNameLower(name string) string { - var out []string - for _, w := range split(name) { - if !commonInitialisms[upper(w)] { - out = append(out, lower(w)) - } else { - out = append(out, w) - } - } - return strings.Join(out, " ") -} - -// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized -func ToHumanNameTitle(name string) string { - var out []string - for _, w := range split(name) { - uw := upper(w) - if !commonInitialisms[uw] { - out = append(out, upper(w[:1])+lower(w[1:])) - } else { - out = append(out, w) - } - } - return strings.Join(out, " ") -} - -// ToJSONName camelcases a name which can be underscored or pascal cased -func ToJSONName(name string) string { - var out []string - for i, w := range split(name) { - if i == 0 { - out = append(out, lower(w)) - continue - } - out = append(out, upper(w[:1])+lower(w[1:])) - } - return strings.Join(out, "") -} - -// ToVarName camelcases a name which can be underscored or pascal cased -func ToVarName(name string) string { - res := ToGoName(name) - if len(res) <= 1 { - return lower(res) - } - return lower(res[:1]) + res[1:] -} - -// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes -func ToGoName(name string) string { - var out []string - for _, w := range split(name) { - uw := upper(w) - mod := int(math.Min(float64(len(uw)), 2)) - if !commonInitialisms[uw] && !commonInitialisms[uw[:len(uw)-mod]] { - uw = upper(w[:1]) + lower(w[1:]) - } - out = append(out, uw) - } - return strings.Join(out, "") -} - -// ContainsStringsCI searches a slice of strings for a case-insensitive match -func ContainsStringsCI(coll []string, item string) bool { - for _, a := range coll { - if strings.EqualFold(a, item) { - return true - } - } - return false -} - -type zeroable interface { - IsZero() bool -} - -// IsZero returns true when the value passed into the function is a zero value. -// This allows for safer checking of interface values. 
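For reference, the naming helpers above split identifiers on caps, dashes and underscores, then re-join them, upper-casing recognised initialisms such as ID or HTTP. A usage sketch (expected outputs are hedged, based on reading the removed code rather than running it):

package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	fmt.Println(swag.ToGoName("pet_id"))      // typically "PetID"
	fmt.Println(swag.ToJSONName("pet_id"))    // typically "petId"
	fmt.Println(swag.ToFileName("PetID"))     // typically "pet_id"
	fmt.Println(swag.ToCommandName("PetID"))  // typically "pet-id"
}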
-func IsZero(data interface{}) bool { - // check for things that have an IsZero method instead - if vv, ok := data.(zeroable); ok { - return vv.IsZero() - } - // continue with slightly more complex reflection - v := reflect.ValueOf(data) - switch v.Kind() { - case reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - case reflect.Struct, reflect.Array: - return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) - case reflect.Invalid: - return true - } - return false -} - -// CommandLineOptionsGroup represents a group of user-defined command line options -type CommandLineOptionsGroup struct { - ShortDescription string - LongDescription string - Options interface{} -} diff --git a/vendor/github.com/juju/ratelimit/LICENSE b/vendor/github.com/juju/ratelimit/LICENSE deleted file mode 100644 index ade9307b3..000000000 --- a/vendor/github.com/juju/ratelimit/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -All files in this repository are licensed as follows. If you contribute -to this repository, it is assumed that you license your contribution -under the same license unless you state otherwise. - -All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. - -This software is licensed under the LGPLv3, included below. - -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. 
- - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. 
- - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. 
diff --git a/vendor/github.com/juju/ratelimit/ratelimit.go b/vendor/github.com/juju/ratelimit/ratelimit.go deleted file mode 100644 index 3ef32fbcc..000000000 --- a/vendor/github.com/juju/ratelimit/ratelimit.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3 with static-linking exception. -// See LICENCE file for details. - -// The ratelimit package provides an efficient token bucket implementation -// that can be used to limit the rate of arbitrary things. -// See http://en.wikipedia.org/wiki/Token_bucket. -package ratelimit - -import ( - "math" - "strconv" - "sync" - "time" -) - -// Bucket represents a token bucket that fills at a predetermined rate. -// Methods on Bucket may be called concurrently. -type Bucket struct { - startTime time.Time - capacity int64 - quantum int64 - fillInterval time.Duration - - // The mutex guards the fields following it. - mu sync.Mutex - - // avail holds the number of available tokens - // in the bucket, as of availTick ticks from startTime. - // It will be negative when there are consumers - // waiting for tokens. - avail int64 - availTick int64 -} - -// NewBucket returns a new token bucket that fills at the -// rate of one token every fillInterval, up to the given -// maximum capacity. Both arguments must be -// positive. The bucket is initially full. -func NewBucket(fillInterval time.Duration, capacity int64) *Bucket { - return NewBucketWithQuantum(fillInterval, capacity, 1) -} - -// rateMargin specifes the allowed variance of actual -// rate from specified rate. 1% seems reasonable. -const rateMargin = 0.01 - -// NewBucketWithRate returns a token bucket that fills the bucket -// at the rate of rate tokens per second up to the given -// maximum capacity. Because of limited clock resolution, -// at high rates, the actual rate may be up to 1% different from the -// specified rate. -func NewBucketWithRate(rate float64, capacity int64) *Bucket { - for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) { - fillInterval := time.Duration(1e9 * float64(quantum) / rate) - if fillInterval <= 0 { - continue - } - tb := NewBucketWithQuantum(fillInterval, capacity, quantum) - if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin { - return tb - } - } - panic("cannot find suitable quantum for " + strconv.FormatFloat(rate, 'g', -1, 64)) -} - -// nextQuantum returns the next quantum to try after q. -// We grow the quantum exponentially, but slowly, so we -// get a good fit in the lower numbers. -func nextQuantum(q int64) int64 { - q1 := q * 11 / 10 - if q1 == q { - q1++ - } - return q1 -} - -// NewBucketWithQuantum is similar to NewBucket, but allows -// the specification of the quantum size - quantum tokens -// are added every fillInterval. -func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket { - if fillInterval <= 0 { - panic("token bucket fill interval is not > 0") - } - if capacity <= 0 { - panic("token bucket capacity is not > 0") - } - if quantum <= 0 { - panic("token bucket quantum is not > 0") - } - return &Bucket{ - startTime: time.Now(), - capacity: capacity, - quantum: quantum, - avail: capacity, - fillInterval: fillInterval, - } -} - -// Wait takes count tokens from the bucket, waiting until they are -// available. 
-func (tb *Bucket) Wait(count int64) { - if d := tb.Take(count); d > 0 { - time.Sleep(d) - } -} - -// WaitMaxDuration is like Wait except that it will -// only take tokens from the bucket if it needs to wait -// for no greater than maxWait. It reports whether -// any tokens have been removed from the bucket -// If no tokens have been removed, it returns immediately. -func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool { - d, ok := tb.TakeMaxDuration(count, maxWait) - if d > 0 { - time.Sleep(d) - } - return ok -} - -const infinityDuration time.Duration = 0x7fffffffffffffff - -// Take takes count tokens from the bucket without blocking. It returns -// the time that the caller should wait until the tokens are actually -// available. -// -// Note that if the request is irrevocable - there is no way to return -// tokens to the bucket once this method commits us to taking them. -func (tb *Bucket) Take(count int64) time.Duration { - d, _ := tb.take(time.Now(), count, infinityDuration) - return d -} - -// TakeMaxDuration is like Take, except that -// it will only take tokens from the bucket if the wait -// time for the tokens is no greater than maxWait. -// -// If it would take longer than maxWait for the tokens -// to become available, it does nothing and reports false, -// otherwise it returns the time that the caller should -// wait until the tokens are actually available, and reports -// true. -func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) { - return tb.take(time.Now(), count, maxWait) -} - -// TakeAvailable takes up to count immediately available tokens from the -// bucket. It returns the number of tokens removed, or zero if there are -// no available tokens. It does not block. -func (tb *Bucket) TakeAvailable(count int64) int64 { - return tb.takeAvailable(time.Now(), count) -} - -// takeAvailable is the internal version of TakeAvailable - it takes the -// current time as an argument to enable easy testing. -func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 { - if count <= 0 { - return 0 - } - tb.mu.Lock() - defer tb.mu.Unlock() - - tb.adjust(now) - if tb.avail <= 0 { - return 0 - } - if count > tb.avail { - count = tb.avail - } - tb.avail -= count - return count -} - -// Available returns the number of available tokens. It will be negative -// when there are consumers waiting for tokens. Note that if this -// returns greater than zero, it does not guarantee that calls that take -// tokens from the buffer will succeed, as the number of available -// tokens could have changed in the meantime. This method is intended -// primarily for metrics reporting and debugging. -func (tb *Bucket) Available() int64 { - return tb.available(time.Now()) -} - -// available is the internal version of available - it takes the current time as -// an argument to enable easy testing. -func (tb *Bucket) available(now time.Time) int64 { - tb.mu.Lock() - defer tb.mu.Unlock() - tb.adjust(now) - return tb.avail -} - -// Capacity returns the capacity that the bucket was created with. -func (tb *Bucket) Capacity() int64 { - return tb.capacity -} - -// Rate returns the fill rate of the bucket, in tokens per second. -func (tb *Bucket) Rate() float64 { - return 1e9 * float64(tb.quantum) / float64(tb.fillInterval) -} - -// take is the internal version of Take - it takes the current time as -// an argument to enable easy testing. 
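For reference, the token bucket above starts full and refills quantum tokens every fillInterval; Wait blocks until the requested tokens are available. A usage sketch against the Bucket API shown in this hunk, assuming the vendored import path github.com/juju/ratelimit from the diff header:

package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// A bucket holding at most 10 tokens, refilled at roughly 100 tokens/second.
	bucket := ratelimit.NewBucketWithRate(100, 10)

	start := time.Now()
	for i := 0; i < 20; i++ {
		bucket.Wait(1) // blocks until a token is available
	}
	// The first 10 tokens are immediate (bucket starts full); the rest arrive
	// at the configured rate.
	fmt.Printf("20 tokens in %v (rate %.0f/s)\n", time.Since(start), bucket.Rate())
}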
-func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.Duration, bool) { - if count <= 0 { - return 0, true - } - tb.mu.Lock() - defer tb.mu.Unlock() - - currentTick := tb.adjust(now) - avail := tb.avail - count - if avail >= 0 { - tb.avail = avail - return 0, true - } - // Round up the missing tokens to the nearest multiple - // of quantum - the tokens won't be available until - // that tick. - endTick := currentTick + (-avail+tb.quantum-1)/tb.quantum - endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval) - waitTime := endTime.Sub(now) - if waitTime > maxWait { - return 0, false - } - tb.avail = avail - return waitTime, true -} - -// adjust adjusts the current bucket capacity based on the current time. -// It returns the current tick. -func (tb *Bucket) adjust(now time.Time) (currentTick int64) { - currentTick = int64(now.Sub(tb.startTime) / tb.fillInterval) - - if tb.avail >= tb.capacity { - return - } - tb.avail += (currentTick - tb.availTick) * tb.quantum - if tb.avail > tb.capacity { - tb.avail = tb.capacity - } - tb.availTick = currentTick - return -} diff --git a/vendor/github.com/juju/ratelimit/reader.go b/vendor/github.com/juju/ratelimit/reader.go deleted file mode 100644 index 6403bf78d..000000000 --- a/vendor/github.com/juju/ratelimit/reader.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3 with static-linking exception. -// See LICENCE file for details. - -package ratelimit - -import "io" - -type reader struct { - r io.Reader - bucket *Bucket -} - -// Reader returns a reader that is rate limited by -// the given token bucket. Each token in the bucket -// represents one byte. -func Reader(r io.Reader, bucket *Bucket) io.Reader { - return &reader{ - r: r, - bucket: bucket, - } -} - -func (r *reader) Read(buf []byte) (int, error) { - n, err := r.r.Read(buf) - if n <= 0 { - return n, err - } - r.bucket.Wait(int64(n)) - return n, err -} - -type writer struct { - w io.Writer - bucket *Bucket -} - -// Writer returns a reader that is rate limited by -// the given token bucket. Each token in the bucket -// represents one byte. -func Writer(w io.Writer, bucket *Bucket) io.Writer { - return &writer{ - w: w, - bucket: bucket, - } -} - -func (w *writer) Write(buf []byte) (int, error) { - w.bucket.Wait(int64(len(buf))) - return w.w.Write(buf) -} diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE deleted file mode 100644 index fbff658f7..000000000 --- a/vendor/github.com/mailru/easyjson/LICENSE +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (c) 2016 Mail.Ru Group - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go deleted file mode 100644 index 4de4a51db..000000000 --- a/vendor/github.com/mailru/easyjson/buffer/pool.go +++ /dev/null @@ -1,207 +0,0 @@ -// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to -// reduce copying and to allow reuse of individual chunks. -package buffer - -import ( - "io" - "sync" -) - -// PoolConfig contains configuration for the allocation and reuse strategy. -type PoolConfig struct { - StartSize int // Minimum chunk size that is allocated. - PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead. - MaxSize int // Maximum chunk size that will be allocated. -} - -var config = PoolConfig{ - StartSize: 128, - PooledSize: 512, - MaxSize: 32768, -} - -// Reuse pool: chunk size -> pool. -var buffers = map[int]*sync.Pool{} - -func initBuffers() { - for l := config.PooledSize; l <= config.MaxSize; l *= 2 { - buffers[l] = new(sync.Pool) - } -} - -func init() { - initBuffers() -} - -// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done. -func Init(cfg PoolConfig) { - config = cfg - initBuffers() -} - -// putBuf puts a chunk to reuse pool if it can be reused. -func putBuf(buf []byte) { - size := cap(buf) - if size < config.PooledSize { - return - } - if c := buffers[size]; c != nil { - c.Put(buf[:0]) - } -} - -// getBuf gets a chunk from reuse pool or creates a new one if reuse failed. -func getBuf(size int) []byte { - if size < config.PooledSize { - return make([]byte, 0, size) - } - - if c := buffers[size]; c != nil { - v := c.Get() - if v != nil { - return v.([]byte) - } - } - return make([]byte, 0, size) -} - -// Buffer is a buffer optimized for serialization without extra copying. -type Buffer struct { - - // Buf is the current chunk that can be used for serialization. - Buf []byte - - toPool []byte - bufs [][]byte -} - -// EnsureSpace makes sure that the current chunk contains at least s free bytes, -// possibly creating a new chunk. -func (b *Buffer) EnsureSpace(s int) { - if cap(b.Buf)-len(b.Buf) >= s { - return - } - l := len(b.Buf) - if l > 0 { - if cap(b.toPool) != cap(b.Buf) { - // Chunk was reallocated, toPool can be pooled. - putBuf(b.toPool) - } - if cap(b.bufs) == 0 { - b.bufs = make([][]byte, 0, 8) - } - b.bufs = append(b.bufs, b.Buf) - l = cap(b.toPool) * 2 - } else { - l = config.StartSize - } - - if l > config.MaxSize { - l = config.MaxSize - } - b.Buf = getBuf(l) - b.toPool = b.Buf -} - -// AppendByte appends a single byte to buffer. -func (b *Buffer) AppendByte(data byte) { - if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. - b.EnsureSpace(1) - } - b.Buf = append(b.Buf, data) -} - -// AppendBytes appends a byte slice to buffer. -func (b *Buffer) AppendBytes(data []byte) { - for len(data) > 0 { - if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. - b.EnsureSpace(1) - } - - sz := cap(b.Buf) - len(b.Buf) - if sz > len(data) { - sz = len(data) - } - - b.Buf = append(b.Buf, data[:sz]...) - data = data[sz:] - } -} - -// AppendBytes appends a string to buffer. 
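For reference, the Buffer above grows in pooled chunks instead of reallocating one contiguous slice, and the chunks are only flattened on demand. A usage sketch, assuming the vendored import path github.com/mailru/easyjson/buffer from the diff header; BuildBytes and AppendString are defined in the remainder of this hunk:

package main

import (
	"fmt"

	"github.com/mailru/easyjson/buffer"
)

func main() {
	var b buffer.Buffer // the zero value is ready to use

	b.AppendString(`{"hello":`)
	b.AppendBytes([]byte(`"world"`))
	b.AppendByte('}')

	// BuildBytes joins the pooled chunks into a single slice.
	out := b.BuildBytes()
	fmt.Println(string(out), "size:", len(out))
}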
-func (b *Buffer) AppendString(data string) { - for len(data) > 0 { - if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. - b.EnsureSpace(1) - } - - sz := cap(b.Buf) - len(b.Buf) - if sz > len(data) { - sz = len(data) - } - - b.Buf = append(b.Buf, data[:sz]...) - data = data[sz:] - } -} - -// Size computes the size of a buffer by adding sizes of every chunk. -func (b *Buffer) Size() int { - size := len(b.Buf) - for _, buf := range b.bufs { - size += len(buf) - } - return size -} - -// DumpTo outputs the contents of a buffer to a writer and resets the buffer. -func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { - var n int - for _, buf := range b.bufs { - if err == nil { - n, err = w.Write(buf) - written += n - } - putBuf(buf) - } - - if err == nil { - n, err = w.Write(b.Buf) - written += n - } - putBuf(b.toPool) - - b.bufs = nil - b.Buf = nil - b.toPool = nil - - return -} - -// BuildBytes creates a single byte slice with all the contents of the buffer. Data is -// copied if it does not fit in a single chunk. -func (b *Buffer) BuildBytes() []byte { - if len(b.bufs) == 0 { - - ret := b.Buf - b.toPool = nil - b.Buf = nil - - return ret - } - - ret := make([]byte, 0, b.Size()) - for _, buf := range b.bufs { - ret = append(ret, buf...) - putBuf(buf) - } - - ret = append(ret, b.Buf...) - putBuf(b.toPool) - - b.bufs = nil - b.toPool = nil - b.Buf = nil - - return ret -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go deleted file mode 100644 index e90ec40d0..000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/error.go +++ /dev/null @@ -1,15 +0,0 @@ -package jlexer - -import "fmt" - -// LexerError implements the error interface and represents all possible errors that can be -// generated during parsing the JSON data. -type LexerError struct { - Reason string - Offset int - Data string -} - -func (l *LexerError) Error() string { - return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data) -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go deleted file mode 100644 index d700c0a32..000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ /dev/null @@ -1,956 +0,0 @@ -// Package jlexer contains a JSON lexer implementation. -// -// It is expected that it is mostly used with generated parser code, so the interface is tuned -// for a parser that knows what kind of data is expected. -package jlexer - -import ( - "fmt" - "io" - "reflect" - "strconv" - "unicode/utf8" - "unsafe" -) - -// tokenKind determines type of a token. -type tokenKind byte - -const ( - tokenUndef tokenKind = iota // No token. - tokenDelim // Delimiter: one of '{', '}', '[' or ']'. - tokenString // A string literal, e.g. "abc\u1234" - tokenNumber // Number literal, e.g. 1.5e5 - tokenBool // Boolean literal: true or false. - tokenNull // null keyword. -) - -// token describes a single token: type, position in the input and value. -type token struct { - kind tokenKind // Type of a token. - - boolValue bool // Value if a boolean literal token. - byteValue []byte // Raw value of a token. - delimValue byte -} - -// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice. -type Lexer struct { - Data []byte // Input data given to the lexer. - - start int // Start of the current token. - pos int // Current unscanned position in the input stream. - token token // Last scanned token, if token.kind != tokenUndef. 
- - firstElement bool // Whether current element is the first in array or an object. - wantSep byte // A comma or a colon character, which need to occur before a token. - - err error // Error encountered during lexing, if any. -} - -// fetchToken scans the input for the next token. -func (r *Lexer) fetchToken() { - r.token.kind = tokenUndef - r.start = r.pos - - // Check if r.Data has r.pos element - // If it doesn't, it mean corrupted input data - if len(r.Data) < r.pos { - r.errParse("Unexpected end of data") - return - } - // Determine the type of a token by skipping whitespace and reading the - // first character. - for _, c := range r.Data[r.pos:] { - switch c { - case ':', ',': - if r.wantSep == c { - r.pos++ - r.start++ - r.wantSep = 0 - } else { - r.errSyntax() - } - - case ' ', '\t', '\r', '\n': - r.pos++ - r.start++ - - case '"': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenString - r.fetchString() - return - - case '{', '[': - if r.wantSep != 0 { - r.errSyntax() - } - r.firstElement = true - r.token.kind = tokenDelim - r.token.delimValue = r.Data[r.pos] - r.pos++ - return - - case '}', ']': - if !r.firstElement && (r.wantSep != ',') { - r.errSyntax() - } - r.wantSep = 0 - r.token.kind = tokenDelim - r.token.delimValue = r.Data[r.pos] - r.pos++ - return - - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': - if r.wantSep != 0 { - r.errSyntax() - } - r.token.kind = tokenNumber - r.fetchNumber() - return - - case 'n': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenNull - r.fetchNull() - return - - case 't': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenBool - r.token.boolValue = true - r.fetchTrue() - return - - case 'f': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenBool - r.token.boolValue = false - r.fetchFalse() - return - - default: - r.errSyntax() - return - } - } - r.err = io.EOF - return -} - -// isTokenEnd returns true if the char can follow a non-delimiter token -func isTokenEnd(c byte) bool { - return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':' -} - -// fetchNull fetches and checks remaining bytes of null keyword. -func (r *Lexer) fetchNull() { - r.pos += 4 - if r.pos > len(r.Data) || - r.Data[r.pos-3] != 'u' || - r.Data[r.pos-2] != 'l' || - r.Data[r.pos-1] != 'l' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 4 - r.errSyntax() - } -} - -// fetchTrue fetches and checks remaining bytes of true keyword. -func (r *Lexer) fetchTrue() { - r.pos += 4 - if r.pos > len(r.Data) || - r.Data[r.pos-3] != 'r' || - r.Data[r.pos-2] != 'u' || - r.Data[r.pos-1] != 'e' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 4 - r.errSyntax() - } -} - -// fetchFalse fetches and checks remaining bytes of false keyword. -func (r *Lexer) fetchFalse() { - r.pos += 5 - if r.pos > len(r.Data) || - r.Data[r.pos-4] != 'a' || - r.Data[r.pos-3] != 'l' || - r.Data[r.pos-2] != 's' || - r.Data[r.pos-1] != 'e' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 5 - r.errSyntax() - } -} - -// bytesToStr creates a string pointing at the slice to avoid copying. -// -// Warning: the string returned by the function should be used with care, as the whole input data -// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data -// may be garbage-collected even when the string exists. 
-func bytesToStr(data []byte) string { - h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) - shdr := reflect.StringHeader{h.Data, h.Len} - return *(*string)(unsafe.Pointer(&shdr)) -} - -// fetchNumber scans a number literal token. -func (r *Lexer) fetchNumber() { - hasE := false - afterE := false - hasDot := false - - r.pos++ - for i, c := range r.Data[r.pos:] { - switch { - case c >= '0' && c <= '9': - afterE = false - case c == '.' && !hasDot: - hasDot = true - case (c == 'e' || c == 'E') && !hasE: - hasE = true - hasDot = true - afterE = true - case (c == '+' || c == '-') && afterE: - afterE = false - default: - r.pos += i - if !isTokenEnd(c) { - r.errSyntax() - } else { - r.token.byteValue = r.Data[r.start:r.pos] - } - return - } - } - - r.pos = len(r.Data) - r.token.byteValue = r.Data[r.start:] -} - -// findStringLen tries to scan into the string literal for ending quote char to determine required size. -// The size will be exact if no escapes are present and may be inexact if there are escaped chars. -func findStringLen(data []byte) (hasEscapes bool, length int) { - delta := 0 - - for i := 0; i < len(data); i++ { - switch data[i] { - case '\\': - i++ - delta++ - if i < len(data) && data[i] == 'u' { - delta++ - } - case '"': - return (delta > 0), (i - delta) - } - } - - return false, len(data) -} - -// processEscape processes a single escape sequence and returns number of bytes processed. -func (r *Lexer) processEscape(data []byte) (int, error) { - if len(data) < 2 { - return 0, fmt.Errorf("syntax error at %v", string(data)) - } - - c := data[1] - switch c { - case '"', '/', '\\': - r.token.byteValue = append(r.token.byteValue, c) - return 2, nil - case 'b': - r.token.byteValue = append(r.token.byteValue, '\b') - return 2, nil - case 'f': - r.token.byteValue = append(r.token.byteValue, '\f') - return 2, nil - case 'n': - r.token.byteValue = append(r.token.byteValue, '\n') - return 2, nil - case 'r': - r.token.byteValue = append(r.token.byteValue, '\r') - return 2, nil - case 't': - r.token.byteValue = append(r.token.byteValue, '\t') - return 2, nil - case 'u': - default: - return 0, fmt.Errorf("syntax error") - } - - var val rune - - for i := 2; i < len(data) && i < 6; i++ { - var v byte - c = data[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - v = c - '0' - case 'a', 'b', 'c', 'd', 'e', 'f': - v = c - 'a' + 10 - case 'A', 'B', 'C', 'D', 'E', 'F': - v = c - 'A' + 10 - default: - return 0, fmt.Errorf("syntax error") - } - - val <<= 4 - val |= rune(v) - } - - l := utf8.RuneLen(val) - if l == -1 { - return 0, fmt.Errorf("invalid unicode escape") - } - - var d [4]byte - utf8.EncodeRune(d[:], val) - r.token.byteValue = append(r.token.byteValue, d[:l]...) - return 6, nil -} - -// fetchString scans a string literal token. -func (r *Lexer) fetchString() { - r.pos++ - data := r.Data[r.pos:] - - hasEscapes, length := findStringLen(data) - if !hasEscapes { - r.token.byteValue = data[:length] - r.pos += length + 1 - return - } - - r.token.byteValue = make([]byte, 0, length) - p := 0 - for i := 0; i < len(data); { - switch data[i] { - case '"': - r.pos += i + 1 - r.token.byteValue = append(r.token.byteValue, data[p:i]...) - i++ - return - - case '\\': - r.token.byteValue = append(r.token.byteValue, data[p:i]...) 
- off, err := r.processEscape(data[i:]) - if err != nil { - r.errParse(err.Error()) - return - } - i += off - p = i - - default: - i++ - } - } - r.errParse("unterminated string literal") -} - -// scanToken scans the next token if no token is currently available in the lexer. -func (r *Lexer) scanToken() { - if r.token.kind != tokenUndef || r.err != nil { - return - } - - r.fetchToken() -} - -// consume resets the current token to allow scanning the next one. -func (r *Lexer) consume() { - r.token.kind = tokenUndef - r.token.delimValue = 0 -} - -// Ok returns true if no error (including io.EOF) was encountered during scanning. -func (r *Lexer) Ok() bool { - return r.err == nil -} - -const maxErrorContextLen = 13 - -func (r *Lexer) errParse(what string) { - if r.err == nil { - var str string - if len(r.Data)-r.pos <= maxErrorContextLen { - str = string(r.Data) - } else { - str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." - } - r.err = &LexerError{ - Reason: what, - Offset: r.pos, - Data: str, - } - } -} - -func (r *Lexer) errSyntax() { - r.errParse("syntax error") -} - -func (r *Lexer) errInvalidToken(expected string) { - if r.err == nil { - var str string - if len(r.token.byteValue) <= maxErrorContextLen { - str = string(r.token.byteValue) - } else { - str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." - } - r.err = &LexerError{ - Reason: fmt.Sprintf("expected %s", expected), - Offset: r.pos, - Data: str, - } - } -} - -// Delim consumes a token and verifies that it is the given delimiter. -func (r *Lexer) Delim(c byte) { - if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() - } - if !r.Ok() || r.token.delimValue != c { - r.errInvalidToken(string([]byte{c})) - } - r.consume() -} - -// IsDelim returns true if there was no scanning error and next token is the given delimiter. -func (r *Lexer) IsDelim(c byte) bool { - if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() - } - return !r.Ok() || r.token.delimValue == c -} - -// Null verifies that the next token is null and consumes it. -func (r *Lexer) Null() { - if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() - } - if !r.Ok() || r.token.kind != tokenNull { - r.errInvalidToken("null") - } - r.consume() -} - -// IsNull returns true if the next token is a null keyword. -func (r *Lexer) IsNull() bool { - if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() - } - return r.Ok() && r.token.kind == tokenNull -} - -// Skip skips a single token. -func (r *Lexer) Skip() { - if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() - } - r.consume() -} - -// SkipRecursive skips next array or object completely, or just skips a single token if not -// an array/object. -// -// Note: no syntax validation is performed on the skipped data. 
-func (r *Lexer) SkipRecursive() { - r.scanToken() - - var start, end byte - - if r.token.delimValue == '{' { - start, end = '{', '}' - } else if r.token.delimValue == '[' { - start, end = '[', ']' - } else { - r.consume() - return - } - - r.consume() - - level := 1 - inQuotes := false - wasEscape := false - - for i, c := range r.Data[r.pos:] { - switch { - case c == start && !inQuotes: - level++ - case c == end && !inQuotes: - level-- - if level == 0 { - r.pos += i + 1 - return - } - case c == '\\' && inQuotes: - wasEscape = true - continue - case c == '"' && inQuotes: - inQuotes = wasEscape - case c == '"': - inQuotes = true - } - wasEscape = false - } - r.pos = len(r.Data) - r.err = io.EOF -} - -// Raw fetches the next item recursively as a data slice -func (r *Lexer) Raw() []byte { - r.SkipRecursive() - if !r.Ok() { - return nil - } - return r.Data[r.start:r.pos] -} - -// UnsafeString returns the string value if the token is a string literal. -// -// Warning: returned string may point to the input buffer, so the string should not outlive -// the input buffer. Intended pattern of usage is as an argument to a switch statement. -func (r *Lexer) UnsafeString() string { - if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return "" - } - - ret := bytesToStr(r.token.byteValue) - r.consume() - return ret -} - -// String reads a string literal. -func (r *Lexer) String() string { - if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return "" - - } - ret := string(r.token.byteValue) - r.consume() - return ret -} - -// Bool reads a true or false boolean keyword. -func (r *Lexer) Bool() bool { - if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() - } - if !r.Ok() || r.token.kind != tokenBool { - r.errInvalidToken("bool") - return false - - } - ret := r.token.boolValue - r.consume() - return ret -} - -func (r *Lexer) number() string { - if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() - } - if !r.Ok() || r.token.kind != tokenNumber { - r.errInvalidToken("number") - return "" - - } - ret := bytesToStr(r.token.byteValue) - r.consume() - return ret -} - -func (r *Lexer) Uint8() uint8 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 8) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return uint8(n) -} - -func (r *Lexer) Uint16() uint16 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 16) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return uint16(n) -} - -func (r *Lexer) Uint32() uint32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 32) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return uint32(n) -} - -func (r *Lexer) Uint64() uint64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return n -} - -func (r *Lexer) Uint() uint { - return uint(r.Uint64()) -} - -func (r *Lexer) Int8() int8 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 8) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return int8(n) -} - -func (r *Lexer) Int16() int16 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 16) - if 
err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return int16(n) -} - -func (r *Lexer) Int32() int32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 32) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return int32(n) -} - -func (r *Lexer) Int64() int64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return n -} - -func (r *Lexer) Int() int { - return int(r.Int64()) -} - -func (r *Lexer) Uint8Str() uint8 { - s := r.UnsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 8) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return uint8(n) -} - -func (r *Lexer) Uint16Str() uint16 { - s := r.UnsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 16) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return uint16(n) -} - -func (r *Lexer) Uint32Str() uint32 { - s := r.UnsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 32) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return uint32(n) -} - -func (r *Lexer) Uint64Str() uint64 { - s := r.UnsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return n -} - -func (r *Lexer) UintStr() uint { - return uint(r.Uint64Str()) -} - -func (r *Lexer) Int8Str() int8 { - s := r.UnsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 8) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return int8(n) -} - -func (r *Lexer) Int16Str() int16 { - s := r.UnsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 16) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return int16(n) -} - -func (r *Lexer) Int32Str() int32 { - s := r.UnsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 32) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return int32(n) -} - -func (r *Lexer) Int64Str() int64 { - s := r.UnsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return n -} - -func (r *Lexer) IntStr() int { - return int(r.Int64Str()) -} - -func (r *Lexer) Float32() float32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseFloat(s, 32) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return float32(n) -} - -func (r *Lexer) Float64() float64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseFloat(s, 64) - if err != nil { - r.err = &LexerError{ - Reason: err.Error(), - } - } - return n -} - -func (r *Lexer) Error() error { - return r.err -} - -func (r *Lexer) AddError(e error) { - if r.err == nil { - r.err = e - } -} - -// Interface fetches an interface{} analogous to the 'encoding/json' package. 
-func (r *Lexer) Interface() interface{} { - if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() - } - - if !r.Ok() { - return nil - } - switch r.token.kind { - case tokenString: - return r.String() - case tokenNumber: - return r.Float64() - case tokenBool: - return r.Bool() - case tokenNull: - r.Null() - return nil - } - - if r.token.delimValue == '{' { - r.consume() - - ret := map[string]interface{}{} - for !r.IsDelim('}') { - key := r.String() - r.WantColon() - ret[key] = r.Interface() - r.WantComma() - } - r.Delim('}') - - if r.Ok() { - return ret - } else { - return nil - } - } else if r.token.delimValue == '[' { - r.consume() - - var ret []interface{} - for !r.IsDelim(']') { - ret = append(ret, r.Interface()) - r.WantComma() - } - r.Delim(']') - - if r.Ok() { - return ret - } else { - return nil - } - } - r.errSyntax() - return nil -} - -// WantComma requires a comma to be present before fetching next token. -func (r *Lexer) WantComma() { - r.wantSep = ',' - r.firstElement = false -} - -// WantColon requires a colon to be present before fetching next token. -func (r *Lexer) WantColon() { - r.wantSep = ':' - r.firstElement = false -} diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go deleted file mode 100644 index 907675f9c..000000000 --- a/vendor/github.com/mailru/easyjson/jwriter/writer.go +++ /dev/null @@ -1,273 +0,0 @@ -// Package jwriter contains a JSON writer. -package jwriter - -import ( - "io" - "strconv" - "unicode/utf8" - - "github.com/mailru/easyjson/buffer" -) - -// Writer is a JSON writer. -type Writer struct { - Error error - Buffer buffer.Buffer -} - -// Size returns the size of the data that was written out. -func (w *Writer) Size() int { - return w.Buffer.Size() -} - -// DumpTo outputs the data to given io.Writer, resetting the buffer. -func (w *Writer) DumpTo(out io.Writer) (written int, err error) { - return w.Buffer.DumpTo(out) -} - -// BuildBytes returns writer data as a single byte slice. -func (w *Writer) BuildBytes() ([]byte, error) { - if w.Error != nil { - return nil, w.Error - } - - return w.Buffer.BuildBytes(), nil -} - -// RawByte appends raw binary data to the buffer. -func (w *Writer) RawByte(c byte) { - w.Buffer.AppendByte(c) -} - -// RawByte appends raw binary data to the buffer. -func (w *Writer) RawString(s string) { - w.Buffer.AppendString(s) -} - -// RawByte appends raw binary data to the buffer or sets the error if it is given. Useful for -// calling with results of MarshalJSON-like functions. 
-func (w *Writer) Raw(data []byte, err error) { - switch { - case w.Error != nil: - return - case err != nil: - w.Error = err - case len(data) > 0: - w.Buffer.AppendBytes(data) - default: - w.RawString("null") - } -} - -func (w *Writer) Uint8(n uint8) { - w.Buffer.EnsureSpace(3) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint16(n uint16) { - w.Buffer.EnsureSpace(5) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint32(n uint32) { - w.Buffer.EnsureSpace(10) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint(n uint) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint64(n uint64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) -} - -func (w *Writer) Int8(n int8) { - w.Buffer.EnsureSpace(4) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int16(n int16) { - w.Buffer.EnsureSpace(6) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int32(n int32) { - w.Buffer.EnsureSpace(11) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int(n int) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int64(n int64) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) -} - -func (w *Writer) Uint8Str(n uint8) { - w.Buffer.EnsureSpace(3) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint16Str(n uint16) { - w.Buffer.EnsureSpace(5) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint32Str(n uint32) { - w.Buffer.EnsureSpace(10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) UintStr(n uint) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint64Str(n uint64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int8Str(n int8) { - w.Buffer.EnsureSpace(4) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int16Str(n int16) { - w.Buffer.EnsureSpace(6) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int32Str(n int32) { - w.Buffer.EnsureSpace(11) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) IntStr(n int) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int64Str(n int64) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf 
= append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Float32(n float32) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) -} - -func (w *Writer) Float64(n float64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) -} - -func (w *Writer) Bool(v bool) { - w.Buffer.EnsureSpace(5) - if v { - w.Buffer.Buf = append(w.Buffer.Buf, "true"...) - } else { - w.Buffer.Buf = append(w.Buffer.Buf, "false"...) - } -} - -const chars = "0123456789abcdef" - -func (w *Writer) String(s string) { - w.Buffer.AppendByte('"') - - // Portions of the string that contain no escapes are appended as - // byte slices. - - p := 0 // last non-escape symbol - - for i := 0; i < len(s); { - // single-with character - if c := s[i]; c < utf8.RuneSelf { - var escape byte - switch c { - case '\t': - escape = 't' - case '\r': - escape = 'r' - case '\n': - escape = 'n' - case '\\': - escape = '\\' - case '"': - escape = '"' - case '<', '>': - // do nothing - default: - if c >= 0x20 { - // no escaping is required - i++ - continue - } - } - if escape != 0 { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendByte('\\') - w.Buffer.AppendByte(escape) - } else { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendString(`\u00`) - w.Buffer.AppendByte(chars[c>>4]) - w.Buffer.AppendByte(chars[c&0xf]) - } - i++ - p = i - continue - } - - // broken utf - runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) - if runeValue == utf8.RuneError && runeWidth == 1 { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendString(`\ufffd`) - i++ - p = i - continue - } - - // jsonp stuff - tab separator and line separator - if runeValue == '\u2028' || runeValue == '\u2029' { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendString(`\u202`) - w.Buffer.AppendByte(chars[runeValue&0xf]) - i += runeWidth - p = i - continue - } - i += runeWidth - } - w.Buffer.AppendString(s[p:]) - w.Buffer.AppendByte('"') -} diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE deleted file mode 100644 index 63ed1cfea..000000000 --- a/vendor/github.com/spf13/pflag/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 Alex Ogier. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md deleted file mode 100644 index b052414d1..000000000 --- a/vendor/github.com/spf13/pflag/README.md +++ /dev/null @@ -1,296 +0,0 @@ -[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag) -[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag) - -## Description - -pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the [GNU extensions to the POSIX recommendations -for command-line options][1]. For a more precise description, see the -"Command-line flag syntax" section below. - -[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -pflag is available under the same style of BSD license as the Go language, -which can be found in the LICENSE file. - -## Installation - -pflag is available using the standard `go get` command. - -Install by running: - - go get github.com/spf13/pflag - -Run tests by running: - - go test github.com/spf13/pflag - -## Usage - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - -``` go -import flag "github.com/spf13/pflag" -``` - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - -``` go -var ip *int = flag.Int("flagname", 1234, "help message for flagname") -``` - -If you like, you can bind the flag to a variable using the Var() functions. - -``` go -var flagvar int -func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") -} -``` - -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - -``` go -flag.Var(&flagVal, "name", "help message for flagname") -``` - -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - -``` go -flag.Parse() -``` - -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - -``` go -fmt.Println("ip has value ", *ip) -fmt.Println("flagvar has value ", flagvar) -``` - -There are helpers function to get values later if you have the FlagSet but -it was difficult to keep up with all of the flag pointers in your code. 
-If you have a pflag.FlagSet with a flag called 'flagname' of type int you -can use GetInt() to get the int value. But notice that 'flagname' must exist -and it must be an int. GetString("flagname") will fail. - -``` go -i, err := flagset.GetInt("flagname") -``` - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -var flagvar bool -func init() { - flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") -} -flag.VarP(&flagVal, "varname", "v", "help message") -``` - -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. - -## Setting no option default values for flags - -After you create a flag it is possible to set the pflag.NoOptDefVal for -the given flag. Doing this changes the meaning of the flag slightly. If -a flag has a NoOptDefVal and the flag is set on the command line without -an option the flag will be set to the NoOptDefVal. For example given: - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -flag.Lookup("flagname").NoOptDefVal = "4321" -``` - -Would result in something like - -| Parsed Arguments | Resulting Value | -| ------------- | ------------- | -| --flagname=1357 | ip=1357 | -| --flagname | ip=4321 | -| [nothing] | ip=1234 | - -## Command line flag syntax - -``` ---flag // boolean flags, or flags with no option default values ---flag x // only on flags without a default value ---flag=x -``` - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. All but the last shorthand letter must be boolean flags -or a flag with a default value - -``` -// boolean or flags where the 'no option default value' is set --f --f=true --abc -but --b true is INVALID - -// non-boolean and flags without a 'no option default value' --n 1234 --n=1234 --n1234 - -// mixed --abcs "hello" --absd="hello" --abcs1234 -``` - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -## Mutating or "Normalizing" Flag names - -It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow. - -**Example #1**: You want -, _, and . in flags to compare the same. 
aka --my-flag == --my_flag == --my.flag - -``` go -func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - from := []string{"-", "_"} - to := "." - for _, sep := range from { - name = strings.Replace(name, sep, to, -1) - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc) -``` - -**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name - -``` go -func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - switch name { - case "old-flag-name": - name = "new-flag-name" - break - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) -``` - -## Deprecating a flag or its shorthand -It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used. - -**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead. -```go -// deprecate a flag by specifying its name and a usage message -flags.MarkDeprecated("badflag", "please use --good-flag instead") -``` -This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used. - -**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n". -```go -// deprecate a flag shorthand by specifying its flag name and a usage message -flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only") -``` -This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used. - -Note that usage message is essential here, and it should not be empty. - -## Hidden flags -It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text. - -**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available. -```go -// hide a flag by specifying its name -flags.MarkHidden("secretFlag") -``` - -## Disable sorting of flags -`pflag` allows you to disable sorting of flags for help and usage message. - -**Example**: -```go -flags.BoolP("verbose", "v", false, "verbose output") -flags.String("coolflag", "yeaah", "it's really cool flag") -flags.Int("usefulflag", 777, "sometimes it's very useful") -flags.SortFlags = false -flags.PrintDefaults() -``` -**Output**: -``` - -v, --verbose verbose output - --coolflag string it's really cool flag (default "yeaah") - --usefulflag int sometimes it's very useful (default 777) -``` - - -## Supporting Go flags when using pflag -In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary -to support flags defined by third-party dependencies (e.g. `golang/glog`). 
- -**Example**: You want to add the Go flags to the `CommandLine` flagset -```go -import ( - goflag "flag" - flag "github.com/spf13/pflag" -) - -var ip *int = flag.Int("flagname", 1234, "help message for flagname") - -func main() { - flag.CommandLine.AddGoFlagSet(goflag.CommandLine) - flag.Parse() -} -``` - -## More info - -You can see the full reference documentation of the pflag package -[at godoc.org][3], or through go's standard documentation system by -running `godoc -http=:6060` and browsing to -[http://localhost:6060/pkg/github.com/spf13/pflag][2] after -installation. - -[2]: http://localhost:6060/pkg/github.com/spf13/pflag -[3]: http://godoc.org/github.com/spf13/pflag diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go deleted file mode 100644 index c4c5c0bfd..000000000 --- a/vendor/github.com/spf13/pflag/bool.go +++ /dev/null @@ -1,94 +0,0 @@ -package pflag - -import "strconv" - -// optional interface to indicate boolean flags that can be -// supplied without "=value" text -type boolFlag interface { - Value - IsBoolFlag() bool -} - -// -- bool Value -type boolValue bool - -func newBoolValue(val bool, p *bool) *boolValue { - *p = val - return (*boolValue)(p) -} - -func (b *boolValue) Set(s string) error { - v, err := strconv.ParseBool(s) - *b = boolValue(v) - return err -} - -func (b *boolValue) Type() string { - return "bool" -} - -func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) } - -func (b *boolValue) IsBoolFlag() bool { return true } - -func boolConv(sval string) (interface{}, error) { - return strconv.ParseBool(sval) -} - -// GetBool return the bool value of a flag with the given name -func (f *FlagSet) GetBool(name string) (bool, error) { - val, err := f.getFlagType(name, "bool", boolConv) - if err != nil { - return false, err - } - return val.(bool), nil -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { - f.BoolVarP(p, name, "", value, usage) -} - -// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage) - flag.NoOptDefVal = "true" -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func BoolVar(p *bool, name string, value bool, usage string) { - BoolVarP(p, name, "", value, usage) -} - -// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. -func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage) - flag.NoOptDefVal = "true" -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func (f *FlagSet) Bool(name string, value bool, usage string) *bool { - return f.BoolP(name, "", value, usage) -} - -// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { - p := new(bool) - f.BoolVarP(p, name, shorthand, value, usage) - return p -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func Bool(name string, value bool, usage string) *bool { - return BoolP(name, "", value, usage) -} - -// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. -func BoolP(name, shorthand string, value bool, usage string) *bool { - b := CommandLine.BoolP(name, shorthand, value, usage) - return b -} diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go deleted file mode 100644 index 5af02f1a7..000000000 --- a/vendor/github.com/spf13/pflag/bool_slice.go +++ /dev/null @@ -1,147 +0,0 @@ -package pflag - -import ( - "io" - "strconv" - "strings" -) - -// -- boolSlice Value -type boolSliceValue struct { - value *[]bool - changed bool -} - -func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue { - bsv := new(boolSliceValue) - bsv.value = p - *bsv.value = val - return bsv -} - -// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag. -// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended. -func (s *boolSliceValue) Set(val string) error { - - // remove all quote characters - rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") - - // read flag arguments with CSV parser - boolStrSlice, err := readAsCSV(rmQuote.Replace(val)) - if err != nil && err != io.EOF { - return err - } - - // parse boolean values into slice - out := make([]bool, 0, len(boolStrSlice)) - for _, boolStr := range boolStrSlice { - b, err := strconv.ParseBool(strings.TrimSpace(boolStr)) - if err != nil { - return err - } - out = append(out, b) - } - - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - - s.changed = true - - return nil -} - -// Type returns a string that uniquely represents this flag's type. -func (s *boolSliceValue) Type() string { - return "boolSlice" -} - -// String defines a "native" format for this boolean slice flag value. -func (s *boolSliceValue) String() string { - - boolStrSlice := make([]string, len(*s.value)) - for i, b := range *s.value { - boolStrSlice[i] = strconv.FormatBool(b) - } - - out, _ := writeAsCSV(boolStrSlice) - - return "[" + out + "]" -} - -func boolSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []bool{}, nil - } - ss := strings.Split(val, ",") - out := make([]bool, len(ss)) - for i, t := range ss { - var err error - out[i], err = strconv.ParseBool(t) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetBoolSlice returns the []bool value of a flag with the given name. -func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) { - val, err := f.getFlagType(name, "boolSlice", boolSliceConv) - if err != nil { - return []bool{}, err - } - return val.([]bool), nil -} - -// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string. -// The argument p points to a []bool variable in which to store the value of the flag. 
-func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) { - f.VarP(newBoolSliceValue(value, p), name, "", usage) -} - -// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { - f.VarP(newBoolSliceValue(value, p), name, shorthand, usage) -} - -// BoolSliceVar defines a []bool flag with specified name, default value, and usage string. -// The argument p points to a []bool variable in which to store the value of the flag. -func BoolSliceVar(p *[]bool, name string, value []bool, usage string) { - CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage) -} - -// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. -func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { - CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage) -} - -// BoolSlice defines a []bool flag with specified name, default value, and usage string. -// The return value is the address of a []bool variable that stores the value of the flag. -func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool { - p := []bool{} - f.BoolSliceVarP(&p, name, "", value, usage) - return &p -} - -// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { - p := []bool{} - f.BoolSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// BoolSlice defines a []bool flag with specified name, default value, and usage string. -// The return value is the address of a []bool variable that stores the value of the flag. -func BoolSlice(name string, value []bool, usage string) *[]bool { - return CommandLine.BoolSliceP(name, "", value, usage) -} - -// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. -func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { - return CommandLine.BoolSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go deleted file mode 100644 index 250a43814..000000000 --- a/vendor/github.com/spf13/pflag/count.go +++ /dev/null @@ -1,96 +0,0 @@ -package pflag - -import "strconv" - -// -- count Value -type countValue int - -func newCountValue(val int, p *int) *countValue { - *p = val - return (*countValue)(p) -} - -func (i *countValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - // -1 means that no specific value was passed, so increment - if v == -1 { - *i = countValue(*i + 1) - } else { - *i = countValue(v) - } - return err -} - -func (i *countValue) Type() string { - return "count" -} - -func (i *countValue) String() string { return strconv.Itoa(int(*i)) } - -func countConv(sval string) (interface{}, error) { - i, err := strconv.Atoi(sval) - if err != nil { - return nil, err - } - return i, nil -} - -// GetCount return the int value of a flag with the given name -func (f *FlagSet) GetCount(name string) (int, error) { - val, err := f.getFlagType(name, "count", countConv) - if err != nil { - return 0, err - } - return val.(int), nil -} - -// CountVar defines a count flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. 
-// A count flag will add 1 to its value every time it is found on the command line -func (f *FlagSet) CountVar(p *int, name string, usage string) { - f.CountVarP(p, name, "", usage) -} - -// CountVarP is like CountVar only takes a shorthand for the flag name. -func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) { - flag := f.VarPF(newCountValue(0, p), name, shorthand, usage) - flag.NoOptDefVal = "-1" -} - -// CountVar is like CountVar, only the flag is placed on the CommandLine instead of a given flag set -func CountVar(p *int, name string, usage string) { - CommandLine.CountVar(p, name, usage) -} - -// CountVarP is like CountVar only takes a shorthand for the flag name. -func CountVarP(p *int, name, shorthand string, usage string) { - CommandLine.CountVarP(p, name, shorthand, usage) -} - -// Count defines a count flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value every time it is found on the command line -func (f *FlagSet) Count(name string, usage string) *int { - p := new(int) - f.CountVarP(p, name, "", usage) - return p -} - -// CountP is like Count only takes a shorthand for the flag name. -func (f *FlagSet) CountP(name, shorthand string, usage string) *int { - p := new(int) - f.CountVarP(p, name, shorthand, usage) - return p -} - -// Count defines a count flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value every time it is found on the command line -func Count(name string, usage string) *int { - return CommandLine.CountP(name, "", usage) -} - -// CountP is like Count only takes a shorthand for the flag name. -func CountP(name, shorthand string, usage string) *int { - return CommandLine.CountP(name, shorthand, usage) -} diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go deleted file mode 100644 index e9debef88..000000000 --- a/vendor/github.com/spf13/pflag/duration.go +++ /dev/null @@ -1,86 +0,0 @@ -package pflag - -import ( - "time" -) - -// -- time.Duration Value -type durationValue time.Duration - -func newDurationValue(val time.Duration, p *time.Duration) *durationValue { - *p = val - return (*durationValue)(p) -} - -func (d *durationValue) Set(s string) error { - v, err := time.ParseDuration(s) - *d = durationValue(v) - return err -} - -func (d *durationValue) Type() string { - return "duration" -} - -func (d *durationValue) String() string { return (*time.Duration)(d).String() } - -func durationConv(sval string) (interface{}, error) { - return time.ParseDuration(sval) -} - -// GetDuration returns the duration value of a flag with the given name -func (f *FlagSet) GetDuration(name string) (time.Duration, error) { - val, err := f.getFlagType(name, "duration", durationConv) - if err != nil { - return 0, err - } - return val.(time.Duration), nil -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, "", usage) -} - -// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, "", usage) -} - -// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. -func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, "", value, usage) - return p -} - -// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, shorthand, value, usage) - return p -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func Duration(name string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, "", value, usage) -} - -// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. -func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go deleted file mode 100644 index 6f1fc3007..000000000 --- a/vendor/github.com/spf13/pflag/flag.go +++ /dev/null @@ -1,1128 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the GNU extensions to the POSIX recommendations -for command-line options. See -http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -Usage: - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - - import flag "github.com/spf13/pflag" - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - var ip = flag.Int("flagname", 1234, "help message for flagname") -If you like, you can bind the flag to a variable using the Var() functions. 
- var flagvar int - func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") - } -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - flag.Var(&flagVal, "name", "help message for flagname") -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - flag.Parse() -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - fmt.Println("ip has value ", *ip) - fmt.Println("flagvar has value ", flagvar) - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - var ip = flag.IntP("flagname", "f", 1234, "help message") - var flagvar bool - func init() { - flag.BoolVarP("boolname", "b", true, "help message") - } - flag.VarP(&flagVar, "varname", "v", 1234, "help message") -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -Command line flag syntax: - --flag // boolean flags only - --flag=x - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. All but the last shorthand letter must be boolean flags. - // boolean flags - -f - -abc - // non-boolean flags - -n 1234 - -Ifile - // mixed - -abcs "hello" - -abcn1234 - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. -*/ -package pflag - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "sort" - "strings" -) - -// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. -var ErrHelp = errors.New("pflag: help requested") - -// ErrorHandling defines how to handle flag parsing errors. -type ErrorHandling int - -const ( - // ContinueOnError will return an err from Parse() if an error is found - ContinueOnError ErrorHandling = iota - // ExitOnError will call os.Exit(2) if an error is found when parsing - ExitOnError - // PanicOnError will panic() if an error is found when parsing flags - PanicOnError -) - -// NormalizedName is a flag name that has been normalized according to rules -// for the FlagSet (e.g. making '-' and '_' equivalent). -type NormalizedName string - -// A FlagSet represents a set of defined flags. -type FlagSet struct { - // Usage is the function called when an error occurs while parsing flags. 
- // The field is a function (not a method) that may be changed to point to - // a custom error handler. - Usage func() - - // SortFlags is used to indicate, if user wants to have sorted flags in - // help/usage messages. - SortFlags bool - - name string - parsed bool - actual map[NormalizedName]*Flag - orderedActual []*Flag - sortedActual []*Flag - formal map[NormalizedName]*Flag - orderedFormal []*Flag - sortedFormal []*Flag - shorthands map[byte]*Flag - args []string // arguments after flags - argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- - errorHandling ErrorHandling - output io.Writer // nil means stderr; use out() accessor - interspersed bool // allow interspersed option/non-option args - normalizeNameFunc func(f *FlagSet, name string) NormalizedName -} - -// A Flag represents the state of a flag. -type Flag struct { - Name string // name as it appears on command line - Shorthand string // one-letter abbreviated flag - Usage string // help message - Value Value // value as set - DefValue string // default value (as text); for usage message - Changed bool // If the user set the value (or if left to default) - NoOptDefVal string // default value (as text); if the flag is on the command line without any options - Deprecated string // If this flag is deprecated, this string is the new or now thing to use - Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text - ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use - Annotations map[string][]string // used by cobra.Command bash autocomple code -} - -// Value is the interface to the dynamic value stored in a flag. -// (The default value is represented as a string.) -type Value interface { - String() string - Set(string) error - Type() string -} - -// sortFlags returns the flags as a slice in lexicographical sorted order. -func sortFlags(flags map[NormalizedName]*Flag) []*Flag { - list := make(sort.StringSlice, len(flags)) - i := 0 - for k := range flags { - list[i] = string(k) - i++ - } - list.Sort() - result := make([]*Flag, len(list)) - for i, name := range list { - result[i] = flags[NormalizedName(name)] - } - return result -} - -// SetNormalizeFunc allows you to add a function which can translate flag names. -// Flags added to the FlagSet will be translated and then when anything tries to -// look up the flag that will also be translated. So it would be possible to create -// a flag named "getURL" and have it translated to "geturl". A user could then pass -// "--getUrl" which may also be translated to "geturl" and everything will work. -func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { - f.normalizeNameFunc = n - f.sortedFormal = f.sortedFormal[:0] - for k, v := range f.orderedFormal { - delete(f.formal, NormalizedName(v.Name)) - nname := f.normalizeFlagName(v.Name) - v.Name = string(nname) - f.formal[nname] = v - f.orderedFormal[k] = v - } -} - -// GetNormalizeFunc returns the previously set NormalizeFunc of a function which -// does no translation, if not set previously. 
-func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { - if f.normalizeNameFunc != nil { - return f.normalizeNameFunc - } - return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) } -} - -func (f *FlagSet) normalizeFlagName(name string) NormalizedName { - n := f.GetNormalizeFunc() - return n(f, name) -} - -func (f *FlagSet) out() io.Writer { - if f.output == nil { - return os.Stderr - } - return f.output -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -func (f *FlagSet) SetOutput(output io.Writer) { - f.output = output -} - -// VisitAll visits the flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits all flags, even those not set. -func (f *FlagSet) VisitAll(fn func(*Flag)) { - if len(f.formal) == 0 { - return - } - - var flags []*Flag - if f.SortFlags { - if len(f.formal) != len(f.sortedFormal) { - f.sortedFormal = sortFlags(f.formal) - } - flags = f.sortedFormal - } else { - flags = f.orderedFormal - } - - for _, flag := range flags { - fn(flag) - } -} - -// HasFlags returns a bool to indicate if the FlagSet has any flags definied. -func (f *FlagSet) HasFlags() bool { - return len(f.formal) > 0 -} - -// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags -// definied that are not hidden or deprecated. -func (f *FlagSet) HasAvailableFlags() bool { - for _, flag := range f.formal { - if !flag.Hidden && len(flag.Deprecated) == 0 { - return true - } - } - return false -} - -// VisitAll visits the command-line flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits all flags, even those not set. -func VisitAll(fn func(*Flag)) { - CommandLine.VisitAll(fn) -} - -// Visit visits the flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits only those flags that have been set. -func (f *FlagSet) Visit(fn func(*Flag)) { - if len(f.actual) == 0 { - return - } - - var flags []*Flag - if f.SortFlags { - if len(f.actual) != len(f.sortedActual) { - f.sortedActual = sortFlags(f.actual) - } - flags = f.sortedActual - } else { - flags = f.orderedActual - } - - for _, flag := range flags { - fn(flag) - } -} - -// Visit visits the command-line flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits only those flags that have been set. -func Visit(fn func(*Flag)) { - CommandLine.Visit(fn) -} - -// Lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) Lookup(name string) *Flag { - return f.lookup(f.normalizeFlagName(name)) -} - -// ShorthandLookup returns the Flag structure of the short handed flag, -// returning nil if none exists. -// It panics, if len(name) > 1. -func (f *FlagSet) ShorthandLookup(name string) *Flag { - if name == "" { - return nil - } - if len(name) > 1 { - msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name) - fmt.Fprintf(f.out(), msg) - panic(msg) - } - c := name[0] - return f.shorthands[c] -} - -// lookup returns the Flag structure of the named flag, returning nil if none exists. 
-func (f *FlagSet) lookup(name NormalizedName) *Flag { - return f.formal[name] -} - -// func to return a given type for a given flag name -func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { - flag := f.Lookup(name) - if flag == nil { - err := fmt.Errorf("flag accessed but not defined: %s", name) - return nil, err - } - - if flag.Value.Type() != ftype { - err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type()) - return nil, err - } - - sval := flag.Value.String() - result, err := convFunc(sval) - if err != nil { - return nil, err - } - return result, nil -} - -// ArgsLenAtDash will return the length of f.Args at the moment when a -- was -// found during arg parsing. This allows your program to know which args were -// before the -- and which came after. -func (f *FlagSet) ArgsLenAtDash() int { - return f.argsLenAtDash -} - -// MarkDeprecated indicated that a flag is deprecated in your program. It will -// continue to function but will not show up in help or usage messages. Using -// this flag will also print the given usageMessage. -func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - if usageMessage == "" { - return fmt.Errorf("deprecated message for flag %q must be set", name) - } - flag.Deprecated = usageMessage - return nil -} - -// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your -// program. It will continue to function but will not show up in help or usage -// messages. Using this flag will also print the given usageMessage. -func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - if usageMessage == "" { - return fmt.Errorf("deprecated message for flag %q must be set", name) - } - flag.ShorthandDeprecated = usageMessage - return nil -} - -// MarkHidden sets a flag to 'hidden' in your program. It will continue to -// function but will not show up in help or usage messages. -func (f *FlagSet) MarkHidden(name string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - flag.Hidden = true - return nil -} - -// Lookup returns the Flag structure of the named command-line flag, -// returning nil if none exists. -func Lookup(name string) *Flag { - return CommandLine.Lookup(name) -} - -// ShorthandLookup returns the Flag structure of the short handed flag, -// returning nil if none exists. -func ShorthandLookup(name string) *Flag { - return CommandLine.ShorthandLookup(name) -} - -// Set sets the value of the named flag. 
-func (f *FlagSet) Set(name, value string) error { - normalName := f.normalizeFlagName(name) - flag, ok := f.formal[normalName] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - - err := flag.Value.Set(value) - if err != nil { - var flagName string - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) - } else { - flagName = fmt.Sprintf("--%s", flag.Name) - } - return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err) - } - - if f.actual == nil { - f.actual = make(map[NormalizedName]*Flag) - } - f.actual[normalName] = flag - f.orderedActual = append(f.orderedActual, flag) - - flag.Changed = true - - if flag.Deprecated != "" { - fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) - } - return nil -} - -// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet. -// This is sometimes used by spf13/cobra programs which want to generate additional -// bash completion information. -func (f *FlagSet) SetAnnotation(name, key string, values []string) error { - normalName := f.normalizeFlagName(name) - flag, ok := f.formal[normalName] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - if flag.Annotations == nil { - flag.Annotations = map[string][]string{} - } - flag.Annotations[key] = values - return nil -} - -// Changed returns true if the flag was explicitly set during Parse() and false -// otherwise -func (f *FlagSet) Changed(name string) bool { - flag := f.Lookup(name) - // If a flag doesn't exist, it wasn't changed.... - if flag == nil { - return false - } - return flag.Changed -} - -// Set sets the value of the named command-line flag. -func Set(name, value string) error { - return CommandLine.Set(name, value) -} - -// PrintDefaults prints, to standard error unless configured -// otherwise, the default values of all defined flags in the set. -func (f *FlagSet) PrintDefaults() { - usages := f.FlagUsages() - fmt.Fprint(f.out(), usages) -} - -// defaultIsZeroValue returns true if the default value for this flag represents -// a zero value. -func (f *Flag) defaultIsZeroValue() bool { - switch f.Value.(type) { - case boolFlag: - return f.DefValue == "false" - case *durationValue: - // Beginning in Go 1.7, duration zero values are "0s" - return f.DefValue == "0" || f.DefValue == "0s" - case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value: - return f.DefValue == "0" - case *stringValue: - return f.DefValue == "" - case *ipValue, *ipMaskValue, *ipNetValue: - return f.DefValue == "" - case *intSliceValue, *stringSliceValue, *stringArrayValue: - return f.DefValue == "[]" - default: - switch f.Value.String() { - case "false": - return true - case "<nil>": - return true - case "": - return true - case "0": - return true - } - return false - } -} - -// UnquoteUsage extracts a back-quoted name from the usage -// string for a flag and returns it and the un-quoted usage. -// Given "a `name` to show" it returns ("name", "a name to show"). -// If there are no back quotes, the name is an educated guess of the -// type of the flag's value, or the empty string if the flag is boolean. -func UnquoteUsage(flag *Flag) (name string, usage string) { - // Look for a back-quoted name, but avoid the strings package.
- usage = flag.Usage - for i := 0; i < len(usage); i++ { - if usage[i] == '`' { - for j := i + 1; j < len(usage); j++ { - if usage[j] == '`' { - name = usage[i+1 : j] - usage = usage[:i] + name + usage[j+1:] - return name, usage - } - } - break // Only one back quote; use type name. - } - } - - name = flag.Value.Type() - switch name { - case "bool": - name = "" - case "float64": - name = "float" - case "int64": - name = "int" - case "uint64": - name = "uint" - } - - return -} - -// Splits the string `s` on whitespace into an initial substring up to -// `i` runes in length and the remainder. Will go `slop` over `i` if -// that encompasses the entire string (which allows the caller to -// avoid short orphan words on the final line). -func wrapN(i, slop int, s string) (string, string) { - if i+slop > len(s) { - return s, "" - } - - w := strings.LastIndexAny(s[:i], " \t") - if w <= 0 { - return s, "" - } - - return s[:w], s[w+1:] -} - -// Wraps the string `s` to a maximum width `w` with leading indent -// `i`. The first line is not indented (this is assumed to be done by -// caller). Pass `w` == 0 to do no wrapping -func wrap(i, w int, s string) string { - if w == 0 { - return s - } - - // space between indent i and end of line width w into which - // we should wrap the text. - wrap := w - i - - var r, l string - - // Not enough space for sensible wrapping. Wrap as a block on - // the next line instead. - if wrap < 24 { - i = 16 - wrap = w - i - r += "\n" + strings.Repeat(" ", i) - } - // If still not enough space then don't even try to wrap. - if wrap < 24 { - return s - } - - // Try to avoid short orphan words on the final line, by - // allowing wrapN to go a bit over if that would fit in the - // remainder of the line. - slop := 5 - wrap = wrap - slop - - // Handle first line, which is indented by the caller (or the - // special case above) - l, s = wrapN(wrap, slop, s) - r = r + l - - // Now wrap the rest - for s != "" { - var t string - - t, s = wrapN(wrap, slop, s) - r = r + "\n" + strings.Repeat(" ", i) + t - } - - return r - -} - -// FlagUsagesWrapped returns a string containing the usage information -// for all flags in the FlagSet. 
Wrapped to `cols` columns (0 for no -// wrapping) -func (f *FlagSet) FlagUsagesWrapped(cols int) string { - buf := new(bytes.Buffer) - - lines := make([]string, 0, len(f.formal)) - - maxlen := 0 - f.VisitAll(func(flag *Flag) { - if flag.Deprecated != "" || flag.Hidden { - return - } - - line := "" - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name) - } else { - line = fmt.Sprintf(" --%s", flag.Name) - } - - varname, usage := UnquoteUsage(flag) - if varname != "" { - line += " " + varname - } - if flag.NoOptDefVal != "" { - switch flag.Value.Type() { - case "string": - line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) - case "bool": - if flag.NoOptDefVal != "true" { - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - default: - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - } - - // This special character will be replaced with spacing once the - // correct alignment is calculated - line += "\x00" - if len(line) > maxlen { - maxlen = len(line) - } - - line += usage - if !flag.defaultIsZeroValue() { - if flag.Value.Type() == "string" { - line += fmt.Sprintf(" (default %q)", flag.DefValue) - } else { - line += fmt.Sprintf(" (default %s)", flag.DefValue) - } - } - - lines = append(lines, line) - }) - - for _, line := range lines { - sidx := strings.Index(line, "\x00") - spacing := strings.Repeat(" ", maxlen-sidx) - // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx - fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:])) - } - - return buf.String() -} - -// FlagUsages returns a string containing the usage information for all flags in -// the FlagSet -func (f *FlagSet) FlagUsages() string { - return f.FlagUsagesWrapped(0) -} - -// PrintDefaults prints to standard error the default values of all defined command-line flags. -func PrintDefaults() { - CommandLine.PrintDefaults() -} - -// defaultUsage is the default function to print a usage message. -func defaultUsage(f *FlagSet) { - fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) - f.PrintDefaults() -} - -// NOTE: Usage is not just defaultUsage(CommandLine) -// because it serves (via godoc flag Usage) as the example -// for how to write your own usage function. - -// Usage prints to standard error a usage message documenting all defined command-line flags. -// The function is a variable that may be changed to point to a custom function. -// By default it prints a simple header and calls PrintDefaults; for details about the -// format of the output and how to control it, see the documentation for PrintDefaults. -var Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - PrintDefaults() -} - -// NFlag returns the number of flags that have been set. -func (f *FlagSet) NFlag() int { return len(f.actual) } - -// NFlag returns the number of command-line flags that have been set. -func NFlag() int { return len(CommandLine.actual) } - -// Arg returns the i'th argument. Arg(0) is the first remaining argument -// after flags have been processed. -func (f *FlagSet) Arg(i int) string { - if i < 0 || i >= len(f.args) { - return "" - } - return f.args[i] -} - -// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument -// after flags have been processed. -func Arg(i int) string { - return CommandLine.Arg(i) -} - -// NArg is the number of arguments remaining after flags have been processed. 
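
The usage renderer in this hunk wraps help text to a target column width; a minimal sketch of calling it directly (nothing here is taken from the patch, the flag is invented):

    // assumes: import ("fmt"; "os"; "github.com/spf13/pflag")
    fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
    fs.IntP("port", "p", 9090, "port to listen on (hypothetical flag)")
    fmt.Fprint(os.Stderr, fs.FlagUsagesWrapped(80)) // pass 0 to disable wrapping
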
-func (f *FlagSet) NArg() int { return len(f.args) } - -// NArg is the number of arguments remaining after flags have been processed. -func NArg() int { return len(CommandLine.args) } - -// Args returns the non-flag arguments. -func (f *FlagSet) Args() []string { return f.args } - -// Args returns the non-flag command-line arguments. -func Args() []string { return CommandLine.args } - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func (f *FlagSet) Var(value Value, name string, usage string) { - f.VarP(value, name, "", usage) -} - -// VarPF is like VarP, but returns the flag created -func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag { - // Remember the default value as a string; it won't change. - flag := &Flag{ - Name: name, - Shorthand: shorthand, - Usage: usage, - Value: value, - DefValue: value.String(), - } - f.AddFlag(flag) - return flag -} - -// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { - f.VarPF(value, name, shorthand, usage) -} - -// AddFlag will add the flag to the FlagSet -func (f *FlagSet) AddFlag(flag *Flag) { - normalizedFlagName := f.normalizeFlagName(flag.Name) - - _, alreadyThere := f.formal[normalizedFlagName] - if alreadyThere { - msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) - fmt.Fprintln(f.out(), msg) - panic(msg) // Happens only if flags are declared with identical names - } - if f.formal == nil { - f.formal = make(map[NormalizedName]*Flag) - } - - flag.Name = string(normalizedFlagName) - f.formal[normalizedFlagName] = flag - f.orderedFormal = append(f.orderedFormal, flag) - - if flag.Shorthand == "" { - return - } - if len(flag.Shorthand) > 1 { - msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand) - fmt.Fprintf(f.out(), msg) - panic(msg) - } - if f.shorthands == nil { - f.shorthands = make(map[byte]*Flag) - } - c := flag.Shorthand[0] - used, alreadyThere := f.shorthands[c] - if alreadyThere { - msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name) - fmt.Fprintf(f.out(), msg) - panic(msg) - } - f.shorthands[c] = flag -} - -// AddFlagSet adds one FlagSet to another. If a flag is already present in f -// the flag from newSet will be ignored. -func (f *FlagSet) AddFlagSet(newSet *FlagSet) { - if newSet == nil { - return - } - newSet.VisitAll(func(flag *Flag) { - if f.Lookup(flag.Name) == nil { - f.AddFlag(flag) - } - }) -} - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. 
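
A custom Value of the comma-separated kind described in the Var comment above looks roughly like this sketch; the csvValue type and the flag name are hypothetical, not part of the vendored code.

    // assumes: import ("strings"; "github.com/spf13/pflag")
    type csvValue []string

    func (c *csvValue) String() string     { return strings.Join(*c, ",") }
    func (c *csvValue) Set(s string) error { *c = strings.Split(s, ","); return nil }
    func (c *csvValue) Type() string       { return "csv" }

    // registration: fs.Var(&csvValue{}, "labels", "comma-separated labels (hypothetical flag)")
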
-func Var(value Value, name string, usage string) { - CommandLine.VarP(value, name, "", usage) -} - -// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. -func VarP(value Value, name, shorthand, usage string) { - CommandLine.VarP(value, name, shorthand, usage) -} - -// failf prints to standard error a formatted error and usage message and -// returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) - fmt.Fprintln(f.out(), err) - f.usage() - return err -} - -// usage calls the Usage method for the flag set, or the usage function if -// the flag set is CommandLine. -func (f *FlagSet) usage() { - if f == CommandLine { - Usage() - } else if f.Usage == nil { - defaultUsage(f) - } else { - f.Usage() - } -} - -func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { - a = args - name := s[2:] - if len(name) == 0 || name[0] == '-' || name[0] == '=' { - err = f.failf("bad flag syntax: %s", s) - return - } - - split := strings.SplitN(name, "=", 2) - name = split[0] - flag, exists := f.formal[f.normalizeFlagName(name)] - if !exists { - if name == "help" { // special case for nice help message. - f.usage() - return a, ErrHelp - } - err = f.failf("unknown flag: --%s", name) - return - } - - var value string - if len(split) == 2 { - // '--flag=arg' - value = split[1] - } else if flag.NoOptDefVal != "" { - // '--flag' (arg was optional) - value = flag.NoOptDefVal - } else if len(a) > 0 { - // '--flag arg' - value = a[0] - a = a[1:] - } else { - // '--flag' (arg was required) - err = f.failf("flag needs an argument: %s", s) - return - } - - err = fn(flag, value) - return -} - -func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { - if strings.HasPrefix(shorthands, "test.") { - return - } - - outArgs = args - outShorts = shorthands[1:] - c := shorthands[0] - - flag, exists := f.shorthands[c] - if !exists { - if c == 'h' { // special case for nice help message. - f.usage() - err = ErrHelp - return - } - err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) - return - } - - var value string - if len(shorthands) > 2 && shorthands[1] == '=' { - // '-f=arg' - value = shorthands[2:] - outShorts = "" - } else if flag.NoOptDefVal != "" { - // '-f' (arg was optional) - value = flag.NoOptDefVal - } else if len(shorthands) > 1 { - // '-farg' - value = shorthands[1:] - outShorts = "" - } else if len(args) > 0 { - // '-f arg' - value = args[0] - outArgs = args[1:] - } else { - // '-f' (arg was required) - err = f.failf("flag needs an argument: %q in -%s", c, shorthands) - return - } - - if flag.ShorthandDeprecated != "" { - fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) - } - - err = fn(flag, value) - return -} - -func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) { - a = args - shorthands := s[1:] - - // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv"). - for len(shorthands) > 0 { - shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn) - if err != nil { - return - } - } - - return -} - -func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) { - for len(args) > 0 { - s := args[0] - args = args[1:] - if len(s) == 0 || s[0] != '-' || len(s) == 1 { - if !f.interspersed { - f.args = append(f.args, s) - f.args = append(f.args, args...) 
- return nil - } - f.args = append(f.args, s) - continue - } - - if s[1] == '-' { - if len(s) == 2 { // "--" terminates the flags - f.argsLenAtDash = len(f.args) - f.args = append(f.args, args...) - break - } - args, err = f.parseLongArg(s, args, fn) - } else { - args, err = f.parseShortArg(s, args, fn) - } - if err != nil { - return - } - } - return -} - -// Parse parses flag definitions from the argument list, which should not -// include the command name. Must be called after all flags in the FlagSet -// are defined and before flags are accessed by the program. -// The return value will be ErrHelp if -help was set but not defined. -func (f *FlagSet) Parse(arguments []string) error { - f.parsed = true - - if len(arguments) < 0 { - return nil - } - - f.args = make([]string, 0, len(arguments)) - - set := func(flag *Flag, value string) error { - return f.Set(flag.Name, value) - } - - err := f.parseArgs(arguments, set) - if err != nil { - switch f.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -type parseFunc func(flag *Flag, value string) error - -// ParseAll parses flag definitions from the argument list, which should not -// include the command name. The arguments for fn are flag and value. Must be -// called after all flags in the FlagSet are defined and before flags are -// accessed by the program. The return value will be ErrHelp if -help was set -// but not defined. -func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error { - f.parsed = true - f.args = make([]string, 0, len(arguments)) - - err := f.parseArgs(arguments, fn) - if err != nil { - switch f.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -// Parsed reports whether f.Parse has been called. -func (f *FlagSet) Parsed() bool { - return f.parsed -} - -// Parse parses the command-line flags from os.Args[1:]. Must be called -// after all flags are defined and before flags are accessed by the program. -func Parse() { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.Parse(os.Args[1:]) -} - -// ParseAll parses the command-line flags from os.Args[1:] and called fn for each. -// The arguments for fn are flag and value. Must be called after all flags are -// defined and before flags are accessed by the program. -func ParseAll(fn func(flag *Flag, value string) error) { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.ParseAll(os.Args[1:], fn) -} - -// SetInterspersed sets whether to support interspersed option/non-option arguments. -func SetInterspersed(interspersed bool) { - CommandLine.SetInterspersed(interspersed) -} - -// Parsed returns true if the command-line flags have been parsed. -func Parsed() bool { - return CommandLine.Parsed() -} - -// CommandLine is the default set of command-line flags, parsed from os.Args. -var CommandLine = NewFlagSet(os.Args[0], ExitOnError) - -// NewFlagSet returns a new, empty flag set with the specified name, -// error handling property and SortFlags set to true. -func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { - f := &FlagSet{ - name: name, - errorHandling: errorHandling, - argsLenAtDash: -1, - interspersed: true, - SortFlags: true, - } - return f -} - -// SetInterspersed sets whether to support interspersed option/non-option arguments. 
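
Parse and ParseAll above can be exercised against an explicit argument vector; a hedged sketch with invented arguments:

    // assumes: import "github.com/spf13/pflag"
    fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
    port := fs.IntP("port", "p", 9090, "port (hypothetical flag)")
    err := fs.Parse([]string{"-p", "8080", "--", "target1"})
    // err == nil, *port == 8080, fs.Args() == []string{"target1"}, fs.ArgsLenAtDash() == 0
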
-func (f *FlagSet) SetInterspersed(interspersed bool) { - f.interspersed = interspersed -} - -// Init sets the name and error handling property for a flag set. -// By default, the zero FlagSet uses an empty name and the -// ContinueOnError error handling policy. -func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { - f.name = name - f.errorHandling = errorHandling - f.argsLenAtDash = -1 -} diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go deleted file mode 100644 index a243f81f7..000000000 --- a/vendor/github.com/spf13/pflag/float32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- float32 Value -type float32Value float32 - -func newFloat32Value(val float32, p *float32) *float32Value { - *p = val - return (*float32Value)(p) -} - -func (f *float32Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 32) - *f = float32Value(v) - return err -} - -func (f *float32Value) Type() string { - return "float32" -} - -func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) } - -func float32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseFloat(sval, 32) - if err != nil { - return 0, err - } - return float32(v), nil -} - -// GetFloat32 return the float32 value of a flag with the given name -func (f *FlagSet) GetFloat32(name string) (float32, error) { - val, err := f.getFlagType(name, "float32", float32Conv) - if err != nil { - return 0, err - } - return val.(float32), nil -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func Float32Var(p *float32, name string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, "", value, usage) - return p -} - -// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, shorthand, value, usage) - return p -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. 
-func Float32(name string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, "", value, usage) -} - -// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. -func Float32P(name, shorthand string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go deleted file mode 100644 index 04b5492a7..000000000 --- a/vendor/github.com/spf13/pflag/float64.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- float64 Value -type float64Value float64 - -func newFloat64Value(val float64, p *float64) *float64Value { - *p = val - return (*float64Value)(p) -} - -func (f *float64Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 64) - *f = float64Value(v) - return err -} - -func (f *float64Value) Type() string { - return "float64" -} - -func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) } - -func float64Conv(sval string) (interface{}, error) { - return strconv.ParseFloat(sval, 64) -} - -// GetFloat64 return the float64 value of a flag with the given name -func (f *FlagSet) GetFloat64(name string) (float64, error) { - val, err := f.getFlagType(name, "float64", float64Conv) - if err != nil { - return 0, err - } - return val.(float64), nil -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func Float64Var(p *float64, name string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, "", value, usage) - return p -} - -// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, shorthand, value, usage) - return p -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. 
-func Float64(name string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, "", value, usage) -} - -// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. -func Float64P(name, shorthand string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go deleted file mode 100644 index c4f47ebe5..000000000 --- a/vendor/github.com/spf13/pflag/golangflag.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag - -import ( - goflag "flag" - "reflect" - "strings" -) - -// flagValueWrapper implements pflag.Value around a flag.Value. The main -// difference here is the addition of the Type method that returns a string -// name of the type. As this is generally unknown, we approximate that with -// reflection. -type flagValueWrapper struct { - inner goflag.Value - flagType string -} - -// We are just copying the boolFlag interface out of goflag as that is what -// they use to decide if a flag should get "true" when no arg is given. -type goBoolFlag interface { - goflag.Value - IsBoolFlag() bool -} - -func wrapFlagValue(v goflag.Value) Value { - // If the flag.Value happens to also be a pflag.Value, just use it directly. - if pv, ok := v.(Value); ok { - return pv - } - - pv := &flagValueWrapper{ - inner: v, - } - - t := reflect.TypeOf(v) - if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr { - t = t.Elem() - } - - pv.flagType = strings.TrimSuffix(t.Name(), "Value") - return pv -} - -func (v *flagValueWrapper) String() string { - return v.inner.String() -} - -func (v *flagValueWrapper) Set(s string) error { - return v.inner.Set(s) -} - -func (v *flagValueWrapper) Type() string { - return v.flagType -} - -// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag -// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessiblei -// with both `-v` and `--v` in flags. If the golang flag was more than a single -// character (ex: `verbose`) it will only be accessible via `--verbose` -func PFlagFromGoFlag(goflag *goflag.Flag) *Flag { - // Remember the default value as a string; it won't change. 
- flag := &Flag{ - Name: goflag.Name, - Usage: goflag.Usage, - Value: wrapFlagValue(goflag.Value), - // Looks like golang flags don't set DefValue correctly :-( - //DefValue: goflag.DefValue, - DefValue: goflag.Value.String(), - } - // Ex: if the golang flag was -v, allow both -v and --v to work - if len(flag.Name) == 1 { - flag.Shorthand = flag.Name - } - if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() { - flag.NoOptDefVal = "true" - } - return flag -} - -// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet -func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) { - if f.Lookup(goflag.Name) != nil { - return - } - newflag := PFlagFromGoFlag(goflag) - f.AddFlag(newflag) -} - -// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet -func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { - if newSet == nil { - return - } - newSet.VisitAll(func(goflag *goflag.Flag) { - f.AddGoFlag(goflag) - }) -} diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go deleted file mode 100644 index 1474b89df..000000000 --- a/vendor/github.com/spf13/pflag/int.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- int Value -type intValue int - -func newIntValue(val int, p *int) *intValue { - *p = val - return (*intValue)(p) -} - -func (i *intValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = intValue(v) - return err -} - -func (i *intValue) Type() string { - return "int" -} - -func (i *intValue) String() string { return strconv.Itoa(int(*i)) } - -func intConv(sval string) (interface{}, error) { - return strconv.Atoi(sval) -} - -// GetInt return the int value of a flag with the given name -func (f *FlagSet) GetInt(name string) (int, error) { - val, err := f.getFlagType(name, "int", intConv) - if err != nil { - return 0, err - } - return val.(int), nil -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { - f.VarP(newIntValue(value, p), name, "", usage) -} - -// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { - f.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func IntVar(p *int, name string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, "", usage) -} - -// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. -func IntVarP(p *int, name, shorthand string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func (f *FlagSet) Int(name string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, "", value, usage) - return p -} - -// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. 
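
The golangflag.go bridge deleted above exists so that flags registered on the standard library's flag package (for example by glog) can be folded into a pflag set; a minimal sketch, assuming both packages are imported:

    // assumes: import (goflag "flag"; "github.com/spf13/pflag")
    pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
    pflag.Parse() // stdlib-defined flags now parse alongside pflag ones
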
-func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, shorthand, value, usage) - return p -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func Int(name string, value int, usage string) *int { - return CommandLine.IntP(name, "", value, usage) -} - -// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. -func IntP(name, shorthand string, value int, usage string) *int { - return CommandLine.IntP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go deleted file mode 100644 index 9b95944f0..000000000 --- a/vendor/github.com/spf13/pflag/int32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int32 Value -type int32Value int32 - -func newInt32Value(val int32, p *int32) *int32Value { - *p = val - return (*int32Value)(p) -} - -func (i *int32Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 32) - *i = int32Value(v) - return err -} - -func (i *int32Value) Type() string { - return "int32" -} - -func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 32) - if err != nil { - return 0, err - } - return int32(v), nil -} - -// GetInt32 return the int32 value of a flag with the given name -func (f *FlagSet) GetInt32(name string) (int32, error) { - val, err := f.getFlagType(name, "int32", int32Conv) - if err != nil { - return 0, err - } - return val.(int32), nil -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, "", usage) -} - -// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func Int32Var(p *int32, name string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, "", usage) -} - -// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. -func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, "", value, usage) - return p -} - -// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, shorthand, value, usage) - return p -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. 
-// The return value is the address of an int32 variable that stores the value of the flag. -func Int32(name string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, "", value, usage) -} - -// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. -func Int32P(name, shorthand string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go deleted file mode 100644 index 0026d781d..000000000 --- a/vendor/github.com/spf13/pflag/int64.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- int64 Value -type int64Value int64 - -func newInt64Value(val int64, p *int64) *int64Value { - *p = val - return (*int64Value)(p) -} - -func (i *int64Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = int64Value(v) - return err -} - -func (i *int64Value) Type() string { - return "int64" -} - -func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int64Conv(sval string) (interface{}, error) { - return strconv.ParseInt(sval, 0, 64) -} - -// GetInt64 return the int64 value of a flag with the given name -func (f *FlagSet) GetInt64(name string) (int64, error) { - val, err := f.getFlagType(name, "int64", int64Conv) - if err != nil { - return 0, err - } - return val.(int64), nil -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, "", usage) -} - -// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func Int64Var(p *int64, name string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, "", usage) -} - -// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, "", value, usage) - return p -} - -// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, shorthand, value, usage) - return p -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. 
-func Int64(name string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, "", value, usage) -} - -// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. -func Int64P(name, shorthand string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go deleted file mode 100644 index 4da92228e..000000000 --- a/vendor/github.com/spf13/pflag/int8.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int8 Value -type int8Value int8 - -func newInt8Value(val int8, p *int8) *int8Value { - *p = val - return (*int8Value)(p) -} - -func (i *int8Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 8) - *i = int8Value(v) - return err -} - -func (i *int8Value) Type() string { - return "int8" -} - -func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int8Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 8) - if err != nil { - return 0, err - } - return int8(v), nil -} - -// GetInt8 return the int8 value of a flag with the given name -func (f *FlagSet) GetInt8(name string) (int8, error) { - val, err := f.getFlagType(name, "int8", int8Conv) - if err != nil { - return 0, err - } - return val.(int8), nil -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, "", usage) -} - -// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func Int8Var(p *int8, name string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, "", usage) -} - -// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, "", value, usage) - return p -} - -// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, shorthand, value, usage) - return p -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func Int8(name string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, "", value, usage) -} - -// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. 
-func Int8P(name, shorthand string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go deleted file mode 100644 index 1e7c9edde..000000000 --- a/vendor/github.com/spf13/pflag/int_slice.go +++ /dev/null @@ -1,128 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- intSlice Value -type intSliceValue struct { - value *[]int - changed bool -} - -func newIntSliceValue(val []int, p *[]int) *intSliceValue { - isv := new(intSliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *intSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]int, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *intSliceValue) Type() string { - return "intSlice" -} - -func (s *intSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func intSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []int{}, nil - } - ss := strings.Split(val, ",") - out := make([]int, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetIntSlice return the []int value of a flag with the given name -func (f *FlagSet) GetIntSlice(name string) ([]int, error) { - val, err := f.getFlagType(name, "intSlice", intSliceConv) - if err != nil { - return []int{}, err - } - return val.([]int), nil -} - -// IntSliceVar defines a intSlice flag with specified name, default value, and usage string. -// The argument p points to a []int variable in which to store the value of the flag. -func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) { - f.VarP(newIntSliceValue(value, p), name, "", usage) -} - -// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { - f.VarP(newIntSliceValue(value, p), name, shorthand, usage) -} - -// IntSliceVar defines a int[] flag with specified name, default value, and usage string. -// The argument p points to a int[] variable in which to store the value of the flag. -func IntSliceVar(p *[]int, name string, value []int, usage string) { - CommandLine.VarP(newIntSliceValue(value, p), name, "", usage) -} - -// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. -func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { - CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage) -} - -// IntSlice defines a []int flag with specified name, default value, and usage string. -// The return value is the address of a []int variable that stores the value of the flag. -func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int { - p := []int{} - f.IntSliceVarP(&p, name, "", value, usage) - return &p -} - -// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. 
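
Slice-valued flags like the intSlice implementation above accept both a comma-separated list and repeated occurrences; a hedged usage sketch (flag name invented):

    // assumes: import "github.com/spf13/pflag"
    fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
    fs.IntSlice("ports", []int{9090}, "ports to listen on (hypothetical flag)")
    _ = fs.Parse([]string{"--ports=8080,8081", "--ports", "8082"})
    ports, _ := fs.GetIntSlice("ports") // [8080 8081 8082]: the first Set replaces the default, later Sets append
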
-func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int { - p := []int{} - f.IntSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// IntSlice defines a []int flag with specified name, default value, and usage string. -// The return value is the address of a []int variable that stores the value of the flag. -func IntSlice(name string, value []int, usage string) *[]int { - return CommandLine.IntSliceP(name, "", value, usage) -} - -// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. -func IntSliceP(name, shorthand string, value []int, usage string) *[]int { - return CommandLine.IntSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go deleted file mode 100644 index 3d414ba69..000000000 --- a/vendor/github.com/spf13/pflag/ip.go +++ /dev/null @@ -1,94 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strings" -) - -// -- net.IP value -type ipValue net.IP - -func newIPValue(val net.IP, p *net.IP) *ipValue { - *p = val - return (*ipValue)(p) -} - -func (i *ipValue) String() string { return net.IP(*i).String() } -func (i *ipValue) Set(s string) error { - ip := net.ParseIP(strings.TrimSpace(s)) - if ip == nil { - return fmt.Errorf("failed to parse IP: %q", s) - } - *i = ipValue(ip) - return nil -} - -func (i *ipValue) Type() string { - return "ip" -} - -func ipConv(sval string) (interface{}, error) { - ip := net.ParseIP(sval) - if ip != nil { - return ip, nil - } - return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) -} - -// GetIP return the net.IP value of a flag with the given name -func (f *FlagSet) GetIP(name string) (net.IP, error) { - val, err := f.getFlagType(name, "ip", ipConv) - if err != nil { - return nil, err - } - return val.(net.IP), nil -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, "", usage) -} - -// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func IPVar(p *net.IP, name string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, "", usage) -} - -// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. -func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, "", value, usage) - return p -} - -// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, shorthand, value, usage) - return p -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func IP(name string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, "", value, usage) -} - -// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. -func IPP(name, shorthand string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go deleted file mode 100644 index 7dd196fe3..000000000 --- a/vendor/github.com/spf13/pflag/ip_slice.go +++ /dev/null @@ -1,148 +0,0 @@ -package pflag - -import ( - "fmt" - "io" - "net" - "strings" -) - -// -- ipSlice Value -type ipSliceValue struct { - value *[]net.IP - changed bool -} - -func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue { - ipsv := new(ipSliceValue) - ipsv.value = p - *ipsv.value = val - return ipsv -} - -// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag. -// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended. -func (s *ipSliceValue) Set(val string) error { - - // remove all quote characters - rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") - - // read flag arguments with CSV parser - ipStrSlice, err := readAsCSV(rmQuote.Replace(val)) - if err != nil && err != io.EOF { - return err - } - - // parse ip values into slice - out := make([]net.IP, 0, len(ipStrSlice)) - for _, ipStr := range ipStrSlice { - ip := net.ParseIP(strings.TrimSpace(ipStr)) - if ip == nil { - return fmt.Errorf("invalid string being converted to IP address: %s", ipStr) - } - out = append(out, ip) - } - - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - - s.changed = true - - return nil -} - -// Type returns a string that uniquely represents this flag's type. -func (s *ipSliceValue) Type() string { - return "ipSlice" -} - -// String defines a "native" format for this net.IP slice flag value. -func (s *ipSliceValue) String() string { - - ipStrSlice := make([]string, len(*s.value)) - for i, ip := range *s.value { - ipStrSlice[i] = ip.String() - } - - out, _ := writeAsCSV(ipStrSlice) - - return "[" + out + "]" -} - -func ipSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Emtpy string would cause a slice with one (empty) entry - if len(val) == 0 { - return []net.IP{}, nil - } - ss := strings.Split(val, ",") - out := make([]net.IP, len(ss)) - for i, sval := range ss { - ip := net.ParseIP(strings.TrimSpace(sval)) - if ip == nil { - return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) - } - out[i] = ip - } - return out, nil -} - -// GetIPSlice returns the []net.IP value of a flag with the given name -func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) { - val, err := f.getFlagType(name, "ipSlice", ipSliceConv) - if err != nil { - return []net.IP{}, err - } - return val.([]net.IP), nil -} - -// IPSliceVar defines a ipSlice flag with specified name, default value, and usage string. -// The argument p points to a []net.IP variable in which to store the value of the flag. 
-func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { - f.VarP(newIPSliceValue(value, p), name, "", usage) -} - -// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { - f.VarP(newIPSliceValue(value, p), name, shorthand, usage) -} - -// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string. -// The argument p points to a []net.IP variable in which to store the value of the flag. -func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { - CommandLine.VarP(newIPSliceValue(value, p), name, "", usage) -} - -// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. -func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { - CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage) -} - -// IPSlice defines a []net.IP flag with specified name, default value, and usage string. -// The return value is the address of a []net.IP variable that stores the value of that flag. -func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP { - p := []net.IP{} - f.IPSliceVarP(&p, name, "", value, usage) - return &p -} - -// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { - p := []net.IP{} - f.IPSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// IPSlice defines a []net.IP flag with specified name, default value, and usage string. -// The return value is the address of a []net.IP variable that stores the value of the flag. -func IPSlice(name string, value []net.IP, usage string) *[]net.IP { - return CommandLine.IPSliceP(name, "", value, usage) -} - -// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. -func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { - return CommandLine.IPSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go deleted file mode 100644 index 5bd44bd21..000000000 --- a/vendor/github.com/spf13/pflag/ipmask.go +++ /dev/null @@ -1,122 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strconv" -) - -// -- net.IPMask value -type ipMaskValue net.IPMask - -func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { - *p = val - return (*ipMaskValue)(p) -} - -func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } -func (i *ipMaskValue) Set(s string) error { - ip := ParseIPv4Mask(s) - if ip == nil { - return fmt.Errorf("failed to parse IP mask: %q", s) - } - *i = ipMaskValue(ip) - return nil -} - -func (i *ipMaskValue) Type() string { - return "ipMask" -} - -// ParseIPv4Mask written in IP form (e.g. 255.255.255.0). -// This function should really belong to the net package. 
-func ParseIPv4Mask(s string) net.IPMask { - mask := net.ParseIP(s) - if mask == nil { - if len(s) != 8 { - return nil - } - // net.IPMask.String() actually outputs things like ffffff00 - // so write a horrible parser for that as well :-( - m := []int{} - for i := 0; i < 4; i++ { - b := "0x" + s[2*i:2*i+2] - d, err := strconv.ParseInt(b, 0, 0) - if err != nil { - return nil - } - m = append(m, int(d)) - } - s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3]) - mask = net.ParseIP(s) - if mask == nil { - return nil - } - } - return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) -} - -func parseIPv4Mask(sval string) (interface{}, error) { - mask := ParseIPv4Mask(sval) - if mask == nil { - return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval) - } - return mask, nil -} - -// GetIPv4Mask return the net.IPv4Mask value of a flag with the given name -func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) { - val, err := f.getFlagType(name, "ipMask", parseIPv4Mask) - if err != nil { - return nil, err - } - return val.(net.IPMask), nil -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. -func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. -func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, "", value, usage) - return p -} - -// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, shorthand, value, usage) - return p -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func IPMask(name string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, "", value, usage) -} - -// IPMaskP is like IP, but accepts a shorthand letter that can be used after a single dash. 
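
ParseIPv4Mask above accepts both the dotted-quad form and the hex form that net.IPMask.String() produces; a small illustrative sketch:

    // assumes: import "github.com/spf13/pflag"
    m1 := pflag.ParseIPv4Mask("255.255.255.0")
    m2 := pflag.ParseIPv4Mask("ffffff00")
    // m1.String() == m2.String() == "ffffff00"; malformed input yields nil
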
-func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go deleted file mode 100644 index e2c1b8bcd..000000000 --- a/vendor/github.com/spf13/pflag/ipnet.go +++ /dev/null @@ -1,98 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strings" -) - -// IPNet adapts net.IPNet for use as a flag. -type ipNetValue net.IPNet - -func (ipnet ipNetValue) String() string { - n := net.IPNet(ipnet) - return n.String() -} - -func (ipnet *ipNetValue) Set(value string) error { - _, n, err := net.ParseCIDR(strings.TrimSpace(value)) - if err != nil { - return err - } - *ipnet = ipNetValue(*n) - return nil -} - -func (*ipNetValue) Type() string { - return "ipNet" -} - -func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue { - *p = val - return (*ipNetValue)(p) -} - -func ipNetConv(sval string) (interface{}, error) { - _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) - if err == nil { - return *n, nil - } - return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval) -} - -// GetIPNet return the net.IPNet value of a flag with the given name -func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) { - val, err := f.getFlagType(name, "ipNet", ipNetConv) - if err != nil { - return net.IPNet{}, err - } - return val.(net.IPNet), nil -} - -// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. -// The argument p points to an net.IPNet variable in which to store the value of the flag. -func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { - f.VarP(newIPNetValue(value, p), name, "", usage) -} - -// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { - f.VarP(newIPNetValue(value, p), name, shorthand, usage) -} - -// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. -// The argument p points to an net.IPNet variable in which to store the value of the flag. -func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { - CommandLine.VarP(newIPNetValue(value, p), name, "", usage) -} - -// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. -func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { - CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage) -} - -// IPNet defines an net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of an net.IPNet variable that stores the value of the flag. -func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet { - p := new(net.IPNet) - f.IPNetVarP(p, name, "", value, usage) - return p -} - -// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { - p := new(net.IPNet) - f.IPNetVarP(p, name, shorthand, value, usage) - return p -} - -// IPNet defines an net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of an net.IPNet variable that stores the value of the flag. 
-func IPNet(name string, value net.IPNet, usage string) *net.IPNet { - return CommandLine.IPNetP(name, "", value, usage) -} - -// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. -func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { - return CommandLine.IPNetP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go deleted file mode 100644 index 04e0a26ff..000000000 --- a/vendor/github.com/spf13/pflag/string.go +++ /dev/null @@ -1,80 +0,0 @@ -package pflag - -// -- string Value -type stringValue string - -func newStringValue(val string, p *string) *stringValue { - *p = val - return (*stringValue)(p) -} - -func (s *stringValue) Set(val string) error { - *s = stringValue(val) - return nil -} -func (s *stringValue) Type() string { - return "string" -} - -func (s *stringValue) String() string { return string(*s) } - -func stringConv(sval string) (interface{}, error) { - return sval, nil -} - -// GetString return the string value of a flag with the given name -func (f *FlagSet) GetString(name string) (string, error) { - val, err := f.getFlagType(name, "string", stringConv) - if err != nil { - return "", err - } - return val.(string), nil -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { - f.VarP(newStringValue(value, p), name, "", usage) -} - -// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { - f.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func StringVar(p *string, name string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, "", usage) -} - -// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. -func StringVarP(p *string, name, shorthand string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func (f *FlagSet) String(name string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, "", value, usage) - return p -} - -// StringP is like String, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, shorthand, value, usage) - return p -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func String(name string, value string, usage string) *string { - return CommandLine.StringP(name, "", value, usage) -} - -// StringP is like String, but accepts a shorthand letter that can be used after a single dash. 
-func StringP(name, shorthand string, value string, usage string) *string { - return CommandLine.StringP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go deleted file mode 100644 index 276b7ed49..000000000 --- a/vendor/github.com/spf13/pflag/string_array.go +++ /dev/null @@ -1,103 +0,0 @@ -package pflag - -// -- stringArray Value -type stringArrayValue struct { - value *[]string - changed bool -} - -func newStringArrayValue(val []string, p *[]string) *stringArrayValue { - ssv := new(stringArrayValue) - ssv.value = p - *ssv.value = val - return ssv -} - -func (s *stringArrayValue) Set(val string) error { - if !s.changed { - *s.value = []string{val} - s.changed = true - } else { - *s.value = append(*s.value, val) - } - return nil -} - -func (s *stringArrayValue) Type() string { - return "stringArray" -} - -func (s *stringArrayValue) String() string { - str, _ := writeAsCSV(*s.value) - return "[" + str + "]" -} - -func stringArrayConv(sval string) (interface{}, error) { - sval = sval[1 : len(sval)-1] - // An empty string would cause a array with one (empty) string - if len(sval) == 0 { - return []string{}, nil - } - return readAsCSV(sval) -} - -// GetStringArray return the []string value of a flag with the given name -func (f *FlagSet) GetStringArray(name string) ([]string, error) { - val, err := f.getFlagType(name, "stringArray", stringArrayConv) - if err != nil { - return []string{}, err - } - return val.([]string), nil -} - -// StringArrayVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) { - f.VarP(newStringArrayValue(value, p), name, "", usage) -} - -// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { - f.VarP(newStringArrayValue(value, p), name, shorthand, usage) -} - -// StringArrayVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// The value of each argument will not try to be separated by comma -func StringArrayVar(p *[]string, name string, value []string, usage string) { - CommandLine.VarP(newStringArrayValue(value, p), name, "", usage) -} - -// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. -func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { - CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage) -} - -// StringArray defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string { - p := []string{} - f.StringArrayVarP(&p, name, "", value, usage) - return &p -} - -// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string { - p := []string{} - f.StringArrayVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringArray defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringArray(name string, value []string, usage string) *[]string { - return CommandLine.StringArrayP(name, "", value, usage) -} - -// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. -func StringArrayP(name, shorthand string, value []string, usage string) *[]string { - return CommandLine.StringArrayP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go deleted file mode 100644 index 05eee7543..000000000 --- a/vendor/github.com/spf13/pflag/string_slice.go +++ /dev/null @@ -1,129 +0,0 @@ -package pflag - -import ( - "bytes" - "encoding/csv" - "strings" -) - -// -- stringSlice Value -type stringSliceValue struct { - value *[]string - changed bool -} - -func newStringSliceValue(val []string, p *[]string) *stringSliceValue { - ssv := new(stringSliceValue) - ssv.value = p - *ssv.value = val - return ssv -} - -func readAsCSV(val string) ([]string, error) { - if val == "" { - return []string{}, nil - } - stringReader := strings.NewReader(val) - csvReader := csv.NewReader(stringReader) - return csvReader.Read() -} - -func writeAsCSV(vals []string) (string, error) { - b := &bytes.Buffer{} - w := csv.NewWriter(b) - err := w.Write(vals) - if err != nil { - return "", err - } - w.Flush() - return strings.TrimSuffix(b.String(), "\n"), nil -} - -func (s *stringSliceValue) Set(val string) error { - v, err := readAsCSV(val) - if err != nil { - return err - } - if !s.changed { - *s.value = v - } else { - *s.value = append(*s.value, v...) - } - s.changed = true - return nil -} - -func (s *stringSliceValue) Type() string { - return "stringSlice" -} - -func (s *stringSliceValue) String() string { - str, _ := writeAsCSV(*s.value) - return "[" + str + "]" -} - -func stringSliceConv(sval string) (interface{}, error) { - sval = sval[1 : len(sval)-1] - // An empty string would cause a slice with one (empty) string - if len(sval) == 0 { - return []string{}, nil - } - return readAsCSV(sval) -} - -// GetStringSlice return the []string value of a flag with the given name -func (f *FlagSet) GetStringSlice(name string) ([]string, error) { - val, err := f.getFlagType(name, "stringSlice", stringSliceConv) - if err != nil { - return []string{}, err - } - return val.([]string), nil -} - -// StringSliceVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { - f.VarP(newStringSliceValue(value, p), name, "", usage) -} - -// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { - f.VarP(newStringSliceValue(value, p), name, shorthand, usage) -} - -// StringSliceVar defines a string flag with specified name, default value, and usage string. 
-// The argument p points to a []string variable in which to store the value of the flag. -func StringSliceVar(p *[]string, name string, value []string, usage string) { - CommandLine.VarP(newStringSliceValue(value, p), name, "", usage) -} - -// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. -func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { - CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage) -} - -// StringSlice defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { - p := []string{} - f.StringSliceVarP(&p, name, "", value, usage) - return &p -} - -// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string { - p := []string{} - f.StringSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringSlice defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -func StringSlice(name string, value []string, usage string) *[]string { - return CommandLine.StringSliceP(name, "", value, usage) -} - -// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. -func StringSliceP(name, shorthand string, value []string, usage string) *[]string { - return CommandLine.StringSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go deleted file mode 100644 index dcbc2b758..000000000 --- a/vendor/github.com/spf13/pflag/uint.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint Value -type uintValue uint - -func newUintValue(val uint, p *uint) *uintValue { - *p = val - return (*uintValue)(p) -} - -func (i *uintValue) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uintValue(v) - return err -} - -func (i *uintValue) Type() string { - return "uint" -} - -func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uintConv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 0) - if err != nil { - return 0, err - } - return uint(v), nil -} - -// GetUint return the uint value of a flag with the given name -func (f *FlagSet) GetUint(name string) (uint, error) { - val, err := f.getFlagType(name, "uint", uintConv) - if err != nil { - return 0, err - } - return val.(uint), nil -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, "", usage) -} - -// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. 
-func UintVar(p *uint, name string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, "", usage) -} - -// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. -func UintVarP(p *uint, name, shorthand string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint(name string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, "", value, usage) - return p -} - -// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, shorthand, value, usage) - return p -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint(name string, value uint, usage string) *uint { - return CommandLine.UintP(name, "", value, usage) -} - -// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. -func UintP(name, shorthand string, value uint, usage string) *uint { - return CommandLine.UintP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go deleted file mode 100644 index 7e9914edd..000000000 --- a/vendor/github.com/spf13/pflag/uint16.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint16 value -type uint16Value uint16 - -func newUint16Value(val uint16, p *uint16) *uint16Value { - *p = val - return (*uint16Value)(p) -} - -func (i *uint16Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 16) - *i = uint16Value(v) - return err -} - -func (i *uint16Value) Type() string { - return "uint16" -} - -func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint16Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 16) - if err != nil { - return 0, err - } - return uint16(v), nil -} - -// GetUint16 return the uint16 value of a flag with the given name -func (f *FlagSet) GetUint16(name string) (uint16, error) { - val, err := f.getFlagType(name, "uint16", uint16Conv) - if err != nil { - return 0, err - } - return val.(uint16), nil -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, "", usage) -} - -// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func Uint16Var(p *uint16, name string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, "", usage) -} - -// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. 
-func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, "", value, usage) - return p -} - -// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, shorthand, value, usage) - return p -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint16(name string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, "", value, usage) -} - -// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. -func Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go deleted file mode 100644 index d8024539b..000000000 --- a/vendor/github.com/spf13/pflag/uint32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint32 value -type uint32Value uint32 - -func newUint32Value(val uint32, p *uint32) *uint32Value { - *p = val - return (*uint32Value)(p) -} - -func (i *uint32Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 32) - *i = uint32Value(v) - return err -} - -func (i *uint32Value) Type() string { - return "uint32" -} - -func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 32) - if err != nil { - return 0, err - } - return uint32(v), nil -} - -// GetUint32 return the uint32 value of a flag with the given name -func (f *FlagSet) GetUint32(name string) (uint32, error) { - val, err := f.getFlagType(name, "uint32", uint32Conv) - if err != nil { - return 0, err - } - return val.(uint32), nil -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, "", usage) -} - -// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func Uint32Var(p *uint32, name string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, "", usage) -} - -// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. 
-func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, "", value, usage) - return p -} - -// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, shorthand, value, usage) - return p -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func Uint32(name string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, "", value, usage) -} - -// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. -func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go deleted file mode 100644 index f62240f2c..000000000 --- a/vendor/github.com/spf13/pflag/uint64.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint64 Value -type uint64Value uint64 - -func newUint64Value(val uint64, p *uint64) *uint64Value { - *p = val - return (*uint64Value)(p) -} - -func (i *uint64Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uint64Value(v) - return err -} - -func (i *uint64Value) Type() string { - return "uint64" -} - -func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint64Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 64) - if err != nil { - return 0, err - } - return uint64(v), nil -} - -// GetUint64 return the uint64 value of a flag with the given name -func (f *FlagSet) GetUint64(name string) (uint64, error) { - val, err := f.getFlagType(name, "uint64", uint64Conv) - if err != nil { - return 0, err - } - return val.(uint64), nil -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, "", usage) -} - -// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func Uint64Var(p *uint64, name string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, "", usage) -} - -// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. 
-func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, "", value, usage) - return p -} - -// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, shorthand, value, usage) - return p -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func Uint64(name string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, "", value, usage) -} - -// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. -func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go deleted file mode 100644 index bb0e83c1f..000000000 --- a/vendor/github.com/spf13/pflag/uint8.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint8 Value -type uint8Value uint8 - -func newUint8Value(val uint8, p *uint8) *uint8Value { - *p = val - return (*uint8Value)(p) -} - -func (i *uint8Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 8) - *i = uint8Value(v) - return err -} - -func (i *uint8Value) Type() string { - return "uint8" -} - -func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint8Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 8) - if err != nil { - return 0, err - } - return uint8(v), nil -} - -// GetUint8 return the uint8 value of a flag with the given name -func (f *FlagSet) GetUint8(name string) (uint8, error) { - val, err := f.getFlagType(name, "uint8", uint8Conv) - if err != nil { - return 0, err - } - return val.(uint8), nil -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, "", usage) -} - -// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func Uint8Var(p *uint8, name string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, "", usage) -} - -// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. 
-func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, "", value, usage) - return p -} - -// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, shorthand, value, usage) - return p -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func Uint8(name string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, "", value, usage) -} - -// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. -func Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go deleted file mode 100644 index edd94c600..000000000 --- a/vendor/github.com/spf13/pflag/uint_slice.go +++ /dev/null @@ -1,126 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- uintSlice Value -type uintSliceValue struct { - value *[]uint - changed bool -} - -func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue { - uisv := new(uintSliceValue) - uisv.value = p - *uisv.value = val - return uisv -} - -func (s *uintSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]uint, len(ss)) - for i, d := range ss { - u, err := strconv.ParseUint(d, 10, 0) - if err != nil { - return err - } - out[i] = uint(u) - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *uintSliceValue) Type() string { - return "uintSlice" -} - -func (s *uintSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func uintSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []uint{}, nil - } - ss := strings.Split(val, ",") - out := make([]uint, len(ss)) - for i, d := range ss { - u, err := strconv.ParseUint(d, 10, 0) - if err != nil { - return nil, err - } - out[i] = uint(u) - } - return out, nil -} - -// GetUintSlice returns the []uint value of a flag with the given name. -func (f *FlagSet) GetUintSlice(name string) ([]uint, error) { - val, err := f.getFlagType(name, "uintSlice", uintSliceConv) - if err != nil { - return []uint{}, err - } - return val.([]uint), nil -} - -// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string. -// The argument p points to a []uint variable in which to store the value of the flag. 
-func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) { - f.VarP(newUintSliceValue(value, p), name, "", usage) -} - -// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { - f.VarP(newUintSliceValue(value, p), name, shorthand, usage) -} - -// UintSliceVar defines a uint[] flag with specified name, default value, and usage string. -// The argument p points to a uint[] variable in which to store the value of the flag. -func UintSliceVar(p *[]uint, name string, value []uint, usage string) { - CommandLine.VarP(newUintSliceValue(value, p), name, "", usage) -} - -// UintSliceVarP is like the UintSliceVar, but accepts a shorthand letter that can be used after a single dash. -func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { - CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage) -} - -// UintSlice defines a []uint flag with specified name, default value, and usage string. -// The return value is the address of a []uint variable that stores the value of the flag. -func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint { - p := []uint{} - f.UintSliceVarP(&p, name, "", value, usage) - return &p -} - -// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { - p := []uint{} - f.UintSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// UintSlice defines a []uint flag with specified name, default value, and usage string. -// The return value is the address of a []uint variable that stores the value of the flag. -func UintSlice(name string, value []uint, usage string) *[]uint { - return CommandLine.UintSliceP(name, "", value, usage) -} - -// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. -func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { - return CommandLine.UintSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/ugorji/go/LICENSE b/vendor/github.com/ugorji/go/LICENSE deleted file mode 100644 index 95a0f0541..000000000 --- a/vendor/github.com/ugorji/go/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2012-2015 Ugorji Nwoke. -All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
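Context for reviewers: the pflag files removed above (string.go, string_array.go, string_slice.go, uint*.go, uint_slice.go, ip*.go) all follow the same pattern of a typed Value wrapper exposed through FlagSet/CommandLine helpers plus a typed getter. A minimal sketch of how that removed API is consumed is below; it is illustrative only, and it assumes pflag's NewFlagSet, ContinueOnError, and Parse from the wider library, which are not part of this hunk.

    package main

    import (
        "fmt"
        "net"

        flag "github.com/spf13/pflag"
    )

    func main() {
        // NewFlagSet/ContinueOnError/Parse are assumed from the broader pflag API;
        // the typed helpers below are the ones defined in the deleted files.
        fs := flag.NewFlagSet("example", flag.ContinueOnError)

        var labels []string
        var ports []uint
        var cidr net.IPNet
        fs.StringSliceVar(&labels, "label", nil, "labels; each value is CSV-split and appended")
        fs.UintSliceVar(&ports, "port", nil, "ports; comma-separated unsigned integers")
        fs.IPNetVar(&cidr, "cidr", net.IPNet{}, "allowed CIDR range")

        _ = fs.Parse([]string{"--label=a,b", "--label=c", "--port=80,443", "--cidr=10.0.0.0/8"})

        // Typed getter from the deleted string_slice.go.
        got, _ := fs.GetStringSlice("label")
        fmt.Println(got, ports, cidr.String()) // [a b c] [80 443] 10.0.0.0/8
    }

Note the behavioural difference visible in the deleted Set methods: stringSliceValue CSV-splits each occurrence, while stringArrayValue appends each occurrence verbatim without comma splitting.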
diff --git a/vendor/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/ugorji/go/codec/0doc.go deleted file mode 100644 index fd1385b45..000000000 --- a/vendor/github.com/ugorji/go/codec/0doc.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -/* -High Performance, Feature-Rich Idiomatic Go codec/encoding library for -binc, msgpack, cbor, json. - -Supported Serialization formats are: - - - msgpack: https://github.com/msgpack/msgpack - - binc: http://github.com/ugorji/binc - - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 - - json: http://json.org http://tools.ietf.org/html/rfc7159 - - simple: - -To install: - - go get github.com/ugorji/go/codec - -This package understands the 'unsafe' tag, to allow using unsafe semantics: - - - When decoding into a struct, you need to read the field name as a string - so you can find the struct field it is mapped to. - Using `unsafe` will bypass the allocation and copying overhead of []byte->string conversion. - -To install using unsafe, pass the 'unsafe' tag: - - go get -tags=unsafe github.com/ugorji/go/codec - -For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer . - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. - - Multiple conversions: - Package coerces types where appropriate - e.g. decode an int in the stream into a float, etc. - - Corner Cases: - Overflows, nil maps/slices, nil values in streams are handled correctly - - Standard field renaming via tags - - Support for omitting empty fields during an encoding - - Encoding from any value and decoding into pointer to any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Extensions to support efficient encoding/decoding of any named types - - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces - - Decoding without a schema (into a interface{}). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Encode a struct as an array, and decode struct from an array in the data stream - - Comprehensive support for anonymous fields - - Fast (no-reflection) encoding/decoding of common maps and slices - - Code-generation for faster performance. - - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats - - Support indefinite-length formats to enable true streaming - (for formats which support it e.g. json, cbor) - - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. - This mostly applies to maps, where iteration order is non-deterministic. - - NIL in data stream decoded as zero value - - Never silently skip data when decoding. - User decides whether to return an error or silently skip data when keys or indexes - in the data stream do not map to fields in the struct. - - Encode/Decode from/to chan types (for iterative streaming support) - - Drop-in replacement for encoding/json. `json:` key in struct tag supported. - - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Handle unique idiosynchracies of codecs e.g. 
- - For messagepack, configure how ambiguities in handling raw bytes are resolved - - For messagepack, provide rpc server/client codec to support - msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - -Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -Usage - -The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification. - -The Encoder and Decoder are NOT safe for concurrent use. - -Consequently, the usage model is basically: - - - Create and initialize the Handle before any use. - Once created, DO NOT modify it. - - Multiple Encoders or Decoders can now use the Handle concurrently. - They only read information off the Handle (never write). - - However, each Encoder or Decoder MUST not be used concurrently - - To re-use an Encoder/Decoder, call Reset(...) on it first. - This allows you use state maintained on the Encoder/Decoder. - -Sample usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ch codec.CborHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. for msgpack, define functions and enable Time support for tag 1 - // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -*/ -package codec - -// Benefits of go-codec: -// -// - encoding/json always reads whole file into memory first. -// This makes it unsuitable for parsing very large files. -// - encoding/xml cannot parse into a map[string]interface{} -// I found this out on reading https://github.com/clbanning/mxj - -// TODO: -// -// - (En|De)coder should store an error when it occurs. -// Until reset, subsequent calls return that error that was stored. -// This means that free panics must go away. -// All errors must be raised through errorf method. -// - Decoding using a chan is good, but incurs concurrency costs. -// This is because there's no fast way to use a channel without it -// having to switch goroutines constantly. -// Callback pattern is still the best. 
Maybe cnsider supporting something like: -// type X struct { -// Name string -// Ys []Y -// Ys chan <- Y -// Ys func(interface{}) -> call this interface for each entry in there. -// } -// - Consider adding a isZeroer interface { isZero() bool } -// It is used within isEmpty, for omitEmpty support. -// - Consider making Handle used AS-IS within the encoding/decoding session. -// This means that we don't cache Handle information within the (En|De)coder, -// except we really need it at Reset(...) -// - Handle recursive types during encoding/decoding? diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go deleted file mode 100644 index c884d14dc..000000000 --- a/vendor/github.com/ugorji/go/codec/binc.go +++ /dev/null @@ -1,918 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "math" - "reflect" - "time" -) - -const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. - -// vd as low 4 bits (there are 16 slots) -const ( - bincVdSpecial byte = iota - bincVdPosInt - bincVdNegInt - bincVdFloat - - bincVdString - bincVdByteArray - bincVdArray - bincVdMap - - bincVdTimestamp - bincVdSmallInt - bincVdUnicodeOther - bincVdSymbol - - bincVdDecimal - _ // open slot - _ // open slot - bincVdCustomExt = 0x0f -) - -const ( - bincSpNil byte = iota - bincSpFalse - bincSpTrue - bincSpNan - bincSpPosInf - bincSpNegInf - bincSpZeroFloat - bincSpZero - bincSpNegOne -) - -const ( - bincFlBin16 byte = iota - bincFlBin32 - _ // bincFlBin32e - bincFlBin64 - _ // bincFlBin64e - // others not currently supported -) - -type bincEncDriver struct { - e *Encoder - w encWriter - m map[string]uint16 // symbols - b [scratchByteArrayLen]byte - s uint16 // symbols sequencer - encNoSeparator -} - -func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) { - if rt == timeTypId { - var bs []byte - switch x := v.(type) { - case time.Time: - bs = encodeTime(x) - case *time.Time: - bs = encodeTime(*x) - default: - e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v) - } - e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) - e.w.writeb(bs) - } -} - -func (e *bincEncDriver) EncodeNil() { - e.w.writen1(bincVdSpecial<<4 | bincSpNil) -} - -func (e *bincEncDriver) EncodeBool(b bool) { - if b { - e.w.writen1(bincVdSpecial<<4 | bincSpTrue) - } else { - e.w.writen1(bincVdSpecial<<4 | bincSpFalse) - } -} - -func (e *bincEncDriver) EncodeFloat32(f float32) { - if f == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) - return - } - e.w.writen1(bincVdFloat<<4 | bincFlBin32) - bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) -} - -func (e *bincEncDriver) EncodeFloat64(f float64) { - if f == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) - return - } - bigen.PutUint64(e.b[:8], math.Float64bits(f)) - if bincDoPrune { - i := 7 - for ; i >= 0 && (e.b[i] == 0); i-- { - } - i++ - if i <= 6 { - e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) - e.w.writen1(byte(i)) - e.w.writeb(e.b[:i]) - return - } - } - e.w.writen1(bincVdFloat<<4 | bincFlBin64) - e.w.writeb(e.b[:8]) -} - -func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { - if lim == 4 { - bigen.PutUint32(e.b[:lim], uint32(v)) - } else { - bigen.PutUint64(e.b[:lim], v) - } - if bincDoPrune { - i := pruneSignExt(e.b[:lim], 
pos) - e.w.writen1(bd | lim - 1 - byte(i)) - e.w.writeb(e.b[i:lim]) - } else { - e.w.writen1(bd | lim - 1) - e.w.writeb(e.b[:lim]) - } -} - -func (e *bincEncDriver) EncodeInt(v int64) { - const nbd byte = bincVdNegInt << 4 - if v >= 0 { - e.encUint(bincVdPosInt<<4, true, uint64(v)) - } else if v == -1 { - e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) - } else { - e.encUint(bincVdNegInt<<4, false, uint64(-v)) - } -} - -func (e *bincEncDriver) EncodeUint(v uint64) { - e.encUint(bincVdPosInt<<4, true, v) -} - -func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { - if v == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZero) - } else if pos && v >= 1 && v <= 16 { - e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) - } else if v <= math.MaxUint8 { - e.w.writen2(bd|0x0, byte(v)) - } else if v <= math.MaxUint16 { - e.w.writen1(bd | 0x01) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { - e.encIntegerPrune(bd, pos, v, 4) - } else { - e.encIntegerPrune(bd, pos, v, 8) - } -} - -func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { - bs := ext.WriteExt(rv) - if bs == nil { - e.EncodeNil() - return - } - e.encodeExtPreamble(uint8(xtag), len(bs)) - e.w.writeb(bs) -} - -func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { - e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.w.writeb(re.Data) -} - -func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { - e.encLen(bincVdCustomExt<<4, uint64(length)) - e.w.writen1(xtag) -} - -func (e *bincEncDriver) EncodeArrayStart(length int) { - e.encLen(bincVdArray<<4, uint64(length)) -} - -func (e *bincEncDriver) EncodeMapStart(length int) { - e.encLen(bincVdMap<<4, uint64(length)) -} - -func (e *bincEncDriver) EncodeString(c charEncoding, v string) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writestr(v) - } -} - -func (e *bincEncDriver) EncodeSymbol(v string) { - // if WriteSymbolsNoRefs { - // e.encodeString(c_UTF8, v) - // return - // } - - //symbols only offer benefit when string length > 1. - //This is because strings with length 1 take only 2 bytes to store - //(bd with embedded length, and single byte for string val). 
- - l := len(v) - if l == 0 { - e.encBytesLen(c_UTF8, 0) - return - } else if l == 1 { - e.encBytesLen(c_UTF8, 1) - e.w.writen1(v[0]) - return - } - if e.m == nil { - e.m = make(map[string]uint16, 16) - } - ui, ok := e.m[v] - if ok { - if ui <= math.MaxUint8 { - e.w.writen2(bincVdSymbol<<4, byte(ui)) - } else { - e.w.writen1(bincVdSymbol<<4 | 0x8) - bigenHelper{e.b[:2], e.w}.writeUint16(ui) - } - } else { - e.s++ - ui = e.s - //ui = uint16(atomic.AddUint32(&e.s, 1)) - e.m[v] = ui - var lenprec uint8 - if l <= math.MaxUint8 { - // lenprec = 0 - } else if l <= math.MaxUint16 { - lenprec = 1 - } else if int64(l) <= math.MaxUint32 { - lenprec = 2 - } else { - lenprec = 3 - } - if ui <= math.MaxUint8 { - e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) - } else { - e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) - bigenHelper{e.b[:2], e.w}.writeUint16(ui) - } - if lenprec == 0 { - e.w.writen1(byte(l)) - } else if lenprec == 1 { - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l)) - } else if lenprec == 2 { - bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l)) - } else { - bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l)) - } - e.w.writestr(v) - } -} - -func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writeb(v) - } -} - -func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { - //TODO: support bincUnicodeOther (for now, just use string or bytearray) - if c == c_RAW { - e.encLen(bincVdByteArray<<4, length) - } else { - e.encLen(bincVdString<<4, length) - } -} - -func (e *bincEncDriver) encLen(bd byte, l uint64) { - if l < 12 { - e.w.writen1(bd | uint8(l+4)) - } else { - e.encLenNumber(bd, l) - } -} - -func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { - if v <= math.MaxUint8 { - e.w.writen2(bd, byte(v)) - } else if v <= math.MaxUint16 { - e.w.writen1(bd | 0x01) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { - e.w.writen1(bd | 0x02) - bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) - } else { - e.w.writen1(bd | 0x03) - bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v)) - } -} - -//------------------------------------ - -type bincDecSymbol struct { - s string - b []byte - i uint16 -} - -type bincDecDriver struct { - d *Decoder - h *BincHandle - r decReader - br bool // bytes reader - bdRead bool - bd byte - vd byte - vs byte - noStreamingCodec - decNoSeparator - b [scratchByteArrayLen]byte - - // linear searching on this slice is ok, - // because we typically expect < 32 symbols in each stream. 
- s []bincDecSymbol -} - -func (d *bincDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.vd = d.bd >> 4 - d.vs = d.bd & 0x0f - d.bdRead = true -} - -func (d *bincDecDriver) ContainerType() (vt valueType) { - if d.vd == bincVdSpecial && d.vs == bincSpNil { - return valueTypeNil - } else if d.vd == bincVdByteArray { - return valueTypeBytes - } else if d.vd == bincVdString { - return valueTypeString - } else if d.vd == bincVdArray { - return valueTypeArray - } else if d.vd == bincVdMap { - return valueTypeMap - } else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - } - return valueTypeUnset -} - -func (d *bincDecDriver) TryDecodeAsNil() bool { - if !d.bdRead { - d.readNextBd() - } - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return true - } - return false -} - -func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) { - if !d.bdRead { - d.readNextBd() - } - if rt == timeTypId { - if d.vd != bincVdTimestamp { - d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) - return - } - tt, err := decodeTime(d.r.readx(int(d.vs))) - if err != nil { - panic(err) - } - var vt *time.Time = v.(*time.Time) - *vt = tt - d.bdRead = false - } -} - -func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { - if vs&0x8 == 0 { - d.r.readb(d.b[0:defaultLen]) - } else { - l := d.r.readn1() - if l > 8 { - d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l) - return - } - for i := l; i < 8; i++ { - d.b[i] = 0 - } - d.r.readb(d.b[0:l]) - } -} - -func (d *bincDecDriver) decFloat() (f float64) { - //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; } - if x := d.vs & 0x7; x == bincFlBin32 { - d.decFloatPre(d.vs, 4) - f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) - } else if x == bincFlBin64 { - d.decFloatPre(d.vs, 8) - f = math.Float64frombits(bigen.Uint64(d.b[0:8])) - } else { - d.d.errorf("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) - return - } - return -} - -func (d *bincDecDriver) decUint() (v uint64) { - // need to inline the code (interface conversion and type assertion expensive) - switch d.vs { - case 0: - v = uint64(d.r.readn1()) - case 1: - d.r.readb(d.b[6:8]) - v = uint64(bigen.Uint16(d.b[6:8])) - case 2: - d.b[4] = 0 - d.r.readb(d.b[5:8]) - v = uint64(bigen.Uint32(d.b[4:8])) - case 3: - d.r.readb(d.b[4:8]) - v = uint64(bigen.Uint32(d.b[4:8])) - case 4, 5, 6: - lim := int(7 - d.vs) - d.r.readb(d.b[lim:8]) - for i := 0; i < lim; i++ { - d.b[i] = 0 - } - v = uint64(bigen.Uint64(d.b[:8])) - case 7: - d.r.readb(d.b[:8]) - v = uint64(bigen.Uint64(d.b[:8])) - default: - d.d.errorf("unsigned integers with greater than 64 bits of precision not supported") - return - } - return -} - -func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) { - if !d.bdRead { - d.readNextBd() - } - vd, vs := d.vd, d.vs - if vd == bincVdPosInt { - ui = d.decUint() - } else if vd == bincVdNegInt { - ui = d.decUint() - neg = true - } else if vd == bincVdSmallInt { - ui = uint64(d.vs) + 1 - } else if vd == bincVdSpecial { - if vs == bincSpZero { - //i = 0 - } else if vs == bincSpNegOne { - neg = true - ui = 1 - } else { - d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs) - return - } - } else { - d.d.errorf("number can only be decoded from uint or int values. 
d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) - return - } - return -} - -func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) { - ui, neg := d.decCheckInteger() - i, overflow := chkOvf.SignedInt(ui) - if overflow { - d.d.errorf("simple: overflow converting %v to signed integer", ui) - return - } - if neg { - i = -i - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("binc: overflow integer: %v", i) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - ui, neg := d.decCheckInteger() - if neg { - d.d.errorf("Assigning negative signed value to unsigned type") - return - } - if chkOvf.Uint(ui, bitsize) { - d.d.errorf("binc: overflow integer: %v", ui) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - vd, vs := d.vd, d.vs - if vd == bincVdSpecial { - d.bdRead = false - if vs == bincSpNan { - return math.NaN() - } else if vs == bincSpPosInf { - return math.Inf(1) - } else if vs == bincSpZeroFloat || vs == bincSpZero { - return - } else if vs == bincSpNegInf { - return math.Inf(-1) - } else { - d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) - return - } - } else if vd == bincVdFloat { - f = d.decFloat() - } else { - f = float64(d.DecodeInt(64)) - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("binc: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). -func (d *bincDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) { - // b = false - } else if bd == (bincVdSpecial | bincSpTrue) { - b = true - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) ReadMapStart() (length int) { - if d.vd != bincVdMap { - d.d.errorf("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd) - return - } - length = d.decLen() - d.bdRead = false - return -} - -func (d *bincDecDriver) ReadArrayStart() (length int) { - if d.vd != bincVdArray { - d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) - return - } - length = d.decLen() - d.bdRead = false - return -} - -func (d *bincDecDriver) decLen() int { - if d.vs > 3 { - return int(d.vs - 4) - } - return int(d.decLenNumber()) -} - -func (d *bincDecDriver) decLenNumber() (v uint64) { - if x := d.vs; x == 0 { - v = uint64(d.r.readn1()) - } else if x == 1 { - d.r.readb(d.b[6:8]) - v = uint64(bigen.Uint16(d.b[6:8])) - } else if x == 2 { - d.r.readb(d.b[4:8]) - v = uint64(bigen.Uint32(d.b[4:8])) - } else { - d.r.readb(d.b[:8]) - v = bigen.Uint64(d.b[:8]) - } - return -} - -func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return - } - var slen int = -1 - // var ok bool - switch d.vd { - case bincVdString, bincVdByteArray: - slen = d.decLen() - if zerocopy { - if d.br { - bs2 = d.r.readx(slen) - } else if len(bs) == 0 { - bs2 = decByteSlice(d.r, slen, d.b[:]) - } else { - bs2 = decByteSlice(d.r, slen, bs) - } - } else { - bs2 = decByteSlice(d.r, slen, bs) - } - if withString { - s = string(bs2) - } - case bincVdSymbol: - // zerocopy doesn't apply for symbols, - // as the values must be stored in a table for later use. 
- // - //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, - //extract symbol - //if containsStringVal, read it and put in map - //else look in map for string value - var symbol uint16 - vs := d.vs - if vs&0x8 == 0 { - symbol = uint16(d.r.readn1()) - } else { - symbol = uint16(bigen.Uint16(d.r.readx(2))) - } - if d.s == nil { - d.s = make([]bincDecSymbol, 0, 16) - } - - if vs&0x4 == 0 { - for i := range d.s { - j := &d.s[i] - if j.i == symbol { - bs2 = j.b - if withString { - if j.s == "" && bs2 != nil { - j.s = string(bs2) - } - s = j.s - } - break - } - } - } else { - switch vs & 0x3 { - case 0: - slen = int(d.r.readn1()) - case 1: - slen = int(bigen.Uint16(d.r.readx(2))) - case 2: - slen = int(bigen.Uint32(d.r.readx(4))) - case 3: - slen = int(bigen.Uint64(d.r.readx(8))) - } - // since using symbols, do not store any part of - // the parameter bs in the map, as it might be a shared buffer. - // bs2 = decByteSlice(d.r, slen, bs) - bs2 = decByteSlice(d.r, slen, nil) - if withString { - s = string(bs2) - } - d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2}) - } - default: - d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", - bincVdString, bincVdByteArray, bincVdSymbol, d.vd) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeString() (s string) { - // DecodeBytes does not accomodate symbols, whose impl stores string version in map. - // Use decStringAndBytes directly. - // return string(d.DecodeBytes(d.b[:], true, true)) - _, s = d.decStringAndBytes(d.b[:], true, true) - return -} - -func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - if isstring { - bsOut, _ = d.decStringAndBytes(bs, false, zerocopy) - return - } - if !d.bdRead { - d.readNextBd() - } - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return nil - } - var clen int - if d.vd == bincVdString || d.vd == bincVdByteArray { - clen = d.decLen() - } else { - d.d.errorf("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x", - bincVdString, bincVdByteArray, d.vd) - return - } - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, bs) -} - -func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if xtag > 0xff { - d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) - return - } - realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag = uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) - } else { - ext.ReadExt(rv, xbs) - } - return -} - -func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.vd == bincVdCustomExt { - l := d.decLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - return - } - xbs = d.r.readx(l) - } else if d.vd == bincVdByteArray { - xbs = d.DecodeBytes(nil, false, true) - } else { - d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.vd) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - - n := &d.d.n - var decodeFurther bool - - switch d.vd { - case bincVdSpecial: - switch d.vs { - case bincSpNil: - n.v = valueTypeNil - case bincSpFalse: - n.v = valueTypeBool - n.b = false - case bincSpTrue: - n.v = valueTypeBool - n.b = true - case bincSpNan: - n.v = valueTypeFloat - n.f = math.NaN() - case bincSpPosInf: - n.v = valueTypeFloat - n.f = math.Inf(1) - case bincSpNegInf: - n.v = valueTypeFloat - n.f = math.Inf(-1) - case bincSpZeroFloat: - n.v = valueTypeFloat - n.f = float64(0) - case bincSpZero: - n.v = valueTypeUint - n.u = uint64(0) // int8(0) - case bincSpNegOne: - n.v = valueTypeInt - n.i = int64(-1) // int8(-1) - default: - d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs) - } - case bincVdSmallInt: - n.v = valueTypeUint - n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 - case bincVdPosInt: - n.v = valueTypeUint - n.u = d.decUint() - case bincVdNegInt: - n.v = valueTypeInt - n.i = -(int64(d.decUint())) - case bincVdFloat: - n.v = valueTypeFloat - n.f = d.decFloat() - case bincVdSymbol: - n.v = valueTypeSymbol - n.s = d.DecodeString() - case bincVdString: - n.v = valueTypeString - n.s = d.DecodeString() - case bincVdByteArray: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - case bincVdTimestamp: - n.v = valueTypeTimestamp - tt, err := decodeTime(d.r.readx(int(d.vs))) - if err != nil { - panic(err) - } - n.t = tt - case bincVdCustomExt: - n.v = valueTypeExt - l := d.decLen() - n.u = uint64(d.r.readn1()) - n.l = d.r.readx(l) - case bincVdArray: - n.v = valueTypeArray - decodeFurther = true - case bincVdMap: - n.v = valueTypeMap - decodeFurther = true - default: - d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) - } - - if !decodeFurther { - d.bdRead = false - } - if n.v == valueTypeUint && d.h.SignedInteger { - n.v = valueTypeInt - n.i = int64(n.u) - } - return -} - -//------------------------------------ - -//BincHandle is a Handle for the Binc Schema-Free Encoding Format -//defined at https://github.com/ugorji/binc . -// -//BincHandle currently supports all Binc features with the following EXCEPTIONS: -// - only integers up to 64 bits of precision are supported. -// big integers are unsupported. -// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). -// extended precision and decimal IEEE 754 floats are unsupported. -// - Only UTF-8 strings supported. -// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. -// -//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. 
-type BincHandle struct { - BasicHandle - binaryEncodingType -} - -func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{b: ext}) -} - -func (h *BincHandle) newEncDriver(e *Encoder) encDriver { - return &bincEncDriver{e: e, w: e.w} -} - -func (h *BincHandle) newDecDriver(d *Decoder) decDriver { - return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes} -} - -func (e *bincEncDriver) reset() { - e.w = e.e.w -} - -func (d *bincDecDriver) reset() { - d.r = d.d.r -} - -var _ decDriver = (*bincDecDriver)(nil) -var _ encDriver = (*bincEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go deleted file mode 100644 index 0e5d32b2e..000000000 --- a/vendor/github.com/ugorji/go/codec/cbor.go +++ /dev/null @@ -1,584 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "math" - "reflect" -) - -const ( - cborMajorUint byte = iota - cborMajorNegInt - cborMajorBytes - cborMajorText - cborMajorArray - cborMajorMap - cborMajorTag - cborMajorOther -) - -const ( - cborBdFalse byte = 0xf4 + iota - cborBdTrue - cborBdNil - cborBdUndefined - cborBdExt - cborBdFloat16 - cborBdFloat32 - cborBdFloat64 -) - -const ( - cborBdIndefiniteBytes byte = 0x5f - cborBdIndefiniteString = 0x7f - cborBdIndefiniteArray = 0x9f - cborBdIndefiniteMap = 0xbf - cborBdBreak = 0xff -) - -const ( - CborStreamBytes byte = 0x5f - CborStreamString = 0x7f - CborStreamArray = 0x9f - CborStreamMap = 0xbf - CborStreamBreak = 0xff -) - -const ( - cborBaseUint byte = 0x00 - cborBaseNegInt = 0x20 - cborBaseBytes = 0x40 - cborBaseString = 0x60 - cborBaseArray = 0x80 - cborBaseMap = 0xa0 - cborBaseTag = 0xc0 - cborBaseSimple = 0xe0 -) - -// ------------------- - -type cborEncDriver struct { - noBuiltInTypes - encNoSeparator - e *Encoder - w encWriter - h *CborHandle - x [8]byte -} - -func (e *cborEncDriver) EncodeNil() { - e.w.writen1(cborBdNil) -} - -func (e *cborEncDriver) EncodeBool(b bool) { - if b { - e.w.writen1(cborBdTrue) - } else { - e.w.writen1(cborBdFalse) - } -} - -func (e *cborEncDriver) EncodeFloat32(f float32) { - e.w.writen1(cborBdFloat32) - bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) -} - -func (e *cborEncDriver) EncodeFloat64(f float64) { - e.w.writen1(cborBdFloat64) - bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) -} - -func (e *cborEncDriver) encUint(v uint64, bd byte) { - if v <= 0x17 { - e.w.writen1(byte(v) + bd) - } else if v <= math.MaxUint8 { - e.w.writen2(bd+0x18, uint8(v)) - } else if v <= math.MaxUint16 { - e.w.writen1(bd + 0x19) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { - e.w.writen1(bd + 0x1a) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v)) - } else { // if v <= math.MaxUint64 { - e.w.writen1(bd + 0x1b) - bigenHelper{e.x[:8], e.w}.writeUint64(v) - } -} - -func (e *cborEncDriver) EncodeInt(v int64) { - if v < 0 { - e.encUint(uint64(-1-v), cborBaseNegInt) - } else { - e.encUint(uint64(v), cborBaseUint) - } -} - -func (e *cborEncDriver) EncodeUint(v uint64) { - e.encUint(v, cborBaseUint) -} - -func (e *cborEncDriver) encLen(bd byte, length int) { - e.encUint(uint64(length), bd) -} - -func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { - e.encUint(uint64(xtag), cborBaseTag) - if v := ext.ConvertExt(rv); v == nil { - e.EncodeNil() - } else { - 
en.encode(v) - } -} - -func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { - e.encUint(uint64(re.Tag), cborBaseTag) - if re.Data != nil { - en.encode(re.Data) - } else if re.Value == nil { - e.EncodeNil() - } else { - en.encode(re.Value) - } -} - -func (e *cborEncDriver) EncodeArrayStart(length int) { - e.encLen(cborBaseArray, length) -} - -func (e *cborEncDriver) EncodeMapStart(length int) { - e.encLen(cborBaseMap, length) -} - -func (e *cborEncDriver) EncodeString(c charEncoding, v string) { - e.encLen(cborBaseString, len(v)) - e.w.writestr(v) -} - -func (e *cborEncDriver) EncodeSymbol(v string) { - e.EncodeString(c_UTF8, v) -} - -func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - if c == c_RAW { - e.encLen(cborBaseBytes, len(v)) - } else { - e.encLen(cborBaseString, len(v)) - } - e.w.writeb(v) -} - -// ---------------------- - -type cborDecDriver struct { - d *Decoder - h *CborHandle - r decReader - b [scratchByteArrayLen]byte - br bool // bytes reader - bdRead bool - bd byte - noBuiltInTypes - decNoSeparator -} - -func (d *cborDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.bdRead = true -} - -func (d *cborDecDriver) ContainerType() (vt valueType) { - if d.bd == cborBdNil { - return valueTypeNil - } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) { - return valueTypeBytes - } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) { - return valueTypeString - } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { - return valueTypeArray - } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) { - return valueTypeMap - } else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - } - return valueTypeUnset -} - -func (d *cborDecDriver) TryDecodeAsNil() bool { - if !d.bdRead { - d.readNextBd() - } - // treat Nil and Undefined as nil values - if d.bd == cborBdNil || d.bd == cborBdUndefined { - d.bdRead = false - return true - } - return false -} - -func (d *cborDecDriver) CheckBreak() bool { - if !d.bdRead { - d.readNextBd() - } - if d.bd == cborBdBreak { - d.bdRead = false - return true - } - return false -} - -func (d *cborDecDriver) decUint() (ui uint64) { - v := d.bd & 0x1f - if v <= 0x17 { - ui = uint64(v) - } else { - if v == 0x18 { - ui = uint64(d.r.readn1()) - } else if v == 0x19 { - ui = uint64(bigen.Uint16(d.r.readx(2))) - } else if v == 0x1a { - ui = uint64(bigen.Uint32(d.r.readx(4))) - } else if v == 0x1b { - ui = uint64(bigen.Uint64(d.r.readx(8))) - } else { - d.d.errorf("decUint: Invalid descriptor: %v", d.bd) - return - } - } - return -} - -func (d *cborDecDriver) decCheckInteger() (neg bool) { - if !d.bdRead { - d.readNextBd() - } - major := d.bd >> 5 - if major == cborMajorUint { - } else if major == cborMajorNegInt { - neg = true - } else { - d.d.errorf("invalid major: %v (bd: %v)", major, d.bd) - return - } - return -} - -func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) { - neg := d.decCheckInteger() - ui := d.decUint() - // check if this number can be converted to an int without overflow - var overflow bool - if neg { - if i, overflow = chkOvf.SignedInt(ui + 1); overflow { - d.d.errorf("cbor: overflow converting %v to signed integer", ui+1) - return - } - i = -i - } else { - if i, overflow = chkOvf.SignedInt(ui); overflow { - d.d.errorf("cbor: overflow converting %v to signed integer", ui) - return - } - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("cbor: overflow 
integer: %v", i) - return - } - d.bdRead = false - return -} - -func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - if d.decCheckInteger() { - d.d.errorf("Assigning negative signed value to unsigned type") - return - } - ui = d.decUint() - if chkOvf.Uint(ui, bitsize) { - d.d.errorf("cbor: overflow integer: %v", ui) - return - } - d.bdRead = false - return -} - -func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - if bd := d.bd; bd == cborBdFloat16 { - f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2))))) - } else if bd == cborBdFloat32 { - f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - } else if bd == cborBdFloat64 { - f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - } else if bd >= cborBaseUint && bd < cborBaseBytes { - f = float64(d.DecodeInt(64)) - } else { - d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd) - return - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("cbor: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). -func (d *cborDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if bd := d.bd; bd == cborBdTrue { - b = true - } else if bd == cborBdFalse { - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *cborDecDriver) ReadMapStart() (length int) { - d.bdRead = false - if d.bd == cborBdIndefiniteMap { - return -1 - } - return d.decLen() -} - -func (d *cborDecDriver) ReadArrayStart() (length int) { - d.bdRead = false - if d.bd == cborBdIndefiniteArray { - return -1 - } - return d.decLen() -} - -func (d *cborDecDriver) decLen() int { - return int(d.decUint()) -} - -func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte { - d.bdRead = false - for { - if d.CheckBreak() { - break - } - if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText { - d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd) - return nil - } - n := d.decLen() - oldLen := len(bs) - newLen := oldLen + n - if newLen > cap(bs) { - bs2 := make([]byte, newLen, 2*cap(bs)+n) - copy(bs2, bs) - bs = bs2 - } else { - bs = bs[:newLen] - } - d.r.readb(bs[oldLen:newLen]) - // bs = append(bs, d.r.readn()...) - d.bdRead = false - } - d.bdRead = false - return bs -} - -func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == cborBdNil || d.bd == cborBdUndefined { - d.bdRead = false - return nil - } - if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { - if bs == nil { - return d.decAppendIndefiniteBytes(nil) - } - return d.decAppendIndefiniteBytes(bs[:0]) - } - clen := d.decLen() - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, bs) -} - -func (d *cborDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.b[:], true, true)) -} - -func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if !d.bdRead { - d.readNextBd() - } - u := d.decUint() - d.bdRead = false - realxtag = u - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - d.d.decode(&re.Value) - } else if xtag != realxtag { - d.d.errorf("Wrong extension tag. Got %b. 
Expecting: %v", realxtag, xtag) - return - } else { - var v interface{} - d.d.decode(&v) - ext.UpdateExt(rv, v) - } - d.bdRead = false - return -} - -func (d *cborDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - - n := &d.d.n - var decodeFurther bool - - switch d.bd { - case cborBdNil: - n.v = valueTypeNil - case cborBdFalse: - n.v = valueTypeBool - n.b = false - case cborBdTrue: - n.v = valueTypeBool - n.b = true - case cborBdFloat16, cborBdFloat32: - n.v = valueTypeFloat - n.f = d.DecodeFloat(true) - case cborBdFloat64: - n.v = valueTypeFloat - n.f = d.DecodeFloat(false) - case cborBdIndefiniteBytes: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - case cborBdIndefiniteString: - n.v = valueTypeString - n.s = d.DecodeString() - case cborBdIndefiniteArray: - n.v = valueTypeArray - decodeFurther = true - case cborBdIndefiniteMap: - n.v = valueTypeMap - decodeFurther = true - default: - switch { - case d.bd >= cborBaseUint && d.bd < cborBaseNegInt: - if d.h.SignedInteger { - n.v = valueTypeInt - n.i = d.DecodeInt(64) - } else { - n.v = valueTypeUint - n.u = d.DecodeUint(64) - } - case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: - n.v = valueTypeInt - n.i = d.DecodeInt(64) - case d.bd >= cborBaseBytes && d.bd < cborBaseString: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - case d.bd >= cborBaseString && d.bd < cborBaseArray: - n.v = valueTypeString - n.s = d.DecodeString() - case d.bd >= cborBaseArray && d.bd < cborBaseMap: - n.v = valueTypeArray - decodeFurther = true - case d.bd >= cborBaseMap && d.bd < cborBaseTag: - n.v = valueTypeMap - decodeFurther = true - case d.bd >= cborBaseTag && d.bd < cborBaseSimple: - n.v = valueTypeExt - n.u = d.decUint() - n.l = nil - d.bdRead = false - // d.d.decode(&re.Value) // handled by decode itself. - // decodeFurther = true - default: - d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) - return - } - } - - if !decodeFurther { - d.bdRead = false - } - return -} - -// ------------------------- - -// CborHandle is a Handle for the CBOR encoding format, -// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io . -// -// CBOR is comprehensively supported, including support for: -// - indefinite-length arrays/maps/bytes/strings -// - (extension) tags in range 0..0xffff (0 .. 65535) -// - half, single and double-precision floats -// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers) -// - nil, true, false, ... -// - arrays and maps, bytes and text strings -// -// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. -// Users can implement them as needed (using SetExt), including spec-documented ones: -// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. -// -// To encode with indefinite lengths (streaming), users will use -// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants. 
-// -// For example, to encode "one-byte" as an indefinite length string: -// var buf bytes.Buffer -// e := NewEncoder(&buf, new(CborHandle)) -// buf.WriteByte(CborStreamString) -// e.MustEncode("one-") -// e.MustEncode("byte") -// buf.WriteByte(CborStreamBreak) -// encodedBytes := buf.Bytes() -// var vv interface{} -// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv) -// // Now, vv contains the same string "one-byte" -// -type CborHandle struct { - binaryEncodingType - BasicHandle -} - -func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{i: ext}) -} - -func (h *CborHandle) newEncDriver(e *Encoder) encDriver { - return &cborEncDriver{e: e, w: e.w, h: h} -} - -func (h *CborHandle) newDecDriver(d *Decoder) decDriver { - return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes} -} - -func (e *cborEncDriver) reset() { - e.w = e.e.w -} - -func (d *cborDecDriver) reset() { - d.r = d.d.r -} - -var _ decDriver = (*cborDecDriver)(nil) -var _ encDriver = (*cborEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go deleted file mode 100644 index b3b99f036..000000000 --- a/vendor/github.com/ugorji/go/codec/decode.go +++ /dev/null @@ -1,2015 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "encoding" - "errors" - "fmt" - "io" - "reflect" - "time" -) - -// Some tagging information for error messages. -const ( - msgBadDesc = "Unrecognized descriptor byte" - msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" -) - -var ( - onlyMapOrArrayCanDecodeIntoStructErr = errors.New("only encoded map or array can be decoded into a struct") - cannotDecodeIntoNilErr = errors.New("cannot decode into nil") -) - -// decReader abstracts the reading source, allowing implementations that can -// read from an io.Reader or directly off a byte slice with zero-copying. -type decReader interface { - unreadn1() - - // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR - // just return a view of the []byte being decoded from. - // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control. - readx(n int) []byte - readb([]byte) - readn1() uint8 - readn1eof() (v uint8, eof bool) - numread() int // number of bytes read - track() - stopTrack() []byte -} - -type decReaderByteScanner interface { - io.Reader - io.ByteScanner -} - -type decDriver interface { - // this will check if the next token is a break. - CheckBreak() bool - TryDecodeAsNil() bool - // vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known. - ContainerType() (vt valueType) - IsBuiltinType(rt uintptr) bool - DecodeBuiltin(rt uintptr, v interface{}) - - // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. - // For maps and arrays, it will not do the decoding in-band, but will signal - // the decoder, so that is done later, by setting the decNaked.valueType field. - // - // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). - // for extensions, DecodeNaked must read the tag and the []byte if it exists. - // if the []byte is not read, then kInterfaceNaked will treat it as a Handle - // that stores the subsequent value in-band, and complete reading the RawExt. 
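As an aside (an illustrative sketch, not part of the patch): the DecodeNaked path described here is what a caller exercises by decoding into a nil interface{}. Under the defaults documented in this file, schema-less numbers come back as uint64/int64/float64 and maps as map[interface{}]interface{}:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	ch := new(codec.CborHandle)

	var buf bytes.Buffer
	if err := codec.NewEncoder(&buf, ch).Encode(map[string]interface{}{"n": 3, "ok": true}); err != nil {
		panic(err)
	}

	// Decoding into a nil interface{} takes the "naked" route: the driver
	// reports the next value's type and the Decoder picks a suitable Go value.
	var v interface{}
	if err := codec.NewDecoderBytes(buf.Bytes(), ch).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", v) // e.g. map[interface {}]interface {}{"n":0x3, "ok":true}
}
```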
- // - // extensions should also use readx to decode them, for efficiency. - // kInterface will extract the detached byte slice if it has to pass it outside its realm. - DecodeNaked() - DecodeInt(bitsize uint8) (i int64) - DecodeUint(bitsize uint8) (ui uint64) - DecodeFloat(chkOverflow32 bool) (f float64) - DecodeBool() (b bool) - // DecodeString can also decode symbols. - // It looks redundant as DecodeBytes is available. - // However, some codecs (e.g. binc) support symbols and can - // return a pre-stored string value, meaning that it can bypass - // the cost of []byte->string conversion. - DecodeString() (s string) - - // DecodeBytes may be called directly, without going through reflection. - // Consequently, it must be designed to handle possible nil. - DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) - - // decodeExt will decode into a *RawExt or into an extension. - DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64) - // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) - ReadMapStart() int - ReadArrayStart() int - - reset() - uncacheRead() -} - -type decNoSeparator struct{} - -func (_ decNoSeparator) ReadEnd() {} -func (_ decNoSeparator) uncacheRead() {} - -type DecodeOptions struct { - // MapType specifies type to use during schema-less decoding of a map in the stream. - // If nil, we use map[interface{}]interface{} - MapType reflect.Type - - // SliceType specifies type to use during schema-less decoding of an array in the stream. - // If nil, we use []interface{} - SliceType reflect.Type - - // MaxInitLen defines the initial length that we "make" a collection (slice, chan or map) with. - // If 0 or negative, we default to a sensible value based on the size of an element in the collection. - // - // For example, when decoding, a stream may say that it has MAX_UINT elements. - // We should not auto-matically provision a slice of that length, to prevent Out-Of-Memory crash. - // Instead, we provision up to MaxInitLen, fill that up, and start appending after that. - MaxInitLen int - - // If ErrorIfNoField, return an error when decoding a map - // from a codec stream into a struct, and no matching struct field is found. - ErrorIfNoField bool - - // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded. - // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array, - // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set). - ErrorIfNoArrayExpand bool - - // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64). - SignedInteger bool - - // MapValueReset controls how we decode into a map value. - // - // By default, we MAY retrieve the mapping for a key, and then decode into that. - // However, especially with big maps, that retrieval may be expensive and unnecessary - // if the stream already contains all that is necessary to recreate the value. - // - // If true, we will never retrieve the previous mapping, - // but rather decode into a new value and set that in the map. - // - // If false, we will retrieve the previous mapping if necessary e.g. - // the previous mapping is a pointer, or is a struct or array with pre-set state, - // or is an interface. - MapValueReset bool - - // InterfaceReset controls how we decode into an interface. 
- // - // By default, when we see a field that is an interface{...}, - // or a map with interface{...} value, we will attempt decoding into the - // "contained" value. - // - // However, this prevents us from reading a string into an interface{} - // that formerly contained a number. - // - // If true, we will decode into a new "blank" value, and set that in the interface. - // If false, we will decode into whatever is contained in the interface. - InterfaceReset bool - - // InternString controls interning of strings during decoding. - // - // Some handles, e.g. json, typically will read map keys as strings. - // If the set of keys are finite, it may help reduce allocation to - // look them up from a map (than to allocate them afresh). - // - // Note: Handles will be smart when using the intern functionality. - // So everything will not be interned. - InternString bool -} - -// ------------------------------------ - -// ioDecByteScanner implements Read(), ReadByte(...), UnreadByte(...) methods -// of io.Reader, io.ByteScanner. -type ioDecByteScanner struct { - r io.Reader - l byte // last byte - ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread - b [1]byte // tiny buffer for reading single bytes -} - -func (z *ioDecByteScanner) Read(p []byte) (n int, err error) { - var firstByte bool - if z.ls == 1 { - z.ls = 2 - p[0] = z.l - if len(p) == 1 { - n = 1 - return - } - firstByte = true - p = p[1:] - } - n, err = z.r.Read(p) - if n > 0 { - if err == io.EOF && n == len(p) { - err = nil // read was successful, so postpone EOF (till next time) - } - z.l = p[n-1] - z.ls = 2 - } - if firstByte { - n++ - } - return -} - -func (z *ioDecByteScanner) ReadByte() (c byte, err error) { - n, err := z.Read(z.b[:]) - if n == 1 { - c = z.b[0] - if err == io.EOF { - err = nil // read was successful, so postpone EOF (till next time) - } - } - return -} - -func (z *ioDecByteScanner) UnreadByte() (err error) { - x := z.ls - if x == 0 { - err = errors.New("cannot unread - nothing has been read") - } else if x == 1 { - err = errors.New("cannot unread - last byte has not been read") - } else if x == 2 { - z.ls = 1 - } - return -} - -// ioDecReader is a decReader that reads off an io.Reader -type ioDecReader struct { - br decReaderByteScanner - // temp byte array re-used internally for efficiency during read. - // shares buffer with Decoder, so we keep size of struct within 8 words. - x *[scratchByteArrayLen]byte - bs ioDecByteScanner - n int // num read - tr []byte // tracking bytes read - trb bool -} - -func (z *ioDecReader) numread() int { - return z.n -} - -func (z *ioDecReader) readx(n int) (bs []byte) { - if n <= 0 { - return - } - if n < len(z.x) { - bs = z.x[:n] - } else { - bs = make([]byte, n) - } - if _, err := io.ReadAtLeast(z.br, bs, n); err != nil { - panic(err) - } - z.n += len(bs) - if z.trb { - z.tr = append(z.tr, bs...) - } - return -} - -func (z *ioDecReader) readb(bs []byte) { - if len(bs) == 0 { - return - } - n, err := io.ReadAtLeast(z.br, bs, len(bs)) - z.n += n - if err != nil { - panic(err) - } - if z.trb { - z.tr = append(z.tr, bs...) 
- } -} - -func (z *ioDecReader) readn1() (b uint8) { - b, err := z.br.ReadByte() - if err != nil { - panic(err) - } - z.n++ - if z.trb { - z.tr = append(z.tr, b) - } - return b -} - -func (z *ioDecReader) readn1eof() (b uint8, eof bool) { - b, err := z.br.ReadByte() - if err == nil { - z.n++ - if z.trb { - z.tr = append(z.tr, b) - } - } else if err == io.EOF { - eof = true - } else { - panic(err) - } - return -} - -func (z *ioDecReader) unreadn1() { - err := z.br.UnreadByte() - if err != nil { - panic(err) - } - z.n-- - if z.trb { - if l := len(z.tr) - 1; l >= 0 { - z.tr = z.tr[:l] - } - } -} - -func (z *ioDecReader) track() { - if z.tr != nil { - z.tr = z.tr[:0] - } - z.trb = true -} - -func (z *ioDecReader) stopTrack() (bs []byte) { - z.trb = false - return z.tr -} - -// ------------------------------------ - -var bytesDecReaderCannotUnreadErr = errors.New("cannot unread last byte read") - -// bytesDecReader is a decReader that reads off a byte slice with zero copying -type bytesDecReader struct { - b []byte // data - c int // cursor - a int // available - t int // track start -} - -func (z *bytesDecReader) reset(in []byte) { - z.b = in - z.a = len(in) - z.c = 0 - z.t = 0 -} - -func (z *bytesDecReader) numread() int { - return z.c -} - -func (z *bytesDecReader) unreadn1() { - if z.c == 0 || len(z.b) == 0 { - panic(bytesDecReaderCannotUnreadErr) - } - z.c-- - z.a++ - return -} - -func (z *bytesDecReader) readx(n int) (bs []byte) { - // slicing from a non-constant start position is more expensive, - // as more computation is required to decipher the pointer start position. - // However, we do it only once, and it's better than reslicing both z.b and return value. - - if n <= 0 { - } else if z.a == 0 { - panic(io.EOF) - } else if n > z.a { - panic(io.ErrUnexpectedEOF) - } else { - c0 := z.c - z.c = c0 + n - z.a = z.a - n - bs = z.b[c0:z.c] - } - return -} - -func (z *bytesDecReader) readn1() (v uint8) { - if z.a == 0 { - panic(io.EOF) - } - v = z.b[z.c] - z.c++ - z.a-- - return -} - -func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { - if z.a == 0 { - eof = true - return - } - v = z.b[z.c] - z.c++ - z.a-- - return -} - -func (z *bytesDecReader) readb(bs []byte) { - copy(bs, z.readx(len(bs))) -} - -func (z *bytesDecReader) track() { - z.t = z.c -} - -func (z *bytesDecReader) stopTrack() (bs []byte) { - return z.b[z.t:z.c] -} - -// ------------------------------------ - -type decFnInfo struct { - d *Decoder - ti *typeInfo - xfFn Ext - xfTag uint64 - seq seqType -} - -// ---------------------------------------- - -type decFn struct { - i decFnInfo - f func(*decFnInfo, reflect.Value) -} - -func (f *decFnInfo) builtin(rv reflect.Value) { - f.d.d.DecodeBuiltin(f.ti.rtid, rv.Addr().Interface()) -} - -func (f *decFnInfo) rawExt(rv reflect.Value) { - f.d.d.DecodeExt(rv.Addr().Interface(), 0, nil) -} - -func (f *decFnInfo) ext(rv reflect.Value) { - f.d.d.DecodeExt(rv.Addr().Interface(), f.xfTag, f.xfFn) -} - -func (f *decFnInfo) getValueForUnmarshalInterface(rv reflect.Value, indir int8) (v interface{}) { - if indir == -1 { - v = rv.Addr().Interface() - } else if indir == 0 { - v = rv.Interface() - } else { - for j := int8(0); j < indir; j++ { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - v = rv.Interface() - } - return -} - -func (f *decFnInfo) selferUnmarshal(rv reflect.Value) { - f.getValueForUnmarshalInterface(rv, f.ti.csIndir).(Selfer).CodecDecodeSelf(f.d) -} - -func (f *decFnInfo) binaryUnmarshal(rv reflect.Value) { - bm := 
f.getValueForUnmarshalInterface(rv, f.ti.bunmIndir).(encoding.BinaryUnmarshaler) - xbs := f.d.d.DecodeBytes(nil, false, true) - if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { - panic(fnerr) - } -} - -func (f *decFnInfo) textUnmarshal(rv reflect.Value) { - tm := f.getValueForUnmarshalInterface(rv, f.ti.tunmIndir).(encoding.TextUnmarshaler) - fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) - if fnerr != nil { - panic(fnerr) - } -} - -func (f *decFnInfo) jsonUnmarshal(rv reflect.Value) { - tm := f.getValueForUnmarshalInterface(rv, f.ti.junmIndir).(jsonUnmarshaler) - // bs := f.d.d.DecodeBytes(f.d.b[:], true, true) - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. - fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) - if fnerr != nil { - panic(fnerr) - } -} - -func (f *decFnInfo) kErr(rv reflect.Value) { - f.d.errorf("no decoding function defined for kind %v", rv.Kind()) -} - -func (f *decFnInfo) kString(rv reflect.Value) { - rv.SetString(f.d.d.DecodeString()) -} - -func (f *decFnInfo) kBool(rv reflect.Value) { - rv.SetBool(f.d.d.DecodeBool()) -} - -func (f *decFnInfo) kInt(rv reflect.Value) { - rv.SetInt(f.d.d.DecodeInt(intBitsize)) -} - -func (f *decFnInfo) kInt64(rv reflect.Value) { - rv.SetInt(f.d.d.DecodeInt(64)) -} - -func (f *decFnInfo) kInt32(rv reflect.Value) { - rv.SetInt(f.d.d.DecodeInt(32)) -} - -func (f *decFnInfo) kInt8(rv reflect.Value) { - rv.SetInt(f.d.d.DecodeInt(8)) -} - -func (f *decFnInfo) kInt16(rv reflect.Value) { - rv.SetInt(f.d.d.DecodeInt(16)) -} - -func (f *decFnInfo) kFloat32(rv reflect.Value) { - rv.SetFloat(f.d.d.DecodeFloat(true)) -} - -func (f *decFnInfo) kFloat64(rv reflect.Value) { - rv.SetFloat(f.d.d.DecodeFloat(false)) -} - -func (f *decFnInfo) kUint8(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(8)) -} - -func (f *decFnInfo) kUint64(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(64)) -} - -func (f *decFnInfo) kUint(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(uintBitsize)) -} - -func (f *decFnInfo) kUintptr(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(uintBitsize)) -} - -func (f *decFnInfo) kUint32(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(32)) -} - -func (f *decFnInfo) kUint16(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(16)) -} - -// func (f *decFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? decode kPtr called - shouldn't get called") -// if rv.IsNil() { -// rv.Set(reflect.New(rv.Type().Elem())) -// } -// f.d.decodeValue(rv.Elem()) -// } - -// var kIntfCtr uint64 - -func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) { - // nil interface: - // use some hieristics to decode it appropriately - // based on the detected next value in the stream. - d := f.d - d.d.DecodeNaked() - n := &d.n - if n.v == valueTypeNil { - return - } - // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader). 
- // if num := f.ti.rt.NumMethod(); num > 0 { - if f.ti.numMeth > 0 { - d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) - return - } - // var useRvn bool - switch n.v { - case valueTypeMap: - // if d.h.MapType == nil || d.h.MapType == mapIntfIntfTyp { - // } else if d.h.MapType == mapStrIntfTyp { // for json performance - // } - if d.mtid == 0 || d.mtid == mapIntfIntfTypId { - l := len(n.ms) - n.ms = append(n.ms, nil) - d.decode(&n.ms[l]) - rvn = reflect.ValueOf(&n.ms[l]).Elem() - n.ms = n.ms[:l] - } else if d.mtid == mapStrIntfTypId { // for json performance - l := len(n.ns) - n.ns = append(n.ns, nil) - d.decode(&n.ns[l]) - rvn = reflect.ValueOf(&n.ns[l]).Elem() - n.ns = n.ns[:l] - } else { - rvn = reflect.New(d.h.MapType).Elem() - d.decodeValue(rvn, nil) - } - case valueTypeArray: - // if d.h.SliceType == nil || d.h.SliceType == intfSliceTyp { - if d.stid == 0 || d.stid == intfSliceTypId { - l := len(n.ss) - n.ss = append(n.ss, nil) - d.decode(&n.ss[l]) - rvn = reflect.ValueOf(&n.ss[l]).Elem() - n.ss = n.ss[:l] - } else { - rvn = reflect.New(d.h.SliceType).Elem() - d.decodeValue(rvn, nil) - } - case valueTypeExt: - var v interface{} - tag, bytes := n.u, n.l // calling decode below might taint the values - if bytes == nil { - l := len(n.is) - n.is = append(n.is, nil) - v2 := &n.is[l] - n.is = n.is[:l] - d.decode(v2) - v = *v2 - } - bfn := d.h.getExtForTag(tag) - if bfn == nil { - var re RawExt - re.Tag = tag - re.Data = detachZeroCopyBytes(d.bytes, nil, bytes) - rvn = reflect.ValueOf(re) - } else { - rvnA := reflect.New(bfn.rt) - rvn = rvnA.Elem() - if bytes != nil { - bfn.ext.ReadExt(rvnA.Interface(), bytes) - } else { - bfn.ext.UpdateExt(rvnA.Interface(), v) - } - } - case valueTypeNil: - // no-op - case valueTypeInt: - rvn = reflect.ValueOf(&n.i).Elem() - case valueTypeUint: - rvn = reflect.ValueOf(&n.u).Elem() - case valueTypeFloat: - rvn = reflect.ValueOf(&n.f).Elem() - case valueTypeBool: - rvn = reflect.ValueOf(&n.b).Elem() - case valueTypeString, valueTypeSymbol: - rvn = reflect.ValueOf(&n.s).Elem() - case valueTypeBytes: - rvn = reflect.ValueOf(&n.l).Elem() - case valueTypeTimestamp: - rvn = reflect.ValueOf(&n.t).Elem() - default: - panic(fmt.Errorf("kInterfaceNaked: unexpected valueType: %d", n.v)) - } - return -} - -func (f *decFnInfo) kInterface(rv reflect.Value) { - // debugf("\t===> kInterface") - - // Note: - // A consequence of how kInterface works, is that - // if an interface already contains something, we try - // to decode into what was there before. - // We do not replace with a generic value (as got from decodeNaked). - - var rvn reflect.Value - if rv.IsNil() { - rvn = f.kInterfaceNaked() - if rvn.IsValid() { - rv.Set(rvn) - } - } else if f.d.h.InterfaceReset { - rvn = f.kInterfaceNaked() - if rvn.IsValid() { - rv.Set(rvn) - } else { - // reset to zero value based on current type in there. - rv.Set(reflect.Zero(rv.Elem().Type())) - } - } else { - rvn = rv.Elem() - // Note: interface{} is settable, but underlying type may not be. - // Consequently, we have to set the reflect.Value directly. - // if underlying type is settable (e.g. ptr or interface), - // we just decode into it. - // Else we create a settable value, decode into it, and set on the interface. 
- if rvn.CanSet() { - f.d.decodeValue(rvn, nil) - } else { - rvn2 := reflect.New(rvn.Type()).Elem() - rvn2.Set(rvn) - f.d.decodeValue(rvn2, nil) - rv.Set(rvn2) - } - } -} - -func (f *decFnInfo) kStruct(rv reflect.Value) { - fti := f.ti - d := f.d - dd := d.d - cr := d.cr - ctyp := dd.ContainerType() - if ctyp == valueTypeMap { - containerLen := dd.ReadMapStart() - if containerLen == 0 { - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return - } - tisfi := fti.sfi - hasLen := containerLen >= 0 - if hasLen { - for j := 0; j < containerLen; j++ { - // rvkencname := dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapKey) - } - rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true)) - // rvksi := ti.getForEncName(rvkencname) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if k := fti.indexForEncName(rvkencname); k > -1 { - si := tisfi[k] - if dd.TryDecodeAsNil() { - si.setToZeroValue(rv) - } else { - d.decodeValue(si.field(rv, true), nil) - } - } else { - d.structFieldNotFound(-1, rvkencname) - } - } - } else { - for j := 0; !dd.CheckBreak(); j++ { - // rvkencname := dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapKey) - } - rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true)) - // rvksi := ti.getForEncName(rvkencname) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if k := fti.indexForEncName(rvkencname); k > -1 { - si := tisfi[k] - if dd.TryDecodeAsNil() { - si.setToZeroValue(rv) - } else { - d.decodeValue(si.field(rv, true), nil) - } - } else { - d.structFieldNotFound(-1, rvkencname) - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - } else if ctyp == valueTypeArray { - containerLen := dd.ReadArrayStart() - if containerLen == 0 { - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } - return - } - // Not much gain from doing it two ways for array. - // Arrays are not used as much for structs. - hasLen := containerLen >= 0 - for j, si := range fti.sfip { - if hasLen { - if j == containerLen { - break - } - } else if dd.CheckBreak() { - break - } - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - if dd.TryDecodeAsNil() { - si.setToZeroValue(rv) - } else { - d.decodeValue(si.field(rv, true), nil) - } - } - if containerLen > len(fti.sfip) { - // read remaining values and throw away - for j := len(fti.sfip); j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - d.structFieldNotFound(j, "") - } - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } - } else { - f.d.error(onlyMapOrArrayCanDecodeIntoStructErr) - return - } -} - -func (f *decFnInfo) kSlice(rv reflect.Value) { - // A slice can be set from a map or array in stream. - // This way, the order can be kept (as order is lost with map). 
- ti := f.ti - d := f.d - dd := d.d - rtelem0 := ti.rt.Elem() - ctyp := dd.ContainerType() - if ctyp == valueTypeBytes || ctyp == valueTypeString { - // you can only decode bytes or string in the stream into a slice or array of bytes - if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { - f.d.errorf("bytes or string in the stream must be decoded into a slice or array of bytes, not %v", ti.rt) - } - if f.seq == seqTypeChan { - bs2 := dd.DecodeBytes(nil, false, true) - ch := rv.Interface().(chan<- byte) - for _, b := range bs2 { - ch <- b - } - } else { - rvbs := rv.Bytes() - bs2 := dd.DecodeBytes(rvbs, false, false) - if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { - if rv.CanSet() { - rv.SetBytes(bs2) - } else { - copy(rvbs, bs2) - } - } - } - return - } - - // array := f.seq == seqTypeChan - - slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) - - // // an array can never return a nil slice. so no need to check f.array here. - if containerLenS == 0 { - if f.seq == seqTypeSlice { - if rv.IsNil() { - rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) - } else { - rv.SetLen(0) - } - } else if f.seq == seqTypeChan { - if rv.IsNil() { - rv.Set(reflect.MakeChan(ti.rt, 0)) - } - } - slh.End() - return - } - - rtelem := rtelem0 - for rtelem.Kind() == reflect.Ptr { - rtelem = rtelem.Elem() - } - fn := d.getDecFn(rtelem, true, true) - - var rv0, rv9 reflect.Value - rv0 = rv - rvChanged := false - - // for j := 0; j < containerLenS; j++ { - var rvlen int - if containerLenS > 0 { // hasLen - if f.seq == seqTypeChan { - if rv.IsNil() { - rvlen, _ = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size())) - rv.Set(reflect.MakeChan(ti.rt, rvlen)) - } - // handle chan specially: - for j := 0; j < containerLenS; j++ { - rv9 = reflect.New(rtelem0).Elem() - slh.ElemContainerState(j) - d.decodeValue(rv9, fn) - rv.Send(rv9) - } - } else { // slice or array - var truncated bool // says len of sequence is not same as expected number of elements - numToRead := containerLenS // if truncated, reset numToRead - - rvcap := rv.Cap() - rvlen = rv.Len() - if containerLenS > rvcap { - if f.seq == seqTypeArray { - d.arrayCannotExpand(rvlen, containerLenS) - } else { - oldRvlenGtZero := rvlen > 0 - rvlen, truncated = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size())) - if truncated { - if rvlen <= rvcap { - rv.SetLen(rvlen) - } else { - rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) - rvChanged = true - } - } else { - rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) - rvChanged = true - } - if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { - reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap) - } - rvcap = rvlen - } - numToRead = rvlen - } else if containerLenS != rvlen { - if f.seq == seqTypeSlice { - rv.SetLen(containerLenS) - rvlen = containerLenS - } - } - j := 0 - // we read up to the numToRead - for ; j < numToRead; j++ { - slh.ElemContainerState(j) - d.decodeValue(rv.Index(j), fn) - } - - // if slice, expand and read up to containerLenS (or EOF) iff truncated - // if array, swallow all the rest. 
- - if f.seq == seqTypeArray { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } else if truncated { // slice was truncated, as chan NOT in this block - for ; j < containerLenS; j++ { - rv = expandSliceValue(rv, 1) - rv9 = rv.Index(j) - if resetSliceElemToZeroValue { - rv9.Set(reflect.Zero(rtelem0)) - } - slh.ElemContainerState(j) - d.decodeValue(rv9, fn) - } - } - } - } else { - rvlen = rv.Len() - j := 0 - for ; !dd.CheckBreak(); j++ { - if f.seq == seqTypeChan { - slh.ElemContainerState(j) - rv9 = reflect.New(rtelem0).Elem() - d.decodeValue(rv9, fn) - rv.Send(rv9) - } else { - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= rvlen { - if f.seq == seqTypeArray { - d.arrayCannotExpand(rvlen, j+1) - decodeIntoBlank = true - } else { // if f.seq == seqTypeSlice - // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs - rv = expandSliceValue(rv, 1) - rv9 = rv.Index(j) - // rv.Index(rv.Len() - 1).Set(reflect.Zero(rtelem0)) - if resetSliceElemToZeroValue { - rv9.Set(reflect.Zero(rtelem0)) - } - rvlen++ - rvChanged = true - } - } else { // slice or array - rv9 = rv.Index(j) - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { // seqTypeSlice - d.decodeValue(rv9, fn) - } - } - } - if f.seq == seqTypeSlice { - if j < rvlen { - rv.SetLen(j) - } else if j == 0 && rv.IsNil() { - rv = reflect.MakeSlice(ti.rt, 0, 0) - rvChanged = true - } - } - } - slh.End() - - if rvChanged { - rv0.Set(rv) - } -} - -func (f *decFnInfo) kArray(rv reflect.Value) { - // f.d.decodeValue(rv.Slice(0, rv.Len())) - f.kSlice(rv.Slice(0, rv.Len())) -} - -func (f *decFnInfo) kMap(rv reflect.Value) { - d := f.d - dd := d.d - containerLen := dd.ReadMapStart() - cr := d.cr - ti := f.ti - if rv.IsNil() { - rv.Set(reflect.MakeMap(ti.rt)) - } - - if containerLen == 0 { - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return - } - - ktype, vtype := ti.rt.Key(), ti.rt.Elem() - ktypeId := reflect.ValueOf(ktype).Pointer() - vtypeKind := vtype.Kind() - var keyFn, valFn *decFn - var xtyp reflect.Type - for xtyp = ktype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() { - } - keyFn = d.getDecFn(xtyp, true, true) - for xtyp = vtype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() { - } - valFn = d.getDecFn(xtyp, true, true) - var mapGet, mapSet bool - if !f.d.h.MapValueReset { - // if pointer, mapGet = true - // if interface, mapGet = true if !DecodeNakedAlways (else false) - // if builtin, mapGet = false - // else mapGet = true - if vtypeKind == reflect.Ptr { - mapGet = true - } else if vtypeKind == reflect.Interface { - if !f.d.h.InterfaceReset { - mapGet = true - } - } else if !isImmutableKind(vtypeKind) { - mapGet = true - } - } - - var rvk, rvv, rvz reflect.Value - - // for j := 0; j < containerLen; j++ { - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - rvk = reflect.New(ktype).Elem() - if cr != nil { - cr.sendContainerState(containerMapKey) - } - d.decodeValue(rvk, keyFn) - - // special case if a byte array. 
- if ktypeId == intfTypId { - rvk = rvk.Elem() - if rvk.Type() == uint8SliceTyp { - rvk = reflect.ValueOf(d.string(rvk.Bytes())) - } - } - mapSet = true // set to false if u do a get, and its a pointer, and exists - if mapGet { - rvv = rv.MapIndex(rvk) - if rvv.IsValid() { - if vtypeKind == reflect.Ptr { - mapSet = false - } - } else { - if rvz.IsValid() { - rvz.Set(reflect.Zero(vtype)) - } else { - rvz = reflect.New(vtype).Elem() - } - rvv = rvz - } - } else { - if rvz.IsValid() { - rvz.Set(reflect.Zero(vtype)) - } else { - rvz = reflect.New(vtype).Elem() - } - rvv = rvz - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - d.decodeValue(rvv, valFn) - if mapSet { - rv.SetMapIndex(rvk, rvv) - } - } - } else { - for j := 0; !dd.CheckBreak(); j++ { - rvk = reflect.New(ktype).Elem() - if cr != nil { - cr.sendContainerState(containerMapKey) - } - d.decodeValue(rvk, keyFn) - - // special case if a byte array. - if ktypeId == intfTypId { - rvk = rvk.Elem() - if rvk.Type() == uint8SliceTyp { - rvk = reflect.ValueOf(d.string(rvk.Bytes())) - } - } - mapSet = true // set to false if u do a get, and its a pointer, and exists - if mapGet { - rvv = rv.MapIndex(rvk) - if rvv.IsValid() { - if vtypeKind == reflect.Ptr { - mapSet = false - } - } else { - if rvz.IsValid() { - rvz.Set(reflect.Zero(vtype)) - } else { - rvz = reflect.New(vtype).Elem() - } - rvv = rvz - } - } else { - if rvz.IsValid() { - rvz.Set(reflect.Zero(vtype)) - } else { - rvz = reflect.New(vtype).Elem() - } - rvv = rvz - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - d.decodeValue(rvv, valFn) - if mapSet { - rv.SetMapIndex(rvk, rvv) - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -type decRtidFn struct { - rtid uintptr - fn decFn -} - -// decNaked is used to keep track of the primitives decoded. -// Without it, we would have to decode each primitive and wrap it -// in an interface{}, causing an allocation. -// In this model, the primitives are decoded in a "pseudo-atomic" fashion, -// so we can rest assured that no other decoding happens while these -// primitives are being decoded. -// -// maps and arrays are not handled by this mechanism. -// However, RawExt is, and we accomodate for extensions that decode -// RawExt from DecodeNaked, but need to decode the value subsequently. -// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat. -// -// However, decNaked also keeps some arrays of default maps and slices -// used in DecodeNaked. This way, we can get a pointer to it -// without causing a new heap allocation. -// -// kInterfaceNaked will ensure that there is no allocation for the common -// uses. -type decNaked struct { - // r RawExt // used for RawExt, uint, []byte. - u uint64 - i int64 - f float64 - l []byte - s string - t time.Time - b bool - v valueType - - // stacks for reducing allocation - is []interface{} - ms []map[interface{}]interface{} - ns []map[string]interface{} - ss [][]interface{} - // rs []RawExt - - // keep arrays at the bottom? Chance is that they are not used much. - ia [4]interface{} - ma [4]map[interface{}]interface{} - na [4]map[string]interface{} - sa [4][]interface{} - // ra [2]RawExt -} - -func (n *decNaked) reset() { - if n.ss != nil { - n.ss = n.ss[:0] - } - if n.is != nil { - n.is = n.is[:0] - } - if n.ms != nil { - n.ms = n.ms[:0] - } - if n.ns != nil { - n.ns = n.ns[:0] - } -} - -// A Decoder reads and decodes an object from an input stream in the codec format. 
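Before the Decoder type itself, one more illustrative sketch (not part of the patch): the DecodeOptions fields defined earlier in this file are reachable on each concrete Handle, assuming BasicHandle embeds DecodeOptions as in the upstream package, so callers tune them directly on the handle before constructing a Decoder:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"reflect"

	"github.com/ugorji/go/codec"
)

func main() {
	ch := new(codec.CborHandle)

	// DecodeOptions are reachable through the embedded BasicHandle.
	ch.MapType = reflect.TypeOf(map[string]interface{}(nil)) // schema-less maps as map[string]interface{}
	ch.SignedInteger = true                                  // schema-less numbers as int64 rather than uint64
	ch.ErrorIfNoField = true                                 // unmatched map keys error out when decoding into structs

	var buf bytes.Buffer
	if err := codec.NewEncoder(&buf, ch).Encode(map[string]interface{}{"x": 1}); err != nil {
		panic(err)
	}

	// Per the constructor docs, wrap plain io.Readers in a buffered reader.
	dec := codec.NewDecoder(bufio.NewReader(&buf), ch)

	var v interface{}
	if err := dec.Decode(&v); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", v) // e.g. map[string]interface {}{"x":1}
}
```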
-type Decoder struct { - // hopefully, reduce derefencing cost by laying the decReader inside the Decoder. - // Try to put things that go together to fit within a cache line (8 words). - - d decDriver - // NOTE: Decoder shouldn't call it's read methods, - // as the handler MAY need to do some coordination. - r decReader - // sa [initCollectionCap]decRtidFn - h *BasicHandle - hh Handle - - be bool // is binary encoding - bytes bool // is bytes reader - js bool // is json handle - - rb bytesDecReader - ri ioDecReader - cr containerStateRecv - - s []decRtidFn - f map[uintptr]*decFn - - // _ uintptr // for alignment purposes, so next one starts from a cache line - - // cache the mapTypeId and sliceTypeId for faster comparisons - mtid uintptr - stid uintptr - - n decNaked - b [scratchByteArrayLen]byte - is map[string]string // used for interning strings -} - -// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. -// -// For efficiency, Users are encouraged to pass in a memory buffered reader -// (eg bufio.Reader, bytes.Buffer). -func NewDecoder(r io.Reader, h Handle) *Decoder { - d := newDecoder(h) - d.Reset(r) - return d -} - -// NewDecoderBytes returns a Decoder which efficiently decodes directly -// from a byte slice with zero copying. -func NewDecoderBytes(in []byte, h Handle) *Decoder { - d := newDecoder(h) - d.ResetBytes(in) - return d -} - -func newDecoder(h Handle) *Decoder { - d := &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()} - n := &d.n - // n.rs = n.ra[:0] - n.ms = n.ma[:0] - n.is = n.ia[:0] - n.ns = n.na[:0] - n.ss = n.sa[:0] - _, d.js = h.(*JsonHandle) - if d.h.InternString { - d.is = make(map[string]string, 32) - } - d.d = h.newDecDriver(d) - d.cr, _ = d.d.(containerStateRecv) - // d.d = h.newDecDriver(decReaderT{true, &d.rb, &d.ri}) - return d -} - -func (d *Decoder) resetCommon() { - d.n.reset() - d.d.reset() - // reset all things which were cached from the Handle, - // but could be changed. - d.mtid, d.stid = 0, 0 - if d.h.MapType != nil { - d.mtid = reflect.ValueOf(d.h.MapType).Pointer() - } - if d.h.SliceType != nil { - d.stid = reflect.ValueOf(d.h.SliceType).Pointer() - } -} - -func (d *Decoder) Reset(r io.Reader) { - d.ri.x = &d.b - // d.s = d.sa[:0] - d.ri.bs.r = r - var ok bool - d.ri.br, ok = r.(decReaderByteScanner) - if !ok { - d.ri.br = &d.ri.bs - } - d.r = &d.ri - d.resetCommon() -} - -func (d *Decoder) ResetBytes(in []byte) { - // d.s = d.sa[:0] - d.rb.reset(in) - d.r = &d.rb - d.resetCommon() -} - -// func (d *Decoder) sendContainerState(c containerState) { -// if d.cr != nil { -// d.cr.sendContainerState(c) -// } -// } - -// Decode decodes the stream from reader and stores the result in the -// value pointed to by v. v cannot be a nil pointer. v can also be -// a reflect.Value of a pointer. -// -// Note that a pointer to a nil interface is not a nil pointer. -// If you do not know what type of stream it is, pass in a pointer to a nil interface. -// We will decode and store a value in that nil interface. -// -// Sample usages: -// // Decoding into a non-nil typed value -// var f float32 -// err = codec.NewDecoder(r, handle).Decode(&f) -// -// // Decoding into nil interface -// var v interface{} -// dec := codec.NewDecoder(r, handle) -// err = dec.Decode(&v) -// -// When decoding into a nil interface{}, we will decode into an appropriate value based -// on the contents of the stream: -// - Numbers are decoded as float64, int64 or uint64. 
-// - Other values are decoded appropriately depending on the type: -// bool, string, []byte, time.Time, etc -// - Extensions are decoded as RawExt (if no ext function registered for the tag) -// Configurations exist on the Handle to override defaults -// (e.g. for MapType, SliceType and how to decode raw bytes). -// -// When decoding into a non-nil interface{} value, the mode of encoding is based on the -// type of the value. When a value is seen: -// - If an extension is registered for it, call that extension function -// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error -// - Else decode it based on its reflect.Kind -// -// There are some special rules when decoding into containers (slice/array/map/struct). -// Decode will typically use the stream contents to UPDATE the container. -// - A map can be decoded from a stream map, by updating matching keys. -// - A slice can be decoded from a stream array, -// by updating the first n elements, where n is length of the stream. -// - A slice can be decoded from a stream map, by decoding as if -// it contains a sequence of key-value pairs. -// - A struct can be decoded from a stream map, by updating matching fields. -// - A struct can be decoded from a stream array, -// by updating fields as they occur in the struct (by index). -// -// When decoding a stream map or array with length of 0 into a nil map or slice, -// we reset the destination map or slice to a zero-length value. -// -// However, when decoding a stream nil, we reset the destination container -// to its "zero" value (e.g. nil for slice/map, etc). -// -func (d *Decoder) Decode(v interface{}) (err error) { - defer panicToErr(&err) - d.decode(v) - return -} - -// this is not a smart swallow, as it allocates objects and does unnecessary work. -func (d *Decoder) swallowViaHammer() { - var blank interface{} - d.decodeValue(reflect.ValueOf(&blank).Elem(), nil) -} - -func (d *Decoder) swallow() { - // smarter decode that just swallows the content - dd := d.d - if dd.TryDecodeAsNil() { - return - } - cr := d.cr - switch dd.ContainerType() { - case valueTypeMap: - containerLen := dd.ReadMapStart() - clenGtEqualZero := containerLen >= 0 - for j := 0; ; j++ { - if clenGtEqualZero { - if j >= containerLen { - break - } - } else if dd.CheckBreak() { - break - } - if cr != nil { - cr.sendContainerState(containerMapKey) - } - d.swallow() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - d.swallow() - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - case valueTypeArray: - containerLenS := dd.ReadArrayStart() - clenGtEqualZero := containerLenS >= 0 - for j := 0; ; j++ { - if clenGtEqualZero { - if j >= containerLenS { - break - } - } else if dd.CheckBreak() { - break - } - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - d.swallow() - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } - case valueTypeBytes: - dd.DecodeBytes(d.b[:], false, true) - case valueTypeString: - dd.DecodeBytes(d.b[:], true, true) - // dd.DecodeStringAsBytes(d.b[:]) - default: - // these are all primitives, which we can get from decodeNaked - // if RawExt using Value, complete the processing. - dd.DecodeNaked() - if n := &d.n; n.v == valueTypeExt && n.l == nil { - l := len(n.is) - n.is = append(n.is, nil) - v2 := &n.is[l] - n.is = n.is[:l] - d.decode(v2) - } - } -} - -// MustDecode is like Decode, but panics if unable to Decode. -// This provides insight to the code location that triggered the error. 
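One final sketch (again illustrative, not part of the patch), contrasting the two entry points documented here: Decode converts internal panics into a returned error via panicToErr, while MustDecode lets the panic surface so the failing call site shows up in the stack trace.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	h := new(codec.CborHandle)

	var buf bytes.Buffer
	if err := codec.NewEncoder(&buf, h).Encode("hello"); err != nil {
		panic(err)
	}

	// Decode: failures come back as an error.
	var s string
	if err := codec.NewDecoderBytes(buf.Bytes(), h).Decode(&s); err != nil {
		fmt.Println("decode failed:", err)
		return
	}

	// MustDecode: failures panic, pointing straight at the offending call.
	var s2 string
	codec.NewDecoderBytes(buf.Bytes(), h).MustDecode(&s2)

	fmt.Println(s, s2) // hello hello
}
```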
-func (d *Decoder) MustDecode(v interface{}) { - d.decode(v) -} - -func (d *Decoder) decode(iv interface{}) { - // if ics, ok := iv.(Selfer); ok { - // ics.CodecDecodeSelf(d) - // return - // } - - if d.d.TryDecodeAsNil() { - switch v := iv.(type) { - case nil: - case *string: - *v = "" - case *bool: - *v = false - case *int: - *v = 0 - case *int8: - *v = 0 - case *int16: - *v = 0 - case *int32: - *v = 0 - case *int64: - *v = 0 - case *uint: - *v = 0 - case *uint8: - *v = 0 - case *uint16: - *v = 0 - case *uint32: - *v = 0 - case *uint64: - *v = 0 - case *float32: - *v = 0 - case *float64: - *v = 0 - case *[]uint8: - *v = nil - case reflect.Value: - if v.Kind() != reflect.Ptr || v.IsNil() { - d.errNotValidPtrValue(v) - } - // d.chkPtrValue(v) - v = v.Elem() - if v.IsValid() { - v.Set(reflect.Zero(v.Type())) - } - default: - rv := reflect.ValueOf(iv) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - d.errNotValidPtrValue(rv) - } - // d.chkPtrValue(rv) - rv = rv.Elem() - if rv.IsValid() { - rv.Set(reflect.Zero(rv.Type())) - } - } - return - } - - switch v := iv.(type) { - case nil: - d.error(cannotDecodeIntoNilErr) - return - - case Selfer: - v.CodecDecodeSelf(d) - - case reflect.Value: - if v.Kind() != reflect.Ptr || v.IsNil() { - d.errNotValidPtrValue(v) - } - // d.chkPtrValue(v) - d.decodeValueNotNil(v.Elem(), nil) - - case *string: - - *v = d.d.DecodeString() - case *bool: - *v = d.d.DecodeBool() - case *int: - *v = int(d.d.DecodeInt(intBitsize)) - case *int8: - *v = int8(d.d.DecodeInt(8)) - case *int16: - *v = int16(d.d.DecodeInt(16)) - case *int32: - *v = int32(d.d.DecodeInt(32)) - case *int64: - *v = d.d.DecodeInt(64) - case *uint: - *v = uint(d.d.DecodeUint(uintBitsize)) - case *uint8: - *v = uint8(d.d.DecodeUint(8)) - case *uint16: - *v = uint16(d.d.DecodeUint(16)) - case *uint32: - *v = uint32(d.d.DecodeUint(32)) - case *uint64: - *v = d.d.DecodeUint(64) - case *float32: - *v = float32(d.d.DecodeFloat(true)) - case *float64: - *v = d.d.DecodeFloat(false) - case *[]uint8: - *v = d.d.DecodeBytes(*v, false, false) - - case *interface{}: - d.decodeValueNotNil(reflect.ValueOf(iv).Elem(), nil) - - default: - if !fastpathDecodeTypeSwitch(iv, d) { - d.decodeI(iv, true, false, false, false) - } - } -} - -func (d *Decoder) preDecodeValue(rv reflect.Value, tryNil bool) (rv2 reflect.Value, proceed bool) { - if tryNil && d.d.TryDecodeAsNil() { - // No need to check if a ptr, recursively, to determine - // whether to set value to nil. - // Just always set value to its zero type. - if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid - rv.Set(reflect.Zero(rv.Type())) - } - return - } - - // If stream is not containing a nil value, then we can deref to the base - // non-pointer value, and decode into that. 
- for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - return rv, true -} - -func (d *Decoder) decodeI(iv interface{}, checkPtr, tryNil, checkFastpath, checkCodecSelfer bool) { - rv := reflect.ValueOf(iv) - if checkPtr { - if rv.Kind() != reflect.Ptr || rv.IsNil() { - d.errNotValidPtrValue(rv) - } - // d.chkPtrValue(rv) - } - rv, proceed := d.preDecodeValue(rv, tryNil) - if proceed { - fn := d.getDecFn(rv.Type(), checkFastpath, checkCodecSelfer) - fn.f(&fn.i, rv) - } -} - -func (d *Decoder) decodeValue(rv reflect.Value, fn *decFn) { - if rv, proceed := d.preDecodeValue(rv, true); proceed { - if fn == nil { - fn = d.getDecFn(rv.Type(), true, true) - } - fn.f(&fn.i, rv) - } -} - -func (d *Decoder) decodeValueNotNil(rv reflect.Value, fn *decFn) { - if rv, proceed := d.preDecodeValue(rv, false); proceed { - if fn == nil { - fn = d.getDecFn(rv.Type(), true, true) - } - fn.f(&fn.i, rv) - } -} - -func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *decFn) { - rtid := reflect.ValueOf(rt).Pointer() - - // retrieve or register a focus'ed function for this type - // to eliminate need to do the retrieval multiple times - - // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } - var ok bool - if useMapForCodecCache { - fn, ok = d.f[rtid] - } else { - for i := range d.s { - v := &(d.s[i]) - if v.rtid == rtid { - fn, ok = &(v.fn), true - break - } - } - } - if ok { - return - } - - if useMapForCodecCache { - if d.f == nil { - d.f = make(map[uintptr]*decFn, initCollectionCap) - } - fn = new(decFn) - d.f[rtid] = fn - } else { - if d.s == nil { - d.s = make([]decRtidFn, 0, initCollectionCap) - } - d.s = append(d.s, decRtidFn{rtid: rtid}) - fn = &(d.s[len(d.s)-1]).fn - } - - // debugf("\tCreating new dec fn for type: %v\n", rt) - ti := d.h.getTypeInfo(rtid, rt) - fi := &(fn.i) - fi.d = d - fi.ti = ti - - // An extension can be registered for any type, regardless of the Kind - // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc. - // - // We can't check if it's an extension byte here first, because the user may have - // registered a pointer or non-pointer type, meaning we may have to recurse first - // before matching a mapped type, even though the extension byte is already detected. - // - // NOTE: if decoding into a nil interface{}, we return a non-nil - // value except even if the container registers a length of 0. 
- if checkCodecSelfer && ti.cs { - fn.f = (*decFnInfo).selferUnmarshal - } else if rtid == rawExtTypId { - fn.f = (*decFnInfo).rawExt - } else if d.d.IsBuiltinType(rtid) { - fn.f = (*decFnInfo).builtin - } else if xfFn := d.h.getExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext - fn.f = (*decFnInfo).ext - } else if supportMarshalInterfaces && d.be && ti.bunm { - fn.f = (*decFnInfo).binaryUnmarshal - } else if supportMarshalInterfaces && !d.be && d.js && ti.junm { - //If JSON, we should check JSONUnmarshal before textUnmarshal - fn.f = (*decFnInfo).jsonUnmarshal - } else if supportMarshalInterfaces && !d.be && ti.tunm { - fn.f = (*decFnInfo).textUnmarshal - } else { - rk := rt.Kind() - if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { - if rt.PkgPath() == "" { - if idx := fastpathAV.index(rtid); idx != -1 { - fn.f = fastpathAV[idx].decfn - } - } else { - // use mapping for underlying type if there - ok = false - var rtu reflect.Type - if rk == reflect.Map { - rtu = reflect.MapOf(rt.Key(), rt.Elem()) - } else { - rtu = reflect.SliceOf(rt.Elem()) - } - rtuid := reflect.ValueOf(rtu).Pointer() - if idx := fastpathAV.index(rtuid); idx != -1 { - xfnf := fastpathAV[idx].decfn - xrt := fastpathAV[idx].rt - fn.f = func(xf *decFnInfo, xrv reflect.Value) { - // xfnf(xf, xrv.Convert(xrt)) - xfnf(xf, xrv.Addr().Convert(reflect.PtrTo(xrt)).Elem()) - } - } - } - } - if fn.f == nil { - switch rk { - case reflect.String: - fn.f = (*decFnInfo).kString - case reflect.Bool: - fn.f = (*decFnInfo).kBool - case reflect.Int: - fn.f = (*decFnInfo).kInt - case reflect.Int64: - fn.f = (*decFnInfo).kInt64 - case reflect.Int32: - fn.f = (*decFnInfo).kInt32 - case reflect.Int8: - fn.f = (*decFnInfo).kInt8 - case reflect.Int16: - fn.f = (*decFnInfo).kInt16 - case reflect.Float32: - fn.f = (*decFnInfo).kFloat32 - case reflect.Float64: - fn.f = (*decFnInfo).kFloat64 - case reflect.Uint8: - fn.f = (*decFnInfo).kUint8 - case reflect.Uint64: - fn.f = (*decFnInfo).kUint64 - case reflect.Uint: - fn.f = (*decFnInfo).kUint - case reflect.Uint32: - fn.f = (*decFnInfo).kUint32 - case reflect.Uint16: - fn.f = (*decFnInfo).kUint16 - // case reflect.Ptr: - // fn.f = (*decFnInfo).kPtr - case reflect.Uintptr: - fn.f = (*decFnInfo).kUintptr - case reflect.Interface: - fn.f = (*decFnInfo).kInterface - case reflect.Struct: - fn.f = (*decFnInfo).kStruct - case reflect.Chan: - fi.seq = seqTypeChan - fn.f = (*decFnInfo).kSlice - case reflect.Slice: - fi.seq = seqTypeSlice - fn.f = (*decFnInfo).kSlice - case reflect.Array: - fi.seq = seqTypeArray - fn.f = (*decFnInfo).kArray - case reflect.Map: - fn.f = (*decFnInfo).kMap - default: - fn.f = (*decFnInfo).kErr - } - } - } - - return -} - -func (d *Decoder) structFieldNotFound(index int, rvkencname string) { - if d.h.ErrorIfNoField { - if index >= 0 { - d.errorf("no matching struct field found when decoding stream array at index %v", index) - return - } else if rvkencname != "" { - d.errorf("no matching struct field found when decoding stream map with key %s", rvkencname) - return - } - } - d.swallow() -} - -func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) { - if d.h.ErrorIfNoArrayExpand { - d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen) - } -} - -func (d *Decoder) chkPtrValue(rv reflect.Value) { - // We can only decode into a non-nil pointer - if rv.Kind() == reflect.Ptr && !rv.IsNil() { - return - } - d.errNotValidPtrValue(rv) -} - -func (d *Decoder) errNotValidPtrValue(rv reflect.Value) { - if 
!rv.IsValid() { - d.error(cannotDecodeIntoNilErr) - return - } - if !rv.CanInterface() { - d.errorf("cannot decode into a value without an interface: %v", rv) - return - } - rvi := rv.Interface() - d.errorf("cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", rv.Kind(), rvi, rvi) -} - -func (d *Decoder) error(err error) { - panic(err) -} - -func (d *Decoder) errorf(format string, params ...interface{}) { - params2 := make([]interface{}, len(params)+1) - params2[0] = d.r.numread() - copy(params2[1:], params) - err := fmt.Errorf("[pos %d]: "+format, params2...) - panic(err) -} - -func (d *Decoder) string(v []byte) (s string) { - if d.is != nil { - s, ok := d.is[string(v)] // no allocation here. - if !ok { - s = string(v) - d.is[s] = s - } - return s - } - return string(v) // don't return stringView, as we need a real string here. -} - -func (d *Decoder) intern(s string) { - if d.is != nil { - d.is[s] = s - } -} - -func (d *Decoder) nextValueBytes() []byte { - d.d.uncacheRead() - d.r.track() - d.swallow() - return d.r.stopTrack() -} - -// -------------------------------------------------- - -// decSliceHelper assists when decoding into a slice, from a map or an array in the stream. -// A slice can be set from a map or array in stream. This supports the MapBySlice interface. -type decSliceHelper struct { - d *Decoder - // ct valueType - array bool -} - -func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { - dd := d.d - ctyp := dd.ContainerType() - if ctyp == valueTypeArray { - x.array = true - clen = dd.ReadArrayStart() - } else if ctyp == valueTypeMap { - clen = dd.ReadMapStart() * 2 - } else { - d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp) - } - // x.ct = ctyp - x.d = d - return -} - -func (x decSliceHelper) End() { - cr := x.d.cr - if cr == nil { - return - } - if x.array { - cr.sendContainerState(containerArrayEnd) - } else { - cr.sendContainerState(containerMapEnd) - } -} - -func (x decSliceHelper) ElemContainerState(index int) { - cr := x.d.cr - if cr == nil { - return - } - if x.array { - cr.sendContainerState(containerArrayElem) - } else { - if index%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } -} - -func decByteSlice(r decReader, clen int, bs []byte) (bsOut []byte) { - if clen == 0 { - return zeroByteSlice - } - if len(bs) == clen { - bsOut = bs - } else if cap(bs) >= clen { - bsOut = bs[:clen] - } else { - bsOut = make([]byte, clen) - } - r.readb(bsOut) - return -} - -func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { - if xlen := len(in); xlen > 0 { - if isBytesReader || xlen <= scratchByteArrayLen { - if cap(dest) >= xlen { - out = dest[:xlen] - } else { - out = make([]byte, xlen) - } - copy(out, in) - return - } - } - return in -} - -// decInferLen will infer a sensible length, given the following: -// - clen: length wanted. -// - maxlen: max length to be returned. -// if <= 0, it is unset, and we infer it based on the unit size -// - unit: number of bytes for each element of the collection -func decInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { - // handle when maxlen is not set i.e. <= 0 - if clen <= 0 { - return - } - if maxlen <= 0 { - // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. 
- // maxlen = 256 * 1024 / unit - // if maxlen < (4 * 1024) { - // maxlen = 4 * 1024 - // } - if unit < (256 / 4) { - maxlen = 256 * 1024 / unit - } else { - maxlen = 4 * 1024 - } - } - if clen > maxlen { - rvlen = maxlen - truncated = true - } else { - rvlen = clen - } - return - // if clen <= 0 { - // rvlen = 0 - // } else if maxlen > 0 && clen > maxlen { - // rvlen = maxlen - // truncated = true - // } else { - // rvlen = clen - // } - // return -} - -// // implement overall decReader wrapping both, for possible use inline: -// type decReaderT struct { -// bytes bool -// rb *bytesDecReader -// ri *ioDecReader -// } -// -// // implement *Decoder as a decReader. -// // Using decReaderT (defined just above) caused performance degradation -// // possibly because of constant copying the value, -// // and some value->interface conversion causing allocation. -// func (d *Decoder) unreadn1() { -// if d.bytes { -// d.rb.unreadn1() -// } else { -// d.ri.unreadn1() -// } -// } -// ... for other methods of decReader. -// Testing showed that performance improvement was negligible. diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go deleted file mode 100644 index 99af6fa55..000000000 --- a/vendor/github.com/ugorji/go/codec/encode.go +++ /dev/null @@ -1,1405 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "encoding" - "fmt" - "io" - "reflect" - "sort" - "sync" -) - -const ( - defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 -) - -// AsSymbolFlag defines what should be encoded as symbols. -type AsSymbolFlag uint8 - -const ( - // AsSymbolDefault is default. - // Currently, this means only encode struct field names as symbols. - // The default is subject to change. - AsSymbolDefault AsSymbolFlag = iota - - // AsSymbolAll means encode anything which could be a symbol as a symbol. - AsSymbolAll = 0xfe - - // AsSymbolNone means do not encode anything as a symbol. - AsSymbolNone = 1 << iota - - // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. - AsSymbolMapStringKeysFlag - - // AsSymbolStructFieldName means encode struct field names as symbols. - AsSymbolStructFieldNameFlag -) - -// encWriter abstracts writing to a byte array or to an io.Writer. 
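The decInferLen helper at the end of decode.go above caps speculative allocations when a stream claims a very large collection length. Below is a standalone transcription of that logic with a few worked values; it is illustrative only, since the real helper is unexported inside the codec package.

    // Illustrative copy of the decInferLen logic above.
    func inferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
        if clen <= 0 {
            return 0, false
        }
        if maxlen <= 0 {
            // cap memory use at roughly 256KB, with a floor of 4K elements
            if unit < 256/4 {
                maxlen = 256 * 1024 / unit
            } else {
                maxlen = 4 * 1024
            }
        }
        if clen > maxlen {
            return maxlen, true
        }
        return clen, false
    }

    // inferLen(1000000, 0, 8)  == (32768, true)  // 256KB / 8-byte elements
    // inferLen(1000000, 0, 64) == (4096, true)   // floor of 4K elements
    // inferLen(100, 0, 8)      == (100, false)   // small lengths pass through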
-type encWriter interface { - writeb([]byte) - writestr(string) - writen1(byte) - writen2(byte, byte) - atEndOfEncode() -} - -// encDriver abstracts the actual codec (binc vs msgpack, etc) -type encDriver interface { - IsBuiltinType(rt uintptr) bool - EncodeBuiltin(rt uintptr, v interface{}) - EncodeNil() - EncodeInt(i int64) - EncodeUint(i uint64) - EncodeBool(b bool) - EncodeFloat32(f float32) - EncodeFloat64(f float64) - // encodeExtPreamble(xtag byte, length int) - EncodeRawExt(re *RawExt, e *Encoder) - EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder) - EncodeArrayStart(length int) - EncodeMapStart(length int) - EncodeString(c charEncoding, v string) - EncodeSymbol(v string) - EncodeStringBytes(c charEncoding, v []byte) - //TODO - //encBignum(f *big.Int) - //encStringRunes(c charEncoding, v []rune) - - reset() -} - -type encDriverAsis interface { - EncodeAsis(v []byte) -} - -type encNoSeparator struct{} - -func (_ encNoSeparator) EncodeEnd() {} - -type ioEncWriterWriter interface { - WriteByte(c byte) error - WriteString(s string) (n int, err error) - Write(p []byte) (n int, err error) -} - -type ioEncStringWriter interface { - WriteString(s string) (n int, err error) -} - -type EncodeOptions struct { - // Encode a struct as an array, and not as a map - StructToArray bool - - // Canonical representation means that encoding a value will always result in the same - // sequence of bytes. - // - // This only affects maps, as the iteration order for maps is random. - // - // The implementation MAY use the natural sort order for the map keys if possible: - // - // - If there is a natural sort order (ie for number, bool, string or []byte keys), - // then the map keys are first sorted in natural order and then written - // with corresponding map values to the strema. - // - If there is no natural sort order, then the map keys will first be - // encoded into []byte, and then sorted, - // before writing the sorted keys and the corresponding map values to the stream. - // - Canonical bool - - // AsSymbols defines what should be encoded as symbols. - // - // Encoding as symbols can reduce the encoded size significantly. - // - // However, during decoding, each string to be encoded as a symbol must - // be checked to see if it has been seen before. Consequently, encoding time - // will increase if using symbols, because string comparisons has a clear cost. 
- // - // Sample values: - // AsSymbolNone - // AsSymbolAll - // AsSymbolMapStringKeys - // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag - AsSymbols AsSymbolFlag -} - -// --------------------------------------------- - -type simpleIoEncWriterWriter struct { - w io.Writer - bw io.ByteWriter - sw ioEncStringWriter -} - -func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { - if o.bw != nil { - return o.bw.WriteByte(c) - } - _, err = o.w.Write([]byte{c}) - return -} - -func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) { - if o.sw != nil { - return o.sw.WriteString(s) - } - // return o.w.Write([]byte(s)) - return o.w.Write(bytesView(s)) -} - -func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) { - return o.w.Write(p) -} - -// ---------------------------------------- - -// ioEncWriter implements encWriter and can write to an io.Writer implementation -type ioEncWriter struct { - w ioEncWriterWriter - s simpleIoEncWriterWriter - // x [8]byte // temp byte array re-used internally for efficiency -} - -func (z *ioEncWriter) writeb(bs []byte) { - if len(bs) == 0 { - return - } - n, err := z.w.Write(bs) - if err != nil { - panic(err) - } - if n != len(bs) { - panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n)) - } -} - -func (z *ioEncWriter) writestr(s string) { - n, err := z.w.WriteString(s) - if err != nil { - panic(err) - } - if n != len(s) { - panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n)) - } -} - -func (z *ioEncWriter) writen1(b byte) { - if err := z.w.WriteByte(b); err != nil { - panic(err) - } -} - -func (z *ioEncWriter) writen2(b1 byte, b2 byte) { - z.writen1(b1) - z.writen1(b2) -} - -func (z *ioEncWriter) atEndOfEncode() {} - -// ---------------------------------------- - -// bytesEncWriter implements encWriter and can write to an byte slice. -// It is used by Marshal function. -type bytesEncWriter struct { - b []byte - c int // cursor - out *[]byte // write out on atEndOfEncode -} - -func (z *bytesEncWriter) writeb(s []byte) { - if len(s) > 0 { - c := z.grow(len(s)) - copy(z.b[c:], s) - } -} - -func (z *bytesEncWriter) writestr(s string) { - if len(s) > 0 { - c := z.grow(len(s)) - copy(z.b[c:], s) - } -} - -func (z *bytesEncWriter) writen1(b1 byte) { - c := z.grow(1) - z.b[c] = b1 -} - -func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { - c := z.grow(2) - z.b[c] = b1 - z.b[c+1] = b2 -} - -func (z *bytesEncWriter) atEndOfEncode() { - *(z.out) = z.b[:z.c] -} - -func (z *bytesEncWriter) grow(n int) (oldcursor int) { - oldcursor = z.c - z.c = oldcursor + n - if z.c > len(z.b) { - if z.c > cap(z.b) { - // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls. 
- // bytes.Buffer model (2*cap + n): much better - // bs := make([]byte, 2*cap(z.b)+n) - bs := make([]byte, growCap(cap(z.b), 1, n)) - copy(bs, z.b[:oldcursor]) - z.b = bs - } else { - z.b = z.b[:cap(z.b)] - } - } - return -} - -// --------------------------------------------- - -type encFnInfo struct { - e *Encoder - ti *typeInfo - xfFn Ext - xfTag uint64 - seq seqType -} - -func (f *encFnInfo) builtin(rv reflect.Value) { - f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface()) -} - -func (f *encFnInfo) rawExt(rv reflect.Value) { - // rev := rv.Interface().(RawExt) - // f.e.e.EncodeRawExt(&rev, f.e) - var re *RawExt - if rv.CanAddr() { - re = rv.Addr().Interface().(*RawExt) - } else { - rev := rv.Interface().(RawExt) - re = &rev - } - f.e.e.EncodeRawExt(re, f.e) -} - -func (f *encFnInfo) ext(rv reflect.Value) { - // if this is a struct|array and it was addressable, then pass the address directly (not the value) - if k := rv.Kind(); (k == reflect.Struct || k == reflect.Array) && rv.CanAddr() { - rv = rv.Addr() - } - f.e.e.EncodeExt(rv.Interface(), f.xfTag, f.xfFn, f.e) -} - -func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v interface{}, proceed bool) { - if indir == 0 { - v = rv.Interface() - } else if indir == -1 { - // If a non-pointer was passed to Encode(), then that value is not addressable. - // Take addr if addresable, else copy value to an addressable value. - if rv.CanAddr() { - v = rv.Addr().Interface() - } else { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - // fmt.Printf("rv.Type: %v, rv2.Type: %v, v: %v\n", rv.Type(), rv2.Type(), v) - } - } else { - for j := int8(0); j < indir; j++ { - if rv.IsNil() { - f.e.e.EncodeNil() - return - } - rv = rv.Elem() - } - v = rv.Interface() - } - return v, true -} - -func (f *encFnInfo) selferMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.csIndir); proceed { - v.(Selfer).CodecEncodeSelf(f.e) - } -} - -func (f *encFnInfo) binaryMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.bmIndir); proceed { - bs, fnerr := v.(encoding.BinaryMarshaler).MarshalBinary() - f.e.marshal(bs, fnerr, false, c_RAW) - } -} - -func (f *encFnInfo) textMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.tmIndir); proceed { - // debugf(">>>> encoding.TextMarshaler: %T", rv.Interface()) - bs, fnerr := v.(encoding.TextMarshaler).MarshalText() - f.e.marshal(bs, fnerr, false, c_UTF8) - } -} - -func (f *encFnInfo) jsonMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.jmIndir); proceed { - bs, fnerr := v.(jsonMarshaler).MarshalJSON() - f.e.marshal(bs, fnerr, true, c_UTF8) - } -} - -func (f *encFnInfo) kBool(rv reflect.Value) { - f.e.e.EncodeBool(rv.Bool()) -} - -func (f *encFnInfo) kString(rv reflect.Value) { - f.e.e.EncodeString(c_UTF8, rv.String()) -} - -func (f *encFnInfo) kFloat64(rv reflect.Value) { - f.e.e.EncodeFloat64(rv.Float()) -} - -func (f *encFnInfo) kFloat32(rv reflect.Value) { - f.e.e.EncodeFloat32(float32(rv.Float())) -} - -func (f *encFnInfo) kInt(rv reflect.Value) { - f.e.e.EncodeInt(rv.Int()) -} - -func (f *encFnInfo) kUint(rv reflect.Value) { - f.e.e.EncodeUint(rv.Uint()) -} - -func (f *encFnInfo) kInvalid(rv reflect.Value) { - f.e.e.EncodeNil() -} - -func (f *encFnInfo) kErr(rv reflect.Value) { - f.e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) -} - -func (f *encFnInfo) kSlice(rv reflect.Value) { - ti := f.ti - // array may be non-addressable, so we 
have to manage with care - // (don't call rv.Bytes, rv.Slice, etc). - // E.g. type struct S{B [2]byte}; - // Encode(S{}) will bomb on "panic: slice of unaddressable array". - e := f.e - if f.seq != seqTypeArray { - if rv.IsNil() { - e.e.EncodeNil() - return - } - // If in this method, then there was no extension function defined. - // So it's okay to treat as []byte. - if ti.rtid == uint8SliceTypId { - e.e.EncodeStringBytes(c_RAW, rv.Bytes()) - return - } - } - cr := e.cr - rtelem := ti.rt.Elem() - l := rv.Len() - if ti.rtid == uint8SliceTypId || rtelem.Kind() == reflect.Uint8 { - switch f.seq { - case seqTypeArray: - // if l == 0 { e.e.encodeStringBytes(c_RAW, nil) } else - if rv.CanAddr() { - e.e.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes()) - } else { - var bs []byte - if l <= cap(e.b) { - bs = e.b[:l] - } else { - bs = make([]byte, l) - } - reflect.Copy(reflect.ValueOf(bs), rv) - // TODO: Test that reflect.Copy works instead of manual one-by-one - // for i := 0; i < l; i++ { - // bs[i] = byte(rv.Index(i).Uint()) - // } - e.e.EncodeStringBytes(c_RAW, bs) - } - case seqTypeSlice: - e.e.EncodeStringBytes(c_RAW, rv.Bytes()) - case seqTypeChan: - bs := e.b[:0] - // do not use range, so that the number of elements encoded - // does not change, and encoding does not hang waiting on someone to close chan. - // for b := range rv.Interface().(<-chan byte) { - // bs = append(bs, b) - // } - ch := rv.Interface().(<-chan byte) - for i := 0; i < l; i++ { - bs = append(bs, <-ch) - } - e.e.EncodeStringBytes(c_RAW, bs) - } - return - } - - if ti.mbs { - if l%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", l) - return - } - e.e.EncodeMapStart(l / 2) - } else { - e.e.EncodeArrayStart(l) - } - - if l > 0 { - for rtelem.Kind() == reflect.Ptr { - rtelem = rtelem.Elem() - } - // if kind is reflect.Interface, do not pre-determine the - // encoding type, because preEncodeValue may break it down to - // a concrete type and kInterface will bomb. - var fn *encFn - if rtelem.Kind() != reflect.Interface { - rtelemid := reflect.ValueOf(rtelem).Pointer() - fn = e.getEncFn(rtelemid, rtelem, true, true) - } - // TODO: Consider perf implication of encoding odd index values as symbols if type is string - for j := 0; j < l; j++ { - if cr != nil { - if ti.mbs { - if l%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } else { - cr.sendContainerState(containerArrayElem) - } - } - if f.seq == seqTypeChan { - if rv2, ok2 := rv.Recv(); ok2 { - e.encodeValue(rv2, fn) - } else { - e.encode(nil) // WE HAVE TO DO SOMETHING, so nil if nothing received. - } - } else { - e.encodeValue(rv.Index(j), fn) - } - } - } - - if cr != nil { - if ti.mbs { - cr.sendContainerState(containerMapEnd) - } else { - cr.sendContainerState(containerArrayEnd) - } - } -} - -func (f *encFnInfo) kStruct(rv reflect.Value) { - fti := f.ti - e := f.e - cr := e.cr - tisfi := fti.sfip - toMap := !(fti.toArray || e.h.StructToArray) - newlen := len(fti.sfi) - - // Use sync.Pool to reduce allocating slices unnecessarily. - // The cost of the occasional locking is less than the cost of new allocation. - pool, poolv, fkvs := encStructPoolGet(newlen) - - // if toMap, use the sorted array. 
If toArray, use unsorted array (to match sequence in struct) - if toMap { - tisfi = fti.sfi - } - newlen = 0 - var kv stringRv - for _, si := range tisfi { - kv.r = si.field(rv, false) - // if si.i != -1 { - // rvals[newlen] = rv.Field(int(si.i)) - // } else { - // rvals[newlen] = rv.FieldByIndex(si.is) - // } - if toMap { - if si.omitEmpty && isEmptyValue(kv.r) { - continue - } - kv.v = si.encName - } else { - // use the zero value. - // if a reference or struct, set to nil (so you do not output too much) - if si.omitEmpty && isEmptyValue(kv.r) { - switch kv.r.Kind() { - case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, - reflect.Map, reflect.Slice: - kv.r = reflect.Value{} //encode as nil - } - } - } - fkvs[newlen] = kv - newlen++ - } - - // debugf(">>>> kStruct: newlen: %v", newlen) - // sep := !e.be - ee := e.e //don't dereference everytime - - if toMap { - ee.EncodeMapStart(newlen) - // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - for j := 0; j < newlen; j++ { - kv = fkvs[j] - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(kv.v) - } else { - ee.EncodeString(c_UTF8, kv.v) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(kv.r, nil) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - } else { - ee.EncodeArrayStart(newlen) - for j := 0; j < newlen; j++ { - kv = fkvs[j] - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - e.encodeValue(kv.r, nil) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } - } - - // do not use defer. Instead, use explicit pool return at end of function. - // defer has a cost we are trying to avoid. - // If there is a panic and these slices are not returned, it is ok. - if pool != nil { - pool.Put(poolv) - } -} - -// func (f *encFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? encode kPtr called - shouldn't get called") -// if rv.IsNil() { -// f.e.e.encodeNil() -// return -// } -// f.e.encodeValue(rv.Elem()) -// } - -func (f *encFnInfo) kInterface(rv reflect.Value) { - if rv.IsNil() { - f.e.e.EncodeNil() - return - } - f.e.encodeValue(rv.Elem(), nil) -} - -func (f *encFnInfo) kMap(rv reflect.Value) { - ee := f.e.e - if rv.IsNil() { - ee.EncodeNil() - return - } - - l := rv.Len() - ee.EncodeMapStart(l) - e := f.e - cr := e.cr - if l == 0 { - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return - } - var asSymbols bool - // determine the underlying key and val encFn's for the map. - // This eliminates some work which is done for each loop iteration i.e. - // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn. - // - // However, if kind is reflect.Interface, do not pre-determine the - // encoding type, because preEncodeValue may break it down to - // a concrete type and kInterface will bomb. 
- var keyFn, valFn *encFn - ti := f.ti - rtkey := ti.rt.Key() - rtval := ti.rt.Elem() - rtkeyid := reflect.ValueOf(rtkey).Pointer() - // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String - var keyTypeIsString = rtkeyid == stringTypId - if keyTypeIsString { - asSymbols = e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - } else { - for rtkey.Kind() == reflect.Ptr { - rtkey = rtkey.Elem() - } - if rtkey.Kind() != reflect.Interface { - rtkeyid = reflect.ValueOf(rtkey).Pointer() - keyFn = e.getEncFn(rtkeyid, rtkey, true, true) - } - } - for rtval.Kind() == reflect.Ptr { - rtval = rtval.Elem() - } - if rtval.Kind() != reflect.Interface { - rtvalid := reflect.ValueOf(rtval).Pointer() - valFn = e.getEncFn(rtvalid, rtval, true, true) - } - mks := rv.MapKeys() - // for j, lmks := 0, len(mks); j < lmks; j++ { - - if e.h.Canonical { - e.kMapCanonical(rtkeyid, rtkey, rv, mks, valFn, asSymbols) - } else { - for j := range mks { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if keyTypeIsString { - if asSymbols { - ee.EncodeSymbol(mks[j].String()) - } else { - ee.EncodeString(c_UTF8, mks[j].String()) - } - } else { - e.encodeValue(mks[j], keyFn) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mks[j]), valFn) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *encFn, asSymbols bool) { - ee := e.e - cr := e.cr - // we previously did out-of-band if an extension was registered. - // This is not necessary, as the natural kind is sufficient for ordering. - - if rtkeyid == uint8SliceTypId { - mksv := make([]bytesRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Bytes() - } - sort.Sort(bytesRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeStringBytes(c_RAW, mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - } else { - switch rtkey.Kind() { - case reflect.Bool: - mksv := make([]boolRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Bool() - } - sort.Sort(boolRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - case reflect.String: - mksv := make([]stringRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.String() - } - sort.Sort(stringRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(mksv[i].v) - } else { - ee.EncodeString(c_UTF8, mksv[i].v) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: - mksv := make([]uintRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Uint() - } - sort.Sort(uintRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - mksv := make([]intRv, 
len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Int() - } - sort.Sort(intRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - case reflect.Float32: - mksv := make([]floatRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Float() - } - sort.Sort(floatRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(mksv[i].v)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - case reflect.Float64: - mksv := make([]floatRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Float() - } - sort.Sort(floatRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - default: - // out-of-band - // first encode each key to a []byte first, then sort them, then record - var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - mksbv := make([]bytesRv, len(mks)) - for i, k := range mks { - v := &mksbv[i] - l := len(mksv) - e2.MustEncode(k) - v.r = k - v.v = mksv[l:] - // fmt.Printf(">>>>> %s\n", mksv[l:]) - } - sort.Sort(bytesRvSlice(mksbv)) - for j := range mksbv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(mksbv[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksbv[j].r), valFn) - } - } - } -} - -// -------------------------------------------------- - -// encFn encapsulates the captured variables and the encode function. -// This way, we only do some calculations one times, and pass to the -// code block that should be called (encapsulated in a function) -// instead of executing the checks every time. -type encFn struct { - i encFnInfo - f func(*encFnInfo, reflect.Value) -} - -// -------------------------------------------------- - -type encRtidFn struct { - rtid uintptr - fn encFn -} - -// An Encoder writes an object to an output stream in the codec format. -type Encoder struct { - // hopefully, reduce derefencing cost by laying the encWriter inside the Encoder - e encDriver - // NOTE: Encoder shouldn't call it's write methods, - // as the handler MAY need to do some coordination. - w encWriter - s []encRtidFn - be bool // is binary encoding - js bool // is json handle - - wi ioEncWriter - wb bytesEncWriter - - h *BasicHandle - hh Handle - - cr containerStateRecv - as encDriverAsis - - f map[uintptr]*encFn - b [scratchByteArrayLen]byte -} - -// NewEncoder returns an Encoder for encoding into an io.Writer. -// -// For efficiency, Users are encouraged to pass in a memory buffered writer -// (eg bufio.Writer, bytes.Buffer). -func NewEncoder(w io.Writer, h Handle) *Encoder { - e := newEncoder(h) - e.Reset(w) - return e -} - -// NewEncoderBytes returns an encoder for encoding directly and efficiently -// into a byte slice, using zero-copying to temporary slices. -// -// It will potentially replace the output byte slice pointed to. -// After encoding, the out parameter contains the encoded contents. 
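A minimal sketch of the byte-slice encoding path documented here, combined with the Canonical option from EncodeOptions above. It assumes the usual handle layout in which EncodeOptions is embedded in the Handle, so Canonical can be set directly on it; the JsonHandle is chosen only for readable output.

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    func main() {
        var h codec.JsonHandle
        h.Canonical = true // sort map keys so the same map always encodes to the same bytes

        m := map[string]int{"b": 2, "a": 1, "c": 3}

        var out []byte
        if err := codec.NewEncoderBytes(&out, &h).Encode(m); err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // key order is stable (sorted) because Canonical is set
    }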
-func NewEncoderBytes(out *[]byte, h Handle) *Encoder { - e := newEncoder(h) - e.ResetBytes(out) - return e -} - -func newEncoder(h Handle) *Encoder { - e := &Encoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()} - _, e.js = h.(*JsonHandle) - e.e = h.newEncDriver(e) - e.as, _ = e.e.(encDriverAsis) - e.cr, _ = e.e.(containerStateRecv) - return e -} - -// Reset the Encoder with a new output stream. -// -// This accomodates using the state of the Encoder, -// where it has "cached" information about sub-engines. -func (e *Encoder) Reset(w io.Writer) { - ww, ok := w.(ioEncWriterWriter) - if ok { - e.wi.w = ww - } else { - sww := &e.wi.s - sww.w = w - sww.bw, _ = w.(io.ByteWriter) - sww.sw, _ = w.(ioEncStringWriter) - e.wi.w = sww - //ww = bufio.NewWriterSize(w, defEncByteBufSize) - } - e.w = &e.wi - e.e.reset() -} - -func (e *Encoder) ResetBytes(out *[]byte) { - in := *out - if in == nil { - in = make([]byte, defEncByteBufSize) - } - e.wb.b, e.wb.out, e.wb.c = in, out, 0 - e.w = &e.wb - e.e.reset() -} - -// func (e *Encoder) sendContainerState(c containerState) { -// if e.cr != nil { -// e.cr.sendContainerState(c) -// } -// } - -// Encode writes an object into a stream. -// -// Encoding can be configured via the struct tag for the fields. -// The "codec" key in struct field's tag value is the key name, -// followed by an optional comma and options. -// Note that the "json" key is used in the absence of the "codec" key. -// -// To set an option on all fields (e.g. omitempty on all fields), you -// can create a field called _struct, and set flags on it. -// -// Struct values "usually" encode as maps. Each exported struct field is encoded unless: -// - the field's tag is "-", OR -// - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option. -// -// When encoding as a map, the first string in the tag (before the comma) -// is the map key string to use when encoding. -// -// However, struct values may encode as arrays. This happens when: -// - StructToArray Encode option is set, OR -// - the tag on the _struct field sets the "toarray" option -// -// Values with types that implement MapBySlice are encoded as stream maps. -// -// The empty values (for omitempty option) are false, 0, any nil pointer -// or interface value, and any array, slice, map, or string of length zero. -// -// Anonymous fields are encoded inline except: -// - the struct tag specifies a replacement name (first value) -// - the field is of an interface type -// -// Examples: -// -// // NOTE: 'json:' can be used as struct tag key, in place 'codec:' below. -// type MyStruct struct { -// _struct bool `codec:",omitempty"` //set omitempty for every field -// Field1 string `codec:"-"` //skip this field -// Field2 int `codec:"myName"` //Use key "myName" in encode stream -// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. -// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. -// io.Reader //use key "Reader". -// MyStruct `codec:"my1" //use key "my1". -// MyStruct //inline it -// ... -// } -// -// type MyStruct struct { -// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field -// //and encode struct as an array -// } -// -// The mode of encoding is based on the type of the value. 
When a value is seen: -// - If a Selfer, call its CodecEncodeSelf method -// - If an extension is registered for it, call that extension function -// - If it implements encoding.(Binary|Text|JSON)Marshaler, call its Marshal(Binary|Text|JSON) method -// - Else encode it based on its reflect.Kind -// -// Note that struct field names and keys in map[string]XXX will be treated as symbols. -// Some formats support symbols (e.g. binc) and will properly encode the string -// only once in the stream, and use a tag to refer to it thereafter. -func (e *Encoder) Encode(v interface{}) (err error) { - defer panicToErr(&err) - e.encode(v) - e.w.atEndOfEncode() - return -} - -// MustEncode is like Encode, but panics if unable to Encode. -// This provides insight to the code location that triggered the error. -func (e *Encoder) MustEncode(v interface{}) { - e.encode(v) - e.w.atEndOfEncode() -} - -// comment out these (Must)Write methods. They were only put there to support cbor. -// However, users already have access to the streams, and can write directly. -// -// // Write allows users write to the Encoder stream directly. -// func (e *Encoder) Write(bs []byte) (err error) { -// defer panicToErr(&err) -// e.w.writeb(bs) -// return -// } -// // MustWrite is like write, but panics if unable to Write. -// func (e *Encoder) MustWrite(bs []byte) { -// e.w.writeb(bs) -// } - -func (e *Encoder) encode(iv interface{}) { - // if ics, ok := iv.(Selfer); ok { - // ics.CodecEncodeSelf(e) - // return - // } - - switch v := iv.(type) { - case nil: - e.e.EncodeNil() - case Selfer: - v.CodecEncodeSelf(e) - - case reflect.Value: - e.encodeValue(v, nil) - - case string: - e.e.EncodeString(c_UTF8, v) - case bool: - e.e.EncodeBool(v) - case int: - e.e.EncodeInt(int64(v)) - case int8: - e.e.EncodeInt(int64(v)) - case int16: - e.e.EncodeInt(int64(v)) - case int32: - e.e.EncodeInt(int64(v)) - case int64: - e.e.EncodeInt(v) - case uint: - e.e.EncodeUint(uint64(v)) - case uint8: - e.e.EncodeUint(uint64(v)) - case uint16: - e.e.EncodeUint(uint64(v)) - case uint32: - e.e.EncodeUint(uint64(v)) - case uint64: - e.e.EncodeUint(v) - case float32: - e.e.EncodeFloat32(v) - case float64: - e.e.EncodeFloat64(v) - - case []uint8: - e.e.EncodeStringBytes(c_RAW, v) - - case *string: - e.e.EncodeString(c_UTF8, *v) - case *bool: - e.e.EncodeBool(*v) - case *int: - e.e.EncodeInt(int64(*v)) - case *int8: - e.e.EncodeInt(int64(*v)) - case *int16: - e.e.EncodeInt(int64(*v)) - case *int32: - e.e.EncodeInt(int64(*v)) - case *int64: - e.e.EncodeInt(*v) - case *uint: - e.e.EncodeUint(uint64(*v)) - case *uint8: - e.e.EncodeUint(uint64(*v)) - case *uint16: - e.e.EncodeUint(uint64(*v)) - case *uint32: - e.e.EncodeUint(uint64(*v)) - case *uint64: - e.e.EncodeUint(*v) - case *float32: - e.e.EncodeFloat32(*v) - case *float64: - e.e.EncodeFloat64(*v) - - case *[]uint8: - e.e.EncodeStringBytes(c_RAW, *v) - - default: - const checkCodecSelfer1 = true // in case T is passed, where *T is a Selfer, still checkCodecSelfer - if !fastpathEncodeTypeSwitch(iv, e) { - e.encodeI(iv, false, checkCodecSelfer1) - } - } -} - -func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) { - if rv, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed { - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - fn := e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer) - fn.f(&fn.i, rv) - } -} - -func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, proceed bool) { - // use a goto statement instead of a recursive function for 
ptr/interface. -TOP: - switch rv.Kind() { - case reflect.Ptr, reflect.Interface: - if rv.IsNil() { - e.e.EncodeNil() - return - } - rv = rv.Elem() - goto TOP - case reflect.Slice, reflect.Map: - if rv.IsNil() { - e.e.EncodeNil() - return - } - case reflect.Invalid, reflect.Func: - e.e.EncodeNil() - return - } - - return rv, true -} - -func (e *Encoder) encodeValue(rv reflect.Value, fn *encFn) { - // if a valid fn is passed, it MUST BE for the dereferenced type of rv - if rv, proceed := e.preEncodeValue(rv); proceed { - if fn == nil { - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - fn = e.getEncFn(rtid, rt, true, true) - } - fn.f(&fn.i, rv) - } -} - -func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *encFn) { - // rtid := reflect.ValueOf(rt).Pointer() - var ok bool - if useMapForCodecCache { - fn, ok = e.f[rtid] - } else { - for i := range e.s { - v := &(e.s[i]) - if v.rtid == rtid { - fn, ok = &(v.fn), true - break - } - } - } - if ok { - return - } - - if useMapForCodecCache { - if e.f == nil { - e.f = make(map[uintptr]*encFn, initCollectionCap) - } - fn = new(encFn) - e.f[rtid] = fn - } else { - if e.s == nil { - e.s = make([]encRtidFn, 0, initCollectionCap) - } - e.s = append(e.s, encRtidFn{rtid: rtid}) - fn = &(e.s[len(e.s)-1]).fn - } - - ti := e.h.getTypeInfo(rtid, rt) - fi := &(fn.i) - fi.e = e - fi.ti = ti - - if checkCodecSelfer && ti.cs { - fn.f = (*encFnInfo).selferMarshal - } else if rtid == rawExtTypId { - fn.f = (*encFnInfo).rawExt - } else if e.e.IsBuiltinType(rtid) { - fn.f = (*encFnInfo).builtin - } else if xfFn := e.h.getExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext - fn.f = (*encFnInfo).ext - } else if supportMarshalInterfaces && e.be && ti.bm { - fn.f = (*encFnInfo).binaryMarshal - } else if supportMarshalInterfaces && !e.be && e.js && ti.jm { - //If JSON, we should check JSONMarshal before textMarshal - fn.f = (*encFnInfo).jsonMarshal - } else if supportMarshalInterfaces && !e.be && ti.tm { - fn.f = (*encFnInfo).textMarshal - } else { - rk := rt.Kind() - if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { - if rt.PkgPath() == "" { - if idx := fastpathAV.index(rtid); idx != -1 { - fn.f = fastpathAV[idx].encfn - } - } else { - ok = false - // use mapping for underlying type if there - var rtu reflect.Type - if rk == reflect.Map { - rtu = reflect.MapOf(rt.Key(), rt.Elem()) - } else { - rtu = reflect.SliceOf(rt.Elem()) - } - rtuid := reflect.ValueOf(rtu).Pointer() - if idx := fastpathAV.index(rtuid); idx != -1 { - xfnf := fastpathAV[idx].encfn - xrt := fastpathAV[idx].rt - fn.f = func(xf *encFnInfo, xrv reflect.Value) { - xfnf(xf, xrv.Convert(xrt)) - } - } - } - } - if fn.f == nil { - switch rk { - case reflect.Bool: - fn.f = (*encFnInfo).kBool - case reflect.String: - fn.f = (*encFnInfo).kString - case reflect.Float64: - fn.f = (*encFnInfo).kFloat64 - case reflect.Float32: - fn.f = (*encFnInfo).kFloat32 - case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: - fn.f = (*encFnInfo).kInt - case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uintptr: - fn.f = (*encFnInfo).kUint - case reflect.Invalid: - fn.f = (*encFnInfo).kInvalid - case reflect.Chan: - fi.seq = seqTypeChan - fn.f = (*encFnInfo).kSlice - case reflect.Slice: - fi.seq = seqTypeSlice - fn.f = (*encFnInfo).kSlice - case reflect.Array: - fi.seq = seqTypeArray - fn.f = (*encFnInfo).kSlice - case reflect.Struct: - fn.f = (*encFnInfo).kStruct - 
// case reflect.Ptr: - // fn.f = (*encFnInfo).kPtr - case reflect.Interface: - fn.f = (*encFnInfo).kInterface - case reflect.Map: - fn.f = (*encFnInfo).kMap - default: - fn.f = (*encFnInfo).kErr - } - } - } - - return -} - -func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) { - if fnerr != nil { - panic(fnerr) - } - if bs == nil { - e.e.EncodeNil() - } else if asis { - e.asis(bs) - } else { - e.e.EncodeStringBytes(c, bs) - } -} - -func (e *Encoder) asis(v []byte) { - if e.as == nil { - e.w.writeb(v) - } else { - e.as.EncodeAsis(v) - } -} - -func (e *Encoder) errorf(format string, params ...interface{}) { - err := fmt.Errorf(format, params...) - panic(err) -} - -// ---------------------------------------- - -const encStructPoolLen = 5 - -// encStructPool is an array of sync.Pool. -// Each element of the array pools one of encStructPool(8|16|32|64). -// It allows the re-use of slices up to 64 in length. -// A performance cost of encoding structs was collecting -// which values were empty and should be omitted. -// We needed slices of reflect.Value and string to collect them. -// This shared pool reduces the amount of unnecessary creation we do. -// The cost is that of locking sometimes, but sync.Pool is efficient -// enough to reduce thread contention. -var encStructPool [encStructPoolLen]sync.Pool - -func init() { - encStructPool[0].New = func() interface{} { return new([8]stringRv) } - encStructPool[1].New = func() interface{} { return new([16]stringRv) } - encStructPool[2].New = func() interface{} { return new([32]stringRv) } - encStructPool[3].New = func() interface{} { return new([64]stringRv) } - encStructPool[4].New = func() interface{} { return new([128]stringRv) } -} - -func encStructPoolGet(newlen int) (p *sync.Pool, v interface{}, s []stringRv) { - // if encStructPoolLen != 5 { // constant chec, so removed at build time. - // panic(errors.New("encStructPoolLen must be equal to 4")) // defensive, in case it is changed - // } - // idxpool := newlen / 8 - - // if pool == nil { - // fkvs = make([]stringRv, newlen) - // } else { - // poolv = pool.Get() - // switch vv := poolv.(type) { - // case *[8]stringRv: - // fkvs = vv[:newlen] - // case *[16]stringRv: - // fkvs = vv[:newlen] - // case *[32]stringRv: - // fkvs = vv[:newlen] - // case *[64]stringRv: - // fkvs = vv[:newlen] - // case *[128]stringRv: - // fkvs = vv[:newlen] - // } - // } - - if newlen <= 8 { - p = &encStructPool[0] - v = p.Get() - s = v.(*[8]stringRv)[:newlen] - } else if newlen <= 16 { - p = &encStructPool[1] - v = p.Get() - s = v.(*[16]stringRv)[:newlen] - } else if newlen <= 32 { - p = &encStructPool[2] - v = p.Get() - s = v.(*[32]stringRv)[:newlen] - } else if newlen <= 64 { - p = &encStructPool[3] - v = p.Get() - s = v.(*[64]stringRv)[:newlen] - } else if newlen <= 128 { - p = &encStructPool[4] - v = p.Get() - s = v.(*[128]stringRv)[:newlen] - } else { - s = make([]stringRv, newlen) - } - return -} - -// ---------------------------------------- - -// func encErr(format string, params ...interface{}) { -// doPanic(msgTagEnc, format, params...) -// } diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go deleted file mode 100644 index d968a500f..000000000 --- a/vendor/github.com/ugorji/go/codec/fast-path.generated.go +++ /dev/null @@ -1,38900 +0,0 @@ -// +build !notfastpath - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. 
-// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl -// ************************************************************ - -package codec - -// Fast path functions try to create a fast path encode or decode implementation -// for common maps and slices. -// -// We define the functions and register then in this single file -// so as not to pollute the encode.go and decode.go, and create a dependency in there. -// This file can be omitted without causing a build failure. -// -// The advantage of fast paths is: -// - Many calls bypass reflection altogether -// -// Currently support -// - slice of all builtin types, -// - map of all builtin types to string or interface value -// - symetrical maps of all builtin types (e.g. str-str, uint8-uint8) -// This should provide adequate "typical" implementations. -// -// Note that fast track decode functions must handle values for which an address cannot be obtained. -// For example: -// m2 := map[string]int{} -// p2 := []interface{}{m2} -// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. -// - -import ( - "reflect" - "sort" -) - -const fastpathCheckNilFalse = false // for reflect -const fastpathCheckNilTrue = true // for type switch - -type fastpathT struct{} - -var fastpathTV fastpathT - -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*encFnInfo, reflect.Value) - decfn func(*decFnInfo, reflect.Value) -} - -type fastpathA [271]fastpathE - -func (x *fastpathA) index(rtid uintptr) int { - // use binary search to grab the index (adapted from sort/search.go) - h, i, j := 0, 0, 271 // len(x) - for i < j { - h = i + (j-i)/2 - if x[h].rtid < rtid { - i = h + 1 - } else { - j = h - } - } - if i < 271 && x[i].rtid == rtid { - return i - } - return -1 -} - -type fastpathAslice []fastpathE - -func (x fastpathAslice) Len() int { return len(x) } -func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } -func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -var fastpathAV fastpathA - -// due to possible initialization loop error, make fastpath in an init() -func init() { - if !fastpathEnabled { - return - } - i := 0 - fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { - xrt := reflect.TypeOf(v) - xptr := reflect.ValueOf(xrt).Pointer() - fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} - i++ - return - } - - fn([]interface{}(nil), (*encFnInfo).fastpathEncSliceIntfR, (*decFnInfo).fastpathDecSliceIntfR) - fn([]string(nil), (*encFnInfo).fastpathEncSliceStringR, (*decFnInfo).fastpathDecSliceStringR) - fn([]float32(nil), (*encFnInfo).fastpathEncSliceFloat32R, (*decFnInfo).fastpathDecSliceFloat32R) - fn([]float64(nil), (*encFnInfo).fastpathEncSliceFloat64R, (*decFnInfo).fastpathDecSliceFloat64R) - fn([]uint(nil), (*encFnInfo).fastpathEncSliceUintR, (*decFnInfo).fastpathDecSliceUintR) - fn([]uint16(nil), (*encFnInfo).fastpathEncSliceUint16R, (*decFnInfo).fastpathDecSliceUint16R) - fn([]uint32(nil), (*encFnInfo).fastpathEncSliceUint32R, (*decFnInfo).fastpathDecSliceUint32R) - fn([]uint64(nil), (*encFnInfo).fastpathEncSliceUint64R, (*decFnInfo).fastpathDecSliceUint64R) - fn([]uintptr(nil), (*encFnInfo).fastpathEncSliceUintptrR, (*decFnInfo).fastpathDecSliceUintptrR) - fn([]int(nil), (*encFnInfo).fastpathEncSliceIntR, (*decFnInfo).fastpathDecSliceIntR) - 
fn([]int8(nil), (*encFnInfo).fastpathEncSliceInt8R, (*decFnInfo).fastpathDecSliceInt8R) - fn([]int16(nil), (*encFnInfo).fastpathEncSliceInt16R, (*decFnInfo).fastpathDecSliceInt16R) - fn([]int32(nil), (*encFnInfo).fastpathEncSliceInt32R, (*decFnInfo).fastpathDecSliceInt32R) - fn([]int64(nil), (*encFnInfo).fastpathEncSliceInt64R, (*decFnInfo).fastpathDecSliceInt64R) - fn([]bool(nil), (*encFnInfo).fastpathEncSliceBoolR, (*decFnInfo).fastpathDecSliceBoolR) - - fn(map[interface{}]interface{}(nil), (*encFnInfo).fastpathEncMapIntfIntfR, (*decFnInfo).fastpathDecMapIntfIntfR) - fn(map[interface{}]string(nil), (*encFnInfo).fastpathEncMapIntfStringR, (*decFnInfo).fastpathDecMapIntfStringR) - fn(map[interface{}]uint(nil), (*encFnInfo).fastpathEncMapIntfUintR, (*decFnInfo).fastpathDecMapIntfUintR) - fn(map[interface{}]uint8(nil), (*encFnInfo).fastpathEncMapIntfUint8R, (*decFnInfo).fastpathDecMapIntfUint8R) - fn(map[interface{}]uint16(nil), (*encFnInfo).fastpathEncMapIntfUint16R, (*decFnInfo).fastpathDecMapIntfUint16R) - fn(map[interface{}]uint32(nil), (*encFnInfo).fastpathEncMapIntfUint32R, (*decFnInfo).fastpathDecMapIntfUint32R) - fn(map[interface{}]uint64(nil), (*encFnInfo).fastpathEncMapIntfUint64R, (*decFnInfo).fastpathDecMapIntfUint64R) - fn(map[interface{}]uintptr(nil), (*encFnInfo).fastpathEncMapIntfUintptrR, (*decFnInfo).fastpathDecMapIntfUintptrR) - fn(map[interface{}]int(nil), (*encFnInfo).fastpathEncMapIntfIntR, (*decFnInfo).fastpathDecMapIntfIntR) - fn(map[interface{}]int8(nil), (*encFnInfo).fastpathEncMapIntfInt8R, (*decFnInfo).fastpathDecMapIntfInt8R) - fn(map[interface{}]int16(nil), (*encFnInfo).fastpathEncMapIntfInt16R, (*decFnInfo).fastpathDecMapIntfInt16R) - fn(map[interface{}]int32(nil), (*encFnInfo).fastpathEncMapIntfInt32R, (*decFnInfo).fastpathDecMapIntfInt32R) - fn(map[interface{}]int64(nil), (*encFnInfo).fastpathEncMapIntfInt64R, (*decFnInfo).fastpathDecMapIntfInt64R) - fn(map[interface{}]float32(nil), (*encFnInfo).fastpathEncMapIntfFloat32R, (*decFnInfo).fastpathDecMapIntfFloat32R) - fn(map[interface{}]float64(nil), (*encFnInfo).fastpathEncMapIntfFloat64R, (*decFnInfo).fastpathDecMapIntfFloat64R) - fn(map[interface{}]bool(nil), (*encFnInfo).fastpathEncMapIntfBoolR, (*decFnInfo).fastpathDecMapIntfBoolR) - fn(map[string]interface{}(nil), (*encFnInfo).fastpathEncMapStringIntfR, (*decFnInfo).fastpathDecMapStringIntfR) - fn(map[string]string(nil), (*encFnInfo).fastpathEncMapStringStringR, (*decFnInfo).fastpathDecMapStringStringR) - fn(map[string]uint(nil), (*encFnInfo).fastpathEncMapStringUintR, (*decFnInfo).fastpathDecMapStringUintR) - fn(map[string]uint8(nil), (*encFnInfo).fastpathEncMapStringUint8R, (*decFnInfo).fastpathDecMapStringUint8R) - fn(map[string]uint16(nil), (*encFnInfo).fastpathEncMapStringUint16R, (*decFnInfo).fastpathDecMapStringUint16R) - fn(map[string]uint32(nil), (*encFnInfo).fastpathEncMapStringUint32R, (*decFnInfo).fastpathDecMapStringUint32R) - fn(map[string]uint64(nil), (*encFnInfo).fastpathEncMapStringUint64R, (*decFnInfo).fastpathDecMapStringUint64R) - fn(map[string]uintptr(nil), (*encFnInfo).fastpathEncMapStringUintptrR, (*decFnInfo).fastpathDecMapStringUintptrR) - fn(map[string]int(nil), (*encFnInfo).fastpathEncMapStringIntR, (*decFnInfo).fastpathDecMapStringIntR) - fn(map[string]int8(nil), (*encFnInfo).fastpathEncMapStringInt8R, (*decFnInfo).fastpathDecMapStringInt8R) - fn(map[string]int16(nil), (*encFnInfo).fastpathEncMapStringInt16R, (*decFnInfo).fastpathDecMapStringInt16R) - fn(map[string]int32(nil), (*encFnInfo).fastpathEncMapStringInt32R, 
(*decFnInfo).fastpathDecMapStringInt32R) - fn(map[string]int64(nil), (*encFnInfo).fastpathEncMapStringInt64R, (*decFnInfo).fastpathDecMapStringInt64R) - fn(map[string]float32(nil), (*encFnInfo).fastpathEncMapStringFloat32R, (*decFnInfo).fastpathDecMapStringFloat32R) - fn(map[string]float64(nil), (*encFnInfo).fastpathEncMapStringFloat64R, (*decFnInfo).fastpathDecMapStringFloat64R) - fn(map[string]bool(nil), (*encFnInfo).fastpathEncMapStringBoolR, (*decFnInfo).fastpathDecMapStringBoolR) - fn(map[float32]interface{}(nil), (*encFnInfo).fastpathEncMapFloat32IntfR, (*decFnInfo).fastpathDecMapFloat32IntfR) - fn(map[float32]string(nil), (*encFnInfo).fastpathEncMapFloat32StringR, (*decFnInfo).fastpathDecMapFloat32StringR) - fn(map[float32]uint(nil), (*encFnInfo).fastpathEncMapFloat32UintR, (*decFnInfo).fastpathDecMapFloat32UintR) - fn(map[float32]uint8(nil), (*encFnInfo).fastpathEncMapFloat32Uint8R, (*decFnInfo).fastpathDecMapFloat32Uint8R) - fn(map[float32]uint16(nil), (*encFnInfo).fastpathEncMapFloat32Uint16R, (*decFnInfo).fastpathDecMapFloat32Uint16R) - fn(map[float32]uint32(nil), (*encFnInfo).fastpathEncMapFloat32Uint32R, (*decFnInfo).fastpathDecMapFloat32Uint32R) - fn(map[float32]uint64(nil), (*encFnInfo).fastpathEncMapFloat32Uint64R, (*decFnInfo).fastpathDecMapFloat32Uint64R) - fn(map[float32]uintptr(nil), (*encFnInfo).fastpathEncMapFloat32UintptrR, (*decFnInfo).fastpathDecMapFloat32UintptrR) - fn(map[float32]int(nil), (*encFnInfo).fastpathEncMapFloat32IntR, (*decFnInfo).fastpathDecMapFloat32IntR) - fn(map[float32]int8(nil), (*encFnInfo).fastpathEncMapFloat32Int8R, (*decFnInfo).fastpathDecMapFloat32Int8R) - fn(map[float32]int16(nil), (*encFnInfo).fastpathEncMapFloat32Int16R, (*decFnInfo).fastpathDecMapFloat32Int16R) - fn(map[float32]int32(nil), (*encFnInfo).fastpathEncMapFloat32Int32R, (*decFnInfo).fastpathDecMapFloat32Int32R) - fn(map[float32]int64(nil), (*encFnInfo).fastpathEncMapFloat32Int64R, (*decFnInfo).fastpathDecMapFloat32Int64R) - fn(map[float32]float32(nil), (*encFnInfo).fastpathEncMapFloat32Float32R, (*decFnInfo).fastpathDecMapFloat32Float32R) - fn(map[float32]float64(nil), (*encFnInfo).fastpathEncMapFloat32Float64R, (*decFnInfo).fastpathDecMapFloat32Float64R) - fn(map[float32]bool(nil), (*encFnInfo).fastpathEncMapFloat32BoolR, (*decFnInfo).fastpathDecMapFloat32BoolR) - fn(map[float64]interface{}(nil), (*encFnInfo).fastpathEncMapFloat64IntfR, (*decFnInfo).fastpathDecMapFloat64IntfR) - fn(map[float64]string(nil), (*encFnInfo).fastpathEncMapFloat64StringR, (*decFnInfo).fastpathDecMapFloat64StringR) - fn(map[float64]uint(nil), (*encFnInfo).fastpathEncMapFloat64UintR, (*decFnInfo).fastpathDecMapFloat64UintR) - fn(map[float64]uint8(nil), (*encFnInfo).fastpathEncMapFloat64Uint8R, (*decFnInfo).fastpathDecMapFloat64Uint8R) - fn(map[float64]uint16(nil), (*encFnInfo).fastpathEncMapFloat64Uint16R, (*decFnInfo).fastpathDecMapFloat64Uint16R) - fn(map[float64]uint32(nil), (*encFnInfo).fastpathEncMapFloat64Uint32R, (*decFnInfo).fastpathDecMapFloat64Uint32R) - fn(map[float64]uint64(nil), (*encFnInfo).fastpathEncMapFloat64Uint64R, (*decFnInfo).fastpathDecMapFloat64Uint64R) - fn(map[float64]uintptr(nil), (*encFnInfo).fastpathEncMapFloat64UintptrR, (*decFnInfo).fastpathDecMapFloat64UintptrR) - fn(map[float64]int(nil), (*encFnInfo).fastpathEncMapFloat64IntR, (*decFnInfo).fastpathDecMapFloat64IntR) - fn(map[float64]int8(nil), (*encFnInfo).fastpathEncMapFloat64Int8R, (*decFnInfo).fastpathDecMapFloat64Int8R) - fn(map[float64]int16(nil), (*encFnInfo).fastpathEncMapFloat64Int16R, 
(*decFnInfo).fastpathDecMapFloat64Int16R) - fn(map[float64]int32(nil), (*encFnInfo).fastpathEncMapFloat64Int32R, (*decFnInfo).fastpathDecMapFloat64Int32R) - fn(map[float64]int64(nil), (*encFnInfo).fastpathEncMapFloat64Int64R, (*decFnInfo).fastpathDecMapFloat64Int64R) - fn(map[float64]float32(nil), (*encFnInfo).fastpathEncMapFloat64Float32R, (*decFnInfo).fastpathDecMapFloat64Float32R) - fn(map[float64]float64(nil), (*encFnInfo).fastpathEncMapFloat64Float64R, (*decFnInfo).fastpathDecMapFloat64Float64R) - fn(map[float64]bool(nil), (*encFnInfo).fastpathEncMapFloat64BoolR, (*decFnInfo).fastpathDecMapFloat64BoolR) - fn(map[uint]interface{}(nil), (*encFnInfo).fastpathEncMapUintIntfR, (*decFnInfo).fastpathDecMapUintIntfR) - fn(map[uint]string(nil), (*encFnInfo).fastpathEncMapUintStringR, (*decFnInfo).fastpathDecMapUintStringR) - fn(map[uint]uint(nil), (*encFnInfo).fastpathEncMapUintUintR, (*decFnInfo).fastpathDecMapUintUintR) - fn(map[uint]uint8(nil), (*encFnInfo).fastpathEncMapUintUint8R, (*decFnInfo).fastpathDecMapUintUint8R) - fn(map[uint]uint16(nil), (*encFnInfo).fastpathEncMapUintUint16R, (*decFnInfo).fastpathDecMapUintUint16R) - fn(map[uint]uint32(nil), (*encFnInfo).fastpathEncMapUintUint32R, (*decFnInfo).fastpathDecMapUintUint32R) - fn(map[uint]uint64(nil), (*encFnInfo).fastpathEncMapUintUint64R, (*decFnInfo).fastpathDecMapUintUint64R) - fn(map[uint]uintptr(nil), (*encFnInfo).fastpathEncMapUintUintptrR, (*decFnInfo).fastpathDecMapUintUintptrR) - fn(map[uint]int(nil), (*encFnInfo).fastpathEncMapUintIntR, (*decFnInfo).fastpathDecMapUintIntR) - fn(map[uint]int8(nil), (*encFnInfo).fastpathEncMapUintInt8R, (*decFnInfo).fastpathDecMapUintInt8R) - fn(map[uint]int16(nil), (*encFnInfo).fastpathEncMapUintInt16R, (*decFnInfo).fastpathDecMapUintInt16R) - fn(map[uint]int32(nil), (*encFnInfo).fastpathEncMapUintInt32R, (*decFnInfo).fastpathDecMapUintInt32R) - fn(map[uint]int64(nil), (*encFnInfo).fastpathEncMapUintInt64R, (*decFnInfo).fastpathDecMapUintInt64R) - fn(map[uint]float32(nil), (*encFnInfo).fastpathEncMapUintFloat32R, (*decFnInfo).fastpathDecMapUintFloat32R) - fn(map[uint]float64(nil), (*encFnInfo).fastpathEncMapUintFloat64R, (*decFnInfo).fastpathDecMapUintFloat64R) - fn(map[uint]bool(nil), (*encFnInfo).fastpathEncMapUintBoolR, (*decFnInfo).fastpathDecMapUintBoolR) - fn(map[uint8]interface{}(nil), (*encFnInfo).fastpathEncMapUint8IntfR, (*decFnInfo).fastpathDecMapUint8IntfR) - fn(map[uint8]string(nil), (*encFnInfo).fastpathEncMapUint8StringR, (*decFnInfo).fastpathDecMapUint8StringR) - fn(map[uint8]uint(nil), (*encFnInfo).fastpathEncMapUint8UintR, (*decFnInfo).fastpathDecMapUint8UintR) - fn(map[uint8]uint8(nil), (*encFnInfo).fastpathEncMapUint8Uint8R, (*decFnInfo).fastpathDecMapUint8Uint8R) - fn(map[uint8]uint16(nil), (*encFnInfo).fastpathEncMapUint8Uint16R, (*decFnInfo).fastpathDecMapUint8Uint16R) - fn(map[uint8]uint32(nil), (*encFnInfo).fastpathEncMapUint8Uint32R, (*decFnInfo).fastpathDecMapUint8Uint32R) - fn(map[uint8]uint64(nil), (*encFnInfo).fastpathEncMapUint8Uint64R, (*decFnInfo).fastpathDecMapUint8Uint64R) - fn(map[uint8]uintptr(nil), (*encFnInfo).fastpathEncMapUint8UintptrR, (*decFnInfo).fastpathDecMapUint8UintptrR) - fn(map[uint8]int(nil), (*encFnInfo).fastpathEncMapUint8IntR, (*decFnInfo).fastpathDecMapUint8IntR) - fn(map[uint8]int8(nil), (*encFnInfo).fastpathEncMapUint8Int8R, (*decFnInfo).fastpathDecMapUint8Int8R) - fn(map[uint8]int16(nil), (*encFnInfo).fastpathEncMapUint8Int16R, (*decFnInfo).fastpathDecMapUint8Int16R) - fn(map[uint8]int32(nil), (*encFnInfo).fastpathEncMapUint8Int32R, 
(*decFnInfo).fastpathDecMapUint8Int32R) - fn(map[uint8]int64(nil), (*encFnInfo).fastpathEncMapUint8Int64R, (*decFnInfo).fastpathDecMapUint8Int64R) - fn(map[uint8]float32(nil), (*encFnInfo).fastpathEncMapUint8Float32R, (*decFnInfo).fastpathDecMapUint8Float32R) - fn(map[uint8]float64(nil), (*encFnInfo).fastpathEncMapUint8Float64R, (*decFnInfo).fastpathDecMapUint8Float64R) - fn(map[uint8]bool(nil), (*encFnInfo).fastpathEncMapUint8BoolR, (*decFnInfo).fastpathDecMapUint8BoolR) - fn(map[uint16]interface{}(nil), (*encFnInfo).fastpathEncMapUint16IntfR, (*decFnInfo).fastpathDecMapUint16IntfR) - fn(map[uint16]string(nil), (*encFnInfo).fastpathEncMapUint16StringR, (*decFnInfo).fastpathDecMapUint16StringR) - fn(map[uint16]uint(nil), (*encFnInfo).fastpathEncMapUint16UintR, (*decFnInfo).fastpathDecMapUint16UintR) - fn(map[uint16]uint8(nil), (*encFnInfo).fastpathEncMapUint16Uint8R, (*decFnInfo).fastpathDecMapUint16Uint8R) - fn(map[uint16]uint16(nil), (*encFnInfo).fastpathEncMapUint16Uint16R, (*decFnInfo).fastpathDecMapUint16Uint16R) - fn(map[uint16]uint32(nil), (*encFnInfo).fastpathEncMapUint16Uint32R, (*decFnInfo).fastpathDecMapUint16Uint32R) - fn(map[uint16]uint64(nil), (*encFnInfo).fastpathEncMapUint16Uint64R, (*decFnInfo).fastpathDecMapUint16Uint64R) - fn(map[uint16]uintptr(nil), (*encFnInfo).fastpathEncMapUint16UintptrR, (*decFnInfo).fastpathDecMapUint16UintptrR) - fn(map[uint16]int(nil), (*encFnInfo).fastpathEncMapUint16IntR, (*decFnInfo).fastpathDecMapUint16IntR) - fn(map[uint16]int8(nil), (*encFnInfo).fastpathEncMapUint16Int8R, (*decFnInfo).fastpathDecMapUint16Int8R) - fn(map[uint16]int16(nil), (*encFnInfo).fastpathEncMapUint16Int16R, (*decFnInfo).fastpathDecMapUint16Int16R) - fn(map[uint16]int32(nil), (*encFnInfo).fastpathEncMapUint16Int32R, (*decFnInfo).fastpathDecMapUint16Int32R) - fn(map[uint16]int64(nil), (*encFnInfo).fastpathEncMapUint16Int64R, (*decFnInfo).fastpathDecMapUint16Int64R) - fn(map[uint16]float32(nil), (*encFnInfo).fastpathEncMapUint16Float32R, (*decFnInfo).fastpathDecMapUint16Float32R) - fn(map[uint16]float64(nil), (*encFnInfo).fastpathEncMapUint16Float64R, (*decFnInfo).fastpathDecMapUint16Float64R) - fn(map[uint16]bool(nil), (*encFnInfo).fastpathEncMapUint16BoolR, (*decFnInfo).fastpathDecMapUint16BoolR) - fn(map[uint32]interface{}(nil), (*encFnInfo).fastpathEncMapUint32IntfR, (*decFnInfo).fastpathDecMapUint32IntfR) - fn(map[uint32]string(nil), (*encFnInfo).fastpathEncMapUint32StringR, (*decFnInfo).fastpathDecMapUint32StringR) - fn(map[uint32]uint(nil), (*encFnInfo).fastpathEncMapUint32UintR, (*decFnInfo).fastpathDecMapUint32UintR) - fn(map[uint32]uint8(nil), (*encFnInfo).fastpathEncMapUint32Uint8R, (*decFnInfo).fastpathDecMapUint32Uint8R) - fn(map[uint32]uint16(nil), (*encFnInfo).fastpathEncMapUint32Uint16R, (*decFnInfo).fastpathDecMapUint32Uint16R) - fn(map[uint32]uint32(nil), (*encFnInfo).fastpathEncMapUint32Uint32R, (*decFnInfo).fastpathDecMapUint32Uint32R) - fn(map[uint32]uint64(nil), (*encFnInfo).fastpathEncMapUint32Uint64R, (*decFnInfo).fastpathDecMapUint32Uint64R) - fn(map[uint32]uintptr(nil), (*encFnInfo).fastpathEncMapUint32UintptrR, (*decFnInfo).fastpathDecMapUint32UintptrR) - fn(map[uint32]int(nil), (*encFnInfo).fastpathEncMapUint32IntR, (*decFnInfo).fastpathDecMapUint32IntR) - fn(map[uint32]int8(nil), (*encFnInfo).fastpathEncMapUint32Int8R, (*decFnInfo).fastpathDecMapUint32Int8R) - fn(map[uint32]int16(nil), (*encFnInfo).fastpathEncMapUint32Int16R, (*decFnInfo).fastpathDecMapUint32Int16R) - fn(map[uint32]int32(nil), (*encFnInfo).fastpathEncMapUint32Int32R, 
(*decFnInfo).fastpathDecMapUint32Int32R) - fn(map[uint32]int64(nil), (*encFnInfo).fastpathEncMapUint32Int64R, (*decFnInfo).fastpathDecMapUint32Int64R) - fn(map[uint32]float32(nil), (*encFnInfo).fastpathEncMapUint32Float32R, (*decFnInfo).fastpathDecMapUint32Float32R) - fn(map[uint32]float64(nil), (*encFnInfo).fastpathEncMapUint32Float64R, (*decFnInfo).fastpathDecMapUint32Float64R) - fn(map[uint32]bool(nil), (*encFnInfo).fastpathEncMapUint32BoolR, (*decFnInfo).fastpathDecMapUint32BoolR) - fn(map[uint64]interface{}(nil), (*encFnInfo).fastpathEncMapUint64IntfR, (*decFnInfo).fastpathDecMapUint64IntfR) - fn(map[uint64]string(nil), (*encFnInfo).fastpathEncMapUint64StringR, (*decFnInfo).fastpathDecMapUint64StringR) - fn(map[uint64]uint(nil), (*encFnInfo).fastpathEncMapUint64UintR, (*decFnInfo).fastpathDecMapUint64UintR) - fn(map[uint64]uint8(nil), (*encFnInfo).fastpathEncMapUint64Uint8R, (*decFnInfo).fastpathDecMapUint64Uint8R) - fn(map[uint64]uint16(nil), (*encFnInfo).fastpathEncMapUint64Uint16R, (*decFnInfo).fastpathDecMapUint64Uint16R) - fn(map[uint64]uint32(nil), (*encFnInfo).fastpathEncMapUint64Uint32R, (*decFnInfo).fastpathDecMapUint64Uint32R) - fn(map[uint64]uint64(nil), (*encFnInfo).fastpathEncMapUint64Uint64R, (*decFnInfo).fastpathDecMapUint64Uint64R) - fn(map[uint64]uintptr(nil), (*encFnInfo).fastpathEncMapUint64UintptrR, (*decFnInfo).fastpathDecMapUint64UintptrR) - fn(map[uint64]int(nil), (*encFnInfo).fastpathEncMapUint64IntR, (*decFnInfo).fastpathDecMapUint64IntR) - fn(map[uint64]int8(nil), (*encFnInfo).fastpathEncMapUint64Int8R, (*decFnInfo).fastpathDecMapUint64Int8R) - fn(map[uint64]int16(nil), (*encFnInfo).fastpathEncMapUint64Int16R, (*decFnInfo).fastpathDecMapUint64Int16R) - fn(map[uint64]int32(nil), (*encFnInfo).fastpathEncMapUint64Int32R, (*decFnInfo).fastpathDecMapUint64Int32R) - fn(map[uint64]int64(nil), (*encFnInfo).fastpathEncMapUint64Int64R, (*decFnInfo).fastpathDecMapUint64Int64R) - fn(map[uint64]float32(nil), (*encFnInfo).fastpathEncMapUint64Float32R, (*decFnInfo).fastpathDecMapUint64Float32R) - fn(map[uint64]float64(nil), (*encFnInfo).fastpathEncMapUint64Float64R, (*decFnInfo).fastpathDecMapUint64Float64R) - fn(map[uint64]bool(nil), (*encFnInfo).fastpathEncMapUint64BoolR, (*decFnInfo).fastpathDecMapUint64BoolR) - fn(map[uintptr]interface{}(nil), (*encFnInfo).fastpathEncMapUintptrIntfR, (*decFnInfo).fastpathDecMapUintptrIntfR) - fn(map[uintptr]string(nil), (*encFnInfo).fastpathEncMapUintptrStringR, (*decFnInfo).fastpathDecMapUintptrStringR) - fn(map[uintptr]uint(nil), (*encFnInfo).fastpathEncMapUintptrUintR, (*decFnInfo).fastpathDecMapUintptrUintR) - fn(map[uintptr]uint8(nil), (*encFnInfo).fastpathEncMapUintptrUint8R, (*decFnInfo).fastpathDecMapUintptrUint8R) - fn(map[uintptr]uint16(nil), (*encFnInfo).fastpathEncMapUintptrUint16R, (*decFnInfo).fastpathDecMapUintptrUint16R) - fn(map[uintptr]uint32(nil), (*encFnInfo).fastpathEncMapUintptrUint32R, (*decFnInfo).fastpathDecMapUintptrUint32R) - fn(map[uintptr]uint64(nil), (*encFnInfo).fastpathEncMapUintptrUint64R, (*decFnInfo).fastpathDecMapUintptrUint64R) - fn(map[uintptr]uintptr(nil), (*encFnInfo).fastpathEncMapUintptrUintptrR, (*decFnInfo).fastpathDecMapUintptrUintptrR) - fn(map[uintptr]int(nil), (*encFnInfo).fastpathEncMapUintptrIntR, (*decFnInfo).fastpathDecMapUintptrIntR) - fn(map[uintptr]int8(nil), (*encFnInfo).fastpathEncMapUintptrInt8R, (*decFnInfo).fastpathDecMapUintptrInt8R) - fn(map[uintptr]int16(nil), (*encFnInfo).fastpathEncMapUintptrInt16R, (*decFnInfo).fastpathDecMapUintptrInt16R) - fn(map[uintptr]int32(nil), 
(*encFnInfo).fastpathEncMapUintptrInt32R, (*decFnInfo).fastpathDecMapUintptrInt32R) - fn(map[uintptr]int64(nil), (*encFnInfo).fastpathEncMapUintptrInt64R, (*decFnInfo).fastpathDecMapUintptrInt64R) - fn(map[uintptr]float32(nil), (*encFnInfo).fastpathEncMapUintptrFloat32R, (*decFnInfo).fastpathDecMapUintptrFloat32R) - fn(map[uintptr]float64(nil), (*encFnInfo).fastpathEncMapUintptrFloat64R, (*decFnInfo).fastpathDecMapUintptrFloat64R) - fn(map[uintptr]bool(nil), (*encFnInfo).fastpathEncMapUintptrBoolR, (*decFnInfo).fastpathDecMapUintptrBoolR) - fn(map[int]interface{}(nil), (*encFnInfo).fastpathEncMapIntIntfR, (*decFnInfo).fastpathDecMapIntIntfR) - fn(map[int]string(nil), (*encFnInfo).fastpathEncMapIntStringR, (*decFnInfo).fastpathDecMapIntStringR) - fn(map[int]uint(nil), (*encFnInfo).fastpathEncMapIntUintR, (*decFnInfo).fastpathDecMapIntUintR) - fn(map[int]uint8(nil), (*encFnInfo).fastpathEncMapIntUint8R, (*decFnInfo).fastpathDecMapIntUint8R) - fn(map[int]uint16(nil), (*encFnInfo).fastpathEncMapIntUint16R, (*decFnInfo).fastpathDecMapIntUint16R) - fn(map[int]uint32(nil), (*encFnInfo).fastpathEncMapIntUint32R, (*decFnInfo).fastpathDecMapIntUint32R) - fn(map[int]uint64(nil), (*encFnInfo).fastpathEncMapIntUint64R, (*decFnInfo).fastpathDecMapIntUint64R) - fn(map[int]uintptr(nil), (*encFnInfo).fastpathEncMapIntUintptrR, (*decFnInfo).fastpathDecMapIntUintptrR) - fn(map[int]int(nil), (*encFnInfo).fastpathEncMapIntIntR, (*decFnInfo).fastpathDecMapIntIntR) - fn(map[int]int8(nil), (*encFnInfo).fastpathEncMapIntInt8R, (*decFnInfo).fastpathDecMapIntInt8R) - fn(map[int]int16(nil), (*encFnInfo).fastpathEncMapIntInt16R, (*decFnInfo).fastpathDecMapIntInt16R) - fn(map[int]int32(nil), (*encFnInfo).fastpathEncMapIntInt32R, (*decFnInfo).fastpathDecMapIntInt32R) - fn(map[int]int64(nil), (*encFnInfo).fastpathEncMapIntInt64R, (*decFnInfo).fastpathDecMapIntInt64R) - fn(map[int]float32(nil), (*encFnInfo).fastpathEncMapIntFloat32R, (*decFnInfo).fastpathDecMapIntFloat32R) - fn(map[int]float64(nil), (*encFnInfo).fastpathEncMapIntFloat64R, (*decFnInfo).fastpathDecMapIntFloat64R) - fn(map[int]bool(nil), (*encFnInfo).fastpathEncMapIntBoolR, (*decFnInfo).fastpathDecMapIntBoolR) - fn(map[int8]interface{}(nil), (*encFnInfo).fastpathEncMapInt8IntfR, (*decFnInfo).fastpathDecMapInt8IntfR) - fn(map[int8]string(nil), (*encFnInfo).fastpathEncMapInt8StringR, (*decFnInfo).fastpathDecMapInt8StringR) - fn(map[int8]uint(nil), (*encFnInfo).fastpathEncMapInt8UintR, (*decFnInfo).fastpathDecMapInt8UintR) - fn(map[int8]uint8(nil), (*encFnInfo).fastpathEncMapInt8Uint8R, (*decFnInfo).fastpathDecMapInt8Uint8R) - fn(map[int8]uint16(nil), (*encFnInfo).fastpathEncMapInt8Uint16R, (*decFnInfo).fastpathDecMapInt8Uint16R) - fn(map[int8]uint32(nil), (*encFnInfo).fastpathEncMapInt8Uint32R, (*decFnInfo).fastpathDecMapInt8Uint32R) - fn(map[int8]uint64(nil), (*encFnInfo).fastpathEncMapInt8Uint64R, (*decFnInfo).fastpathDecMapInt8Uint64R) - fn(map[int8]uintptr(nil), (*encFnInfo).fastpathEncMapInt8UintptrR, (*decFnInfo).fastpathDecMapInt8UintptrR) - fn(map[int8]int(nil), (*encFnInfo).fastpathEncMapInt8IntR, (*decFnInfo).fastpathDecMapInt8IntR) - fn(map[int8]int8(nil), (*encFnInfo).fastpathEncMapInt8Int8R, (*decFnInfo).fastpathDecMapInt8Int8R) - fn(map[int8]int16(nil), (*encFnInfo).fastpathEncMapInt8Int16R, (*decFnInfo).fastpathDecMapInt8Int16R) - fn(map[int8]int32(nil), (*encFnInfo).fastpathEncMapInt8Int32R, (*decFnInfo).fastpathDecMapInt8Int32R) - fn(map[int8]int64(nil), (*encFnInfo).fastpathEncMapInt8Int64R, (*decFnInfo).fastpathDecMapInt8Int64R) - 
fn(map[int8]float32(nil), (*encFnInfo).fastpathEncMapInt8Float32R, (*decFnInfo).fastpathDecMapInt8Float32R) - fn(map[int8]float64(nil), (*encFnInfo).fastpathEncMapInt8Float64R, (*decFnInfo).fastpathDecMapInt8Float64R) - fn(map[int8]bool(nil), (*encFnInfo).fastpathEncMapInt8BoolR, (*decFnInfo).fastpathDecMapInt8BoolR) - fn(map[int16]interface{}(nil), (*encFnInfo).fastpathEncMapInt16IntfR, (*decFnInfo).fastpathDecMapInt16IntfR) - fn(map[int16]string(nil), (*encFnInfo).fastpathEncMapInt16StringR, (*decFnInfo).fastpathDecMapInt16StringR) - fn(map[int16]uint(nil), (*encFnInfo).fastpathEncMapInt16UintR, (*decFnInfo).fastpathDecMapInt16UintR) - fn(map[int16]uint8(nil), (*encFnInfo).fastpathEncMapInt16Uint8R, (*decFnInfo).fastpathDecMapInt16Uint8R) - fn(map[int16]uint16(nil), (*encFnInfo).fastpathEncMapInt16Uint16R, (*decFnInfo).fastpathDecMapInt16Uint16R) - fn(map[int16]uint32(nil), (*encFnInfo).fastpathEncMapInt16Uint32R, (*decFnInfo).fastpathDecMapInt16Uint32R) - fn(map[int16]uint64(nil), (*encFnInfo).fastpathEncMapInt16Uint64R, (*decFnInfo).fastpathDecMapInt16Uint64R) - fn(map[int16]uintptr(nil), (*encFnInfo).fastpathEncMapInt16UintptrR, (*decFnInfo).fastpathDecMapInt16UintptrR) - fn(map[int16]int(nil), (*encFnInfo).fastpathEncMapInt16IntR, (*decFnInfo).fastpathDecMapInt16IntR) - fn(map[int16]int8(nil), (*encFnInfo).fastpathEncMapInt16Int8R, (*decFnInfo).fastpathDecMapInt16Int8R) - fn(map[int16]int16(nil), (*encFnInfo).fastpathEncMapInt16Int16R, (*decFnInfo).fastpathDecMapInt16Int16R) - fn(map[int16]int32(nil), (*encFnInfo).fastpathEncMapInt16Int32R, (*decFnInfo).fastpathDecMapInt16Int32R) - fn(map[int16]int64(nil), (*encFnInfo).fastpathEncMapInt16Int64R, (*decFnInfo).fastpathDecMapInt16Int64R) - fn(map[int16]float32(nil), (*encFnInfo).fastpathEncMapInt16Float32R, (*decFnInfo).fastpathDecMapInt16Float32R) - fn(map[int16]float64(nil), (*encFnInfo).fastpathEncMapInt16Float64R, (*decFnInfo).fastpathDecMapInt16Float64R) - fn(map[int16]bool(nil), (*encFnInfo).fastpathEncMapInt16BoolR, (*decFnInfo).fastpathDecMapInt16BoolR) - fn(map[int32]interface{}(nil), (*encFnInfo).fastpathEncMapInt32IntfR, (*decFnInfo).fastpathDecMapInt32IntfR) - fn(map[int32]string(nil), (*encFnInfo).fastpathEncMapInt32StringR, (*decFnInfo).fastpathDecMapInt32StringR) - fn(map[int32]uint(nil), (*encFnInfo).fastpathEncMapInt32UintR, (*decFnInfo).fastpathDecMapInt32UintR) - fn(map[int32]uint8(nil), (*encFnInfo).fastpathEncMapInt32Uint8R, (*decFnInfo).fastpathDecMapInt32Uint8R) - fn(map[int32]uint16(nil), (*encFnInfo).fastpathEncMapInt32Uint16R, (*decFnInfo).fastpathDecMapInt32Uint16R) - fn(map[int32]uint32(nil), (*encFnInfo).fastpathEncMapInt32Uint32R, (*decFnInfo).fastpathDecMapInt32Uint32R) - fn(map[int32]uint64(nil), (*encFnInfo).fastpathEncMapInt32Uint64R, (*decFnInfo).fastpathDecMapInt32Uint64R) - fn(map[int32]uintptr(nil), (*encFnInfo).fastpathEncMapInt32UintptrR, (*decFnInfo).fastpathDecMapInt32UintptrR) - fn(map[int32]int(nil), (*encFnInfo).fastpathEncMapInt32IntR, (*decFnInfo).fastpathDecMapInt32IntR) - fn(map[int32]int8(nil), (*encFnInfo).fastpathEncMapInt32Int8R, (*decFnInfo).fastpathDecMapInt32Int8R) - fn(map[int32]int16(nil), (*encFnInfo).fastpathEncMapInt32Int16R, (*decFnInfo).fastpathDecMapInt32Int16R) - fn(map[int32]int32(nil), (*encFnInfo).fastpathEncMapInt32Int32R, (*decFnInfo).fastpathDecMapInt32Int32R) - fn(map[int32]int64(nil), (*encFnInfo).fastpathEncMapInt32Int64R, (*decFnInfo).fastpathDecMapInt32Int64R) - fn(map[int32]float32(nil), (*encFnInfo).fastpathEncMapInt32Float32R, 
(*decFnInfo).fastpathDecMapInt32Float32R) - fn(map[int32]float64(nil), (*encFnInfo).fastpathEncMapInt32Float64R, (*decFnInfo).fastpathDecMapInt32Float64R) - fn(map[int32]bool(nil), (*encFnInfo).fastpathEncMapInt32BoolR, (*decFnInfo).fastpathDecMapInt32BoolR) - fn(map[int64]interface{}(nil), (*encFnInfo).fastpathEncMapInt64IntfR, (*decFnInfo).fastpathDecMapInt64IntfR) - fn(map[int64]string(nil), (*encFnInfo).fastpathEncMapInt64StringR, (*decFnInfo).fastpathDecMapInt64StringR) - fn(map[int64]uint(nil), (*encFnInfo).fastpathEncMapInt64UintR, (*decFnInfo).fastpathDecMapInt64UintR) - fn(map[int64]uint8(nil), (*encFnInfo).fastpathEncMapInt64Uint8R, (*decFnInfo).fastpathDecMapInt64Uint8R) - fn(map[int64]uint16(nil), (*encFnInfo).fastpathEncMapInt64Uint16R, (*decFnInfo).fastpathDecMapInt64Uint16R) - fn(map[int64]uint32(nil), (*encFnInfo).fastpathEncMapInt64Uint32R, (*decFnInfo).fastpathDecMapInt64Uint32R) - fn(map[int64]uint64(nil), (*encFnInfo).fastpathEncMapInt64Uint64R, (*decFnInfo).fastpathDecMapInt64Uint64R) - fn(map[int64]uintptr(nil), (*encFnInfo).fastpathEncMapInt64UintptrR, (*decFnInfo).fastpathDecMapInt64UintptrR) - fn(map[int64]int(nil), (*encFnInfo).fastpathEncMapInt64IntR, (*decFnInfo).fastpathDecMapInt64IntR) - fn(map[int64]int8(nil), (*encFnInfo).fastpathEncMapInt64Int8R, (*decFnInfo).fastpathDecMapInt64Int8R) - fn(map[int64]int16(nil), (*encFnInfo).fastpathEncMapInt64Int16R, (*decFnInfo).fastpathDecMapInt64Int16R) - fn(map[int64]int32(nil), (*encFnInfo).fastpathEncMapInt64Int32R, (*decFnInfo).fastpathDecMapInt64Int32R) - fn(map[int64]int64(nil), (*encFnInfo).fastpathEncMapInt64Int64R, (*decFnInfo).fastpathDecMapInt64Int64R) - fn(map[int64]float32(nil), (*encFnInfo).fastpathEncMapInt64Float32R, (*decFnInfo).fastpathDecMapInt64Float32R) - fn(map[int64]float64(nil), (*encFnInfo).fastpathEncMapInt64Float64R, (*decFnInfo).fastpathDecMapInt64Float64R) - fn(map[int64]bool(nil), (*encFnInfo).fastpathEncMapInt64BoolR, (*decFnInfo).fastpathDecMapInt64BoolR) - fn(map[bool]interface{}(nil), (*encFnInfo).fastpathEncMapBoolIntfR, (*decFnInfo).fastpathDecMapBoolIntfR) - fn(map[bool]string(nil), (*encFnInfo).fastpathEncMapBoolStringR, (*decFnInfo).fastpathDecMapBoolStringR) - fn(map[bool]uint(nil), (*encFnInfo).fastpathEncMapBoolUintR, (*decFnInfo).fastpathDecMapBoolUintR) - fn(map[bool]uint8(nil), (*encFnInfo).fastpathEncMapBoolUint8R, (*decFnInfo).fastpathDecMapBoolUint8R) - fn(map[bool]uint16(nil), (*encFnInfo).fastpathEncMapBoolUint16R, (*decFnInfo).fastpathDecMapBoolUint16R) - fn(map[bool]uint32(nil), (*encFnInfo).fastpathEncMapBoolUint32R, (*decFnInfo).fastpathDecMapBoolUint32R) - fn(map[bool]uint64(nil), (*encFnInfo).fastpathEncMapBoolUint64R, (*decFnInfo).fastpathDecMapBoolUint64R) - fn(map[bool]uintptr(nil), (*encFnInfo).fastpathEncMapBoolUintptrR, (*decFnInfo).fastpathDecMapBoolUintptrR) - fn(map[bool]int(nil), (*encFnInfo).fastpathEncMapBoolIntR, (*decFnInfo).fastpathDecMapBoolIntR) - fn(map[bool]int8(nil), (*encFnInfo).fastpathEncMapBoolInt8R, (*decFnInfo).fastpathDecMapBoolInt8R) - fn(map[bool]int16(nil), (*encFnInfo).fastpathEncMapBoolInt16R, (*decFnInfo).fastpathDecMapBoolInt16R) - fn(map[bool]int32(nil), (*encFnInfo).fastpathEncMapBoolInt32R, (*decFnInfo).fastpathDecMapBoolInt32R) - fn(map[bool]int64(nil), (*encFnInfo).fastpathEncMapBoolInt64R, (*decFnInfo).fastpathDecMapBoolInt64R) - fn(map[bool]float32(nil), (*encFnInfo).fastpathEncMapBoolFloat32R, (*decFnInfo).fastpathDecMapBoolFloat32R) - fn(map[bool]float64(nil), (*encFnInfo).fastpathEncMapBoolFloat64R, 
(*decFnInfo).fastpathDecMapBoolFloat64R) - fn(map[bool]bool(nil), (*encFnInfo).fastpathEncMapBoolBoolR, (*decFnInfo).fastpathDecMapBoolBoolR) - - sort.Sort(fastpathAslice(fastpathAV[:])) -} - -// -- encode - -// -- -- fast path type switch -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } - switch v := iv.(type) { - - case []interface{}: - fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e) - case *[]interface{}: - fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]interface{}: - fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e) - case *map[interface{}]interface{}: - fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]string: - fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e) - case *map[interface{}]string: - fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uint: - fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e) - case *map[interface{}]uint: - fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uint8: - fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e) - case *map[interface{}]uint8: - fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uint16: - fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e) - case *map[interface{}]uint16: - fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uint32: - fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e) - case *map[interface{}]uint32: - fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uint64: - fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e) - case *map[interface{}]uint64: - fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uintptr: - fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e) - case *map[interface{}]uintptr: - fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]int: - fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e) - case *map[interface{}]int: - fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]int8: - fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e) - case *map[interface{}]int8: - fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]int16: - fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e) - case *map[interface{}]int16: - fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]int32: - fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e) - case *map[interface{}]int32: - fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]int64: - fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e) - case *map[interface{}]int64: - fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]float32: - fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e) - case *map[interface{}]float32: - fastpathTV.EncMapIntfFloat32V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]float64: - fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e) - case *map[interface{}]float64: - fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]bool: - fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e) - case *map[interface{}]bool: - fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e) - - case []string: - fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e) - case *[]string: - 
fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e) - - case map[string]interface{}: - fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e) - case *map[string]interface{}: - fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e) - - case map[string]string: - fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e) - case *map[string]string: - fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e) - - case map[string]uint: - fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e) - case *map[string]uint: - fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e) - - case map[string]uint8: - fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e) - case *map[string]uint8: - fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e) - - case map[string]uint16: - fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e) - case *map[string]uint16: - fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e) - - case map[string]uint32: - fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e) - case *map[string]uint32: - fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e) - - case map[string]uint64: - fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e) - case *map[string]uint64: - fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e) - - case map[string]uintptr: - fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e) - case *map[string]uintptr: - fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e) - - case map[string]int: - fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e) - case *map[string]int: - fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e) - - case map[string]int8: - fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e) - case *map[string]int8: - fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e) - - case map[string]int16: - fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e) - case *map[string]int16: - fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e) - - case map[string]int32: - fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e) - case *map[string]int32: - fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e) - - case map[string]int64: - fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e) - case *map[string]int64: - fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e) - - case map[string]float32: - fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e) - case *map[string]float32: - fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e) - - case map[string]float64: - fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e) - case *map[string]float64: - fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e) - - case map[string]bool: - fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e) - case *map[string]bool: - fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e) - - case []float32: - fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e) - case *[]float32: - fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e) - - case map[float32]interface{}: - fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e) - case *map[float32]interface{}: - fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e) - - case map[float32]string: - fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e) - case *map[float32]string: - fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e) - - case map[float32]uint: - fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e) - case *map[float32]uint: - fastpathTV.EncMapFloat32UintV(*v, 
fastpathCheckNilTrue, e) - - case map[float32]uint8: - fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e) - case *map[float32]uint8: - fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e) - - case map[float32]uint16: - fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e) - case *map[float32]uint16: - fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e) - - case map[float32]uint32: - fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e) - case *map[float32]uint32: - fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e) - - case map[float32]uint64: - fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e) - case *map[float32]uint64: - fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e) - - case map[float32]uintptr: - fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e) - case *map[float32]uintptr: - fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e) - - case map[float32]int: - fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e) - case *map[float32]int: - fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e) - - case map[float32]int8: - fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e) - case *map[float32]int8: - fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e) - - case map[float32]int16: - fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e) - case *map[float32]int16: - fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e) - - case map[float32]int32: - fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e) - case *map[float32]int32: - fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e) - - case map[float32]int64: - fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e) - case *map[float32]int64: - fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e) - - case map[float32]float32: - fastpathTV.EncMapFloat32Float32V(v, fastpathCheckNilTrue, e) - case *map[float32]float32: - fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e) - - case map[float32]float64: - fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e) - case *map[float32]float64: - fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e) - - case map[float32]bool: - fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e) - case *map[float32]bool: - fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e) - - case []float64: - fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e) - case *[]float64: - fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e) - - case map[float64]interface{}: - fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e) - case *map[float64]interface{}: - fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e) - - case map[float64]string: - fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e) - case *map[float64]string: - fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e) - - case map[float64]uint: - fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e) - case *map[float64]uint: - fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e) - - case map[float64]uint8: - fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e) - case *map[float64]uint8: - fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e) - - case map[float64]uint16: - fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e) - case *map[float64]uint16: - fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e) - - case map[float64]uint32: - fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e) - case *map[float64]uint32: - 
fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e) - - case map[float64]uint64: - fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e) - case *map[float64]uint64: - fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e) - - case map[float64]uintptr: - fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e) - case *map[float64]uintptr: - fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e) - - case map[float64]int: - fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e) - case *map[float64]int: - fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e) - - case map[float64]int8: - fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e) - case *map[float64]int8: - fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e) - - case map[float64]int16: - fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e) - case *map[float64]int16: - fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e) - - case map[float64]int32: - fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e) - case *map[float64]int32: - fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e) - - case map[float64]int64: - fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e) - case *map[float64]int64: - fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e) - - case map[float64]float32: - fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e) - case *map[float64]float32: - fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e) - - case map[float64]float64: - fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e) - case *map[float64]float64: - fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e) - - case map[float64]bool: - fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e) - case *map[float64]bool: - fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e) - - case []uint: - fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e) - case *[]uint: - fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e) - - case map[uint]interface{}: - fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e) - case *map[uint]interface{}: - fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e) - - case map[uint]string: - fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e) - case *map[uint]string: - fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e) - - case map[uint]uint: - fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e) - case *map[uint]uint: - fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e) - - case map[uint]uint8: - fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e) - case *map[uint]uint8: - fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e) - - case map[uint]uint16: - fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e) - case *map[uint]uint16: - fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e) - - case map[uint]uint32: - fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e) - case *map[uint]uint32: - fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e) - - case map[uint]uint64: - fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e) - case *map[uint]uint64: - fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e) - - case map[uint]uintptr: - fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e) - case *map[uint]uintptr: - fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint]int: - fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e) - case *map[uint]int: - fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e) - - case map[uint]int8: - 
fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e) - case *map[uint]int8: - fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e) - - case map[uint]int16: - fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e) - case *map[uint]int16: - fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e) - - case map[uint]int32: - fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e) - case *map[uint]int32: - fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e) - - case map[uint]int64: - fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e) - case *map[uint]int64: - fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e) - - case map[uint]float32: - fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e) - case *map[uint]float32: - fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e) - - case map[uint]float64: - fastpathTV.EncMapUintFloat64V(v, fastpathCheckNilTrue, e) - case *map[uint]float64: - fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e) - - case map[uint]bool: - fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e) - case *map[uint]bool: - fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e) - - case map[uint8]interface{}: - fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e) - case *map[uint8]interface{}: - fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint8]string: - fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e) - case *map[uint8]string: - fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint: - fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e) - case *map[uint8]uint: - fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint8: - fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint8: - fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint16: - fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint16: - fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint32: - fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint32: - fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint64: - fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint64: - fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint8]uintptr: - fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint8]uintptr: - fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint8]int: - fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e) - case *map[uint8]int: - fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e) - - case map[uint8]int8: - fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e) - case *map[uint8]int8: - fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint8]int16: - fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e) - case *map[uint8]int16: - fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint8]int32: - fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e) - case *map[uint8]int32: - fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint8]int64: - fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e) - case *map[uint8]int64: - fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint8]float32: - fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e) - case *map[uint8]float32: - fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e) - - 
case map[uint8]float64: - fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e) - case *map[uint8]float64: - fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint8]bool: - fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e) - case *map[uint8]bool: - fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e) - - case []uint16: - fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e) - case *[]uint16: - fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e) - - case map[uint16]interface{}: - fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e) - case *map[uint16]interface{}: - fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint16]string: - fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e) - case *map[uint16]string: - fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint: - fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e) - case *map[uint16]uint: - fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint8: - fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint8: - fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint16: - fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint16: - fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint32: - fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint32: - fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint64: - fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint64: - fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uintptr: - fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint16]uintptr: - fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint16]int: - fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e) - case *map[uint16]int: - fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e) - - case map[uint16]int8: - fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e) - case *map[uint16]int8: - fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint16]int16: - fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e) - case *map[uint16]int16: - fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint16]int32: - fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e) - case *map[uint16]int32: - fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint16]int64: - fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e) - case *map[uint16]int64: - fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint16]float32: - fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e) - case *map[uint16]float32: - fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint16]float64: - fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e) - case *map[uint16]float64: - fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint16]bool: - fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e) - case *map[uint16]bool: - fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e) - - case []uint32: - fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e) - case *[]uint32: - fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]interface{}: - fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e) - 
case *map[uint32]interface{}: - fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint32]string: - fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e) - case *map[uint32]string: - fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint: - fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e) - case *map[uint32]uint: - fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint8: - fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint8: - fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint16: - fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint16: - fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint32: - fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint32: - fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint64: - fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint64: - fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uintptr: - fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint32]uintptr: - fastpathTV.EncMapUint32UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint32]int: - fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e) - case *map[uint32]int: - fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e) - - case map[uint32]int8: - fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e) - case *map[uint32]int8: - fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint32]int16: - fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e) - case *map[uint32]int16: - fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint32]int32: - fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e) - case *map[uint32]int32: - fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]int64: - fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e) - case *map[uint32]int64: - fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint32]float32: - fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e) - case *map[uint32]float32: - fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]float64: - fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e) - case *map[uint32]float64: - fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint32]bool: - fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e) - case *map[uint32]bool: - fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e) - - case []uint64: - fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e) - case *[]uint64: - fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]interface{}: - fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e) - case *map[uint64]interface{}: - fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint64]string: - fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e) - case *map[uint64]string: - fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint: - fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e) - case *map[uint64]uint: - fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint8: - fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint8: - fastpathTV.EncMapUint64Uint8V(*v, 
fastpathCheckNilTrue, e) - - case map[uint64]uint16: - fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint16: - fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint32: - fastpathTV.EncMapUint64Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint32: - fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint64: - fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint64: - fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uintptr: - fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint64]uintptr: - fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint64]int: - fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e) - case *map[uint64]int: - fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e) - - case map[uint64]int8: - fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e) - case *map[uint64]int8: - fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint64]int16: - fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e) - case *map[uint64]int16: - fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint64]int32: - fastpathTV.EncMapUint64Int32V(v, fastpathCheckNilTrue, e) - case *map[uint64]int32: - fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint64]int64: - fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e) - case *map[uint64]int64: - fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]float32: - fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e) - case *map[uint64]float32: - fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint64]float64: - fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e) - case *map[uint64]float64: - fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]bool: - fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e) - case *map[uint64]bool: - fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e) - - case []uintptr: - fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e) - case *[]uintptr: - fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]interface{}: - fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e) - case *map[uintptr]interface{}: - fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]string: - fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e) - case *map[uintptr]string: - fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint: - fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint: - fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint8: - fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint8: - fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint16: - fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint16: - fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint32: - fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint32: - fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint64: - fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint64: - fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, 
e) - - case map[uintptr]uintptr: - fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e) - case *map[uintptr]uintptr: - fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int: - fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e) - case *map[uintptr]int: - fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int8: - fastpathTV.EncMapUintptrInt8V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int8: - fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int16: - fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int16: - fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int32: - fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int32: - fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int64: - fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int64: - fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]float32: - fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e) - case *map[uintptr]float32: - fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]float64: - fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e) - case *map[uintptr]float64: - fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]bool: - fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e) - case *map[uintptr]bool: - fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e) - - case []int: - fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e) - case *[]int: - fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e) - - case map[int]interface{}: - fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e) - case *map[int]interface{}: - fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e) - - case map[int]string: - fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e) - case *map[int]string: - fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e) - - case map[int]uint: - fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e) - case *map[int]uint: - fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e) - - case map[int]uint8: - fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e) - case *map[int]uint8: - fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e) - - case map[int]uint16: - fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e) - case *map[int]uint16: - fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e) - - case map[int]uint32: - fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e) - case *map[int]uint32: - fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e) - - case map[int]uint64: - fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e) - case *map[int]uint64: - fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e) - - case map[int]uintptr: - fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e) - case *map[int]uintptr: - fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e) - - case map[int]int: - fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e) - case *map[int]int: - fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e) - - case map[int]int8: - fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e) - case *map[int]int8: - fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e) - - case map[int]int16: - fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e) - case *map[int]int16: - fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e) - 
- case map[int]int32: - fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e) - case *map[int]int32: - fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e) - - case map[int]int64: - fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e) - case *map[int]int64: - fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e) - - case map[int]float32: - fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e) - case *map[int]float32: - fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e) - - case map[int]float64: - fastpathTV.EncMapIntFloat64V(v, fastpathCheckNilTrue, e) - case *map[int]float64: - fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e) - - case map[int]bool: - fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e) - case *map[int]bool: - fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e) - - case []int8: - fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e) - case *[]int8: - fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e) - - case map[int8]interface{}: - fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e) - case *map[int8]interface{}: - fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e) - - case map[int8]string: - fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e) - case *map[int8]string: - fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e) - - case map[int8]uint: - fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e) - case *map[int8]uint: - fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e) - - case map[int8]uint8: - fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e) - case *map[int8]uint8: - fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int8]uint16: - fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e) - case *map[int8]uint16: - fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int8]uint32: - fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e) - case *map[int8]uint32: - fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int8]uint64: - fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e) - case *map[int8]uint64: - fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int8]uintptr: - fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e) - case *map[int8]uintptr: - fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int8]int: - fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e) - case *map[int8]int: - fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e) - - case map[int8]int8: - fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e) - case *map[int8]int8: - fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e) - - case map[int8]int16: - fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e) - case *map[int8]int16: - fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e) - - case map[int8]int32: - fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e) - case *map[int8]int32: - fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e) - - case map[int8]int64: - fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e) - case *map[int8]int64: - fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e) - - case map[int8]float32: - fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e) - case *map[int8]float32: - fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e) - - case map[int8]float64: - fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e) - case *map[int8]float64: - fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e) - - case map[int8]bool: - fastpathTV.EncMapInt8BoolV(v, 
fastpathCheckNilTrue, e) - case *map[int8]bool: - fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e) - - case []int16: - fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e) - case *[]int16: - fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e) - - case map[int16]interface{}: - fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e) - case *map[int16]interface{}: - fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e) - - case map[int16]string: - fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e) - case *map[int16]string: - fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e) - - case map[int16]uint: - fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e) - case *map[int16]uint: - fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e) - - case map[int16]uint8: - fastpathTV.EncMapInt16Uint8V(v, fastpathCheckNilTrue, e) - case *map[int16]uint8: - fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int16]uint16: - fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e) - case *map[int16]uint16: - fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int16]uint32: - fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e) - case *map[int16]uint32: - fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int16]uint64: - fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e) - case *map[int16]uint64: - fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int16]uintptr: - fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e) - case *map[int16]uintptr: - fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int16]int: - fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e) - case *map[int16]int: - fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e) - - case map[int16]int8: - fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e) - case *map[int16]int8: - fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e) - - case map[int16]int16: - fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e) - case *map[int16]int16: - fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e) - - case map[int16]int32: - fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e) - case *map[int16]int32: - fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e) - - case map[int16]int64: - fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e) - case *map[int16]int64: - fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e) - - case map[int16]float32: - fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e) - case *map[int16]float32: - fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e) - - case map[int16]float64: - fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e) - case *map[int16]float64: - fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e) - - case map[int16]bool: - fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e) - case *map[int16]bool: - fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e) - - case []int32: - fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e) - case *[]int32: - fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e) - - case map[int32]interface{}: - fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e) - case *map[int32]interface{}: - fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e) - - case map[int32]string: - fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e) - case *map[int32]string: - fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e) - - case map[int32]uint: - 
fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e) - case *map[int32]uint: - fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e) - - case map[int32]uint8: - fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e) - case *map[int32]uint8: - fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int32]uint16: - fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e) - case *map[int32]uint16: - fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int32]uint32: - fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e) - case *map[int32]uint32: - fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int32]uint64: - fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e) - case *map[int32]uint64: - fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int32]uintptr: - fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e) - case *map[int32]uintptr: - fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int32]int: - fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e) - case *map[int32]int: - fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e) - - case map[int32]int8: - fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e) - case *map[int32]int8: - fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e) - - case map[int32]int16: - fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e) - case *map[int32]int16: - fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e) - - case map[int32]int32: - fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e) - case *map[int32]int32: - fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e) - - case map[int32]int64: - fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e) - case *map[int32]int64: - fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e) - - case map[int32]float32: - fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e) - case *map[int32]float32: - fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e) - - case map[int32]float64: - fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e) - case *map[int32]float64: - fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e) - - case map[int32]bool: - fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e) - case *map[int32]bool: - fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e) - - case []int64: - fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e) - case *[]int64: - fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e) - - case map[int64]interface{}: - fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e) - case *map[int64]interface{}: - fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e) - - case map[int64]string: - fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e) - case *map[int64]string: - fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e) - - case map[int64]uint: - fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e) - case *map[int64]uint: - fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e) - - case map[int64]uint8: - fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e) - case *map[int64]uint8: - fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int64]uint16: - fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e) - case *map[int64]uint16: - fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int64]uint32: - fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e) - case *map[int64]uint32: - fastpathTV.EncMapInt64Uint32V(*v, fastpathCheckNilTrue, e) 
- - case map[int64]uint64: - fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e) - case *map[int64]uint64: - fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int64]uintptr: - fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e) - case *map[int64]uintptr: - fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int64]int: - fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e) - case *map[int64]int: - fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e) - - case map[int64]int8: - fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e) - case *map[int64]int8: - fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e) - - case map[int64]int16: - fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e) - case *map[int64]int16: - fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e) - - case map[int64]int32: - fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e) - case *map[int64]int32: - fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e) - - case map[int64]int64: - fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e) - case *map[int64]int64: - fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e) - - case map[int64]float32: - fastpathTV.EncMapInt64Float32V(v, fastpathCheckNilTrue, e) - case *map[int64]float32: - fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e) - - case map[int64]float64: - fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e) - case *map[int64]float64: - fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e) - - case map[int64]bool: - fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e) - case *map[int64]bool: - fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e) - - case []bool: - fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e) - case *[]bool: - fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e) - - case map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e) - case *map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e) - - case map[bool]string: - fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e) - case *map[bool]string: - fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e) - - case map[bool]uint: - fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e) - case *map[bool]uint: - fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e) - - case map[bool]uint8: - fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e) - case *map[bool]uint8: - fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint16: - fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e) - case *map[bool]uint16: - fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint32: - fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e) - case *map[bool]uint32: - fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint64: - fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e) - case *map[bool]uint64: - fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e) - - case map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e) - case *map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e) - - case map[bool]int: - fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e) - case *map[bool]int: - fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e) - - case map[bool]int8: - fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e) - case *map[bool]int8: - fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e) - - case 
map[bool]int16: - fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e) - case *map[bool]int16: - fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e) - - case map[bool]int32: - fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e) - case *map[bool]int32: - fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e) - - case map[bool]int64: - fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e) - case *map[bool]int64: - fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e) - - case map[bool]float32: - fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e) - case *map[bool]float32: - fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e) - - case map[bool]float64: - fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e) - case *map[bool]float64: - fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e) - - case map[bool]bool: - fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e) - case *map[bool]bool: - fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e) - - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } - switch v := iv.(type) { - - case []interface{}: - fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e) - case *[]interface{}: - fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e) - - case []string: - fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e) - case *[]string: - fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e) - - case []float32: - fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e) - case *[]float32: - fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e) - - case []float64: - fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e) - case *[]float64: - fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e) - - case []uint: - fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e) - case *[]uint: - fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e) - - case []uint16: - fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e) - case *[]uint16: - fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e) - - case []uint32: - fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e) - case *[]uint32: - fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e) - - case []uint64: - fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e) - case *[]uint64: - fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e) - - case []uintptr: - fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e) - case *[]uintptr: - fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e) - - case []int: - fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e) - case *[]int: - fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e) - - case []int8: - fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e) - case *[]int8: - fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e) - - case []int16: - fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e) - case *[]int16: - fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e) - - case []int32: - fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e) - case *[]int32: - fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e) - - case []int64: - fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e) - case *[]int64: - fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e) - - case []bool: - fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e) - case *[]bool: - fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e) - - default: - 
_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } - switch v := iv.(type) { - - case map[interface{}]interface{}: - fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e) - case *map[interface{}]interface{}: - fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]string: - fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e) - case *map[interface{}]string: - fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uint: - fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e) - case *map[interface{}]uint: - fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uint8: - fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e) - case *map[interface{}]uint8: - fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uint16: - fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e) - case *map[interface{}]uint16: - fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uint32: - fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e) - case *map[interface{}]uint32: - fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uint64: - fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e) - case *map[interface{}]uint64: - fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]uintptr: - fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e) - case *map[interface{}]uintptr: - fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]int: - fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e) - case *map[interface{}]int: - fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e) - - case map[interface{}]int8: - fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e) - case *map[interface{}]int8: - fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]int16: - fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e) - case *map[interface{}]int16: - fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]int32: - fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e) - case *map[interface{}]int32: - fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]int64: - fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e) - case *map[interface{}]int64: - fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]float32: - fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e) - case *map[interface{}]float32: - fastpathTV.EncMapIntfFloat32V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]float64: - fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e) - case *map[interface{}]float64: - fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e) - - case map[interface{}]bool: - fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e) - case *map[interface{}]bool: - fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e) - - case map[string]interface{}: - fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e) - case *map[string]interface{}: - fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e) - - case map[string]string: - fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e) - case *map[string]string: - fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e) - - case 
map[string]uint: - fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e) - case *map[string]uint: - fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e) - - case map[string]uint8: - fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e) - case *map[string]uint8: - fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e) - - case map[string]uint16: - fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e) - case *map[string]uint16: - fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e) - - case map[string]uint32: - fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e) - case *map[string]uint32: - fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e) - - case map[string]uint64: - fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e) - case *map[string]uint64: - fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e) - - case map[string]uintptr: - fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e) - case *map[string]uintptr: - fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e) - - case map[string]int: - fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e) - case *map[string]int: - fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e) - - case map[string]int8: - fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e) - case *map[string]int8: - fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e) - - case map[string]int16: - fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e) - case *map[string]int16: - fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e) - - case map[string]int32: - fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e) - case *map[string]int32: - fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e) - - case map[string]int64: - fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e) - case *map[string]int64: - fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e) - - case map[string]float32: - fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e) - case *map[string]float32: - fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e) - - case map[string]float64: - fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e) - case *map[string]float64: - fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e) - - case map[string]bool: - fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e) - case *map[string]bool: - fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e) - - case map[float32]interface{}: - fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e) - case *map[float32]interface{}: - fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e) - - case map[float32]string: - fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e) - case *map[float32]string: - fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e) - - case map[float32]uint: - fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e) - case *map[float32]uint: - fastpathTV.EncMapFloat32UintV(*v, fastpathCheckNilTrue, e) - - case map[float32]uint8: - fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e) - case *map[float32]uint8: - fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e) - - case map[float32]uint16: - fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e) - case *map[float32]uint16: - fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e) - - case map[float32]uint32: - fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e) - case *map[float32]uint32: - fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e) - - case 
map[float32]uint64: - fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e) - case *map[float32]uint64: - fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e) - - case map[float32]uintptr: - fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e) - case *map[float32]uintptr: - fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e) - - case map[float32]int: - fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e) - case *map[float32]int: - fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e) - - case map[float32]int8: - fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e) - case *map[float32]int8: - fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e) - - case map[float32]int16: - fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e) - case *map[float32]int16: - fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e) - - case map[float32]int32: - fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e) - case *map[float32]int32: - fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e) - - case map[float32]int64: - fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e) - case *map[float32]int64: - fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e) - - case map[float32]float32: - fastpathTV.EncMapFloat32Float32V(v, fastpathCheckNilTrue, e) - case *map[float32]float32: - fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e) - - case map[float32]float64: - fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e) - case *map[float32]float64: - fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e) - - case map[float32]bool: - fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e) - case *map[float32]bool: - fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e) - - case map[float64]interface{}: - fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e) - case *map[float64]interface{}: - fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e) - - case map[float64]string: - fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e) - case *map[float64]string: - fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e) - - case map[float64]uint: - fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e) - case *map[float64]uint: - fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e) - - case map[float64]uint8: - fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e) - case *map[float64]uint8: - fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e) - - case map[float64]uint16: - fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e) - case *map[float64]uint16: - fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e) - - case map[float64]uint32: - fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e) - case *map[float64]uint32: - fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e) - - case map[float64]uint64: - fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e) - case *map[float64]uint64: - fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e) - - case map[float64]uintptr: - fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e) - case *map[float64]uintptr: - fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e) - - case map[float64]int: - fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e) - case *map[float64]int: - fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e) - - case map[float64]int8: - fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e) - case *map[float64]int8: - 
fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e) - - case map[float64]int16: - fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e) - case *map[float64]int16: - fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e) - - case map[float64]int32: - fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e) - case *map[float64]int32: - fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e) - - case map[float64]int64: - fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e) - case *map[float64]int64: - fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e) - - case map[float64]float32: - fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e) - case *map[float64]float32: - fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e) - - case map[float64]float64: - fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e) - case *map[float64]float64: - fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e) - - case map[float64]bool: - fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e) - case *map[float64]bool: - fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e) - - case map[uint]interface{}: - fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e) - case *map[uint]interface{}: - fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e) - - case map[uint]string: - fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e) - case *map[uint]string: - fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e) - - case map[uint]uint: - fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e) - case *map[uint]uint: - fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e) - - case map[uint]uint8: - fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e) - case *map[uint]uint8: - fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e) - - case map[uint]uint16: - fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e) - case *map[uint]uint16: - fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e) - - case map[uint]uint32: - fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e) - case *map[uint]uint32: - fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e) - - case map[uint]uint64: - fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e) - case *map[uint]uint64: - fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e) - - case map[uint]uintptr: - fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e) - case *map[uint]uintptr: - fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint]int: - fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e) - case *map[uint]int: - fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e) - - case map[uint]int8: - fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e) - case *map[uint]int8: - fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e) - - case map[uint]int16: - fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e) - case *map[uint]int16: - fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e) - - case map[uint]int32: - fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e) - case *map[uint]int32: - fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e) - - case map[uint]int64: - fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e) - case *map[uint]int64: - fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e) - - case map[uint]float32: - fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e) - case *map[uint]float32: - fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e) - - case map[uint]float64: - fastpathTV.EncMapUintFloat64V(v, 
fastpathCheckNilTrue, e) - case *map[uint]float64: - fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e) - - case map[uint]bool: - fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e) - case *map[uint]bool: - fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e) - - case map[uint8]interface{}: - fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e) - case *map[uint8]interface{}: - fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint8]string: - fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e) - case *map[uint8]string: - fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint: - fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e) - case *map[uint8]uint: - fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint8: - fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint8: - fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint16: - fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint16: - fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint32: - fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint32: - fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint64: - fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint64: - fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint8]uintptr: - fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint8]uintptr: - fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint8]int: - fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e) - case *map[uint8]int: - fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e) - - case map[uint8]int8: - fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e) - case *map[uint8]int8: - fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint8]int16: - fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e) - case *map[uint8]int16: - fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint8]int32: - fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e) - case *map[uint8]int32: - fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint8]int64: - fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e) - case *map[uint8]int64: - fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint8]float32: - fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e) - case *map[uint8]float32: - fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint8]float64: - fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e) - case *map[uint8]float64: - fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint8]bool: - fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e) - case *map[uint8]bool: - fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e) - - case map[uint16]interface{}: - fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e) - case *map[uint16]interface{}: - fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint16]string: - fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e) - case *map[uint16]string: - fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint: - fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e) - case *map[uint16]uint: - fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, 
e) - - case map[uint16]uint8: - fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint8: - fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint16: - fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint16: - fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint32: - fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint32: - fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint64: - fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint64: - fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uintptr: - fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint16]uintptr: - fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint16]int: - fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e) - case *map[uint16]int: - fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e) - - case map[uint16]int8: - fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e) - case *map[uint16]int8: - fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint16]int16: - fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e) - case *map[uint16]int16: - fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint16]int32: - fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e) - case *map[uint16]int32: - fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint16]int64: - fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e) - case *map[uint16]int64: - fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint16]float32: - fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e) - case *map[uint16]float32: - fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint16]float64: - fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e) - case *map[uint16]float64: - fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint16]bool: - fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e) - case *map[uint16]bool: - fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e) - - case map[uint32]interface{}: - fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e) - case *map[uint32]interface{}: - fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint32]string: - fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e) - case *map[uint32]string: - fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint: - fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e) - case *map[uint32]uint: - fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint8: - fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint8: - fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint16: - fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint16: - fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint32: - fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint32: - fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint64: - fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint64: - fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uintptr: 
- fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint32]uintptr: - fastpathTV.EncMapUint32UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint32]int: - fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e) - case *map[uint32]int: - fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e) - - case map[uint32]int8: - fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e) - case *map[uint32]int8: - fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint32]int16: - fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e) - case *map[uint32]int16: - fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint32]int32: - fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e) - case *map[uint32]int32: - fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]int64: - fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e) - case *map[uint32]int64: - fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint32]float32: - fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e) - case *map[uint32]float32: - fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]float64: - fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e) - case *map[uint32]float64: - fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint32]bool: - fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e) - case *map[uint32]bool: - fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e) - - case map[uint64]interface{}: - fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e) - case *map[uint64]interface{}: - fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint64]string: - fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e) - case *map[uint64]string: - fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint: - fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e) - case *map[uint64]uint: - fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint8: - fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint8: - fastpathTV.EncMapUint64Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint16: - fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint16: - fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint32: - fastpathTV.EncMapUint64Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint32: - fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint64: - fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint64: - fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uintptr: - fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint64]uintptr: - fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint64]int: - fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e) - case *map[uint64]int: - fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e) - - case map[uint64]int8: - fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e) - case *map[uint64]int8: - fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint64]int16: - fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e) - case *map[uint64]int16: - fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint64]int32: - fastpathTV.EncMapUint64Int32V(v, 
fastpathCheckNilTrue, e) - case *map[uint64]int32: - fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint64]int64: - fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e) - case *map[uint64]int64: - fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]float32: - fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e) - case *map[uint64]float32: - fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint64]float64: - fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e) - case *map[uint64]float64: - fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]bool: - fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e) - case *map[uint64]bool: - fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]interface{}: - fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e) - case *map[uintptr]interface{}: - fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]string: - fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e) - case *map[uintptr]string: - fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint: - fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint: - fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint8: - fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint8: - fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint16: - fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint16: - fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint32: - fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint32: - fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint64: - fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint64: - fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uintptr: - fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e) - case *map[uintptr]uintptr: - fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int: - fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e) - case *map[uintptr]int: - fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int8: - fastpathTV.EncMapUintptrInt8V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int8: - fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int16: - fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int16: - fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int32: - fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int32: - fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int64: - fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int64: - fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]float32: - fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e) - case *map[uintptr]float32: - fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]float64: - fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e) - case *map[uintptr]float64: - fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]bool: - 
fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e) - case *map[uintptr]bool: - fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e) - - case map[int]interface{}: - fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e) - case *map[int]interface{}: - fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e) - - case map[int]string: - fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e) - case *map[int]string: - fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e) - - case map[int]uint: - fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e) - case *map[int]uint: - fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e) - - case map[int]uint8: - fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e) - case *map[int]uint8: - fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e) - - case map[int]uint16: - fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e) - case *map[int]uint16: - fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e) - - case map[int]uint32: - fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e) - case *map[int]uint32: - fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e) - - case map[int]uint64: - fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e) - case *map[int]uint64: - fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e) - - case map[int]uintptr: - fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e) - case *map[int]uintptr: - fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e) - - case map[int]int: - fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e) - case *map[int]int: - fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e) - - case map[int]int8: - fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e) - case *map[int]int8: - fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e) - - case map[int]int16: - fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e) - case *map[int]int16: - fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e) - - case map[int]int32: - fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e) - case *map[int]int32: - fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e) - - case map[int]int64: - fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e) - case *map[int]int64: - fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e) - - case map[int]float32: - fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e) - case *map[int]float32: - fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e) - - case map[int]float64: - fastpathTV.EncMapIntFloat64V(v, fastpathCheckNilTrue, e) - case *map[int]float64: - fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e) - - case map[int]bool: - fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e) - case *map[int]bool: - fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e) - - case map[int8]interface{}: - fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e) - case *map[int8]interface{}: - fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e) - - case map[int8]string: - fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e) - case *map[int8]string: - fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e) - - case map[int8]uint: - fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e) - case *map[int8]uint: - fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e) - - case map[int8]uint8: - fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e) - case *map[int8]uint8: - fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int8]uint16: - fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e) - case *map[int8]uint16: - 
fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int8]uint32: - fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e) - case *map[int8]uint32: - fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int8]uint64: - fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e) - case *map[int8]uint64: - fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int8]uintptr: - fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e) - case *map[int8]uintptr: - fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int8]int: - fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e) - case *map[int8]int: - fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e) - - case map[int8]int8: - fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e) - case *map[int8]int8: - fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e) - - case map[int8]int16: - fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e) - case *map[int8]int16: - fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e) - - case map[int8]int32: - fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e) - case *map[int8]int32: - fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e) - - case map[int8]int64: - fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e) - case *map[int8]int64: - fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e) - - case map[int8]float32: - fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e) - case *map[int8]float32: - fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e) - - case map[int8]float64: - fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e) - case *map[int8]float64: - fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e) - - case map[int8]bool: - fastpathTV.EncMapInt8BoolV(v, fastpathCheckNilTrue, e) - case *map[int8]bool: - fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e) - - case map[int16]interface{}: - fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e) - case *map[int16]interface{}: - fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e) - - case map[int16]string: - fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e) - case *map[int16]string: - fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e) - - case map[int16]uint: - fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e) - case *map[int16]uint: - fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e) - - case map[int16]uint8: - fastpathTV.EncMapInt16Uint8V(v, fastpathCheckNilTrue, e) - case *map[int16]uint8: - fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int16]uint16: - fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e) - case *map[int16]uint16: - fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int16]uint32: - fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e) - case *map[int16]uint32: - fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int16]uint64: - fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e) - case *map[int16]uint64: - fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int16]uintptr: - fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e) - case *map[int16]uintptr: - fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int16]int: - fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e) - case *map[int16]int: - fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e) - - case map[int16]int8: - fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e) - case 
*map[int16]int8: - fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e) - - case map[int16]int16: - fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e) - case *map[int16]int16: - fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e) - - case map[int16]int32: - fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e) - case *map[int16]int32: - fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e) - - case map[int16]int64: - fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e) - case *map[int16]int64: - fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e) - - case map[int16]float32: - fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e) - case *map[int16]float32: - fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e) - - case map[int16]float64: - fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e) - case *map[int16]float64: - fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e) - - case map[int16]bool: - fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e) - case *map[int16]bool: - fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e) - - case map[int32]interface{}: - fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e) - case *map[int32]interface{}: - fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e) - - case map[int32]string: - fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e) - case *map[int32]string: - fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e) - - case map[int32]uint: - fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e) - case *map[int32]uint: - fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e) - - case map[int32]uint8: - fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e) - case *map[int32]uint8: - fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int32]uint16: - fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e) - case *map[int32]uint16: - fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int32]uint32: - fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e) - case *map[int32]uint32: - fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int32]uint64: - fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e) - case *map[int32]uint64: - fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int32]uintptr: - fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e) - case *map[int32]uintptr: - fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int32]int: - fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e) - case *map[int32]int: - fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e) - - case map[int32]int8: - fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e) - case *map[int32]int8: - fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e) - - case map[int32]int16: - fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e) - case *map[int32]int16: - fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e) - - case map[int32]int32: - fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e) - case *map[int32]int32: - fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e) - - case map[int32]int64: - fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e) - case *map[int32]int64: - fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e) - - case map[int32]float32: - fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e) - case *map[int32]float32: - fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e) - - case map[int32]float64: - 
fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e) - case *map[int32]float64: - fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e) - - case map[int32]bool: - fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e) - case *map[int32]bool: - fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e) - - case map[int64]interface{}: - fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e) - case *map[int64]interface{}: - fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e) - - case map[int64]string: - fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e) - case *map[int64]string: - fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e) - - case map[int64]uint: - fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e) - case *map[int64]uint: - fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e) - - case map[int64]uint8: - fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e) - case *map[int64]uint8: - fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int64]uint16: - fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e) - case *map[int64]uint16: - fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int64]uint32: - fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e) - case *map[int64]uint32: - fastpathTV.EncMapInt64Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int64]uint64: - fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e) - case *map[int64]uint64: - fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int64]uintptr: - fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e) - case *map[int64]uintptr: - fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int64]int: - fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e) - case *map[int64]int: - fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e) - - case map[int64]int8: - fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e) - case *map[int64]int8: - fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e) - - case map[int64]int16: - fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e) - case *map[int64]int16: - fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e) - - case map[int64]int32: - fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e) - case *map[int64]int32: - fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e) - - case map[int64]int64: - fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e) - case *map[int64]int64: - fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e) - - case map[int64]float32: - fastpathTV.EncMapInt64Float32V(v, fastpathCheckNilTrue, e) - case *map[int64]float32: - fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e) - - case map[int64]float64: - fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e) - case *map[int64]float64: - fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e) - - case map[int64]bool: - fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e) - case *map[int64]bool: - fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e) - - case map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e) - case *map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e) - - case map[bool]string: - fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e) - case *map[bool]string: - fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e) - - case map[bool]uint: - fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e) - case *map[bool]uint: - fastpathTV.EncMapBoolUintV(*v, 
fastpathCheckNilTrue, e) - - case map[bool]uint8: - fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e) - case *map[bool]uint8: - fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint16: - fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e) - case *map[bool]uint16: - fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint32: - fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e) - case *map[bool]uint32: - fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint64: - fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e) - case *map[bool]uint64: - fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e) - - case map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e) - case *map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e) - - case map[bool]int: - fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e) - case *map[bool]int: - fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e) - - case map[bool]int8: - fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e) - case *map[bool]int8: - fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e) - - case map[bool]int16: - fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e) - case *map[bool]int16: - fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e) - - case map[bool]int32: - fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e) - case *map[bool]int32: - fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e) - - case map[bool]int64: - fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e) - case *map[bool]int64: - fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e) - - case map[bool]float32: - fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e) - case *map[bool]float32: - fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e) - - case map[bool]float64: - fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e) - case *map[bool]float64: - fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e) - - case map[bool]bool: - fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e) - case *map[bool]bool: - fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e) - - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- -- fast path functions - -func (f *encFnInfo) fastpathEncSliceIntfR(rv reflect.Value) { - fastpathTV.EncSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - e.encode(v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceStringR(rv reflect.Value) { - fastpathTV.EncSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeString(c_UTF8, v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceFloat32R(rv reflect.Value) { - 
fastpathTV.EncSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeFloat32(v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceFloat64R(rv reflect.Value) { - fastpathTV.EncSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeFloat64(v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceUintR(rv reflect.Value) { - fastpathTV.EncSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceUint16R(rv reflect.Value) { - fastpathTV.EncSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceUint32R(rv reflect.Value) { - fastpathTV.EncSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) { - fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) { - fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - 
} - e.encode(v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) { - fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) { - fastpathTV.EncSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) { - fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) { - fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) { - fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) { - fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeBool(v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) { - fastpathTV.EncMapIntfIntfV(rv.Interface().(map[interface{}]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - 
ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfStringR(rv reflect.Value) { - fastpathTV.EncMapIntfStringV(rv.Interface().(map[interface{}]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfUintR(rv reflect.Value) { - fastpathTV.EncMapIntfUintV(rv.Interface().(map[interface{}]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfUint8R(rv reflect.Value) { - fastpathTV.EncMapIntfUint8V(rv.Interface().(map[interface{}]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if 
checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfUint16R(rv reflect.Value) { - fastpathTV.EncMapIntfUint16V(rv.Interface().(map[interface{}]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfUint32R(rv reflect.Value) { - fastpathTV.EncMapIntfUint32V(rv.Interface().(map[interface{}]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfUint64R(rv reflect.Value) { - fastpathTV.EncMapIntfUint64V(rv.Interface().(map[interface{}]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, checkNil 
bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfUintptrR(rv reflect.Value) { - fastpathTV.EncMapIntfUintptrV(rv.Interface().(map[interface{}]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfIntR(rv reflect.Value) { - fastpathTV.EncMapIntfIntV(rv.Interface().(map[interface{}]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfInt8R(rv reflect.Value) { - fastpathTV.EncMapIntfInt8V(rv.Interface().(map[interface{}]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfInt8V(v 
map[interface{}]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfInt16R(rv reflect.Value) { - fastpathTV.EncMapIntfInt16V(rv.Interface().(map[interface{}]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfInt32R(rv reflect.Value) { - fastpathTV.EncMapIntfInt32V(rv.Interface().(map[interface{}]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfInt64R(rv reflect.Value) { - fastpathTV.EncMapIntfInt64V(rv.Interface().(map[interface{}]int64), fastpathCheckNilFalse, f.e) -} -func (_ 
fastpathT) EncMapIntfInt64V(v map[interface{}]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfFloat32R(rv reflect.Value) { - fastpathTV.EncMapIntfFloat32V(rv.Interface().(map[interface{}]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfFloat64R(rv reflect.Value) { - fastpathTV.EncMapIntfFloat64V(rv.Interface().(map[interface{}]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfBoolR(rv reflect.Value) { - fastpathTV.EncMapIntfBoolV(rv.Interface().(map[interface{}]bool), 
fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringIntfR(rv reflect.Value) { - fastpathTV.EncMapStringIntfV(rv.Interface().(map[string]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[string(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringStringR(rv reflect.Value) { - fastpathTV.EncMapStringStringV(rv.Interface().(map[string]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringStringV(v map[string]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[string(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringUintR(rv reflect.Value) { - 
fastpathTV.EncMapStringUintV(rv.Interface().(map[string]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringUintV(v map[string]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringUint8R(rv reflect.Value) { - fastpathTV.EncMapStringUint8V(rv.Interface().(map[string]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringUint16R(rv reflect.Value) { - fastpathTV.EncMapStringUint16V(rv.Interface().(map[string]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) 
fastpathEncMapStringUint32R(rv reflect.Value) { - fastpathTV.EncMapStringUint32V(rv.Interface().(map[string]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringUint64R(rv reflect.Value) { - fastpathTV.EncMapStringUint64V(rv.Interface().(map[string]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringUintptrR(rv reflect.Value) { - fastpathTV.EncMapStringUintptrV(rv.Interface().(map[string]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[string(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringIntR(rv reflect.Value) { - fastpathTV.EncMapStringIntV(rv.Interface().(map[string]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringIntV(v map[string]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringInt8R(rv reflect.Value) { - fastpathTV.EncMapStringInt8V(rv.Interface().(map[string]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringInt8V(v map[string]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringInt16R(rv reflect.Value) { - fastpathTV.EncMapStringInt16V(rv.Interface().(map[string]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringInt16V(v map[string]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - 
} - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringInt32R(rv reflect.Value) { - fastpathTV.EncMapStringInt32V(rv.Interface().(map[string]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringInt32V(v map[string]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringInt64R(rv reflect.Value) { - fastpathTV.EncMapStringInt64V(rv.Interface().(map[string]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringInt64V(v map[string]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[string(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringFloat32R(rv reflect.Value) { - fastpathTV.EncMapStringFloat32V(rv.Interface().(map[string]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[string(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringFloat64R(rv reflect.Value) { - fastpathTV.EncMapStringFloat64V(rv.Interface().(map[string]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[string(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapStringBoolR(rv reflect.Value) { - fastpathTV.EncMapStringBoolV(rv.Interface().(map[string]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapStringBoolV(v map[string]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - if e.h.Canonical { - v2 := make([]string, len(v)) - var i int - for k, _ := range v { - v2[i] = string(k) - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[string(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32IntfR(rv reflect.Value) { - fastpathTV.EncMapFloat32IntfV(rv.Interface().(map[float32]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[float32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f 
*encFnInfo) fastpathEncMapFloat32StringR(rv reflect.Value) { - fastpathTV.EncMapFloat32StringV(rv.Interface().(map[float32]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[float32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32UintR(rv reflect.Value) { - fastpathTV.EncMapFloat32UintV(rv.Interface().(map[float32]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32Uint8R(rv reflect.Value) { - fastpathTV.EncMapFloat32Uint8V(rv.Interface().(map[float32]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32Uint16R(rv reflect.Value) { - fastpathTV.EncMapFloat32Uint16V(rv.Interface().(map[float32]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := 
range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32Uint32R(rv reflect.Value) { - fastpathTV.EncMapFloat32Uint32V(rv.Interface().(map[float32]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32Uint64R(rv reflect.Value) { - fastpathTV.EncMapFloat32Uint64V(rv.Interface().(map[float32]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32UintptrR(rv reflect.Value) { - fastpathTV.EncMapFloat32UintptrV(rv.Interface().(map[float32]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[float32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32IntR(rv reflect.Value) { - fastpathTV.EncMapFloat32IntV(rv.Interface().(map[float32]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32Int8R(rv reflect.Value) { - fastpathTV.EncMapFloat32Int8V(rv.Interface().(map[float32]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32Int16R(rv reflect.Value) { - fastpathTV.EncMapFloat32Int16V(rv.Interface().(map[float32]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32Int32R(rv reflect.Value) { - fastpathTV.EncMapFloat32Int32V(rv.Interface().(map[float32]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } 
- ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32Int64R(rv reflect.Value) { - fastpathTV.EncMapFloat32Int64V(rv.Interface().(map[float32]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[float32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32Float32R(rv reflect.Value) { - fastpathTV.EncMapFloat32Float32V(rv.Interface().(map[float32]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[float32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32Float64R(rv reflect.Value) { - fastpathTV.EncMapFloat32Float64V(rv.Interface().(map[float32]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[float32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != 
nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat32BoolR(rv reflect.Value) { - fastpathTV.EncMapFloat32BoolV(rv.Interface().(map[float32]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[float32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64IntfR(rv reflect.Value) { - fastpathTV.EncMapFloat64IntfV(rv.Interface().(map[float64]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[float64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64StringR(rv reflect.Value) { - fastpathTV.EncMapFloat64StringV(rv.Interface().(map[float64]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[float64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64UintR(rv reflect.Value) { - fastpathTV.EncMapFloat64UintV(rv.Interface().(map[float64]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, checkNil bool, e 
*Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64Uint8R(rv reflect.Value) { - fastpathTV.EncMapFloat64Uint8V(rv.Interface().(map[float64]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64Uint16R(rv reflect.Value) { - fastpathTV.EncMapFloat64Uint16V(rv.Interface().(map[float64]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64Uint32R(rv reflect.Value) { - fastpathTV.EncMapFloat64Uint32V(rv.Interface().(map[float64]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64Uint64R(rv reflect.Value) { - fastpathTV.EncMapFloat64Uint64V(rv.Interface().(map[float64]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64UintptrR(rv reflect.Value) { - fastpathTV.EncMapFloat64UintptrV(rv.Interface().(map[float64]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[float64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64IntR(rv reflect.Value) { - fastpathTV.EncMapFloat64IntV(rv.Interface().(map[float64]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64Int8R(rv reflect.Value) { - 
fastpathTV.EncMapFloat64Int8V(rv.Interface().(map[float64]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64Int16R(rv reflect.Value) { - fastpathTV.EncMapFloat64Int16V(rv.Interface().(map[float64]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64Int32R(rv reflect.Value) { - fastpathTV.EncMapFloat64Int32V(rv.Interface().(map[float64]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64Int64R(rv reflect.Value) { - fastpathTV.EncMapFloat64Int64V(rv.Interface().(map[float64]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := 
range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[float64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64Float32R(rv reflect.Value) { - fastpathTV.EncMapFloat64Float32V(rv.Interface().(map[float64]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[float64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64Float64R(rv reflect.Value) { - fastpathTV.EncMapFloat64Float64V(rv.Interface().(map[float64]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[float64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapFloat64BoolR(rv reflect.Value) { - fastpathTV.EncMapFloat64BoolV(rv.Interface().(map[float64]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]float64, len(v)) - var i int - for k, _ := range v { - v2[i] = float64(k) - i++ - } - sort.Sort(floatSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[float64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintIntfR(rv reflect.Value) { - fastpathTV.EncMapUintIntfV(rv.Interface().(map[uint]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintStringR(rv reflect.Value) { - fastpathTV.EncMapUintStringV(rv.Interface().(map[uint]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintStringV(v map[uint]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[uint(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintUintR(rv reflect.Value) { - fastpathTV.EncMapUintUintV(rv.Interface().(map[uint]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintUintV(v map[uint]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintUint8R(rv reflect.Value) { - fastpathTV.EncMapUintUint8V(rv.Interface().(map[uint]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = 
uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintUint16R(rv reflect.Value) { - fastpathTV.EncMapUintUint16V(rv.Interface().(map[uint]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintUint32R(rv reflect.Value) { - fastpathTV.EncMapUintUint32V(rv.Interface().(map[uint]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintUint64R(rv reflect.Value) { - fastpathTV.EncMapUintUint64V(rv.Interface().(map[uint]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - 
ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintUintptrR(rv reflect.Value) { - fastpathTV.EncMapUintUintptrV(rv.Interface().(map[uint]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintIntR(rv reflect.Value) { - fastpathTV.EncMapUintIntV(rv.Interface().(map[uint]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintIntV(v map[uint]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintInt8R(rv reflect.Value) { - fastpathTV.EncMapUintInt8V(rv.Interface().(map[uint]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintInt16R(rv reflect.Value) { - fastpathTV.EncMapUintInt16V(rv.Interface().(map[uint]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ 
:= range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintInt32R(rv reflect.Value) { - fastpathTV.EncMapUintInt32V(rv.Interface().(map[uint]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintInt64R(rv reflect.Value) { - fastpathTV.EncMapUintInt64V(rv.Interface().(map[uint]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintFloat32R(rv reflect.Value) { - fastpathTV.EncMapUintFloat32V(rv.Interface().(map[uint]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[uint(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - 
ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintFloat64R(rv reflect.Value) { - fastpathTV.EncMapUintFloat64V(rv.Interface().(map[uint]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[uint(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintBoolR(rv reflect.Value) { - fastpathTV.EncMapUintBoolV(rv.Interface().(map[uint]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[uint(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8IntfR(rv reflect.Value) { - fastpathTV.EncMapUint8IntfV(rv.Interface().(map[uint8]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8StringR(rv reflect.Value) { - fastpathTV.EncMapUint8StringV(rv.Interface().(map[uint8]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i 
int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8UintR(rv reflect.Value) { - fastpathTV.EncMapUint8UintV(rv.Interface().(map[uint8]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Uint8R(rv reflect.Value) { - fastpathTV.EncMapUint8Uint8V(rv.Interface().(map[uint8]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Uint16R(rv reflect.Value) { - fastpathTV.EncMapUint8Uint16V(rv.Interface().(map[uint8]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Uint32R(rv reflect.Value) { - fastpathTV.EncMapUint8Uint32V(rv.Interface().(map[uint8]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Uint64R(rv reflect.Value) { - fastpathTV.EncMapUint8Uint64V(rv.Interface().(map[uint8]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8UintptrR(rv reflect.Value) { - fastpathTV.EncMapUint8UintptrV(rv.Interface().(map[uint8]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8IntR(rv reflect.Value) { - fastpathTV.EncMapUint8IntV(rv.Interface().(map[uint8]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - 
ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Int8R(rv reflect.Value) { - fastpathTV.EncMapUint8Int8V(rv.Interface().(map[uint8]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Int16R(rv reflect.Value) { - fastpathTV.EncMapUint8Int16V(rv.Interface().(map[uint8]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Int32R(rv reflect.Value) { - fastpathTV.EncMapUint8Int32V(rv.Interface().(map[uint8]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Int64R(rv reflect.Value) { - fastpathTV.EncMapUint8Int64V(rv.Interface().(map[uint8]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Float32R(rv reflect.Value) { - fastpathTV.EncMapUint8Float32V(rv.Interface().(map[uint8]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Float64R(rv reflect.Value) { - fastpathTV.EncMapUint8Float64V(rv.Interface().(map[uint8]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8BoolR(rv reflect.Value) { - fastpathTV.EncMapUint8BoolV(rv.Interface().(map[uint8]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, checkNil bool, e 
*Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16IntfR(rv reflect.Value) { - fastpathTV.EncMapUint16IntfV(rv.Interface().(map[uint16]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16StringR(rv reflect.Value) { - fastpathTV.EncMapUint16StringV(rv.Interface().(map[uint16]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16UintR(rv reflect.Value) { - fastpathTV.EncMapUint16UintV(rv.Interface().(map[uint16]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - 
ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Uint8R(rv reflect.Value) { - fastpathTV.EncMapUint16Uint8V(rv.Interface().(map[uint16]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Uint16R(rv reflect.Value) { - fastpathTV.EncMapUint16Uint16V(rv.Interface().(map[uint16]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Uint32R(rv reflect.Value) { - fastpathTV.EncMapUint16Uint32V(rv.Interface().(map[uint16]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Uint64R(rv reflect.Value) { - 
fastpathTV.EncMapUint16Uint64V(rv.Interface().(map[uint16]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16UintptrR(rv reflect.Value) { - fastpathTV.EncMapUint16UintptrV(rv.Interface().(map[uint16]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16IntR(rv reflect.Value) { - fastpathTV.EncMapUint16IntV(rv.Interface().(map[uint16]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Int8R(rv reflect.Value) { - fastpathTV.EncMapUint16Int8V(rv.Interface().(map[uint16]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil 
{ - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Int16R(rv reflect.Value) { - fastpathTV.EncMapUint16Int16V(rv.Interface().(map[uint16]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Int32R(rv reflect.Value) { - fastpathTV.EncMapUint16Int32V(rv.Interface().(map[uint16]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Int64R(rv reflect.Value) { - fastpathTV.EncMapUint16Int64V(rv.Interface().(map[uint16]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Float32R(rv reflect.Value) { - fastpathTV.EncMapUint16Float32V(rv.Interface().(map[uint16]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Float64R(rv reflect.Value) { - fastpathTV.EncMapUint16Float64V(rv.Interface().(map[uint16]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16BoolR(rv reflect.Value) { - fastpathTV.EncMapUint16BoolV(rv.Interface().(map[uint16]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32IntfR(rv reflect.Value) { - fastpathTV.EncMapUint32IntfV(rv.Interface().(map[uint32]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, 
len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32StringR(rv reflect.Value) { - fastpathTV.EncMapUint32StringV(rv.Interface().(map[uint32]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32UintR(rv reflect.Value) { - fastpathTV.EncMapUint32UintV(rv.Interface().(map[uint32]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Uint8R(rv reflect.Value) { - fastpathTV.EncMapUint32Uint8V(rv.Interface().(map[uint32]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != 
nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Uint16R(rv reflect.Value) { - fastpathTV.EncMapUint32Uint16V(rv.Interface().(map[uint32]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Uint32R(rv reflect.Value) { - fastpathTV.EncMapUint32Uint32V(rv.Interface().(map[uint32]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Uint64R(rv reflect.Value) { - fastpathTV.EncMapUint32Uint64V(rv.Interface().(map[uint32]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32UintptrR(rv reflect.Value) { - fastpathTV.EncMapUint32UintptrV(rv.Interface().(map[uint32]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, e *Encoder) { - ee := e.e - 
cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32IntR(rv reflect.Value) { - fastpathTV.EncMapUint32IntV(rv.Interface().(map[uint32]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Int8R(rv reflect.Value) { - fastpathTV.EncMapUint32Int8V(rv.Interface().(map[uint32]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Int16R(rv reflect.Value) { - fastpathTV.EncMapUint32Int16V(rv.Interface().(map[uint32]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := 
range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Int32R(rv reflect.Value) { - fastpathTV.EncMapUint32Int32V(rv.Interface().(map[uint32]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Int64R(rv reflect.Value) { - fastpathTV.EncMapUint32Int64V(rv.Interface().(map[uint32]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Float32R(rv reflect.Value) { - fastpathTV.EncMapUint32Float32V(rv.Interface().(map[uint32]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32Float64R(rv reflect.Value) { - fastpathTV.EncMapUint32Float64V(rv.Interface().(map[uint32]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) 
EncMapUint32Float64V(v map[uint32]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32BoolR(rv reflect.Value) { - fastpathTV.EncMapUint32BoolV(rv.Interface().(map[uint32]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64IntfR(rv reflect.Value) { - fastpathTV.EncMapUint64IntfV(rv.Interface().(map[uint64]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64StringR(rv reflect.Value) { - fastpathTV.EncMapUint64StringV(rv.Interface().(map[uint64]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64UintR(rv reflect.Value) { - fastpathTV.EncMapUint64UintV(rv.Interface().(map[uint64]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Uint8R(rv reflect.Value) { - fastpathTV.EncMapUint64Uint8V(rv.Interface().(map[uint64]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Uint16R(rv reflect.Value) { - fastpathTV.EncMapUint64Uint16V(rv.Interface().(map[uint64]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Uint32R(rv 
reflect.Value) { - fastpathTV.EncMapUint64Uint32V(rv.Interface().(map[uint64]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Uint64R(rv reflect.Value) { - fastpathTV.EncMapUint64Uint64V(rv.Interface().(map[uint64]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64UintptrR(rv reflect.Value) { - fastpathTV.EncMapUint64UintptrV(rv.Interface().(map[uint64]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64IntR(rv reflect.Value) { - fastpathTV.EncMapUint64IntV(rv.Interface().(map[uint64]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for 
_, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Int8R(rv reflect.Value) { - fastpathTV.EncMapUint64Int8V(rv.Interface().(map[uint64]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Int16R(rv reflect.Value) { - fastpathTV.EncMapUint64Int16V(rv.Interface().(map[uint64]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Int32R(rv reflect.Value) { - fastpathTV.EncMapUint64Int32V(rv.Interface().(map[uint64]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Int64R(rv reflect.Value) { - fastpathTV.EncMapUint64Int64V(rv.Interface().(map[uint64]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Float32R(rv reflect.Value) { - fastpathTV.EncMapUint64Float32V(rv.Interface().(map[uint64]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64Float64R(rv reflect.Value) { - fastpathTV.EncMapUint64Float64V(rv.Interface().(map[uint64]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint64BoolR(rv reflect.Value) { - fastpathTV.EncMapUint64BoolV(rv.Interface().(map[uint64]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, 
len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[uint64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrIntfR(rv reflect.Value) { - fastpathTV.EncMapUintptrIntfV(rv.Interface().(map[uintptr]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrStringR(rv reflect.Value) { - fastpathTV.EncMapUintptrStringV(rv.Interface().(map[uintptr]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrUintR(rv reflect.Value) { - fastpathTV.EncMapUintptrUintV(rv.Interface().(map[uintptr]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - 
ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrUint8R(rv reflect.Value) { - fastpathTV.EncMapUintptrUint8V(rv.Interface().(map[uintptr]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrUint16R(rv reflect.Value) { - fastpathTV.EncMapUintptrUint16V(rv.Interface().(map[uintptr]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrUint32R(rv reflect.Value) { - fastpathTV.EncMapUintptrUint32V(rv.Interface().(map[uintptr]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrUint64R(rv reflect.Value) { - fastpathTV.EncMapUintptrUint64V(rv.Interface().(map[uintptr]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { 
- v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrUintptrR(rv reflect.Value) { - fastpathTV.EncMapUintptrUintptrV(rv.Interface().(map[uintptr]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrIntR(rv reflect.Value) { - fastpathTV.EncMapUintptrIntV(rv.Interface().(map[uintptr]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrInt8R(rv reflect.Value) { - fastpathTV.EncMapUintptrInt8V(rv.Interface().(map[uintptr]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - 
ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrInt16R(rv reflect.Value) { - fastpathTV.EncMapUintptrInt16V(rv.Interface().(map[uintptr]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrInt32R(rv reflect.Value) { - fastpathTV.EncMapUintptrInt32V(rv.Interface().(map[uintptr]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrInt64R(rv reflect.Value) { - fastpathTV.EncMapUintptrInt64V(rv.Interface().(map[uintptr]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uintptr(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrFloat32R(rv reflect.Value) { - fastpathTV.EncMapUintptrFloat32V(rv.Interface().(map[uintptr]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := 
make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrFloat64R(rv reflect.Value) { - fastpathTV.EncMapUintptrFloat64V(rv.Interface().(map[uintptr]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintptrBoolR(rv reflect.Value) { - fastpathTV.EncMapUintptrBoolV(rv.Interface().(map[uintptr]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[uintptr(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntIntfR(rv reflect.Value) { - fastpathTV.EncMapIntIntfV(rv.Interface().(map[int]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != 
nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntStringR(rv reflect.Value) { - fastpathTV.EncMapIntStringV(rv.Interface().(map[int]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntStringV(v map[int]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[int(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntUintR(rv reflect.Value) { - fastpathTV.EncMapIntUintV(rv.Interface().(map[int]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntUintV(v map[int]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntUint8R(rv reflect.Value) { - fastpathTV.EncMapIntUint8V(rv.Interface().(map[int]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntUint16R(rv reflect.Value) { - fastpathTV.EncMapIntUint16V(rv.Interface().(map[int]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - 
sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntUint32R(rv reflect.Value) { - fastpathTV.EncMapIntUint32V(rv.Interface().(map[int]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntUint64R(rv reflect.Value) { - fastpathTV.EncMapIntUint64V(rv.Interface().(map[int]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntUintptrR(rv reflect.Value) { - fastpathTV.EncMapIntUintptrV(rv.Interface().(map[int]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - 
-func (f *encFnInfo) fastpathEncMapIntIntR(rv reflect.Value) { - fastpathTV.EncMapIntIntV(rv.Interface().(map[int]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntIntV(v map[int]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntInt8R(rv reflect.Value) { - fastpathTV.EncMapIntInt8V(rv.Interface().(map[int]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntInt8V(v map[int]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntInt16R(rv reflect.Value) { - fastpathTV.EncMapIntInt16V(rv.Interface().(map[int]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntInt16V(v map[int]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntInt32R(rv reflect.Value) { - fastpathTV.EncMapIntInt32V(rv.Interface().(map[int]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntInt32V(v map[int]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntInt64R(rv reflect.Value) { - fastpathTV.EncMapIntInt64V(rv.Interface().(map[int]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntInt64V(v map[int]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntFloat32R(rv reflect.Value) { - fastpathTV.EncMapIntFloat32V(rv.Interface().(map[int]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[int(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntFloat64R(rv reflect.Value) { - fastpathTV.EncMapIntFloat64V(rv.Interface().(map[int]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[int(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntBoolR(rv reflect.Value) { - 
fastpathTV.EncMapIntBoolV(rv.Interface().(map[int]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntBoolV(v map[int]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[int(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8IntfR(rv reflect.Value) { - fastpathTV.EncMapInt8IntfV(rv.Interface().(map[int8]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8StringR(rv reflect.Value) { - fastpathTV.EncMapInt8StringV(rv.Interface().(map[int8]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8StringV(v map[int8]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8UintR(rv reflect.Value) { - fastpathTV.EncMapInt8UintV(rv.Interface().(map[int8]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - 
if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Uint8R(rv reflect.Value) { - fastpathTV.EncMapInt8Uint8V(rv.Interface().(map[int8]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Uint16R(rv reflect.Value) { - fastpathTV.EncMapInt8Uint16V(rv.Interface().(map[int8]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Uint32R(rv reflect.Value) { - fastpathTV.EncMapInt8Uint32V(rv.Interface().(map[int8]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Uint64R(rv reflect.Value) { - 
fastpathTV.EncMapInt8Uint64V(rv.Interface().(map[int8]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8UintptrR(rv reflect.Value) { - fastpathTV.EncMapInt8UintptrV(rv.Interface().(map[int8]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8IntR(rv reflect.Value) { - fastpathTV.EncMapInt8IntV(rv.Interface().(map[int8]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8IntV(v map[int8]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Int8R(rv reflect.Value) { - fastpathTV.EncMapInt8Int8V(rv.Interface().(map[int8]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - 
ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Int16R(rv reflect.Value) { - fastpathTV.EncMapInt8Int16V(rv.Interface().(map[int8]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Int32R(rv reflect.Value) { - fastpathTV.EncMapInt8Int32V(rv.Interface().(map[int8]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Int64R(rv reflect.Value) { - fastpathTV.EncMapInt8Int64V(rv.Interface().(map[int8]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Float32R(rv reflect.Value) { - 
fastpathTV.EncMapInt8Float32V(rv.Interface().(map[int8]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Float64R(rv reflect.Value) { - fastpathTV.EncMapInt8Float64V(rv.Interface().(map[int8]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8BoolR(rv reflect.Value) { - fastpathTV.EncMapInt8BoolV(rv.Interface().(map[int8]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16IntfR(rv reflect.Value) { - fastpathTV.EncMapInt16IntfV(rv.Interface().(map[int16]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - 
ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16StringR(rv reflect.Value) { - fastpathTV.EncMapInt16StringV(rv.Interface().(map[int16]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16StringV(v map[int16]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16UintR(rv reflect.Value) { - fastpathTV.EncMapInt16UintV(rv.Interface().(map[int16]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Uint8R(rv reflect.Value) { - fastpathTV.EncMapInt16Uint8V(rv.Interface().(map[int16]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Uint16R(rv reflect.Value) { - 
fastpathTV.EncMapInt16Uint16V(rv.Interface().(map[int16]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Uint32R(rv reflect.Value) { - fastpathTV.EncMapInt16Uint32V(rv.Interface().(map[int16]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Uint64R(rv reflect.Value) { - fastpathTV.EncMapInt16Uint64V(rv.Interface().(map[int16]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16UintptrR(rv reflect.Value) { - fastpathTV.EncMapInt16UintptrV(rv.Interface().(map[int16]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr 
!= nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16IntR(rv reflect.Value) { - fastpathTV.EncMapInt16IntV(rv.Interface().(map[int16]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16IntV(v map[int16]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Int8R(rv reflect.Value) { - fastpathTV.EncMapInt16Int8V(rv.Interface().(map[int16]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Int16R(rv reflect.Value) { - fastpathTV.EncMapInt16Int16V(rv.Interface().(map[int16]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Int32R(rv 
reflect.Value) { - fastpathTV.EncMapInt16Int32V(rv.Interface().(map[int16]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Int64R(rv reflect.Value) { - fastpathTV.EncMapInt16Int64V(rv.Interface().(map[int16]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Float32R(rv reflect.Value) { - fastpathTV.EncMapInt16Float32V(rv.Interface().(map[int16]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Float64R(rv reflect.Value) { - fastpathTV.EncMapInt16Float64V(rv.Interface().(map[int16]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != 
nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16BoolR(rv reflect.Value) { - fastpathTV.EncMapInt16BoolV(rv.Interface().(map[int16]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32IntfR(rv reflect.Value) { - fastpathTV.EncMapInt32IntfV(rv.Interface().(map[int32]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32StringR(rv reflect.Value) { - fastpathTV.EncMapInt32StringV(rv.Interface().(map[int32]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32StringV(v map[int32]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32UintR(rv 
reflect.Value) { - fastpathTV.EncMapInt32UintV(rv.Interface().(map[int32]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Uint8R(rv reflect.Value) { - fastpathTV.EncMapInt32Uint8V(rv.Interface().(map[int32]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Uint16R(rv reflect.Value) { - fastpathTV.EncMapInt32Uint16V(rv.Interface().(map[int32]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Uint32R(rv reflect.Value) { - fastpathTV.EncMapInt32Uint32V(rv.Interface().(map[int32]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr 
!= nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Uint64R(rv reflect.Value) { - fastpathTV.EncMapInt32Uint64V(rv.Interface().(map[int32]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32UintptrR(rv reflect.Value) { - fastpathTV.EncMapInt32UintptrV(rv.Interface().(map[int32]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32IntR(rv reflect.Value) { - fastpathTV.EncMapInt32IntV(rv.Interface().(map[int32]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32IntV(v map[int32]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) 
fastpathEncMapInt32Int8R(rv reflect.Value) { - fastpathTV.EncMapInt32Int8V(rv.Interface().(map[int32]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Int16R(rv reflect.Value) { - fastpathTV.EncMapInt32Int16V(rv.Interface().(map[int32]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Int32R(rv reflect.Value) { - fastpathTV.EncMapInt32Int32V(rv.Interface().(map[int32]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Int64R(rv reflect.Value) { - fastpathTV.EncMapInt32Int64V(rv.Interface().(map[int32]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - 
if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Float32R(rv reflect.Value) { - fastpathTV.EncMapInt32Float32V(rv.Interface().(map[int32]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Float64R(rv reflect.Value) { - fastpathTV.EncMapInt32Float64V(rv.Interface().(map[int32]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32BoolR(rv reflect.Value) { - fastpathTV.EncMapInt32BoolV(rv.Interface().(map[int32]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) 
fastpathEncMapInt64IntfR(rv reflect.Value) { - fastpathTV.EncMapInt64IntfV(rv.Interface().(map[int64]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64StringR(rv reflect.Value) { - fastpathTV.EncMapInt64StringV(rv.Interface().(map[int64]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64StringV(v map[int64]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64UintR(rv reflect.Value) { - fastpathTV.EncMapInt64UintV(rv.Interface().(map[int64]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Uint8R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint8V(rv.Interface().(map[int64]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 
{ - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Uint16R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint16V(rv.Interface().(map[int64]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Uint32R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint32V(rv.Interface().(map[int64]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Uint64R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint64V(rv.Interface().(map[int64]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64UintptrR(rv reflect.Value) { - fastpathTV.EncMapInt64UintptrV(rv.Interface().(map[int64]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64IntR(rv reflect.Value) { - fastpathTV.EncMapInt64IntV(rv.Interface().(map[int64]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64IntV(v map[int64]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Int8R(rv reflect.Value) { - fastpathTV.EncMapInt64Int8V(rv.Interface().(map[int64]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Int16R(rv reflect.Value) { - fastpathTV.EncMapInt64Int16V(rv.Interface().(map[int64]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - 
sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Int32R(rv reflect.Value) { - fastpathTV.EncMapInt64Int32V(rv.Interface().(map[int64]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Int64R(rv reflect.Value) { - fastpathTV.EncMapInt64Int64V(rv.Interface().(map[int64]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Float32R(rv reflect.Value) { - fastpathTV.EncMapInt64Float32V(rv.Interface().(map[int64]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Float64R(rv reflect.Value) { - fastpathTV.EncMapInt64Float64V(rv.Interface().(map[int64]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64BoolR(rv reflect.Value) { - fastpathTV.EncMapInt64BoolV(rv.Interface().(map[int64]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolIntfR(rv reflect.Value) { - fastpathTV.EncMapBoolIntfV(rv.Interface().(map[bool]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolStringR(rv reflect.Value) { - fastpathTV.EncMapBoolStringV(rv.Interface().(map[bool]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolStringV(v map[bool]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) 
- for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolUintR(rv reflect.Value) { - fastpathTV.EncMapBoolUintV(rv.Interface().(map[bool]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolUint8R(rv reflect.Value) { - fastpathTV.EncMapBoolUint8V(rv.Interface().(map[bool]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolUint16R(rv reflect.Value) { - fastpathTV.EncMapBoolUint16V(rv.Interface().(map[bool]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) 
fastpathEncMapBoolUint32R(rv reflect.Value) { - fastpathTV.EncMapBoolUint32V(rv.Interface().(map[bool]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolUint64R(rv reflect.Value) { - fastpathTV.EncMapBoolUint64V(rv.Interface().(map[bool]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolUintptrR(rv reflect.Value) { - fastpathTV.EncMapBoolUintptrV(rv.Interface().(map[bool]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolIntR(rv reflect.Value) { - fastpathTV.EncMapBoolIntV(rv.Interface().(map[bool]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolIntV(v map[bool]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - 
ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolInt8R(rv reflect.Value) { - fastpathTV.EncMapBoolInt8V(rv.Interface().(map[bool]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolInt16R(rv reflect.Value) { - fastpathTV.EncMapBoolInt16V(rv.Interface().(map[bool]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolInt32R(rv reflect.Value) { - fastpathTV.EncMapBoolInt32V(rv.Interface().(map[bool]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolInt64R(rv reflect.Value) { - fastpathTV.EncMapBoolInt64V(rv.Interface().(map[bool]int64), fastpathCheckNilFalse, f.e) 
-} -func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[bool(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolFloat32R(rv reflect.Value) { - fastpathTV.EncMapBoolFloat32V(rv.Interface().(map[bool]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolFloat64R(rv reflect.Value) { - fastpathTV.EncMapBoolFloat64V(rv.Interface().(map[bool]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[bool(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapBoolBoolR(rv reflect.Value) { - fastpathTV.EncMapBoolBoolV(rv.Interface().(map[bool]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]bool, len(v)) - var i int - for k, _ := range v { - v2[i] = bool(k) - i++ - } - sort.Sort(boolSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[bool(k2)]) - } - } else { 
- for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -// -- decode - -// -- -- fast path type switch -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { - if !fastpathEnabled { - return false - } - switch v := iv.(type) { - - case []interface{}: - fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, d) - case *[]interface{}: - v2, changed2 := fastpathTV.DecSliceIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]interface{}: - fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]interface{}: - v2, changed2 := fastpathTV.DecMapIntfIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]string: - fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]string: - v2, changed2 := fastpathTV.DecMapIntfStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]uint: - fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]uint: - v2, changed2 := fastpathTV.DecMapIntfUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]uint8: - fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]uint8: - v2, changed2 := fastpathTV.DecMapIntfUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]uint16: - fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]uint16: - v2, changed2 := fastpathTV.DecMapIntfUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]uint32: - fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]uint32: - v2, changed2 := fastpathTV.DecMapIntfUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]uint64: - fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]uint64: - v2, changed2 := fastpathTV.DecMapIntfUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]uintptr: - fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]uintptr: - v2, changed2 := fastpathTV.DecMapIntfUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]int: - fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]int: - v2, changed2 := fastpathTV.DecMapIntfIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]int8: - fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]int8: - v2, changed2 := fastpathTV.DecMapIntfInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]int16: - fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]int16: - v2, changed2 := fastpathTV.DecMapIntfInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]int32: - fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]int32: - v2, changed2 := fastpathTV.DecMapIntfInt32V(*v, fastpathCheckNilFalse, true, d) - if 
changed2 { - *v = v2 - } - - case map[interface{}]int64: - fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]int64: - v2, changed2 := fastpathTV.DecMapIntfInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]float32: - fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]float32: - v2, changed2 := fastpathTV.DecMapIntfFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]float64: - fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]float64: - v2, changed2 := fastpathTV.DecMapIntfFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]bool: - fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]bool: - v2, changed2 := fastpathTV.DecMapIntfBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []string: - fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, d) - case *[]string: - v2, changed2 := fastpathTV.DecSliceStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]interface{}: - fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, d) - case *map[string]interface{}: - v2, changed2 := fastpathTV.DecMapStringIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]string: - fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, d) - case *map[string]string: - v2, changed2 := fastpathTV.DecMapStringStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uint: - fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, d) - case *map[string]uint: - v2, changed2 := fastpathTV.DecMapStringUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uint8: - fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, d) - case *map[string]uint8: - v2, changed2 := fastpathTV.DecMapStringUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uint16: - fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, d) - case *map[string]uint16: - v2, changed2 := fastpathTV.DecMapStringUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uint32: - fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, d) - case *map[string]uint32: - v2, changed2 := fastpathTV.DecMapStringUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uint64: - fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, d) - case *map[string]uint64: - v2, changed2 := fastpathTV.DecMapStringUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uintptr: - fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, d) - case *map[string]uintptr: - v2, changed2 := fastpathTV.DecMapStringUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]int: - fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, d) - case *map[string]int: - v2, changed2 := fastpathTV.DecMapStringIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]int8: - fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, d) - case *map[string]int8: - v2, changed2 := fastpathTV.DecMapStringInt8V(*v, fastpathCheckNilFalse, true, d) - 
if changed2 { - *v = v2 - } - - case map[string]int16: - fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, d) - case *map[string]int16: - v2, changed2 := fastpathTV.DecMapStringInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]int32: - fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, d) - case *map[string]int32: - v2, changed2 := fastpathTV.DecMapStringInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]int64: - fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, d) - case *map[string]int64: - v2, changed2 := fastpathTV.DecMapStringInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]float32: - fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, d) - case *map[string]float32: - v2, changed2 := fastpathTV.DecMapStringFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]float64: - fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, d) - case *map[string]float64: - v2, changed2 := fastpathTV.DecMapStringFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]bool: - fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, d) - case *map[string]bool: - v2, changed2 := fastpathTV.DecMapStringBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []float32: - fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, d) - case *[]float32: - v2, changed2 := fastpathTV.DecSliceFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]interface{}: - fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, d) - case *map[float32]interface{}: - v2, changed2 := fastpathTV.DecMapFloat32IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]string: - fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, d) - case *map[float32]string: - v2, changed2 := fastpathTV.DecMapFloat32StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uint: - fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, d) - case *map[float32]uint: - v2, changed2 := fastpathTV.DecMapFloat32UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uint8: - fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[float32]uint8: - v2, changed2 := fastpathTV.DecMapFloat32Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uint16: - fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[float32]uint16: - v2, changed2 := fastpathTV.DecMapFloat32Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uint32: - fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[float32]uint32: - v2, changed2 := fastpathTV.DecMapFloat32Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uint64: - fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[float32]uint64: - v2, changed2 := fastpathTV.DecMapFloat32Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uintptr: - fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[float32]uintptr: - v2, changed2 := fastpathTV.DecMapFloat32UintptrV(*v, 
fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]int: - fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, d) - case *map[float32]int: - v2, changed2 := fastpathTV.DecMapFloat32IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]int8: - fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, d) - case *map[float32]int8: - v2, changed2 := fastpathTV.DecMapFloat32Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]int16: - fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, d) - case *map[float32]int16: - v2, changed2 := fastpathTV.DecMapFloat32Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]int32: - fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, d) - case *map[float32]int32: - v2, changed2 := fastpathTV.DecMapFloat32Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]int64: - fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, d) - case *map[float32]int64: - v2, changed2 := fastpathTV.DecMapFloat32Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]float32: - fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, d) - case *map[float32]float32: - v2, changed2 := fastpathTV.DecMapFloat32Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]float64: - fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, d) - case *map[float32]float64: - v2, changed2 := fastpathTV.DecMapFloat32Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]bool: - fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, d) - case *map[float32]bool: - v2, changed2 := fastpathTV.DecMapFloat32BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []float64: - fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, d) - case *[]float64: - v2, changed2 := fastpathTV.DecSliceFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]interface{}: - fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, d) - case *map[float64]interface{}: - v2, changed2 := fastpathTV.DecMapFloat64IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]string: - fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, d) - case *map[float64]string: - v2, changed2 := fastpathTV.DecMapFloat64StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uint: - fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, d) - case *map[float64]uint: - v2, changed2 := fastpathTV.DecMapFloat64UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uint8: - fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[float64]uint8: - v2, changed2 := fastpathTV.DecMapFloat64Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uint16: - fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[float64]uint16: - v2, changed2 := fastpathTV.DecMapFloat64Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uint32: - fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[float64]uint32: - v2, changed2 := 
fastpathTV.DecMapFloat64Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uint64: - fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[float64]uint64: - v2, changed2 := fastpathTV.DecMapFloat64Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uintptr: - fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[float64]uintptr: - v2, changed2 := fastpathTV.DecMapFloat64UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]int: - fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, d) - case *map[float64]int: - v2, changed2 := fastpathTV.DecMapFloat64IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]int8: - fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, d) - case *map[float64]int8: - v2, changed2 := fastpathTV.DecMapFloat64Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]int16: - fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, d) - case *map[float64]int16: - v2, changed2 := fastpathTV.DecMapFloat64Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]int32: - fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, d) - case *map[float64]int32: - v2, changed2 := fastpathTV.DecMapFloat64Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]int64: - fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, d) - case *map[float64]int64: - v2, changed2 := fastpathTV.DecMapFloat64Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]float32: - fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, d) - case *map[float64]float32: - v2, changed2 := fastpathTV.DecMapFloat64Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]float64: - fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, d) - case *map[float64]float64: - v2, changed2 := fastpathTV.DecMapFloat64Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]bool: - fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, d) - case *map[float64]bool: - v2, changed2 := fastpathTV.DecMapFloat64BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []uint: - fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, d) - case *[]uint: - v2, changed2 := fastpathTV.DecSliceUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]interface{}: - fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, d) - case *map[uint]interface{}: - v2, changed2 := fastpathTV.DecMapUintIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]string: - fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, d) - case *map[uint]string: - v2, changed2 := fastpathTV.DecMapUintStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uint: - fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, d) - case *map[uint]uint: - v2, changed2 := fastpathTV.DecMapUintUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uint8: - fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, d) - case *map[uint]uint8: - v2, changed2 := 
fastpathTV.DecMapUintUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uint16: - fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, d) - case *map[uint]uint16: - v2, changed2 := fastpathTV.DecMapUintUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uint32: - fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, d) - case *map[uint]uint32: - v2, changed2 := fastpathTV.DecMapUintUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uint64: - fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, d) - case *map[uint]uint64: - v2, changed2 := fastpathTV.DecMapUintUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uintptr: - fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, d) - case *map[uint]uintptr: - v2, changed2 := fastpathTV.DecMapUintUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]int: - fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, d) - case *map[uint]int: - v2, changed2 := fastpathTV.DecMapUintIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]int8: - fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, d) - case *map[uint]int8: - v2, changed2 := fastpathTV.DecMapUintInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]int16: - fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, d) - case *map[uint]int16: - v2, changed2 := fastpathTV.DecMapUintInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]int32: - fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, d) - case *map[uint]int32: - v2, changed2 := fastpathTV.DecMapUintInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]int64: - fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, d) - case *map[uint]int64: - v2, changed2 := fastpathTV.DecMapUintInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]float32: - fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, d) - case *map[uint]float32: - v2, changed2 := fastpathTV.DecMapUintFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]float64: - fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, d) - case *map[uint]float64: - v2, changed2 := fastpathTV.DecMapUintFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]bool: - fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, d) - case *map[uint]bool: - v2, changed2 := fastpathTV.DecMapUintBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]interface{}: - fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]interface{}: - v2, changed2 := fastpathTV.DecMapUint8IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]string: - fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]string: - v2, changed2 := fastpathTV.DecMapUint8StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uint: - fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uint: - v2, changed2 := fastpathTV.DecMapUint8UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case 
map[uint8]uint8: - fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uint8: - v2, changed2 := fastpathTV.DecMapUint8Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uint16: - fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uint16: - v2, changed2 := fastpathTV.DecMapUint8Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uint32: - fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uint32: - v2, changed2 := fastpathTV.DecMapUint8Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uint64: - fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uint64: - v2, changed2 := fastpathTV.DecMapUint8Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uintptr: - fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uintptr: - v2, changed2 := fastpathTV.DecMapUint8UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]int: - fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]int: - v2, changed2 := fastpathTV.DecMapUint8IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]int8: - fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]int8: - v2, changed2 := fastpathTV.DecMapUint8Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]int16: - fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]int16: - v2, changed2 := fastpathTV.DecMapUint8Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]int32: - fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]int32: - v2, changed2 := fastpathTV.DecMapUint8Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]int64: - fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]int64: - v2, changed2 := fastpathTV.DecMapUint8Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]float32: - fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]float32: - v2, changed2 := fastpathTV.DecMapUint8Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]float64: - fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]float64: - v2, changed2 := fastpathTV.DecMapUint8Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]bool: - fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]bool: - v2, changed2 := fastpathTV.DecMapUint8BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []uint16: - fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, d) - case *[]uint16: - v2, changed2 := fastpathTV.DecSliceUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]interface{}: - fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]interface{}: - v2, changed2 := fastpathTV.DecMapUint16IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]string: - fastpathTV.DecMapUint16StringV(v, 
fastpathCheckNilFalse, false, d) - case *map[uint16]string: - v2, changed2 := fastpathTV.DecMapUint16StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uint: - fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uint: - v2, changed2 := fastpathTV.DecMapUint16UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uint8: - fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uint8: - v2, changed2 := fastpathTV.DecMapUint16Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uint16: - fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uint16: - v2, changed2 := fastpathTV.DecMapUint16Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uint32: - fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uint32: - v2, changed2 := fastpathTV.DecMapUint16Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uint64: - fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uint64: - v2, changed2 := fastpathTV.DecMapUint16Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uintptr: - fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uintptr: - v2, changed2 := fastpathTV.DecMapUint16UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]int: - fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]int: - v2, changed2 := fastpathTV.DecMapUint16IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]int8: - fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]int8: - v2, changed2 := fastpathTV.DecMapUint16Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]int16: - fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]int16: - v2, changed2 := fastpathTV.DecMapUint16Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]int32: - fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]int32: - v2, changed2 := fastpathTV.DecMapUint16Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]int64: - fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]int64: - v2, changed2 := fastpathTV.DecMapUint16Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]float32: - fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]float32: - v2, changed2 := fastpathTV.DecMapUint16Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]float64: - fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]float64: - v2, changed2 := fastpathTV.DecMapUint16Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]bool: - fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]bool: - v2, changed2 := fastpathTV.DecMapUint16BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []uint32: - fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, 
false, d) - case *[]uint32: - v2, changed2 := fastpathTV.DecSliceUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]interface{}: - fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]interface{}: - v2, changed2 := fastpathTV.DecMapUint32IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]string: - fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]string: - v2, changed2 := fastpathTV.DecMapUint32StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uint: - fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uint: - v2, changed2 := fastpathTV.DecMapUint32UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uint8: - fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uint8: - v2, changed2 := fastpathTV.DecMapUint32Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uint16: - fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uint16: - v2, changed2 := fastpathTV.DecMapUint32Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uint32: - fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uint32: - v2, changed2 := fastpathTV.DecMapUint32Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uint64: - fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uint64: - v2, changed2 := fastpathTV.DecMapUint32Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uintptr: - fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uintptr: - v2, changed2 := fastpathTV.DecMapUint32UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]int: - fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]int: - v2, changed2 := fastpathTV.DecMapUint32IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]int8: - fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]int8: - v2, changed2 := fastpathTV.DecMapUint32Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]int16: - fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]int16: - v2, changed2 := fastpathTV.DecMapUint32Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]int32: - fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]int32: - v2, changed2 := fastpathTV.DecMapUint32Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]int64: - fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]int64: - v2, changed2 := fastpathTV.DecMapUint32Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]float32: - fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]float32: - v2, changed2 := fastpathTV.DecMapUint32Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]float64: - fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, d) 
- case *map[uint32]float64: - v2, changed2 := fastpathTV.DecMapUint32Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]bool: - fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]bool: - v2, changed2 := fastpathTV.DecMapUint32BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []uint64: - fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, d) - case *[]uint64: - v2, changed2 := fastpathTV.DecSliceUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]interface{}: - fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]interface{}: - v2, changed2 := fastpathTV.DecMapUint64IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]string: - fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]string: - v2, changed2 := fastpathTV.DecMapUint64StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uint: - fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uint: - v2, changed2 := fastpathTV.DecMapUint64UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uint8: - fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uint8: - v2, changed2 := fastpathTV.DecMapUint64Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uint16: - fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uint16: - v2, changed2 := fastpathTV.DecMapUint64Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uint32: - fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uint32: - v2, changed2 := fastpathTV.DecMapUint64Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uint64: - fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uint64: - v2, changed2 := fastpathTV.DecMapUint64Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uintptr: - fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uintptr: - v2, changed2 := fastpathTV.DecMapUint64UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]int: - fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]int: - v2, changed2 := fastpathTV.DecMapUint64IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]int8: - fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]int8: - v2, changed2 := fastpathTV.DecMapUint64Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]int16: - fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]int16: - v2, changed2 := fastpathTV.DecMapUint64Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]int32: - fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]int32: - v2, changed2 := fastpathTV.DecMapUint64Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]int64: - fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]int64: - v2, 
changed2 := fastpathTV.DecMapUint64Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]float32: - fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]float32: - v2, changed2 := fastpathTV.DecMapUint64Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]float64: - fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]float64: - v2, changed2 := fastpathTV.DecMapUint64Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]bool: - fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]bool: - v2, changed2 := fastpathTV.DecMapUint64BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []uintptr: - fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, d) - case *[]uintptr: - v2, changed2 := fastpathTV.DecSliceUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]interface{}: - fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]interface{}: - v2, changed2 := fastpathTV.DecMapUintptrIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]string: - fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]string: - v2, changed2 := fastpathTV.DecMapUintptrStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]uint: - fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]uint: - v2, changed2 := fastpathTV.DecMapUintptrUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]uint8: - fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]uint8: - v2, changed2 := fastpathTV.DecMapUintptrUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]uint16: - fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]uint16: - v2, changed2 := fastpathTV.DecMapUintptrUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]uint32: - fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]uint32: - v2, changed2 := fastpathTV.DecMapUintptrUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]uint64: - fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]uint64: - v2, changed2 := fastpathTV.DecMapUintptrUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]uintptr: - fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]uintptr: - v2, changed2 := fastpathTV.DecMapUintptrUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]int: - fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]int: - v2, changed2 := fastpathTV.DecMapUintptrIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]int8: - fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]int8: - v2, changed2 := fastpathTV.DecMapUintptrInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]int16: - fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, d) - case 
*map[uintptr]int16: - v2, changed2 := fastpathTV.DecMapUintptrInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]int32: - fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]int32: - v2, changed2 := fastpathTV.DecMapUintptrInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]int64: - fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]int64: - v2, changed2 := fastpathTV.DecMapUintptrInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]float32: - fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]float32: - v2, changed2 := fastpathTV.DecMapUintptrFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]float64: - fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]float64: - v2, changed2 := fastpathTV.DecMapUintptrFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]bool: - fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]bool: - v2, changed2 := fastpathTV.DecMapUintptrBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []int: - fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, d) - case *[]int: - v2, changed2 := fastpathTV.DecSliceIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]interface{}: - fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, d) - case *map[int]interface{}: - v2, changed2 := fastpathTV.DecMapIntIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]string: - fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, d) - case *map[int]string: - v2, changed2 := fastpathTV.DecMapIntStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]uint: - fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, d) - case *map[int]uint: - v2, changed2 := fastpathTV.DecMapIntUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]uint8: - fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, d) - case *map[int]uint8: - v2, changed2 := fastpathTV.DecMapIntUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]uint16: - fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, d) - case *map[int]uint16: - v2, changed2 := fastpathTV.DecMapIntUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]uint32: - fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, d) - case *map[int]uint32: - v2, changed2 := fastpathTV.DecMapIntUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]uint64: - fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, d) - case *map[int]uint64: - v2, changed2 := fastpathTV.DecMapIntUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]uintptr: - fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, d) - case *map[int]uintptr: - v2, changed2 := fastpathTV.DecMapIntUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]int: - fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, d) - case *map[int]int: - v2, changed2 := fastpathTV.DecMapIntIntV(*v, fastpathCheckNilFalse, true, d) - if 
changed2 { - *v = v2 - } - - case map[int]int8: - fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, d) - case *map[int]int8: - v2, changed2 := fastpathTV.DecMapIntInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]int16: - fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, d) - case *map[int]int16: - v2, changed2 := fastpathTV.DecMapIntInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]int32: - fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, d) - case *map[int]int32: - v2, changed2 := fastpathTV.DecMapIntInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]int64: - fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, d) - case *map[int]int64: - v2, changed2 := fastpathTV.DecMapIntInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]float32: - fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, d) - case *map[int]float32: - v2, changed2 := fastpathTV.DecMapIntFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]float64: - fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, d) - case *map[int]float64: - v2, changed2 := fastpathTV.DecMapIntFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int]bool: - fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, d) - case *map[int]bool: - v2, changed2 := fastpathTV.DecMapIntBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []int8: - fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, d) - case *[]int8: - v2, changed2 := fastpathTV.DecSliceInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]interface{}: - fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, d) - case *map[int8]interface{}: - v2, changed2 := fastpathTV.DecMapInt8IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]string: - fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, d) - case *map[int8]string: - v2, changed2 := fastpathTV.DecMapInt8StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]uint: - fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, d) - case *map[int8]uint: - v2, changed2 := fastpathTV.DecMapInt8UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]uint8: - fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[int8]uint8: - v2, changed2 := fastpathTV.DecMapInt8Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]uint16: - fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[int8]uint16: - v2, changed2 := fastpathTV.DecMapInt8Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]uint32: - fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[int8]uint32: - v2, changed2 := fastpathTV.DecMapInt8Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]uint64: - fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[int8]uint64: - v2, changed2 := fastpathTV.DecMapInt8Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]uintptr: - fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[int8]uintptr: - v2, changed2 := 
fastpathTV.DecMapInt8UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]int: - fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, d) - case *map[int8]int: - v2, changed2 := fastpathTV.DecMapInt8IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]int8: - fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, d) - case *map[int8]int8: - v2, changed2 := fastpathTV.DecMapInt8Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]int16: - fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, d) - case *map[int8]int16: - v2, changed2 := fastpathTV.DecMapInt8Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]int32: - fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, d) - case *map[int8]int32: - v2, changed2 := fastpathTV.DecMapInt8Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]int64: - fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, d) - case *map[int8]int64: - v2, changed2 := fastpathTV.DecMapInt8Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]float32: - fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, d) - case *map[int8]float32: - v2, changed2 := fastpathTV.DecMapInt8Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]float64: - fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, d) - case *map[int8]float64: - v2, changed2 := fastpathTV.DecMapInt8Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int8]bool: - fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, d) - case *map[int8]bool: - v2, changed2 := fastpathTV.DecMapInt8BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []int16: - fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, d) - case *[]int16: - v2, changed2 := fastpathTV.DecSliceInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]interface{}: - fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, d) - case *map[int16]interface{}: - v2, changed2 := fastpathTV.DecMapInt16IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]string: - fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, d) - case *map[int16]string: - v2, changed2 := fastpathTV.DecMapInt16StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]uint: - fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, d) - case *map[int16]uint: - v2, changed2 := fastpathTV.DecMapInt16UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]uint8: - fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[int16]uint8: - v2, changed2 := fastpathTV.DecMapInt16Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]uint16: - fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[int16]uint16: - v2, changed2 := fastpathTV.DecMapInt16Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]uint32: - fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[int16]uint32: - v2, changed2 := fastpathTV.DecMapInt16Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]uint64: - 
fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[int16]uint64: - v2, changed2 := fastpathTV.DecMapInt16Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]uintptr: - fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[int16]uintptr: - v2, changed2 := fastpathTV.DecMapInt16UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]int: - fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, d) - case *map[int16]int: - v2, changed2 := fastpathTV.DecMapInt16IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]int8: - fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, d) - case *map[int16]int8: - v2, changed2 := fastpathTV.DecMapInt16Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]int16: - fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, d) - case *map[int16]int16: - v2, changed2 := fastpathTV.DecMapInt16Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]int32: - fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, d) - case *map[int16]int32: - v2, changed2 := fastpathTV.DecMapInt16Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]int64: - fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, d) - case *map[int16]int64: - v2, changed2 := fastpathTV.DecMapInt16Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]float32: - fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, d) - case *map[int16]float32: - v2, changed2 := fastpathTV.DecMapInt16Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]float64: - fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, d) - case *map[int16]float64: - v2, changed2 := fastpathTV.DecMapInt16Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int16]bool: - fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, d) - case *map[int16]bool: - v2, changed2 := fastpathTV.DecMapInt16BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []int32: - fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, d) - case *[]int32: - v2, changed2 := fastpathTV.DecSliceInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]interface{}: - fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, d) - case *map[int32]interface{}: - v2, changed2 := fastpathTV.DecMapInt32IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]string: - fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, d) - case *map[int32]string: - v2, changed2 := fastpathTV.DecMapInt32StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]uint: - fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, d) - case *map[int32]uint: - v2, changed2 := fastpathTV.DecMapInt32UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]uint8: - fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[int32]uint8: - v2, changed2 := fastpathTV.DecMapInt32Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]uint16: - fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, d) - case 
*map[int32]uint16: - v2, changed2 := fastpathTV.DecMapInt32Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]uint32: - fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[int32]uint32: - v2, changed2 := fastpathTV.DecMapInt32Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]uint64: - fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[int32]uint64: - v2, changed2 := fastpathTV.DecMapInt32Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]uintptr: - fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[int32]uintptr: - v2, changed2 := fastpathTV.DecMapInt32UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]int: - fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, d) - case *map[int32]int: - v2, changed2 := fastpathTV.DecMapInt32IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]int8: - fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, d) - case *map[int32]int8: - v2, changed2 := fastpathTV.DecMapInt32Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]int16: - fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, d) - case *map[int32]int16: - v2, changed2 := fastpathTV.DecMapInt32Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]int32: - fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, d) - case *map[int32]int32: - v2, changed2 := fastpathTV.DecMapInt32Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]int64: - fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, d) - case *map[int32]int64: - v2, changed2 := fastpathTV.DecMapInt32Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]float32: - fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, d) - case *map[int32]float32: - v2, changed2 := fastpathTV.DecMapInt32Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]float64: - fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, d) - case *map[int32]float64: - v2, changed2 := fastpathTV.DecMapInt32Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int32]bool: - fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, false, d) - case *map[int32]bool: - v2, changed2 := fastpathTV.DecMapInt32BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []int64: - fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, d) - case *[]int64: - v2, changed2 := fastpathTV.DecSliceInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]interface{}: - fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, d) - case *map[int64]interface{}: - v2, changed2 := fastpathTV.DecMapInt64IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]string: - fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, d) - case *map[int64]string: - v2, changed2 := fastpathTV.DecMapInt64StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]uint: - fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, d) - case *map[int64]uint: - v2, changed2 := fastpathTV.DecMapInt64UintV(*v, 
fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]uint8: - fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[int64]uint8: - v2, changed2 := fastpathTV.DecMapInt64Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]uint16: - fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[int64]uint16: - v2, changed2 := fastpathTV.DecMapInt64Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]uint32: - fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[int64]uint32: - v2, changed2 := fastpathTV.DecMapInt64Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]uint64: - fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[int64]uint64: - v2, changed2 := fastpathTV.DecMapInt64Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]uintptr: - fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[int64]uintptr: - v2, changed2 := fastpathTV.DecMapInt64UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]int: - fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, d) - case *map[int64]int: - v2, changed2 := fastpathTV.DecMapInt64IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]int8: - fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, d) - case *map[int64]int8: - v2, changed2 := fastpathTV.DecMapInt64Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]int16: - fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, d) - case *map[int64]int16: - v2, changed2 := fastpathTV.DecMapInt64Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]int32: - fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, d) - case *map[int64]int32: - v2, changed2 := fastpathTV.DecMapInt64Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]int64: - fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, d) - case *map[int64]int64: - v2, changed2 := fastpathTV.DecMapInt64Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]float32: - fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, d) - case *map[int64]float32: - v2, changed2 := fastpathTV.DecMapInt64Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]float64: - fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, d) - case *map[int64]float64: - v2, changed2 := fastpathTV.DecMapInt64Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[int64]bool: - fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, d) - case *map[int64]bool: - v2, changed2 := fastpathTV.DecMapInt64BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []bool: - fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, d) - case *[]bool: - v2, changed2 := fastpathTV.DecSliceBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]interface{}: - fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, d) - case *map[bool]interface{}: - v2, changed2 := fastpathTV.DecMapBoolIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]string: - 
fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, d) - case *map[bool]string: - v2, changed2 := fastpathTV.DecMapBoolStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]uint: - fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, d) - case *map[bool]uint: - v2, changed2 := fastpathTV.DecMapBoolUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]uint8: - fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, d) - case *map[bool]uint8: - v2, changed2 := fastpathTV.DecMapBoolUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]uint16: - fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, d) - case *map[bool]uint16: - v2, changed2 := fastpathTV.DecMapBoolUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]uint32: - fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, d) - case *map[bool]uint32: - v2, changed2 := fastpathTV.DecMapBoolUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]uint64: - fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, d) - case *map[bool]uint64: - v2, changed2 := fastpathTV.DecMapBoolUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]uintptr: - fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, d) - case *map[bool]uintptr: - v2, changed2 := fastpathTV.DecMapBoolUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]int: - fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, d) - case *map[bool]int: - v2, changed2 := fastpathTV.DecMapBoolIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]int8: - fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, d) - case *map[bool]int8: - v2, changed2 := fastpathTV.DecMapBoolInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]int16: - fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, d) - case *map[bool]int16: - v2, changed2 := fastpathTV.DecMapBoolInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]int32: - fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, d) - case *map[bool]int32: - v2, changed2 := fastpathTV.DecMapBoolInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]int64: - fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, d) - case *map[bool]int64: - v2, changed2 := fastpathTV.DecMapBoolInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]float32: - fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, d) - case *map[bool]float32: - v2, changed2 := fastpathTV.DecMapBoolFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]float64: - fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, d) - case *map[bool]float64: - v2, changed2 := fastpathTV.DecMapBoolFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[bool]bool: - fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, d) - case *map[bool]bool: - v2, changed2 := fastpathTV.DecMapBoolBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- 
-- fast path functions - -func (f *decFnInfo) fastpathDecSliceIntfR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]interface{}) - v, changed := fastpathTV.DecSliceIntfV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]interface{}) - fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceIntfX(vp *[]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecSliceIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool, d *Decoder) (_ []interface{}, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []interface{}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]interface{}, xlen) - } - } else { - v = make([]interface{}, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - d.decode(&v[j]) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, nil) - slh.ElemContainerState(j) - d.decode(&v[j]) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []interface{}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]interface{}, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, nil) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - d.decode(&v[j]) - - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceStringR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]string) - v, changed := fastpathTV.DecSliceStringV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]string) - fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceStringX(vp *[]string, checkNil bool, d *Decoder) { - v, changed := f.DecSliceStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d *Decoder) (_ []string, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []string{} - } else if len(v) != 0 { - 
v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]string, xlen) - } - } else { - v = make([]string, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = dd.DecodeString() - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, "") - slh.ElemContainerState(j) - v[j] = dd.DecodeString() - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []string{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]string, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, "") - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = dd.DecodeString() - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceFloat32R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]float32) - v, changed := fastpathTV.DecSliceFloat32V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]float32) - fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceFloat32X(vp *[]float32, checkNil bool, d *Decoder) { - v, changed := f.DecSliceFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool, d *Decoder) (_ []float32, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []float32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]float32, xlen) - } - } else { - v = make([]float32, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = float32(dd.DecodeFloat(true)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = float32(dd.DecodeFloat(true)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if 
breakFound { - if canChange { - if v == nil { - v = []float32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]float32, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = float32(dd.DecodeFloat(true)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceFloat64R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]float64) - v, changed := fastpathTV.DecSliceFloat64V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]float64) - fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceFloat64X(vp *[]float64, checkNil bool, d *Decoder) { - v, changed := f.DecSliceFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool, d *Decoder) (_ []float64, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []float64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]float64, xlen) - } - } else { - v = make([]float64, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = dd.DecodeFloat(false) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = dd.DecodeFloat(false) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []float64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]float64, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = dd.DecodeFloat(false) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceUintR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]uint) - v, changed := fastpathTV.DecSliceUintV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]uint) - 
fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceUintX(vp *[]uint, checkNil bool, d *Decoder) { - v, changed := f.DecSliceUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Decoder) (_ []uint, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint, xlen) - } - } else { - v = make([]uint, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = uint(dd.DecodeUint(uintBitsize)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = uint(dd.DecodeUint(uintBitsize)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []uint{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]uint, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = uint(dd.DecodeUint(uintBitsize)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceUint16R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]uint16) - v, changed := fastpathTV.DecSliceUint16V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]uint16) - fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceUint16X(vp *[]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecSliceUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d *Decoder) (_ []uint16, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint16{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint16, xlen) - } - 
} else { - v = make([]uint16, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = uint16(dd.DecodeUint(16)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = uint16(dd.DecodeUint(16)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []uint16{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]uint16, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = uint16(dd.DecodeUint(16)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceUint32R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]uint32) - v, changed := fastpathTV.DecSliceUint32V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]uint32) - fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceUint32X(vp *[]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecSliceUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d *Decoder) (_ []uint32, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint32, xlen) - } - } else { - v = make([]uint32, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = uint32(dd.DecodeUint(32)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = uint32(dd.DecodeUint(32)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []uint32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]uint32, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - 
d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = uint32(dd.DecodeUint(32)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceUint64R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]uint64) - v, changed := fastpathTV.DecSliceUint64V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]uint64) - fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceUint64X(vp *[]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecSliceUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d *Decoder) (_ []uint64, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uint64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint64, xlen) - } - } else { - v = make([]uint64, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = dd.DecodeUint(64) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = dd.DecodeUint(64) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []uint64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]uint64, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = dd.DecodeUint(64) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceUintptrR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]uintptr) - v, changed := fastpathTV.DecSliceUintptrV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]uintptr) - fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecSliceUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool, d *Decoder) (_ []uintptr, changed bool) { - dd := d.d 
- - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []uintptr{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uintptr, xlen) - } - } else { - v = make([]uintptr, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = uintptr(dd.DecodeUint(uintBitsize)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = uintptr(dd.DecodeUint(uintBitsize)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []uintptr{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]uintptr, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = uintptr(dd.DecodeUint(uintBitsize)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceIntR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]int) - v, changed := fastpathTV.DecSliceIntV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]int) - fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceIntX(vp *[]int, checkNil bool, d *Decoder) { - v, changed := f.DecSliceIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decoder) (_ []int, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int, xlen) - } - } else { - v = make([]int, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = int(dd.DecodeInt(intBitsize)) - } - if xtrunc { - for ; j < 
containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = int(dd.DecodeInt(intBitsize)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []int{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]int, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = int(dd.DecodeInt(intBitsize)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceInt8R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]int8) - v, changed := fastpathTV.DecSliceInt8V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]int8) - fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceInt8X(vp *[]int8, checkNil bool, d *Decoder) { - v, changed := f.DecSliceInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Decoder) (_ []int8, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int8{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int8, xlen) - } - } else { - v = make([]int8, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = int8(dd.DecodeInt(8)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = int8(dd.DecodeInt(8)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []int8{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]int8, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = int8(dd.DecodeInt(8)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceInt16R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - 
vp := rv.Addr().Interface().(*[]int16) - v, changed := fastpathTV.DecSliceInt16V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]int16) - fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceInt16X(vp *[]int16, checkNil bool, d *Decoder) { - v, changed := f.DecSliceInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *Decoder) (_ []int16, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int16{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int16, xlen) - } - } else { - v = make([]int16, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = int16(dd.DecodeInt(16)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = int16(dd.DecodeInt(16)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []int16{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]int16, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = int16(dd.DecodeInt(16)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceInt32R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]int32) - v, changed := fastpathTV.DecSliceInt32V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]int32) - fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceInt32X(vp *[]int32, checkNil bool, d *Decoder) { - v, changed := f.DecSliceInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *Decoder) (_ []int32, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - 
var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int32, xlen) - } - } else { - v = make([]int32, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = int32(dd.DecodeInt(32)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = int32(dd.DecodeInt(32)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []int32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]int32, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = int32(dd.DecodeInt(32)) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceInt64R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]int64) - v, changed := fastpathTV.DecSliceInt64V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]int64) - fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceInt64X(vp *[]int64, checkNil bool, d *Decoder) { - v, changed := f.DecSliceInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *Decoder) (_ []int64, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []int64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int64, xlen) - } - } else { - v = make([]int64, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = dd.DecodeInt(64) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = dd.DecodeInt(64) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []int64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]int64, 1, 4) - changed = true - } - j := 
0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = dd.DecodeInt(64) - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecSliceBoolR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]bool) - v, changed := fastpathTV.DecSliceBoolV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]bool) - fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) DecSliceBoolX(vp *[]bool, checkNil bool, d *Decoder) { - v, changed := f.DecSliceBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Decoder) (_ []bool, changed bool) { - dd := d.d - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []bool{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]bool, xlen) - } - } else { - v = make([]bool, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = dd.DecodeBool() - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, false) - slh.ElemContainerState(j) - v[j] = dd.DecodeBool() - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []bool{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]bool, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, false) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = dd.DecodeBool() - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]interface{}) - v, changed := fastpathTV.DecMapIntfIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]interface{}) - fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfIntfV(v 
map[interface{}]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[interface{}]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk interface{} - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]string) - v, changed := fastpathTV.DecMapIntfStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]string) - fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[interface{}]string, xlen) - changed = true - } - - var mk interface{} - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint) - v, changed := fastpathTV.DecMapIntfUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := 
rv.Interface().(map[interface{}]uint) - fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]uint, xlen) - changed = true - } - - var mk interface{} - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint8) - v, changed := fastpathTV.DecMapIntfUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uint8) - fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[interface{}]uint8, xlen) - changed = true - } - - var mk interface{} - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := 
rv.Addr().Interface().(*map[interface{}]uint16) - v, changed := fastpathTV.DecMapIntfUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uint16) - fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[interface{}]uint16, xlen) - changed = true - } - - var mk interface{} - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint32) - v, changed := fastpathTV.DecMapIntfUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uint32) - fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[interface{}]uint32, xlen) - changed = true - } - - var mk interface{} - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - 
} - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint64) - v, changed := fastpathTV.DecMapIntfUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uint64) - fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]uint64, xlen) - changed = true - } - - var mk interface{} - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uintptr) - v, changed := fastpathTV.DecMapIntfUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uintptr) - fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]uintptr, xlen) - changed = true - } - - var mk interface{} - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - 
d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int) - v, changed := fastpathTV.DecMapIntfIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]int) - fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]int, xlen) - changed = true - } - - var mk interface{} - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int8) - v, changed := fastpathTV.DecMapIntfInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]int8) - fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[interface{}]int8, xlen) - changed = true - } - - var mk interface{} - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } 
else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int16) - v, changed := fastpathTV.DecMapIntfInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]int16) - fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[interface{}]int16, xlen) - changed = true - } - - var mk interface{} - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int32) - v, changed := fastpathTV.DecMapIntfInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]int32) - fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[interface{}]int32, xlen) - changed = true - } - - var mk interface{} - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = 
d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int64) - v, changed := fastpathTV.DecMapIntfInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]int64) - fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]int64, xlen) - changed = true - } - - var mk interface{} - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]float32) - v, changed := fastpathTV.DecMapIntfFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]float32) - fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[interface{}]float32, xlen) - changed = true - } - - var mk interface{} - var mv float32 - if containerLen > 0 { - 
for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]float64) - v, changed := fastpathTV.DecMapIntfFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]float64) - fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]float64, xlen) - changed = true - } - - var mk interface{} - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]bool) - v, changed := fastpathTV.DecMapIntfBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]bool) - fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, 
_ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[interface{}]bool, xlen) - changed = true - } - - var mk interface{} - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]interface{}) - v, changed := fastpathTV.DecMapStringIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]interface{}) - fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[string]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[string]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk string - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]string) - v, changed := fastpathTV.DecMapStringStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]string) - fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringStringX(vp *map[string]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringStringV(v map[string]string, checkNil bool, canChange bool, - d *Decoder) (_ map[string]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && 
dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[string]string, xlen) - changed = true - } - - var mk string - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint) - v, changed := fastpathTV.DecMapStringUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uint) - fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUintX(vp *map[string]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUintV(v map[string]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]uint, xlen) - changed = true - } - - var mk string - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint8) - v, changed := fastpathTV.DecMapStringUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uint8) - fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && 
v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[string]uint8, xlen) - changed = true - } - - var mk string - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint16) - v, changed := fastpathTV.DecMapStringUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uint16) - fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[string]uint16, xlen) - changed = true - } - - var mk string - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint32) - v, changed := fastpathTV.DecMapStringUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uint32) - fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[string]uint32, xlen) - 
changed = true - } - - var mk string - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint64) - v, changed := fastpathTV.DecMapStringUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uint64) - fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]uint64, xlen) - changed = true - } - - var mk string - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uintptr) - v, changed := fastpathTV.DecMapStringUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uintptr) - fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]uintptr, xlen) - changed = true - } - - var mk string - var mv uintptr - if containerLen > 0 { - for j := 0; j < 
containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int) - v, changed := fastpathTV.DecMapStringIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]int) - fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringIntX(vp *map[string]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringIntV(v map[string]int, checkNil bool, canChange bool, - d *Decoder) (_ map[string]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]int, xlen) - changed = true - } - - var mk string - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int8) - v, changed := fastpathTV.DecMapStringInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]int8) - fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt8V(v map[string]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[string]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[string]int8, xlen) - changed = true - } - - var mk string - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int16) - v, changed := fastpathTV.DecMapStringInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]int16) - fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt16V(v map[string]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[string]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[string]int16, xlen) - changed = true - } - - var mk string - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int32) - v, changed := fastpathTV.DecMapStringInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]int32) - fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt32V(v map[string]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[string]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[string]int32, xlen) - changed = true - } - - var mk string - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 
0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int64) - v, changed := fastpathTV.DecMapStringInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]int64) - fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt64V(v map[string]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[string]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]int64, xlen) - changed = true - } - - var mk string - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]float32) - v, changed := fastpathTV.DecMapStringFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]float32) - fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[string]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[string]float32, xlen) - changed = true - } - - var mk string - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]float64) - v, changed := fastpathTV.DecMapStringFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]float64) - fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[string]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]float64, xlen) - changed = true - } - - var mk string - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]bool) - v, changed := fastpathTV.DecMapStringBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]bool) - fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringBoolV(v map[string]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[string]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[string]bool, xlen) - changed = true - } - - var mk string - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] 
= mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]interface{}) - v, changed := fastpathTV.DecMapFloat32IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]interface{}) - fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[float32]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk float32 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]string) - v, changed := fastpathTV.DecMapFloat32StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]string) - fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[float32]string, xlen) - changed = true - } - - var mk float32 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != 
nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uint) - v, changed := fastpathTV.DecMapFloat32UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]uint) - fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]uint, xlen) - changed = true - } - - var mk float32 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uint8) - v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]uint8) - fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[float32]uint8, xlen) - changed = true - } - - var mk float32 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = 
uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uint16) - v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]uint16) - fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[float32]uint16, xlen) - changed = true - } - - var mk float32 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uint32) - v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]uint32) - fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[float32]uint32, xlen) - changed = true - } - - var mk float32 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v 
!= nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uint64) - v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]uint64) - fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]uint64, xlen) - changed = true - } - - var mk float32 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uintptr) - v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]uintptr) - fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]uintptr, xlen) - changed = true - } - - var mk float32 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - 
v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]int) - v, changed := fastpathTV.DecMapFloat32IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]int) - fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]int, xlen) - changed = true - } - - var mk float32 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]int8) - v, changed := fastpathTV.DecMapFloat32Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]int8) - fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[float32]int8, xlen) - changed = true - } - - var mk float32 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return 
v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]int16) - v, changed := fastpathTV.DecMapFloat32Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]int16) - fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[float32]int16, xlen) - changed = true - } - - var mk float32 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]int32) - v, changed := fastpathTV.DecMapFloat32Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]int32) - fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[float32]int32, xlen) - changed = true - } - - var mk float32 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) 
fastpathDecMapFloat32Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]int64) - v, changed := fastpathTV.DecMapFloat32Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]int64) - fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]int64, xlen) - changed = true - } - - var mk float32 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]float32) - v, changed := fastpathTV.DecMapFloat32Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]float32) - fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[float32]float32, xlen) - changed = true - } - - var mk float32 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32Float64R(rv 
reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]float64) - v, changed := fastpathTV.DecMapFloat32Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]float64) - fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float32]float64, xlen) - changed = true - } - - var mk float32 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]bool) - v, changed := fastpathTV.DecMapFloat32BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]bool) - fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[float32]bool, xlen) - changed = true - } - - var mk float32 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := 
rv.Addr().Interface().(*map[float64]interface{}) - v, changed := fastpathTV.DecMapFloat64IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]interface{}) - fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[float64]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk float64 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]string) - v, changed := fastpathTV.DecMapFloat64StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]string) - fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[float64]string, xlen) - changed = true - } - - var mk float64 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) 
fastpathDecMapFloat64UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uint) - v, changed := fastpathTV.DecMapFloat64UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]uint) - fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]uint, xlen) - changed = true - } - - var mk float64 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uint8) - v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]uint8) - fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[float64]uint8, xlen) - changed = true - } - - var mk float64 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := 
rv.Addr().Interface().(*map[float64]uint16) - v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]uint16) - fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[float64]uint16, xlen) - changed = true - } - - var mk float64 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uint32) - v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]uint32) - fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float64]uint32, xlen) - changed = true - } - - var mk float64 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uint64) - v, changed := 
fastpathTV.DecMapFloat64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]uint64) - fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]uint64, xlen) - changed = true - } - - var mk float64 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uintptr) - v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]uintptr) - fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]uintptr, xlen) - changed = true - } - - var mk float64 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]int) - v, changed := fastpathTV.DecMapFloat64IntV(*vp, 
fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]int) - fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]int, xlen) - changed = true - } - - var mk float64 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]int8) - v, changed := fastpathTV.DecMapFloat64Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]int8) - fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[float64]int8, xlen) - changed = true - } - - var mk float64 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]int16) - v, changed := fastpathTV.DecMapFloat64Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]int16) - 
fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[float64]int16, xlen) - changed = true - } - - var mk float64 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]int32) - v, changed := fastpathTV.DecMapFloat64Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]int32) - fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float64]int32, xlen) - changed = true - } - - var mk float64 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]int64) - v, changed := fastpathTV.DecMapFloat64Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]int64) - fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f 
fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]int64, xlen) - changed = true - } - - var mk float64 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]float32) - v, changed := fastpathTV.DecMapFloat64Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]float32) - fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[float64]float32, xlen) - changed = true - } - - var mk float64 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]float64) - v, changed := fastpathTV.DecMapFloat64Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]float64) - fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, 
checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[float64]float64, xlen) - changed = true - } - - var mk float64 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat64BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]bool) - v, changed := fastpathTV.DecMapFloat64BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float64]bool) - fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[float64]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[float64]bool, xlen) - changed = true - } - - var mk float64 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]interface{}) - v, changed := fastpathTV.DecMapUintIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]interface{}) - fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} 
-func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]string) - v, changed := fastpathTV.DecMapUintStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]string) - fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintStringX(vp *map[uint]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintStringV(v map[uint]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint]string, xlen) - changed = true - } - - var mk uint - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uint) - v, changed := fastpathTV.DecMapUintUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]uint) - fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUintV(*vp, checkNil, true, d) - if changed { - 
*vp = v - } -} -func (_ fastpathT) DecMapUintUintV(v map[uint]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]uint, xlen) - changed = true - } - - var mk uint - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uint8) - v, changed := fastpathTV.DecMapUintUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]uint8) - fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint]uint8, xlen) - changed = true - } - - var mk uint - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uint16) - v, changed := fastpathTV.DecMapUintUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]uint16) - fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, checkNil bool, canChange bool, - d 
*Decoder) (_ map[uint]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint]uint16, xlen) - changed = true - } - - var mk uint - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uint32) - v, changed := fastpathTV.DecMapUintUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]uint32) - fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint]uint32, xlen) - changed = true - } - - var mk uint - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uint64) - v, changed := fastpathTV.DecMapUintUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]uint64) - fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && 
dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]uint64, xlen) - changed = true - } - - var mk uint - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uintptr) - v, changed := fastpathTV.DecMapUintUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]uintptr) - fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]uintptr, xlen) - changed = true - } - - var mk uint - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int) - v, changed := fastpathTV.DecMapUintIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]int) - fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintIntX(vp *map[uint]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintIntV(v map[uint]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := 
dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]int, xlen) - changed = true - } - - var mk uint - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int8) - v, changed := fastpathTV.DecMapUintInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]int8) - fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint]int8, xlen) - changed = true - } - - var mk uint - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int16) - v, changed := fastpathTV.DecMapUintInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]int16) - fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint]int16, xlen) 
- changed = true - } - - var mk uint - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int32) - v, changed := fastpathTV.DecMapUintInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]int32) - fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint]int32, xlen) - changed = true - } - - var mk uint - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int64) - v, changed := fastpathTV.DecMapUintInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]int64) - fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]int64, xlen) - changed = true - } - - var mk uint - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil 
{ - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]float32) - v, changed := fastpathTV.DecMapUintFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]float32) - fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint]float32, xlen) - changed = true - } - - var mk uint - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]float64) - v, changed := fastpathTV.DecMapUintFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]float64) - fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]float64, xlen) - changed = true - } - - var mk uint - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]bool) - v, changed := fastpathTV.DecMapUintBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]bool) - fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint]bool, xlen) - changed = true - } - - var mk uint - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]interface{}) - v, changed := fastpathTV.DecMapUint8IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]interface{}) - fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[uint8]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint8 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]string) - v, changed := fastpathTV.DecMapUint8StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]string) - fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[uint8]string, xlen) - changed = true - } - - var mk uint8 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint) - v, changed := fastpathTV.DecMapUint8UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uint) - fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]uint, xlen) - changed = true - } - - var mk uint8 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = 
uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint8) - v, changed := fastpathTV.DecMapUint8Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uint8) - fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[uint8]uint8, xlen) - changed = true - } - - var mk uint8 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint16) - v, changed := fastpathTV.DecMapUint8Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uint16) - fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint8]uint16, xlen) - changed = true - } - - var mk uint8 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; 
!dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint32) - v, changed := fastpathTV.DecMapUint8Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uint32) - fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint8]uint32, xlen) - changed = true - } - - var mk uint8 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint64) - v, changed := fastpathTV.DecMapUint8Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uint64) - fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]uint64, xlen) - changed = true - } - - var mk uint8 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uintptr) - v, changed := fastpathTV.DecMapUint8UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uintptr) - fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]uintptr, xlen) - changed = true - } - - var mk uint8 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int) - v, changed := fastpathTV.DecMapUint8IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]int) - fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]int, xlen) - changed = true - } - - var mk uint8 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = 
int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int8) - v, changed := fastpathTV.DecMapUint8Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]int8) - fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[uint8]int8, xlen) - changed = true - } - - var mk uint8 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int16) - v, changed := fastpathTV.DecMapUint8Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]int16) - fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint8]int16, xlen) - changed = true - } - - var mk uint8 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - 
-func (f *decFnInfo) fastpathDecMapUint8Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int32) - v, changed := fastpathTV.DecMapUint8Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]int32) - fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint8]int32, xlen) - changed = true - } - - var mk uint8 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int64) - v, changed := fastpathTV.DecMapUint8Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]int64) - fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]int64, xlen) - changed = true - } - - var mk uint8 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]float32) - v, changed 
:= fastpathTV.DecMapUint8Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]float32) - fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint8]float32, xlen) - changed = true - } - - var mk uint8 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]float64) - v, changed := fastpathTV.DecMapUint8Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]float64) - fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]float64, xlen) - changed = true - } - - var mk uint8 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]bool) - v, changed := fastpathTV.DecMapUint8BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - 
*vp = v - } - } else { - v := rv.Interface().(map[uint8]bool) - fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[uint8]bool, xlen) - changed = true - } - - var mk uint8 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]interface{}) - v, changed := fastpathTV.DecMapUint16IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]interface{}) - fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[uint16]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint16 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]string) - v, changed := fastpathTV.DecMapUint16StringV(*vp, fastpathCheckNilFalse, true, 
f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]string) - fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[uint16]string, xlen) - changed = true - } - - var mk uint16 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint) - v, changed := fastpathTV.DecMapUint16UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uint) - fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]uint, xlen) - changed = true - } - - var mk uint16 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint8) - v, changed := fastpathTV.DecMapUint16Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uint8) - 
fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint16]uint8, xlen) - changed = true - } - - var mk uint16 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint16) - v, changed := fastpathTV.DecMapUint16Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uint16) - fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[uint16]uint16, xlen) - changed = true - } - - var mk uint16 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint32) - v, changed := fastpathTV.DecMapUint16Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uint32) - fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f 
fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint16]uint32, xlen) - changed = true - } - - var mk uint16 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint64) - v, changed := fastpathTV.DecMapUint16Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uint64) - fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]uint64, xlen) - changed = true - } - - var mk uint16 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uintptr) - v, changed := fastpathTV.DecMapUint16UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uintptr) - fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, checkNil bool, d *Decoder) 
{ - v, changed := f.DecMapUint16UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]uintptr, xlen) - changed = true - } - - var mk uint16 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int) - v, changed := fastpathTV.DecMapUint16IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]int) - fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]int, xlen) - changed = true - } - - var mk uint16 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int8) - v, changed := fastpathTV.DecMapUint16Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]int8) - fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - 
} -} -func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint16]int8, xlen) - changed = true - } - - var mk uint16 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int16) - v, changed := fastpathTV.DecMapUint16Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]int16) - fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[uint16]int16, xlen) - changed = true - } - - var mk uint16 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int32) - v, changed := fastpathTV.DecMapUint16Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]int32) - fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, checkNil bool, canChange bool, - d *Decoder) (_ 
map[uint16]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint16]int32, xlen) - changed = true - } - - var mk uint16 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int64) - v, changed := fastpathTV.DecMapUint16Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]int64) - fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]int64, xlen) - changed = true - } - - var mk uint16 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]float32) - v, changed := fastpathTV.DecMapUint16Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]float32) - fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() 
{ - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint16]float32, xlen) - changed = true - } - - var mk uint16 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]float64) - v, changed := fastpathTV.DecMapUint16Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]float64) - fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]float64, xlen) - changed = true - } - - var mk uint16 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]bool) - v, changed := fastpathTV.DecMapUint16BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]bool) - fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - 
containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint16]bool, xlen) - changed = true - } - - var mk uint16 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]interface{}) - v, changed := fastpathTV.DecMapUint32IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]interface{}) - fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[uint32]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint32 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]string) - v, changed := fastpathTV.DecMapUint32StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]string) - fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { 
- changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[uint32]string, xlen) - changed = true - } - - var mk uint32 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint) - v, changed := fastpathTV.DecMapUint32UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uint) - fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]uint, xlen) - changed = true - } - - var mk uint32 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint8) - v, changed := fastpathTV.DecMapUint32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uint8) - fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == 
nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint32]uint8, xlen) - changed = true - } - - var mk uint32 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint16) - v, changed := fastpathTV.DecMapUint32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uint16) - fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint32]uint16, xlen) - changed = true - } - - var mk uint32 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint32) - v, changed := fastpathTV.DecMapUint32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uint32) - fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = 
make(map[uint32]uint32, xlen) - changed = true - } - - var mk uint32 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint64) - v, changed := fastpathTV.DecMapUint32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uint64) - fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]uint64, xlen) - changed = true - } - - var mk uint32 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uintptr) - v, changed := fastpathTV.DecMapUint32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uintptr) - fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]uintptr, xlen) - changed = true - } - - var mk uint32 - var 
mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int) - v, changed := fastpathTV.DecMapUint32IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]int) - fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]int, xlen) - changed = true - } - - var mk uint32 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int8) - v, changed := fastpathTV.DecMapUint32Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]int8) - fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint32]int8, xlen) - changed = true - } - - var mk uint32 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int16) - v, changed := fastpathTV.DecMapUint32Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]int16) - fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint32]int16, xlen) - changed = true - } - - var mk uint32 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int32) - v, changed := fastpathTV.DecMapUint32Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]int32) - fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[uint32]int32, xlen) - changed = true - } - - var mk uint32 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int64) - v, changed := fastpathTV.DecMapUint32Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]int64) - fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]int64, xlen) - changed = true - } - - var mk uint32 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]float32) - v, changed := fastpathTV.DecMapUint32Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]float32) - fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[uint32]float32, xlen) - changed = true - } - - var mk uint32 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != 
nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]float64) - v, changed := fastpathTV.DecMapUint32Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]float64) - fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]float64, xlen) - changed = true - } - - var mk uint32 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]bool) - v, changed := fastpathTV.DecMapUint32BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]bool) - fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint32]bool, xlen) - changed = true - } - - var mk uint32 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - 
if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]interface{}) - v, changed := fastpathTV.DecMapUint64IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]interface{}) - fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint64]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint64 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]string) - v, changed := fastpathTV.DecMapUint64StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]string) - fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint64]string, xlen) - changed = true - } - - var mk uint64 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for 
j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint) - v, changed := fastpathTV.DecMapUint64UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uint) - fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]uint, xlen) - changed = true - } - - var mk uint64 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint8) - v, changed := fastpathTV.DecMapUint64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uint8) - fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint64]uint8, xlen) - changed = true - } - - var mk uint64 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint16) - v, changed := fastpathTV.DecMapUint64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uint16) - fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint64]uint16, xlen) - changed = true - } - - var mk uint64 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint32) - v, changed := fastpathTV.DecMapUint64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uint32) - fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint64]uint32, xlen) - changed = true - } - - var mk uint64 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = 
mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint64) - v, changed := fastpathTV.DecMapUint64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uint64) - fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]uint64, xlen) - changed = true - } - - var mk uint64 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uintptr) - v, changed := fastpathTV.DecMapUint64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uintptr) - fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]uintptr, xlen) - changed = true - } - - var mk uint64 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return 
v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int) - v, changed := fastpathTV.DecMapUint64IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]int) - fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]int, xlen) - changed = true - } - - var mk uint64 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int8) - v, changed := fastpathTV.DecMapUint64Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]int8) - fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint64]int8, xlen) - changed = true - } - - var mk uint64 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int16) - v, changed 
:= fastpathTV.DecMapUint64Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]int16) - fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint64]int16, xlen) - changed = true - } - - var mk uint64 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int32) - v, changed := fastpathTV.DecMapUint64Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]int32) - fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint64]int32, xlen) - changed = true - } - - var mk uint64 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int64) - v, changed := fastpathTV.DecMapUint64Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := 
rv.Interface().(map[uint64]int64) - fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]int64, xlen) - changed = true - } - - var mk uint64 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]float32) - v, changed := fastpathTV.DecMapUint64Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]float32) - fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint64]float32, xlen) - changed = true - } - - var mk uint64 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]float64) - v, changed := fastpathTV.DecMapUint64Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]float64) - fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, 
f.d) - } -} -func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]float64, xlen) - changed = true - } - - var mk uint64 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]bool) - v, changed := fastpathTV.DecMapUint64BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]bool) - fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint64]bool, xlen) - changed = true - } - - var mk uint64 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]interface{}) - v, changed := fastpathTV.DecMapUintptrIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]interface{}) - fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, checkNil bool, d *Decoder) { - v, changed := 
f.DecMapUintptrIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uintptr]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uintptr - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]string) - v, changed := fastpathTV.DecMapUintptrStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]string) - fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uintptr]string, xlen) - changed = true - } - - var mk uintptr - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint) - v, changed := fastpathTV.DecMapUintptrUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uint) - fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, f.d) - } -} 
-func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]uint, xlen) - changed = true - } - - var mk uintptr - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint8) - v, changed := fastpathTV.DecMapUintptrUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uint8) - fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uintptr]uint8, xlen) - changed = true - } - - var mk uintptr - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint16) - v, changed := fastpathTV.DecMapUintptrUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uint16) - fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) 
DecMapUintptrUint16X(vp *map[uintptr]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uintptr]uint16, xlen) - changed = true - } - - var mk uintptr - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint32) - v, changed := fastpathTV.DecMapUintptrUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uint32) - fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uintptr]uint32, xlen) - changed = true - } - - var mk uintptr - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint64) - v, changed := fastpathTV.DecMapUintptrUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uint64) - fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) 
DecMapUintptrUint64X(vp *map[uintptr]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]uint64, xlen) - changed = true - } - - var mk uintptr - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uintptr) - v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uintptr) - fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]uintptr, xlen) - changed = true - } - - var mk uintptr - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int) - v, changed := fastpathTV.DecMapUintptrIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]int) - fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) 
DecMapUintptrIntX(vp *map[uintptr]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]int, xlen) - changed = true - } - - var mk uintptr - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int8) - v, changed := fastpathTV.DecMapUintptrInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]int8) - fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uintptr]int8, xlen) - changed = true - } - - var mk uintptr - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int16) - v, changed := fastpathTV.DecMapUintptrInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]int16) - fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, checkNil bool, d 
*Decoder) { - v, changed := f.DecMapUintptrInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uintptr]int16, xlen) - changed = true - } - - var mk uintptr - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int32) - v, changed := fastpathTV.DecMapUintptrInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]int32) - fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uintptr]int32, xlen) - changed = true - } - - var mk uintptr - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int64) - v, changed := fastpathTV.DecMapUintptrInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]int64) - fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, checkNil bool, d *Decoder) { - v, changed := 
f.DecMapUintptrInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]int64, xlen) - changed = true - } - - var mk uintptr - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]float32) - v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]float32) - fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uintptr]float32, xlen) - changed = true - } - - var mk uintptr - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]float64) - v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]float64) - fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, checkNil bool, d *Decoder) { - v, changed := 
f.DecMapUintptrFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]float64, xlen) - changed = true - } - - var mk uintptr - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]bool) - v, changed := fastpathTV.DecMapUintptrBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]bool) - fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uintptr]bool, xlen) - changed = true - } - - var mk uintptr - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]interface{}) - v, changed := fastpathTV.DecMapIntIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]interface{}) - fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} 
-func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[int]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]string) - v, changed := fastpathTV.DecMapIntStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]string) - fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntStringX(vp *map[int]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntStringV(v map[int]string, checkNil bool, canChange bool, - d *Decoder) (_ map[int]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int]string, xlen) - changed = true - } - - var mk int - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint) - v, changed := fastpathTV.DecMapIntUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uint) - fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUintX(vp *map[int]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) 
DecMapIntUintV(v map[int]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]uint, xlen) - changed = true - } - - var mk int - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint8) - v, changed := fastpathTV.DecMapIntUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uint8) - fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int]uint8, xlen) - changed = true - } - - var mk int - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint16) - v, changed := fastpathTV.DecMapIntUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uint16) - fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uint16, changed bool) { - dd := d.d - cr := d.cr - - if 
checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int]uint16, xlen) - changed = true - } - - var mk int - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint32) - v, changed := fastpathTV.DecMapIntUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uint32) - fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int]uint32, xlen) - changed = true - } - - var mk int - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint64) - v, changed := fastpathTV.DecMapIntUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uint64) - fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := 
dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]uint64, xlen) - changed = true - } - - var mk int - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uintptr) - v, changed := fastpathTV.DecMapIntUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uintptr) - fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]uintptr, xlen) - changed = true - } - - var mk int - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int) - v, changed := fastpathTV.DecMapIntIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]int) - fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntIntX(vp *map[int]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntIntV(v map[int]int, checkNil bool, canChange bool, - d *Decoder) (_ map[int]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]int, xlen) - changed = 
true - } - - var mk int - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int8) - v, changed := fastpathTV.DecMapIntInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]int8) - fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt8V(v map[int]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[int]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int]int8, xlen) - changed = true - } - - var mk int - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int16) - v, changed := fastpathTV.DecMapIntInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]int16) - fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt16V(v map[int]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[int]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int]int16, xlen) - changed = true - } - - var mk int - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int32) - v, changed := fastpathTV.DecMapIntInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]int32) - fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt32V(v map[int]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[int]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int]int32, xlen) - changed = true - } - - var mk int - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int64) - v, changed := fastpathTV.DecMapIntInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]int64) - fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt64V(v map[int]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[int]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]int64, xlen) - changed = true - } - - var mk int - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if 
containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]float32) - v, changed := fastpathTV.DecMapIntFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]float32) - fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[int]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int]float32, xlen) - changed = true - } - - var mk int - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]float64) - v, changed := fastpathTV.DecMapIntFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]float64) - fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[int]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]float64, xlen) - changed = true - } - - var mk int - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]bool) - v, changed := fastpathTV.DecMapIntBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]bool) - fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntBoolV(v map[int]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[int]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int]bool, xlen) - changed = true - } - - var mk int - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]interface{}) - v, changed := fastpathTV.DecMapInt8IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]interface{}) - fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[int8]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int8 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]string) - v, changed := fastpathTV.DecMapInt8StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]string) - fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8StringV(v map[int8]string, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[int8]string, xlen) - changed = true - } - - var mk int8 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint) - v, changed := fastpathTV.DecMapInt8UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uint) - fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]uint, xlen) - changed = true - } - - var mk int8 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != 
nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint8) - v, changed := fastpathTV.DecMapInt8Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uint8) - fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[int8]uint8, xlen) - changed = true - } - - var mk int8 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint16) - v, changed := fastpathTV.DecMapInt8Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uint16) - fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int8]uint16, xlen) - changed = true - } - - var mk int8 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) 
fastpathDecMapInt8Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint32) - v, changed := fastpathTV.DecMapInt8Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uint32) - fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int8]uint32, xlen) - changed = true - } - - var mk int8 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint64) - v, changed := fastpathTV.DecMapInt8Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uint64) - fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]uint64, xlen) - changed = true - } - - var mk int8 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uintptr) - v, changed := 
fastpathTV.DecMapInt8UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uintptr) - fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]uintptr, xlen) - changed = true - } - - var mk int8 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int) - v, changed := fastpathTV.DecMapInt8IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]int) - fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8IntV(v map[int8]int, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]int, xlen) - changed = true - } - - var mk int8 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int8) - v, changed := fastpathTV.DecMapInt8Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]int8) 
- fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[int8]int8, xlen) - changed = true - } - - var mk int8 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int16) - v, changed := fastpathTV.DecMapInt8Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]int16) - fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int8]int16, xlen) - changed = true - } - - var mk int8 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int32) - v, changed := fastpathTV.DecMapInt8Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]int32) - fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, checkNil bool, d *Decoder) { - v, changed := 
f.DecMapInt8Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int8]int32, xlen) - changed = true - } - - var mk int8 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int64) - v, changed := fastpathTV.DecMapInt8Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]int64) - fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]int64, xlen) - changed = true - } - - var mk int8 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]float32) - v, changed := fastpathTV.DecMapInt8Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]float32) - fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, checkNil bool, canChange bool, - d 
*Decoder) (_ map[int8]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int8]float32, xlen) - changed = true - } - - var mk int8 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]float64) - v, changed := fastpathTV.DecMapInt8Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]float64) - fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]float64, xlen) - changed = true - } - - var mk int8 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]bool) - v, changed := fastpathTV.DecMapInt8BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]bool) - fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - 
} - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[int8]bool, xlen) - changed = true - } - - var mk int8 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]interface{}) - v, changed := fastpathTV.DecMapInt16IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]interface{}) - fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[int16]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int16 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]string) - v, changed := fastpathTV.DecMapInt16StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]string) - fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16StringV(v map[int16]string, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - 
changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[int16]string, xlen) - changed = true - } - - var mk int16 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint) - v, changed := fastpathTV.DecMapInt16UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uint) - fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]uint, xlen) - changed = true - } - - var mk int16 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint8) - v, changed := fastpathTV.DecMapInt16Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uint8) - fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := 
decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int16]uint8, xlen) - changed = true - } - - var mk int16 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint16) - v, changed := fastpathTV.DecMapInt16Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uint16) - fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[int16]uint16, xlen) - changed = true - } - - var mk int16 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint32) - v, changed := fastpathTV.DecMapInt16Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uint32) - fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int16]uint32, xlen) - changed = true - } - - var mk 
int16 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint64) - v, changed := fastpathTV.DecMapInt16Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uint64) - fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]uint64, xlen) - changed = true - } - - var mk int16 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uintptr) - v, changed := fastpathTV.DecMapInt16UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uintptr) - fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]uintptr, xlen) - changed = true - } - - var mk int16 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int) - v, changed := fastpathTV.DecMapInt16IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]int) - fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16IntV(v map[int16]int, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]int, xlen) - changed = true - } - - var mk int16 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int8) - v, changed := fastpathTV.DecMapInt16Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]int8) - fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int16]int8, xlen) - changed = true - } - - var mk int16 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv 
= int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int16) - v, changed := fastpathTV.DecMapInt16Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]int16) - fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[int16]int16, xlen) - changed = true - } - - var mk int16 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int32) - v, changed := fastpathTV.DecMapInt16Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]int32) - fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int16]int32, xlen) - changed = true - } - - var mk int16 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != 
nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int64) - v, changed := fastpathTV.DecMapInt16Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]int64) - fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]int64, xlen) - changed = true - } - - var mk int16 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]float32) - v, changed := fastpathTV.DecMapInt16Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]float32) - fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int16]float32, xlen) - changed = true - } - - var mk int16 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]float64) - v, changed := fastpathTV.DecMapInt16Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]float64) - fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]float64, xlen) - changed = true - } - - var mk int16 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]bool) - v, changed := fastpathTV.DecMapInt16BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]bool) - fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int16]bool, xlen) - changed = true - } - - var mk int16 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]interface{}) - v, changed := fastpathTV.DecMapInt32IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]interface{}) - fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[int32]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int32 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]string) - v, changed := fastpathTV.DecMapInt32StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]string) - fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32StringV(v map[int32]string, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[int32]string, xlen) - changed = true - } - - var mk int32 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } 
- } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uint) - v, changed := fastpathTV.DecMapInt32UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]uint) - fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]uint, xlen) - changed = true - } - - var mk int32 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uint8) - v, changed := fastpathTV.DecMapInt32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]uint8) - fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int32]uint8, xlen) - changed = true - } - - var mk int32 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) 
fastpathDecMapInt32Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uint16) - v, changed := fastpathTV.DecMapInt32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]uint16) - fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int32]uint16, xlen) - changed = true - } - - var mk int32 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uint32) - v, changed := fastpathTV.DecMapInt32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]uint32) - fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[int32]uint32, xlen) - changed = true - } - - var mk int32 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := 
rv.Addr().Interface().(*map[int32]uint64) - v, changed := fastpathTV.DecMapInt32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]uint64) - fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]uint64, xlen) - changed = true - } - - var mk int32 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uintptr) - v, changed := fastpathTV.DecMapInt32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]uintptr) - fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]uintptr, xlen) - changed = true - } - - var mk int32 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int) - v, changed := fastpathTV.DecMapInt32IntV(*vp, 
fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]int) - fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32IntV(v map[int32]int, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]int, xlen) - changed = true - } - - var mk int32 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int8) - v, changed := fastpathTV.DecMapInt32Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]int8) - fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int32]int8, xlen) - changed = true - } - - var mk int32 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int16) - v, changed := fastpathTV.DecMapInt32Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]int16) - fastpathTV.DecMapInt32Int16V(v, 
fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int32]int16, xlen) - changed = true - } - - var mk int32 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int32) - v, changed := fastpathTV.DecMapInt32Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]int32) - fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[int32]int32, xlen) - changed = true - } - - var mk int32 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int64) - v, changed := fastpathTV.DecMapInt32Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]int64) - fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, checkNil bool, d *Decoder) { - v, 
changed := f.DecMapInt32Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]int64, xlen) - changed = true - } - - var mk int32 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]float32) - v, changed := fastpathTV.DecMapInt32Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]float32) - fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[int32]float32, xlen) - changed = true - } - - var mk int32 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]float64) - v, changed := fastpathTV.DecMapInt32Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]float64) - fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ 
fastpathT) DecMapInt32Float64V(v map[int32]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]float64, xlen) - changed = true - } - - var mk int32 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]bool) - v, changed := fastpathTV.DecMapInt32BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]bool) - fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int32]bool, xlen) - changed = true - } - - var mk int32 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]interface{}) - v, changed := fastpathTV.DecMapInt64IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]interface{}) - fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]interface{}, changed bool) { - dd := 
d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int64]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int64 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]string) - v, changed := fastpathTV.DecMapInt64StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]string) - fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64StringV(v map[int64]string, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int64]string, xlen) - changed = true - } - - var mk int64 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint) - v, changed := fastpathTV.DecMapInt64UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uint) - fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && 
dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]uint, xlen) - changed = true - } - - var mk int64 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint8) - v, changed := fastpathTV.DecMapInt64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uint8) - fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int64]uint8, xlen) - changed = true - } - - var mk int64 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint16) - v, changed := fastpathTV.DecMapInt64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uint16) - fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && 
v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int64]uint16, xlen) - changed = true - } - - var mk int64 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint32) - v, changed := fastpathTV.DecMapInt64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uint32) - fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int64]uint32, xlen) - changed = true - } - - var mk int64 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint64) - v, changed := fastpathTV.DecMapInt64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uint64) - fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]uint64, xlen) - changed = true - } - - 
var mk int64 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uintptr) - v, changed := fastpathTV.DecMapInt64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uintptr) - fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]uintptr, xlen) - changed = true - } - - var mk int64 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int) - v, changed := fastpathTV.DecMapInt64IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]int) - fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64IntV(v map[int64]int, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]int, xlen) - changed = true - } - - var mk int64 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - 
mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int8) - v, changed := fastpathTV.DecMapInt64Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]int8) - fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int64]int8, xlen) - changed = true - } - - var mk int64 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int16) - v, changed := fastpathTV.DecMapInt64Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]int16) - fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int64]int16, xlen) - changed = true - } - - var mk int64 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if 
containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int32) - v, changed := fastpathTV.DecMapInt64Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]int32) - fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int64]int32, xlen) - changed = true - } - - var mk int64 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int64) - v, changed := fastpathTV.DecMapInt64Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]int64) - fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]int64, xlen) - changed = true - } - - var mk int64 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]float32) - v, changed := fastpathTV.DecMapInt64Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]float32) - fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int64]float32, xlen) - changed = true - } - - var mk int64 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]float64) - v, changed := fastpathTV.DecMapInt64Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]float64) - fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]float64, xlen) - changed = true - } - - var mk int64 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } 
- } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]bool) - v, changed := fastpathTV.DecMapInt64BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]bool) - fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int64]bool, xlen) - changed = true - } - - var mk int64 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]interface{}) - v, changed := fastpathTV.DecMapBoolIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]interface{}) - fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[bool]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk bool - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]string) - v, changed := fastpathTV.DecMapBoolStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]string) - fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolStringV(v map[bool]string, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[bool]string, xlen) - changed = true - } - - var mk bool - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint) - v, changed := fastpathTV.DecMapBoolUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uint) - fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]uint, xlen) - changed = true - } - - var mk bool - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := 
rv.Addr().Interface().(*map[bool]uint8) - v, changed := fastpathTV.DecMapBoolUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uint8) - fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[bool]uint8, xlen) - changed = true - } - - var mk bool - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint16) - v, changed := fastpathTV.DecMapBoolUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uint16) - fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[bool]uint16, xlen) - changed = true - } - - var mk bool - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint32) - v, changed := fastpathTV.DecMapBoolUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := 
rv.Interface().(map[bool]uint32) - fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[bool]uint32, xlen) - changed = true - } - - var mk bool - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint64) - v, changed := fastpathTV.DecMapBoolUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uint64) - fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]uint64, xlen) - changed = true - } - - var mk bool - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uintptr) - v, changed := fastpathTV.DecMapBoolUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uintptr) - fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, 
checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]uintptr, xlen) - changed = true - } - - var mk bool - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int) - v, changed := fastpathTV.DecMapBoolIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]int) - fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolIntV(v map[bool]int, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]int, xlen) - changed = true - } - - var mk bool - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int8) - v, changed := fastpathTV.DecMapBoolInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]int8) - fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, checkNil 
bool, canChange bool, - d *Decoder) (_ map[bool]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[bool]int8, xlen) - changed = true - } - - var mk bool - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int16) - v, changed := fastpathTV.DecMapBoolInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]int16) - fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[bool]int16, xlen) - changed = true - } - - var mk bool - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int32) - v, changed := fastpathTV.DecMapBoolInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]int32) - fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - 
containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[bool]int32, xlen) - changed = true - } - - var mk bool - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int64) - v, changed := fastpathTV.DecMapBoolInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]int64) - fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]int64, xlen) - changed = true - } - - var mk bool - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]float32) - v, changed := fastpathTV.DecMapBoolFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]float32) - fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[bool]float32, xlen) - changed = true - } - - var mk 
bool - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]float64) - v, changed := fastpathTV.DecMapBoolFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]float64) - fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]float64, xlen) - changed = true - } - - var mk bool - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]bool) - v, changed := fastpathTV.DecMapBoolBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]bool) - fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[bool]bool, xlen) - changed = true - } - - var mk bool - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr 
!= nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} diff --git a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl deleted file mode 100644 index 58cc6df4c..000000000 --- a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl +++ /dev/null @@ -1,511 +0,0 @@ -// +build !notfastpath - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl -// ************************************************************ - -package codec - -// Fast path functions try to create a fast path encode or decode implementation -// for common maps and slices. -// -// We define the functions and register then in this single file -// so as not to pollute the encode.go and decode.go, and create a dependency in there. -// This file can be omitted without causing a build failure. -// -// The advantage of fast paths is: -// - Many calls bypass reflection altogether -// -// Currently support -// - slice of all builtin types, -// - map of all builtin types to string or interface value -// - symetrical maps of all builtin types (e.g. str-str, uint8-uint8) -// This should provide adequate "typical" implementations. -// -// Note that fast track decode functions must handle values for which an address cannot be obtained. -// For example: -// m2 := map[string]int{} -// p2 := []interface{}{m2} -// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. 
-// - -import ( - "reflect" - "sort" -) - -const fastpathCheckNilFalse = false // for reflect -const fastpathCheckNilTrue = true // for type switch - -type fastpathT struct {} - -var fastpathTV fastpathT - -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*encFnInfo, reflect.Value) - decfn func(*decFnInfo, reflect.Value) -} - -type fastpathA [{{ .FastpathLen }}]fastpathE - -func (x *fastpathA) index(rtid uintptr) int { - // use binary search to grab the index (adapted from sort/search.go) - h, i, j := 0, 0, {{ .FastpathLen }} // len(x) - for i < j { - h = i + (j-i)/2 - if x[h].rtid < rtid { - i = h + 1 - } else { - j = h - } - } - if i < {{ .FastpathLen }} && x[i].rtid == rtid { - return i - } - return -1 -} - -type fastpathAslice []fastpathE - -func (x fastpathAslice) Len() int { return len(x) } -func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } -func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -var fastpathAV fastpathA - -// due to possible initialization loop error, make fastpath in an init() -func init() { - if !fastpathEnabled { - return - } - i := 0 - fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { - xrt := reflect.TypeOf(v) - xptr := reflect.ValueOf(xrt).Pointer() - fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} - i++ - return - } - - {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - fn([]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} - - {{range .Values}}{{if not .Primitive}}{{if .MapKey }} - fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} - - sort.Sort(fastpathAslice(fastpathAV[:])) -} - -// -- encode - -// -- -- fast path type switch -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}:{{else}} - case map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e){{if not .MapKey }} - case *[]{{ .Elem }}:{{else}} - case *map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) -{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e) - case *[]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) -{{end}}{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} - case map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e) - case *map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ 
.MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) -{{end}}{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- -- fast path functions -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - -func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { cr.sendContainerState(containerArrayElem) } - {{ encmd .Elem "v2"}} - } - if cr != nil { cr.sendContainerState(containerArrayEnd) }{{/* ee.EncodeEnd() */}} -} - -{{end}}{{end}}{{end}} - -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} - -func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - {{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - {{end}}if e.h.Canonical { - {{if eq .MapKey "interface{}"}}{{/* out of band - */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI {{/* put loop variables outside. 
seems currently needed for better perf */}} - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { cr.sendContainerState(containerMapKey) } - e.asis(v2[j].v) - if cr != nil { cr.sendContainerState(containerMapValue) } - e.encode(v[v2[j].i]) - } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v)) - var i int - for k, _ := range v { - v2[i] = {{ $x }}(k) - i++ - } - sort.Sort({{ sorttype .MapKey false}}(v2)) - for _, k2 := range v2 { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{if eq .MapKey "string"}}if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - }{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }} - } {{end}} - } else { - for k2, v2 := range v { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{if eq .MapKey "string"}}if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - }{{else}}{{ encmd .MapKey "k2"}}{{end}} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ encmd .Elem "v2"}} - } - } - if cr != nil { cr.sendContainerState(containerMapEnd) }{{/* ee.EncodeEnd() */}} -} - -{{end}}{{end}}{{end}} - -// -- decode - -// -- -- fast path type switch -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { - if !fastpathEnabled { - return false - } - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}:{{else}} - case map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, d){{if not .MapKey }} - case *[]{{ .Elem }}:{{else}} - case *map[{{ .MapKey }}]{{ .Elem }}:{{end}} - v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } -{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- -- fast path functions -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} -{{/* -Slices can change if they -- did not come from an array -- are addressable (from a ptr) -- are settable (e.g. 
contained in an interface{}) -*/}} -func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { {{/* // CanSet => CanAddr + Exported */}} - vp := rv.Addr().Interface().(*[]{{ .Elem }}) - v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]{{ .Elem }}) - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, checkNil bool, d *Decoder) { - v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) { - dd := d.d - {{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}} - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []{{ .Elem }}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { {{/* - // fast-path is for "basic" immutable types, so no need to copy them over - // s := make([]{{ .Elem }}, decInferLen(containerLenS, d.h.MaxInitLen)) - // copy(s, v[:cap(v)]) - // v = s */}} - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]{{ .Elem }}, xlen) - } - } else { - v = make([]{{ .Elem }}, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } {{/* // all checks done. cannot go past len. */}} - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } - if xtrunc { {{/* // means canChange=true, changed=true already. */}} - for ; j < containerLenS; j++ { - v = append(v, {{ zerocmd .Elem }}) - slh.ElemContainerState(j) - {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() {{/* check break first, so we can initialize v with a capacity of 4 if necessary */}} - if breakFound { - if canChange { - if v == nil { - v = []{{ .Elem }}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return - } - if cap(v) == 0 { - v = make([]{{ .Elem }}, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, {{ zerocmd .Elem }}) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { {{/* // all checks done. cannot go past len. 
*/}} - {{ if eq .Elem "interface{}" }}d.decode(&v[j]) - {{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -{{end}}{{end}}{{end}} - - -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} -{{/* -Maps can change if they are -- addressable (from a ptr) -- settable (e.g. contained in an interface{}) -*/}} -func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[{{ .MapKey }}]{{ .Elem }}) - v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}) - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, d *Decoder) { - v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, canChange bool, - d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) { - dd := d.d - cr := d.cr - {{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}} - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}) - v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen) - changed = true - } - {{ if eq .Elem "interface{}" }}mapGet := !d.h.MapValueReset && !d.h.InterfaceReset{{end}} - var mk {{ .MapKey }} - var mv {{ .Elem }} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{ if eq .MapKey "interface{}" }}mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}} - }{{ else }}mk = {{ decmd .MapKey }}{{ end }} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } - d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{ if eq .MapKey "interface{}" }}mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}} - }{{ else }}mk = {{ decmd .MapKey }}{{ end }} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } - d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { cr.sendContainerState(containerMapEnd) } - return v, changed -} - -{{end}}{{end}}{{end}} diff --git a/vendor/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/ugorji/go/codec/fast-path.not.go deleted file mode 100644 index d6f5f0c91..000000000 --- a/vendor/github.com/ugorji/go/codec/fast-path.not.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build notfastpath - -package codec - -import "reflect" - -// The generated fast-path code is very large, and adds a few seconds to the build time. 
-// This causes test execution, execution of small tools which use codec, etc -// to take a long time. -// -// To mitigate, we now support the notfastpath tag. -// This tag disables fastpath during build, allowing for faster build, test execution, -// short-program runs, etc. - -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } -func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } -func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } - -type fastpathT struct{} -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*encFnInfo, reflect.Value) - decfn func(*decFnInfo, reflect.Value) -} -type fastpathA [0]fastpathE - -func (x fastpathA) index(rtid uintptr) int { return -1 } - -var fastpathAV fastpathA -var fastpathTV fastpathT diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl deleted file mode 100644 index 2caae5bfd..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl +++ /dev/null @@ -1,101 +0,0 @@ -{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}} -var {{var "c"}} bool {{/* // changed */}} -if {{var "l"}} == 0 { - {{if isSlice }}if {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - } else if len({{var "v"}}) != 0 { - {{var "v"}} = {{var "v"}}[:0] - {{var "c"}} = true - } {{end}} {{if isChan }}if {{var "v"}} == nil { - {{var "v"}} = make({{ .CTyp }}, 0) - {{var "c"}} = true - } {{end}} -} else if {{var "l"}} > 0 { - {{if isChan }}if {{var "v"}} == nil { - {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - {{var "v"}} = make({{ .CTyp }}, {{var "rl"}}) - {{var "c"}} = true - } - for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ { - {{var "h"}}.ElemContainerState({{var "r"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - } - {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} - var {{var "rt"}} bool {{/* truncated */}} - if {{var "l"}} > cap({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) - {{ else }}{{if not .Immutable }} - {{var "rg"}} := len({{var "v"}}) > 0 - {{var "v2"}} := {{var "v"}} {{end}} - {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - if {{var "rt"}} { - if {{var "rl"}} <= cap({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "rl"}}] - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - {{var "c"}} = true - {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }} - if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}} - } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "l"}}] - {{var "c"}} = true - } {{end}} {{/* end isSlice:47 */}} - {{var "j"}} := 0 - for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - z.DecSwallow() 
- } - {{ else }}if {{var "rt"}} { - for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var "v"}} = append({{var "v"}}, {{ zero}}) - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - } {{end}} {{/* end isArray:56 */}} - {{end}} {{/* end isChan:16 */}} -} else { {{/* len < 0 */}} - {{var "j"}} := 0 - for ; !r.CheckBreak(); {{var "j"}}++ { - {{if isChan }} - {{var "h"}}.ElemContainerState({{var "j"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - {{ else }} - if {{var "j"}} >= len({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1) - {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }} - {{var "c"}} = true {{end}} - } - {{var "h"}}.ElemContainerState({{var "j"}}) - if {{var "j"}} < len({{var "v"}}) { - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } else { - z.DecSwallow() - } - {{end}} - } - {{if isSlice }}if {{var "j"}} < len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "j"}}] - {{var "c"}} = true - } else if {{var "j"}} == 0 && {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - }{{end}} -} -{{var "h"}}.End() -{{if not isArray }}if {{var "c"}} { - *{{ .Varname }} = {{var "v"}} -}{{end}} diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl deleted file mode 100644 index 77400e0a1..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl +++ /dev/null @@ -1,58 +0,0 @@ -{{var "v"}} := *{{ .Varname }} -{{var "l"}} := r.ReadMapStart() -{{var "bh"}} := z.DecBasicHandle() -if {{var "v"}} == nil { - {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) - {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) - *{{ .Varname }} = {{var "v"}} -} -var {{var "mk"}} {{ .KTyp }} -var {{var "mv"}} {{ .Typ }} -var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool -if {{var "bh"}}.MapValueReset { - {{if decElemKindPtr}}{{var "mg"}} = true - {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } - {{else if not decElemKindImmutable}}{{var "mg"}} = true - {{end}} } -if {{var "l"}} > 0 { -for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. 
*/}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true{{end}} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} else if {{var "l"}} < 0 { -for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true {{ end }} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} // else len==0: TODO: Should we clear map entries? -z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go deleted file mode 100644 index 22bce776b..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go +++ /dev/null @@ -1,233 +0,0 @@ -// //+build ignore - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl -// ************************************************************ - -package codec - -import ( - "encoding" - "reflect" -) - -// This file is used to generate helper code for codecgen. -// The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continously and without notice. -// -// To help enforce this, we create an unexported type with exported members. -// The only way to get the type is via the one exported type that we control (somewhat). -// -// When static codecs are created for types, they will use this value -// to perform encoding or decoding of primitives or known slice or map types. - -// GenHelperEncoder is exported so that it can be used externally by codecgen. -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { - return genHelperEncoder{e: e}, e.e -} - -// GenHelperDecoder is exported so that it can be used externally by codecgen. -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. 
-func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { - return genHelperDecoder{d: d}, d.d -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperEncoder struct { - e *Encoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperDecoder struct { - d *Decoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBasicHandle() *BasicHandle { - return f.e.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinary() bool { - return f.e.be // f.e.hh.isBinaryEncoding() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncFallback(iv interface{}) { - // println(">>>>>>>>> EncFallback") - f.e.encodeI(iv, false, false) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { - bs, fnerr := iv.MarshalText() - f.e.marshal(bs, fnerr, false, c_UTF8) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { - bs, fnerr := iv.MarshalJSON() - f.e.marshal(bs, fnerr, true, c_UTF8) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { - bs, fnerr := iv.MarshalBinary() - f.e.marshal(bs, fnerr, false, c_RAW) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) TimeRtidIfBinc() uintptr { - if _, ok := f.e.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) IsJSONHandle() bool { - return f.e.js -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) HasExtensions() bool { - return len(f.e.h.extHandle) != 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v) - if rt.Kind() == reflect.Ptr { - rt = rt.Elem() - } - rtid := reflect.ValueOf(rt).Pointer() - if xfFn := f.e.h.getExt(rtid); xfFn != nil { - f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) - return true - } - return false -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncSendContainerState(c containerState) { - if f.e.cr != nil { - f.e.cr.sendContainerState(c) - } -} - -// ---------------- DECODER FOLLOWS ----------------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBasicHandle() *BasicHandle { - return f.d.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinary() bool { - return f.d.be // f.d.hh.isBinaryEncoding() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSwallow() { - f.d.swallow() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecScratchBuffer() []byte { - return f.d.b[:] -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { - // println(">>>>>>>>> DecFallback") - f.d.decodeI(iv, chkPtr, false, false, false) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { - return f.d.decSliceHelperStart() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { - f.d.structFieldNotFound(index, name) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { - f.d.arrayCannotExpand(sliceLen, streamLen) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { - fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) - if fnerr != nil { - panic(fnerr) - } -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { - // bs := f.dd.DecodeBytes(f.d.b[:], true, true) - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. - fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) - if fnerr != nil { - panic(fnerr) - } -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { - fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true)) - if fnerr != nil { - panic(fnerr) - } -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) TimeRtidIfBinc() uintptr { - if _, ok := f.d.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) IsJSONHandle() bool { - return f.d.js -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) HasExtensions() bool { - return len(f.d.h.extHandle) != 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v).Elem() - rtid := reflect.ValueOf(rt).Pointer() - if xfFn := f.d.h.getExt(rtid); xfFn != nil { - f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) - return true - } - return false -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { - return decInferLen(clen, maxlen, unit) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSendContainerState(c containerState) { - if f.d.cr != nil { - f.d.cr.sendContainerState(c) - } -} diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl deleted file mode 100644 index 31958574f..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl +++ /dev/null @@ -1,364 +0,0 @@ -// //+build ignore - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. 
-// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl -// ************************************************************ - -package codec - -import ( - "encoding" - "reflect" -) - -// This file is used to generate helper code for codecgen. -// The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continously and without notice. -// -// To help enforce this, we create an unexported type with exported members. -// The only way to get the type is via the one exported type that we control (somewhat). -// -// When static codecs are created for types, they will use this value -// to perform encoding or decoding of primitives or known slice or map types. - -// GenHelperEncoder is exported so that it can be used externally by codecgen. -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { - return genHelperEncoder{e:e}, e.e -} - -// GenHelperDecoder is exported so that it can be used externally by codecgen. -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { - return genHelperDecoder{d:d}, d.d -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperEncoder struct { - e *Encoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperDecoder struct { - d *Decoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBasicHandle() *BasicHandle { - return f.e.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinary() bool { - return f.e.be // f.e.hh.isBinaryEncoding() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncFallback(iv interface{}) { - // println(">>>>>>>>> EncFallback") - f.e.encodeI(iv, false, false) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { - bs, fnerr := iv.MarshalText() - f.e.marshal(bs, fnerr, false, c_UTF8) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { - bs, fnerr := iv.MarshalJSON() - f.e.marshal(bs, fnerr, true, c_UTF8) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { - bs, fnerr := iv.MarshalBinary() - f.e.marshal(bs, fnerr, false, c_RAW) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) TimeRtidIfBinc() uintptr { - if _, ok := f.e.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) IsJSONHandle() bool { - return f.e.js -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) HasExtensions() bool { - return len(f.e.h.extHandle) != 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperEncoder) EncExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v) - if rt.Kind() == reflect.Ptr { - rt = rt.Elem() - } - rtid := reflect.ValueOf(rt).Pointer() - if xfFn := f.e.h.getExt(rtid); xfFn != nil { - f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) - return true - } - return false -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncSendContainerState(c containerState) { - if f.e.cr != nil { - f.e.cr.sendContainerState(c) - } -} - -// ---------------- DECODER FOLLOWS ----------------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBasicHandle() *BasicHandle { - return f.d.h -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinary() bool { - return f.d.be // f.d.hh.isBinaryEncoding() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSwallow() { - f.d.swallow() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecScratchBuffer() []byte { - return f.d.b[:] -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { - // println(">>>>>>>>> DecFallback") - f.d.decodeI(iv, chkPtr, false, false, false) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { - return f.d.decSliceHelperStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { - f.d.structFieldNotFound(index, name) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { - f.d.arrayCannotExpand(sliceLen, streamLen) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { - fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { - // bs := f.dd.DecodeBytes(f.d.b[:], true, true) - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. - fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { - fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true)) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) TimeRtidIfBinc() uintptr { - if _, ok := f.d.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) IsJSONHandle() bool { - return f.d.js -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) HasExtensions() bool { - return len(f.d.h.extHandle) != 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) DecExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v).Elem() - rtid := reflect.ValueOf(rt).Pointer() - if xfFn := f.d.h.getExt(rtid); xfFn != nil { - f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) - return true - } - return false -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { - return decInferLen(clen, maxlen, unit) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSendContainerState(c containerState) { - if f.d.cr != nil { - f.d.cr.sendContainerState(c) - } -} - -{{/* - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncDriver() encDriver { - return f.e.e -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecDriver() decDriver { - return f.d.d -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncNil() { - f.e.e.EncodeNil() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBytes(v []byte) { - f.e.e.EncodeStringBytes(c_RAW, v) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncArrayStart(length int) { - f.e.e.EncodeArrayStart(length) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncArrayEnd() { - f.e.e.EncodeArrayEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncArrayEntrySeparator() { - f.e.e.EncodeArrayEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapStart(length int) { - f.e.e.EncodeMapStart(length) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapEnd() { - f.e.e.EncodeMapEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapEntrySeparator() { - f.e.e.EncodeMapEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapKVSeparator() { - f.e.e.EncodeMapKVSeparator() -} - -// --------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBytes(v *[]byte) { - *v = f.d.d.DecodeBytes(*v) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTryNil() bool { - return f.d.d.TryDecodeAsNil() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerIsNil() (b bool) { - return f.d.d.IsContainerType(valueTypeNil) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerIsMap() (b bool) { - return f.d.d.IsContainerType(valueTypeMap) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerIsArray() (b bool) { - return f.d.d.IsContainerType(valueTypeArray) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecCheckBreak() bool { - return f.d.d.CheckBreak() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) DecMapStart() int { - return f.d.d.ReadMapStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayStart() int { - return f.d.d.ReadArrayStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapEnd() { - f.d.d.ReadMapEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayEnd() { - f.d.d.ReadArrayEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayEntrySeparator() { - f.d.d.ReadArrayEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapEntrySeparator() { - f.d.d.ReadMapEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapKVSeparator() { - f.d.d.ReadMapKVSeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) ReadStringAsBytes(bs []byte) []byte { - return f.d.d.DecodeStringAsBytes(bs) -} - - -// -- encode calls (primitives) -{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) {{ .MethodNamePfx "Enc" true }}(v {{ .Primitive }}) { - ee := f.e.e - {{ encmd .Primitive "v" }} -} -{{ end }}{{ end }}{{ end }} - -// -- decode calls (primitives) -{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) {{ .MethodNamePfx "Dec" true }}(vp *{{ .Primitive }}) { - dd := f.d.d - *vp = {{ decmd .Primitive }} -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) {{ .MethodNamePfx "Read" true }}() (v {{ .Primitive }}) { - dd := f.d.d - v = {{ decmd .Primitive }} - return -} -{{ end }}{{ end }}{{ end }} - - -// -- encode calls (slices/maps) -{{range .Values}}{{if not .Primitive }}{{if .Slice }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v []{{ .Elem }}) { {{ else }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v map[{{ .MapKey }}]{{ .Elem }}) { {{end}} - f.F.{{ .MethodNamePfx "Enc" false }}V(v, false, f.e) -} -{{ end }}{{ end }} - -// -- decode calls (slices/maps) -{{range .Values}}{{if not .Primitive }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -{{if .Slice }}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *[]{{ .Elem }}) { -{{else}}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *map[{{ .MapKey }}]{{ .Elem }}) { {{end}} - v, changed := f.F.{{ .MethodNamePfx "Dec" false }}V(*vp, false, true, f.d) - if changed { - *vp = v - } -} -{{ end }}{{ end }} -*/}} diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go deleted file mode 100644 index f3c1b7060..000000000 --- a/vendor/github.com/ugorji/go/codec/gen.generated.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
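[Editor's aside, not part of this patch or of the vendored sources.] The helper file and templates removed above exist only to back codecgen output: the generated CodecEncodeSelf/CodecDecodeSelf methods (the Selfer contract) funnel their work through GenHelperEncoder/GenHelperDecoder. As a hedged sketch — assuming the public API of the vendored github.com/ugorji/go/codec package (the Selfer interface, JsonHandle, NewEncoderBytes/NewDecoderBytes, MustEncode/MustDecode), with Temperature as a purely illustrative type — a hand-written type satisfying the same Selfer contract looks roughly like this:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

// Temperature implements codec.Selfer, the same interface codecgen emits
// implementations of; the runtime codec then bypasses reflection for it.
type Temperature struct{ Celsius float64 }

func (t *Temperature) CodecEncodeSelf(e *codec.Encoder) { e.MustEncode(t.Celsius) }
func (t *Temperature) CodecDecodeSelf(d *codec.Decoder) { d.MustDecode(&t.Celsius) }

func main() {
	var (
		h   codec.JsonHandle
		buf []byte
	)
	// Encoding defers to CodecEncodeSelf, so only the float is written.
	codec.NewEncoderBytes(&buf, &h).MustEncode(&Temperature{Celsius: 21.5})

	var out Temperature
	codec.NewDecoderBytes(buf, &h).MustDecode(&out)
	fmt.Println(string(buf), out.Celsius) // e.g. "21.5 21.5"
}

codecgen mechanically produces these same two methods, plus the codecSelfer plumbing that the GenHelperEncoder/GenHelperDecoder values above are meant to serve.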
- -package codec - -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl - -const genDecMapTmpl = ` -{{var "v"}} := *{{ .Varname }} -{{var "l"}} := r.ReadMapStart() -{{var "bh"}} := z.DecBasicHandle() -if {{var "v"}} == nil { - {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) - {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) - *{{ .Varname }} = {{var "v"}} -} -var {{var "mk"}} {{ .KTyp }} -var {{var "mv"}} {{ .Typ }} -var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool -if {{var "bh"}}.MapValueReset { - {{if decElemKindPtr}}{{var "mg"}} = true - {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } - {{else if not decElemKindImmutable}}{{var "mg"}} = true - {{end}} } -if {{var "l"}} > 0 { -for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true{{end}} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} else if {{var "l"}} < 0 { -for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true {{ end }} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} // else len==0: TODO: Should we clear map entries? 
-z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) -` - -const genDecListTmpl = ` -{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}} -var {{var "c"}} bool {{/* // changed */}} -if {{var "l"}} == 0 { - {{if isSlice }}if {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - } else if len({{var "v"}}) != 0 { - {{var "v"}} = {{var "v"}}[:0] - {{var "c"}} = true - } {{end}} {{if isChan }}if {{var "v"}} == nil { - {{var "v"}} = make({{ .CTyp }}, 0) - {{var "c"}} = true - } {{end}} -} else if {{var "l"}} > 0 { - {{if isChan }}if {{var "v"}} == nil { - {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - {{var "v"}} = make({{ .CTyp }}, {{var "rl"}}) - {{var "c"}} = true - } - for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ { - {{var "h"}}.ElemContainerState({{var "r"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - } - {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} - var {{var "rt"}} bool {{/* truncated */}} - if {{var "l"}} > cap({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) - {{ else }}{{if not .Immutable }} - {{var "rg"}} := len({{var "v"}}) > 0 - {{var "v2"}} := {{var "v"}} {{end}} - {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - if {{var "rt"}} { - if {{var "rl"}} <= cap({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "rl"}}] - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - {{var "c"}} = true - {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }} - if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}} - } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "l"}}] - {{var "c"}} = true - } {{end}} {{/* end isSlice:47 */}} - {{var "j"}} := 0 - for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - z.DecSwallow() - } - {{ else }}if {{var "rt"}} { - for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var "v"}} = append({{var "v"}}, {{ zero}}) - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - } {{end}} {{/* end isArray:56 */}} - {{end}} {{/* end isChan:16 */}} -} else { {{/* len < 0 */}} - {{var "j"}} := 0 - for ; !r.CheckBreak(); {{var "j"}}++ { - {{if isChan }} - {{var "h"}}.ElemContainerState({{var "j"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - {{ else }} - if {{var "j"}} >= len({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1) - {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }} - {{var "c"}} = true {{end}} - } - {{var "h"}}.ElemContainerState({{var "j"}}) - if {{var "j"}} < len({{var "v"}}) { - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } else { - z.DecSwallow() - } - {{end}} - } - {{if isSlice }}if {{var "j"}} < 
len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "j"}}] - {{var "c"}} = true - } else if {{var "j"}} == 0 && {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - }{{end}} -} -{{var "h"}}.End() -{{if not isArray }}if {{var "c"}} { - *{{ .Varname }} = {{var "v"}} -}{{end}} -` diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go deleted file mode 100644 index a075e7c0d..000000000 --- a/vendor/github.com/ugorji/go/codec/gen.go +++ /dev/null @@ -1,1920 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "bytes" - "encoding/base64" - "errors" - "fmt" - "go/format" - "io" - "io/ioutil" - "math/rand" - "os" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "sync" - "text/template" - "time" -) - -// --------------------------------------------------- -// codecgen supports the full cycle of reflection-based codec: -// - RawExt -// - Builtins -// - Extensions -// - (Binary|Text|JSON)(Unm|M)arshal -// - generic by-kind -// -// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type. -// In those areas, we try to only do reflection or interface-conversion when NECESSARY: -// - Extensions, only if Extensions are configured. -// -// However, codecgen doesn't support the following: -// - Canonical option. (codecgen IGNORES it currently) -// This is just because it has not been implemented. -// -// During encode/decode, Selfer takes precedence. -// A type implementing Selfer will know how to encode/decode itself statically. -// -// The following field types are supported: -// array: [n]T -// slice: []T -// map: map[K]V -// primitive: [u]int[n], float(32|64), bool, string -// struct -// -// --------------------------------------------------- -// Note that a Selfer cannot call (e|d).(En|De)code on itself, -// as this will cause a circular reference, as (En|De)code will call Selfer methods. -// Any type that implements Selfer must implement completely and not fallback to (En|De)code. -// -// In addition, code in this file manages the generation of fast-path implementations of -// encode/decode of slices/maps of primitive keys/values. -// -// Users MUST re-generate their implementations whenever the code shape changes. -// The generated code will panic if it was generated with a version older than the supporting library. -// --------------------------------------------------- -// -// codec framework is very feature rich. -// When encoding or decoding into an interface, it depends on the runtime type of the interface. -// The type of the interface may be a named type, an extension, etc. -// Consequently, we fallback to runtime codec for encoding/decoding interfaces. -// In addition, we fallback for any value which cannot be guaranteed at runtime. -// This allows us support ANY value, including any named types, specifically those which -// do not implement our interfaces (e.g. Selfer). -// -// This explains some slowness compared to other code generation codecs (e.g. msgp). -// This reduction in speed is only seen when your refers to interfaces, -// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} } -// -// codecgen will panic if the file was generated with an old version of the library in use. -// -// Note: -// It was a concious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. 
-// This way, there isn't a function call overhead just to see that we should not enter a block of code. - -// GenVersion is the current version of codecgen. -// -// NOTE: Increment this value each time codecgen changes fundamentally. -// Fundamental changes are: -// - helper methods change (signature change, new ones added, some removed, etc) -// - codecgen command line changes -// -// v1: Initial Version -// v2: -// v3: Changes for Kubernetes: -// changes in signature of some unpublished helper methods and codecgen cmdline arguments. -// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen) -// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections. -const GenVersion = 5 - -const ( - genCodecPkg = "codec1978" - genTempVarPfx = "yy" - genTopLevelVarName = "x" - - // ignore canBeNil parameter, and always set to true. - // This is because nil can appear anywhere, so we should always check. - genAnythingCanBeNil = true - - // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function; - // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals - // are not executed a lot. - // - // From testing, it didn't make much difference in runtime, so keep as true (one function only) - genUseOneFunctionForDecStructMap = true -) - -type genStructMapStyle uint8 - -const ( - genStructMapStyleConsolidated genStructMapStyle = iota - genStructMapStyleLenPrefix - genStructMapStyleCheckBreak -) - -var ( - genAllTypesSamePkgErr = errors.New("All types must be in the same package") - genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice") - genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__") - genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) -) - -// genRunner holds some state used during a Gen run. -type genRunner struct { - w io.Writer // output - c uint64 // counter used for generating varsfx - t []reflect.Type // list of types to run selfer on - - tc reflect.Type // currently running selfer on this type - te map[uintptr]bool // types for which the encoder has been created - td map[uintptr]bool // types for which the decoder has been created - cp string // codec import path - - im map[string]reflect.Type // imports to add - imn map[string]string // package names of imports to add - imc uint64 // counter for import numbers - - is map[reflect.Type]struct{} // types seen during import search - bp string // base PkgPath, for which we are generating for - - cpfx string // codec package prefix - unsafe bool // is unsafe to be used in generated code? - - tm map[reflect.Type]struct{} // types for which enc/dec must be generated - ts []reflect.Type // types for which enc/dec must be generated - - xs string // top level variable/constant suffix - hn string // fn helper type name - - ti *TypeInfos - // rr *rand.Rand // random generator for file-specific types -} - -// Gen will write a complete go file containing Selfer implementations for each -// type passed. All the types must be in the same package. -// -// Library users: *DO NOT USE IT DIRECTLY. 
IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.* -func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) { - if len(typ) == 0 { - return - } - x := genRunner{ - unsafe: useUnsafe, - w: w, - t: typ, - te: make(map[uintptr]bool), - td: make(map[uintptr]bool), - im: make(map[string]reflect.Type), - imn: make(map[string]string), - is: make(map[reflect.Type]struct{}), - tm: make(map[reflect.Type]struct{}), - ts: []reflect.Type{}, - bp: genImportPath(typ[0]), - xs: uid, - ti: ti, - } - if x.ti == nil { - x.ti = defTypeInfos - } - if x.xs == "" { - rr := rand.New(rand.NewSource(time.Now().UnixNano())) - x.xs = strconv.FormatInt(rr.Int63n(9999), 10) - } - - // gather imports first: - x.cp = genImportPath(reflect.TypeOf(x)) - x.imn[x.cp] = genCodecPkg - for _, t := range typ { - // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) - if genImportPath(t) != x.bp { - panic(genAllTypesSamePkgErr) - } - x.genRefPkgs(t) - } - if buildTags != "" { - x.line("//+build " + buildTags) - x.line("") - } - x.line(` - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -`) - x.line("package " + pkgName) - x.line("") - x.line("import (") - if x.cp != x.bp { - x.cpfx = genCodecPkg + "." - x.linef("%s \"%s\"", genCodecPkg, x.cp) - } - // use a sorted set of im keys, so that we can get consistent output - imKeys := make([]string, 0, len(x.im)) - for k, _ := range x.im { - imKeys = append(imKeys, k) - } - sort.Strings(imKeys) - for _, k := range imKeys { // for k, _ := range x.im { - x.linef("%s \"%s\"", x.imn[k], k) - } - // add required packages - for _, k := range [...]string{"reflect", "unsafe", "runtime", "fmt", "errors"} { - if _, ok := x.im[k]; !ok { - if k == "unsafe" && !x.unsafe { - continue - } - x.line("\"" + k + "\"") - } - } - x.line(")") - x.line("") - - x.line("const (") - x.linef("// ----- content types ----") - x.linef("codecSelferC_UTF8%s = %v", x.xs, int64(c_UTF8)) - x.linef("codecSelferC_RAW%s = %v", x.xs, int64(c_RAW)) - x.linef("// ----- value types used ----") - x.linef("codecSelferValueTypeArray%s = %v", x.xs, int64(valueTypeArray)) - x.linef("codecSelferValueTypeMap%s = %v", x.xs, int64(valueTypeMap)) - x.linef("// ----- containerStateValues ----") - x.linef("codecSelfer_containerMapKey%s = %v", x.xs, int64(containerMapKey)) - x.linef("codecSelfer_containerMapValue%s = %v", x.xs, int64(containerMapValue)) - x.linef("codecSelfer_containerMapEnd%s = %v", x.xs, int64(containerMapEnd)) - x.linef("codecSelfer_containerArrayElem%s = %v", x.xs, int64(containerArrayElem)) - x.linef("codecSelfer_containerArrayEnd%s = %v", x.xs, int64(containerArrayEnd)) - x.line(")") - x.line("var (") - x.line("codecSelferBitsize" + x.xs + " = uint8(reflect.TypeOf(uint(0)).Bits())") - x.line("codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)") - x.line(")") - x.line("") - - if x.unsafe { - x.line("type codecSelferUnsafeString" + x.xs + " struct { Data uintptr; Len int}") - x.line("") - } - x.hn = "codecSelfer" + x.xs - x.line("type " + x.hn + " struct{}") - x.line("") - - x.line("func init() {") - x.linef("if %sGenVersion != %v {", x.cpfx, GenVersion) - x.line("_, file, _, _ := runtime.Caller(0)") - x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", `) - x.linef(`%v, %sGenVersion, file)`, GenVersion, x.cpfx) - x.line("panic(err)") - x.linef("}") - x.line("if false { // reference the types, but skip this branch at build/run time") - var n int - // for k, t := range x.im { - for _, k := range imKeys { - t := x.im[k] - x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) - n++ - } - if x.unsafe { - x.linef("var v%v unsafe.Pointer", n) - n++ - } - if n > 0 { - x.out("_") - for i := 1; i < n; i++ { - x.out(", _") - } - x.out(" = v0") - for i := 1; i < n; i++ { - x.outf(", v%v", i) - } - } - x.line("} ") // close if false - x.line("}") // close init - x.line("") - - // generate rest of type info - for _, t := range typ { - x.tc = t - x.selfer(true) - x.selfer(false) - } - - for _, t := range x.ts { - rtid := reflect.ValueOf(t).Pointer() - // generate enc functions for all these slice/map types. - x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) - x.genRequiredMethodVars(true) - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - x.encListFallback("v", t) - case reflect.Map: - x.encMapFallback("v", t) - default: - panic(genExpectArrayOrMapErr) - } - x.line("}") - x.line("") - - // generate dec functions for all these slice/map types. - x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) - x.genRequiredMethodVars(false) - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - x.decListFallback("v", rtid, t) - case reflect.Map: - x.decMapFallback("v", rtid, t) - default: - panic(genExpectArrayOrMapErr) - } - x.line("}") - x.line("") - } - - x.line("") -} - -func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { - // return varname != genTopLevelVarName && t != x.tc - // the only time we checkForSelfer is if we are not at the TOP of the generated code. 
- return varname != genTopLevelVarName -} - -func (x *genRunner) arr2str(t reflect.Type, s string) string { - if t.Kind() == reflect.Array { - return s - } - return "" -} - -func (x *genRunner) genRequiredMethodVars(encode bool) { - x.line("var h " + x.hn) - if encode { - x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)") - } else { - x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)") - } - x.line("_, _, _ = h, z, r") -} - -func (x *genRunner) genRefPkgs(t reflect.Type) { - if _, ok := x.is[t]; ok { - return - } - // fmt.Printf(">>>>>>: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) - x.is[t] = struct{}{} - tpkg, tname := genImportPath(t), t.Name() - if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { - if _, ok := x.im[tpkg]; !ok { - x.im[tpkg] = t - if idx := strings.LastIndex(tpkg, "/"); idx < 0 { - x.imn[tpkg] = tpkg - } else { - x.imc++ - x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + tpkg[idx+1:] - } - } - } - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: - x.genRefPkgs(t.Elem()) - case reflect.Map: - x.genRefPkgs(t.Elem()) - x.genRefPkgs(t.Key()) - case reflect.Struct: - for i := 0; i < t.NumField(); i++ { - if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { - x.genRefPkgs(t.Field(i).Type) - } - } - } -} - -func (x *genRunner) line(s string) { - x.out(s) - if len(s) == 0 || s[len(s)-1] != '\n' { - x.out("\n") - } -} - -func (x *genRunner) varsfx() string { - x.c++ - return strconv.FormatUint(x.c, 10) -} - -func (x *genRunner) out(s string) { - if _, err := io.WriteString(x.w, s); err != nil { - panic(err) - } -} - -func (x *genRunner) linef(s string, params ...interface{}) { - x.line(fmt.Sprintf(s, params...)) -} - -func (x *genRunner) outf(s string, params ...interface{}) { - x.out(fmt.Sprintf(s, params...)) -} - -func (x *genRunner) genTypeName(t reflect.Type) (n string) { - // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }() - - // if the type has a PkgPath, which doesn't match the current package, - // then include it. - // We cannot depend on t.String() because it includes current package, - // or t.PkgPath because it includes full import path, - // - var ptrPfx string - for t.Kind() == reflect.Ptr { - ptrPfx += "*" - t = t.Elem() - } - if tn := t.Name(); tn != "" { - return ptrPfx + x.genTypeNamePrim(t) - } - switch t.Kind() { - case reflect.Map: - return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem()) - case reflect.Slice: - return ptrPfx + "[]" + x.genTypeName(t.Elem()) - case reflect.Array: - return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem()) - case reflect.Chan: - return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem()) - default: - if t == intfTyp { - return ptrPfx + "interface{}" - } else { - return ptrPfx + x.genTypeNamePrim(t) - } - } -} - -func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { - if t.Name() == "" { - return t.String() - } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { - return t.Name() - } else { - return x.imn[genImportPath(t)] + "." 
+ t.Name() - // return t.String() // best way to get the package name inclusive - } -} - -func (x *genRunner) genZeroValueR(t reflect.Type) string { - // if t is a named type, w - switch t.Kind() { - case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func, - reflect.Slice, reflect.Map, reflect.Invalid: - return "nil" - case reflect.Bool: - return "false" - case reflect.String: - return `""` - case reflect.Struct, reflect.Array: - return x.genTypeName(t) + "{}" - default: // all numbers - return "0" - } -} - -func (x *genRunner) genMethodNameT(t reflect.Type) (s string) { - return genMethodNameT(t, x.tc) -} - -func (x *genRunner) selfer(encode bool) { - t := x.tc - t0 := t - // always make decode use a pointer receiver, - // and structs always use a ptr receiver (encode|decode) - isptr := !encode || t.Kind() == reflect.Struct - fnSigPfx := "func (x " - if isptr { - fnSigPfx += "*" - } - fnSigPfx += x.genTypeName(t) - - x.out(fnSigPfx) - if isptr { - t = reflect.PtrTo(t) - } - if encode { - x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {") - x.genRequiredMethodVars(true) - // x.enc(genTopLevelVarName, t) - x.encVar(genTopLevelVarName, t) - } else { - x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - // do not use decVar, as there is no need to check TryDecodeAsNil - // or way to elegantly handle that, and also setting it to a - // non-nil value doesn't affect the pointer passed. - // x.decVar(genTopLevelVarName, t, false) - x.dec(genTopLevelVarName, t0) - } - x.line("}") - x.line("") - - if encode || t0.Kind() != reflect.Struct { - return - } - - // write is containerMap - if genUseOneFunctionForDecStructMap { - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleConsolidated) - x.line("}") - x.line("") - } else { - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleLenPrefix) - x.line("}") - x.line("") - - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleCheckBreak) - x.line("}") - x.line("") - } - - // write containerArray - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructArray(genTopLevelVarName, "l", "return", reflect.ValueOf(t0).Pointer(), t0) - x.line("}") - x.line("") - -} - -// used for chan, array, slice, map -func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) { - if encode { - x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), varname) - } else { - x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname) - } - if _, ok := x.tm[t]; !ok { - x.tm[t] = struct{}{} - x.ts = append(x.ts, t) - } -} - -// encVar will encode a variable. 
-// The parameter, t, is the reflect.Type of the variable itself -func (x *genRunner) encVar(varname string, t reflect.Type) { - // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t) - var checkNil bool - switch t.Kind() { - case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan: - checkNil = true - } - if checkNil { - x.linef("if %s == nil { r.EncodeNil() } else { ", varname) - } - switch t.Kind() { - case reflect.Ptr: - switch t.Elem().Kind() { - case reflect.Struct, reflect.Array: - x.enc(varname, genNonPtr(t)) - default: - i := x.varsfx() - x.line(genTempVarPfx + i + " := *" + varname) - x.enc(genTempVarPfx+i, genNonPtr(t)) - } - case reflect.Struct, reflect.Array: - i := x.varsfx() - x.line(genTempVarPfx + i + " := &" + varname) - x.enc(genTempVarPfx+i, t) - default: - x.enc(varname, t) - } - - if checkNil { - x.line("}") - } - -} - -// enc will encode a variable (varname) of type T, -// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type *T (to prevent copying) -func (x *genRunner) enc(varname string, t reflect.Type) { - // varName here must be to a pointer to a struct/array, or to a value directly. - rtid := reflect.ValueOf(t).Pointer() - // We call CodecEncodeSelf if one of the following are honored: - // - the type already implements Selfer, call that - // - the type has a Selfer implementation just created, use that - // - the type is in the list of the ones we will generate for, but it is not currently being generated - - tptr := reflect.PtrTo(t) - tk := t.Kind() - if x.checkForSelfer(t, varname) { - if t.Implements(selferTyp) || (tptr.Implements(selferTyp) && (tk == reflect.Array || tk == reflect.Struct)) { - x.line(varname + ".CodecEncodeSelf(e)") - return - } - - if _, ok := x.te[rtid]; ok { - x.line(varname + ".CodecEncodeSelf(e)") - return - } - } - - inlist := false - for _, t0 := range x.t { - if t == t0 { - inlist = true - if x.checkForSelfer(t, varname) { - x.line(varname + ".CodecEncodeSelf(e)") - return - } - break - } - } - - var rtidAdded bool - if t == x.tc { - x.te[rtid] = true - rtidAdded = true - } - - // check if - // - type is RawExt - // - the type implements (Text|JSON|Binary)(Unm|M)arshal - mi := x.varsfx() - x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi) - x.linef("_ = %sm%s", genTempVarPfx, mi) - x.line("if false {") //start if block - defer func() { x.line("}") }() //end if block - - if t == rawExtTyp { - x.linef("} else { r.EncodeRawExt(%v, e)", varname) - return - } - // HACK: Support for Builtins. - // Currently, only Binc supports builtins, and the only builtin type is time.Time. - // Have a method that returns the rtid for time.Time if Handle is Binc. - if t == timeTyp { - vrtid := genTempVarPfx + "m" + x.varsfx() - x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) - x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname) - } - // only check for extensions if the type is named, and has a packagePath. 
- if genImportPath(t) != "" && t.Name() != "" { - // first check if extensions are configued, before doing the interface conversion - x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname) - } - if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) { - x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) - } - if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) - } else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) { - x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) - } - - x.line("} else {") - - switch t.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x.line("r.EncodeInt(int64(" + varname + "))") - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x.line("r.EncodeUint(uint64(" + varname + "))") - case reflect.Float32: - x.line("r.EncodeFloat32(float32(" + varname + "))") - case reflect.Float64: - x.line("r.EncodeFloat64(float64(" + varname + "))") - case reflect.Bool: - x.line("r.EncodeBool(bool(" + varname + "))") - case reflect.String: - x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(" + varname + "))") - case reflect.Chan: - x.xtraSM(varname, true, t) - // x.encListFallback(varname, rtid, t) - case reflect.Array: - x.xtraSM(varname, true, t) - case reflect.Slice: - // if nil, call dedicated function - // if a []uint8, call dedicated function - // if a known fastpath slice, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - if rtid == uint8SliceTypId { - x.line("r.EncodeStringBytes(codecSelferC_RAW" + x.xs + ", []byte(" + varname + "))") - } else if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)") - } else { - x.xtraSM(varname, true, t) - // x.encListFallback(varname, rtid, t) - } - case reflect.Map: - // if nil, call dedicated function - // if a known fastpath map, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ") - if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." 
+ g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)") - } else { - x.xtraSM(varname, true, t) - // x.encMapFallback(varname, rtid, t) - } - case reflect.Struct: - if !inlist { - delete(x.te, rtid) - x.line("z.EncFallback(" + varname + ")") - break - } - x.encStruct(varname, rtid, t) - default: - if rtidAdded { - delete(x.te, rtid) - } - x.line("z.EncFallback(" + varname + ")") - } -} - -func (x *genRunner) encZero(t reflect.Type) { - switch t.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x.line("r.EncodeInt(0)") - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x.line("r.EncodeUint(0)") - case reflect.Float32: - x.line("r.EncodeFloat32(0)") - case reflect.Float64: - x.line("r.EncodeFloat64(0)") - case reflect.Bool: - x.line("r.EncodeBool(false)") - case reflect.String: - x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + `, "")`) - default: - x.line("r.EncodeNil()") - } -} - -func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { - // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. ) - // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it - - // if t === type currently running selfer on, do for all - ti := x.ti.get(rtid, t) - i := x.varsfx() - sepVarname := genTempVarPfx + "sep" + i - numfieldsvar := genTempVarPfx + "q" + i - ti2arrayvar := genTempVarPfx + "r" + i - struct2arrvar := genTempVarPfx + "2arr" + i - - x.line(sepVarname + " := !z.EncBinary()") - x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar) - tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. - // due to omitEmpty, we need to calculate the - // number of non-empty things we write out first. - // This is required as we need to pre-determine the size of the container, - // to support length-prefixing. - x.linef("var %s [%v]bool", numfieldsvar, len(tisfi)) - x.linef("_, _, _ = %s, %s, %s", sepVarname, numfieldsvar, struct2arrvar) - x.linef("const %s bool = %v", ti2arrayvar, ti.toArray) - nn := 0 - for j, si := range tisfi { - if !si.omitEmpty { - nn++ - continue - } - var t2 reflect.StructField - var omitline string - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { - t2typ := t - varname3 := varname - for _, ix := range si.is { - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(ix) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - if t2typ.Kind() == reflect.Ptr { - omitline += varname3 + " != nil && " - } - } - } - // never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. - // also, for maps/slices/arrays, check if len ! 0 (not if == zero value) - switch t2.Type.Kind() { - case reflect.Struct: - omitline += " true" - case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan: - omitline += "len(" + varname + "." + t2.Name + ") != 0" - default: - omitline += varname + "." 
+ t2.Name + " != " + x.genZeroValueR(t2.Type) - } - x.linef("%s[%v] = %s", numfieldsvar, j, omitline) - } - x.linef("var %snn%s int", genTempVarPfx, i) - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { - x.line("r.EncodeArrayStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")") - x.linef("} else {") // if not ti.toArray - x.linef("%snn%s = %v", genTempVarPfx, i, nn) - x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) - x.linef("r.EncodeMapStart(%snn%s)", genTempVarPfx, i) - x.linef("%snn%s = %v", genTempVarPfx, i, 0) - // x.line("r.EncodeMapStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")") - x.line("}") // close if not StructToArray - - for j, si := range tisfi { - i := x.varsfx() - isNilVarName := genTempVarPfx + "n" + i - var labelUsed bool - var t2 reflect.StructField - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { - t2typ := t - varname3 := varname - for _, ix := range si.is { - // fmt.Printf("%%%% %v, ix: %v\n", t2typ, ix) - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(ix) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - if t2typ.Kind() == reflect.Ptr { - if !labelUsed { - x.line("var " + isNilVarName + " bool") - } - x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ") - x.line("goto LABEL" + i) - x.line("}") - labelUsed = true - // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }") - } - } - // t2 = t.FieldByIndex(si.is) - } - if labelUsed { - x.line("LABEL" + i + ":") - } - // if the type of the field is a Selfer, or one of the ones - - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray - if labelUsed { - x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") - } - x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - if si.omitEmpty { - x.linef("if %s[%v] {", numfieldsvar, j) - } - x.encVar(varname+"."+t2.Name, t2.Type) - if si.omitEmpty { - x.linef("} else {") - x.encZero(t2.Type) - x.linef("}") - } - if labelUsed { - x.line("}") - } - - x.linef("} else {") // if not ti.toArray - - if si.omitEmpty { - x.linef("if %s[%v] {", numfieldsvar, j) - } - x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) - x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))") - x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) - if labelUsed { - x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") - x.encVar(varname+"."+t2.Name, t2.Type) - x.line("}") - } else { - x.encVar(varname+"."+t2.Name, t2.Type) - } - if si.omitEmpty { - x.line("}") - } - x.linef("} ") // end if/else ti.toArray - } - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { - x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) - x.line("} else {") - x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) - x.line("}") - -} - -func (x *genRunner) encListFallback(varname string, t reflect.Type) { - i := x.varsfx() - g := genTempVarPfx - x.line("r.EncodeArrayStart(len(" + varname + "))") - if t.Kind() == reflect.Chan { - x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, g, i, g, i, g, i) - x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - x.linef("%sv%s := <-%s", g, i, varname) - } else { - // x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) - x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) - 
x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - } - x.encVar(genTempVarPfx+"v"+i, t.Elem()) - x.line("}") - x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) -} - -func (x *genRunner) encMapFallback(varname string, t reflect.Type) { - // TODO: expand this to handle canonical. - i := x.varsfx() - x.line("r.EncodeMapStart(len(" + varname + "))") - x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) - // x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {") - x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) - x.encVar(genTempVarPfx+"k"+i, t.Key()) - x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) - x.encVar(genTempVarPfx+"v"+i, t.Elem()) - x.line("}") - x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) -} - -func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) { - // We only encode as nil if a nillable value. - // This removes some of the wasted checks for TryDecodeAsNil. - // We need to think about this more, to see what happens if omitempty, etc - // cause a nil value to be stored when something is expected. - // This could happen when decoding from a struct encoded as an array. - // For that, decVar should be called with canNil=true, to force true as its value. - i := x.varsfx() - if !canBeNil { - canBeNil = genAnythingCanBeNil || !genIsImmutable(t) - } - if canBeNil { - x.line("if r.TryDecodeAsNil() {") - if t.Kind() == reflect.Ptr { - x.line("if " + varname + " != nil { ") - - // if varname is a field of a struct (has a dot in it), - // then just set it to nil - if strings.IndexByte(varname, '.') != -1 { - x.line(varname + " = nil") - } else { - x.line("*" + varname + " = " + x.genZeroValueR(t.Elem())) - } - x.line("}") - } else { - x.line(varname + " = " + x.genZeroValueR(t)) - } - x.line("} else {") - } else { - x.line("// cannot be nil") - } - if t.Kind() != reflect.Ptr { - if x.decTryAssignPrimitive(varname, t) { - x.line(genTempVarPfx + "v" + i + " := &" + varname) - x.dec(genTempVarPfx+"v"+i, t) - } - } else { - x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem())) - // Ensure we set underlying ptr to a non-nil value (so we can deref to it later). - // There's a chance of a **T in here which is nil. - var ptrPfx string - for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { - ptrPfx += "*" - x.linef("if %s%s == nil { %s%s = new(%s)}", - ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) - } - // if varname has [ in it, then create temp variable for this ptr thingie - if strings.Index(varname, "[") >= 0 { - varname2 := genTempVarPfx + "w" + i - x.line(varname2 + " := " + varname) - varname = varname2 - } - - if ptrPfx == "" { - x.dec(varname, t) - } else { - x.line(genTempVarPfx + "z" + i + " := " + ptrPfx + varname) - x.dec(genTempVarPfx+"z"+i, t) - } - - } - - if canBeNil { - x.line("} ") - } -} - -func (x *genRunner) dec(varname string, t reflect.Type) { - // assumptions: - // - the varname is to a pointer already. No need to take address of it - // - t is always a baseType T (not a *T, etc). 
- rtid := reflect.ValueOf(t).Pointer() - tptr := reflect.PtrTo(t) - if x.checkForSelfer(t, varname) { - if t.Implements(selferTyp) || tptr.Implements(selferTyp) { - x.line(varname + ".CodecDecodeSelf(d)") - return - } - if _, ok := x.td[rtid]; ok { - x.line(varname + ".CodecDecodeSelf(d)") - return - } - } - - inlist := false - for _, t0 := range x.t { - if t == t0 { - inlist = true - if x.checkForSelfer(t, varname) { - x.line(varname + ".CodecDecodeSelf(d)") - return - } - break - } - } - - var rtidAdded bool - if t == x.tc { - x.td[rtid] = true - rtidAdded = true - } - - // check if - // - type is RawExt - // - the type implements (Text|JSON|Binary)(Unm|M)arshal - mi := x.varsfx() - x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) - x.linef("_ = %sm%s", genTempVarPfx, mi) - x.line("if false {") //start if block - defer func() { x.line("}") }() //end if block - - if t == rawExtTyp { - x.linef("} else { r.DecodeExt(%v, 0, nil)", varname) - return - } - - // HACK: Support for Builtins. - // Currently, only Binc supports builtins, and the only builtin type is time.Time. - // Have a method that returns the rtid for time.Time if Handle is Binc. - if t == timeTyp { - vrtid := genTempVarPfx + "m" + x.varsfx() - x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) - x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname) - } - // only check for extensions if the type is named, and has a packagePath. - if genImportPath(t) != "" && t.Name() != "" { - // first check if extensions are configued, before doing the interface conversion - x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname) - } - - if t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) { - x.linef("} else if %sm%s { z.DecBinaryUnmarshal(%v) ", genTempVarPfx, mi, varname) - } - if t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.DecJSONUnmarshal(%v)", genTempVarPfx, mi, varname) - } else if t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) { - x.linef("} else if !%sm%s { z.DecTextUnmarshal(%v)", genTempVarPfx, mi, varname) - } - - x.line("} else {") - - // Since these are pointers, we cannot share, and have to use them one by one - switch t.Kind() { - case reflect.Int: - x.line("*((*int)(" + varname + ")) = int(r.DecodeInt(codecSelferBitsize" + x.xs + "))") - // x.line("z.DecInt((*int)(" + varname + "))") - case reflect.Int8: - x.line("*((*int8)(" + varname + ")) = int8(r.DecodeInt(8))") - // x.line("z.DecInt8((*int8)(" + varname + "))") - case reflect.Int16: - x.line("*((*int16)(" + varname + ")) = int16(r.DecodeInt(16))") - // x.line("z.DecInt16((*int16)(" + varname + "))") - case reflect.Int32: - x.line("*((*int32)(" + varname + ")) = int32(r.DecodeInt(32))") - // x.line("z.DecInt32((*int32)(" + varname + "))") - case reflect.Int64: - x.line("*((*int64)(" + varname + ")) = int64(r.DecodeInt(64))") - // x.line("z.DecInt64((*int64)(" + varname + "))") - - case reflect.Uint: - x.line("*((*uint)(" + varname + ")) = uint(r.DecodeUint(codecSelferBitsize" + x.xs + "))") - // x.line("z.DecUint((*uint)(" + varname + "))") - case reflect.Uint8: - x.line("*((*uint8)(" + varname + ")) = uint8(r.DecodeUint(8))") - // x.line("z.DecUint8((*uint8)(" + varname + "))") - case reflect.Uint16: - x.line("*((*uint16)(" + varname + ")) = uint16(r.DecodeUint(16))") - //x.line("z.DecUint16((*uint16)(" + varname + "))") - case reflect.Uint32: - x.line("*((*uint32)(" + varname + ")) = uint32(r.DecodeUint(32))") 
- //x.line("z.DecUint32((*uint32)(" + varname + "))") - case reflect.Uint64: - x.line("*((*uint64)(" + varname + ")) = uint64(r.DecodeUint(64))") - //x.line("z.DecUint64((*uint64)(" + varname + "))") - case reflect.Uintptr: - x.line("*((*uintptr)(" + varname + ")) = uintptr(r.DecodeUint(codecSelferBitsize" + x.xs + "))") - - case reflect.Float32: - x.line("*((*float32)(" + varname + ")) = float32(r.DecodeFloat(true))") - //x.line("z.DecFloat32((*float32)(" + varname + "))") - case reflect.Float64: - x.line("*((*float64)(" + varname + ")) = float64(r.DecodeFloat(false))") - // x.line("z.DecFloat64((*float64)(" + varname + "))") - - case reflect.Bool: - x.line("*((*bool)(" + varname + ")) = r.DecodeBool()") - // x.line("z.DecBool((*bool)(" + varname + "))") - case reflect.String: - x.line("*((*string)(" + varname + ")) = r.DecodeString()") - // x.line("z.DecString((*string)(" + varname + "))") - case reflect.Array, reflect.Chan: - x.xtraSM(varname, false, t) - // x.decListFallback(varname, rtid, true, t) - case reflect.Slice: - // if a []uint8, call dedicated function - // if a known fastpath slice, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - if rtid == uint8SliceTypId { - x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false, false)") - } else if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)") - } else { - x.xtraSM(varname, false, t) - // x.decListFallback(varname, rtid, false, t) - } - case reflect.Map: - // if a known fastpath map, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)") - } else { - x.xtraSM(varname, false, t) - // x.decMapFallback(varname, rtid, t) - } - case reflect.Struct: - if inlist { - x.decStruct(varname, rtid, t) - } else { - // delete(x.td, rtid) - x.line("z.DecFallback(" + varname + ", false)") - } - default: - if rtidAdded { - delete(x.te, rtid) - } - x.line("z.DecFallback(" + varname + ", true)") - } -} - -func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) { - // We have to use the actual type name when doing a direct assignment. - // We don't have the luxury of casting the pointer to the underlying type. 
- // - // Consequently, in the situation of a - // type Message int32 - // var x Message - // var i int32 = 32 - // x = i // this will bomb - // x = Message(i) // this will work - // *((*int32)(&x)) = i // this will work - // - // Consequently, we replace: - // case reflect.Uint32: x.line(varname + " = uint32(r.DecodeUint(32))") - // with: - // case reflect.Uint32: x.line(varname + " = " + genTypeNamePrim(t, x.tc) + "(r.DecodeUint(32))") - - xfn := func(t reflect.Type) string { - return x.genTypeNamePrim(t) - } - switch t.Kind() { - case reflect.Int: - x.linef("%s = %s(r.DecodeInt(codecSelferBitsize%s))", varname, xfn(t), x.xs) - case reflect.Int8: - x.linef("%s = %s(r.DecodeInt(8))", varname, xfn(t)) - case reflect.Int16: - x.linef("%s = %s(r.DecodeInt(16))", varname, xfn(t)) - case reflect.Int32: - x.linef("%s = %s(r.DecodeInt(32))", varname, xfn(t)) - case reflect.Int64: - x.linef("%s = %s(r.DecodeInt(64))", varname, xfn(t)) - - case reflect.Uint: - x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs) - case reflect.Uint8: - x.linef("%s = %s(r.DecodeUint(8))", varname, xfn(t)) - case reflect.Uint16: - x.linef("%s = %s(r.DecodeUint(16))", varname, xfn(t)) - case reflect.Uint32: - x.linef("%s = %s(r.DecodeUint(32))", varname, xfn(t)) - case reflect.Uint64: - x.linef("%s = %s(r.DecodeUint(64))", varname, xfn(t)) - case reflect.Uintptr: - x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs) - - case reflect.Float32: - x.linef("%s = %s(r.DecodeFloat(true))", varname, xfn(t)) - case reflect.Float64: - x.linef("%s = %s(r.DecodeFloat(false))", varname, xfn(t)) - - case reflect.Bool: - x.linef("%s = %s(r.DecodeBool())", varname, xfn(t)) - case reflect.String: - x.linef("%s = %s(r.DecodeString())", varname, xfn(t)) - default: - tryAsPtr = true - } - return -} - -func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { - type tstruc struct { - TempVar string - Rand string - Varname string - CTyp string - Typ string - Immutable bool - Size int - } - telem := t.Elem() - ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())} - - funcs := make(template.FuncMap) - - funcs["decLineVar"] = func(varname string) string { - x.decVar(varname, telem, false) - return "" - } - funcs["decLine"] = func(pfx string) string { - x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) - return "" - } - funcs["var"] = func(s string) string { - return ts.TempVar + s + ts.Rand - } - funcs["zero"] = func() string { - return x.genZeroValueR(telem) - } - funcs["isArray"] = func() bool { - return t.Kind() == reflect.Array - } - funcs["isSlice"] = func() bool { - return t.Kind() == reflect.Slice - } - funcs["isChan"] = func() bool { - return t.Kind() == reflect.Chan - } - tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl) - if err != nil { - panic(err) - } - if err = tm.Execute(x.w, &ts); err != nil { - panic(err) - } -} - -func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) { - type tstruc struct { - TempVar string - Sfx string - Rand string - Varname string - KTyp string - Typ string - Size int - } - telem := t.Elem() - tkey := t.Key() - ts := tstruc{ - genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey), - x.genTypeName(telem), int(telem.Size() + tkey.Size()), - } - - funcs := make(template.FuncMap) - funcs["decElemZero"] = func() string { - return x.genZeroValueR(telem) - } - funcs["decElemKindImmutable"] = func() 
bool { - return genIsImmutable(telem) - } - funcs["decElemKindPtr"] = func() bool { - return telem.Kind() == reflect.Ptr - } - funcs["decElemKindIntf"] = func() bool { - return telem.Kind() == reflect.Interface - } - funcs["decLineVarK"] = func(varname string) string { - x.decVar(varname, tkey, false) - return "" - } - funcs["decLineVar"] = func(varname string) string { - x.decVar(varname, telem, false) - return "" - } - funcs["decLineK"] = func(pfx string) string { - x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false) - return "" - } - funcs["decLine"] = func(pfx string) string { - x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) - return "" - } - funcs["var"] = func(s string) string { - return ts.TempVar + s + ts.Rand - } - - tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl) - if err != nil { - panic(err) - } - if err = tm.Execute(x.w, &ts); err != nil { - panic(err) - } -} - -func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) { - ti := x.ti.get(rtid, t) - tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. - x.line("switch (" + kName + ") {") - for _, si := range tisfi { - x.line("case \"" + si.encName + "\":") - var t2 reflect.StructField - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { - //we must accomodate anonymous fields, where the embedded field is a nil pointer in the value. - // t2 = t.FieldByIndex(si.is) - t2typ := t - varname3 := varname - for _, ix := range si.is { - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(ix) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - if t2typ.Kind() == reflect.Ptr { - x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) - } - } - } - x.decVar(varname+"."+t2.Name, t2.Type, false) - } - x.line("default:") - // pass the slice here, so that the string will not escape, and maybe save allocation - x.line("z.DecStructFieldNotFound(-1, " + kName + ")") - x.line("} // end switch " + kName) -} - -func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) { - tpfx := genTempVarPfx - i := x.varsfx() - kName := tpfx + "s" + i - - // We thought to use ReadStringAsBytes, as go compiler might optimize the copy out. - // However, using that was more expensive, as it seems that the switch expression - // is evaluated each time. - // - // We could depend on decodeString using a temporary/shared buffer internally. - // However, this model of creating a byte array, and using explicitly is faster, - // and allows optional use of unsafe []byte->string conversion without alloc. - - // Also, ensure that the slice array doesn't escape. - // That will help escape analysis prevent allocation when it gets better. - - // x.line("var " + kName + "Arr = [32]byte{} // default string to decode into") - // x.line("var " + kName + "Slc = " + kName + "Arr[:] // default slice to decode into") - // use the scratch buffer to avoid allocation (most field names are < 32). - - x.line("var " + kName + "Slc = z.DecScratchBuffer() // default slice to decode into") - - x.line("_ = " + kName + "Slc") - switch style { - case genStructMapStyleLenPrefix: - x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i) - case genStructMapStyleCheckBreak: - x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i) - default: // 0, otherwise. 
- x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length - x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i) - x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname) - x.line("} else { if r.CheckBreak() { break }; }") - } - x.linef("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs) - x.line(kName + "Slc = r.DecodeBytes(" + kName + "Slc, true, true)") - // let string be scoped to this loop alone, so it doesn't escape. - if x.unsafe { - x.line(kName + "SlcHdr := codecSelferUnsafeString" + x.xs + "{uintptr(unsafe.Pointer(&" + - kName + "Slc[0])), len(" + kName + "Slc)}") - x.line(kName + " := *(*string)(unsafe.Pointer(&" + kName + "SlcHdr))") - } else { - x.line(kName + " := string(" + kName + "Slc)") - } - x.linef("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs) - x.decStructMapSwitch(kName, varname, rtid, t) - - x.line("} // end for " + tpfx + "j" + i) - x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) -} - -func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { - tpfx := genTempVarPfx - i := x.varsfx() - ti := x.ti.get(rtid, t) - tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. - x.linef("var %sj%s int", tpfx, i) - x.linef("var %sb%s bool", tpfx, i) // break - x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length - for _, si := range tisfi { - var t2 reflect.StructField - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { - //we must accomodate anonymous fields, where the embedded field is a nil pointer in the value. - // t2 = t.FieldByIndex(si.is) - t2typ := t - varname3 := varname - for _, ix := range si.is { - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(ix) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - if t2typ.Kind() == reflect.Ptr { - x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) - } - } - } - - x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", - tpfx, i, tpfx, i, tpfx, i, - tpfx, i, lenvarname, tpfx, i) - x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }", - tpfx, i, x.xs, breakString) - x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - x.decVar(varname+"."+t2.Name, t2.Type, true) - } - // read remaining values and throw away. 
- x.line("for {") - x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", - tpfx, i, tpfx, i, tpfx, i, - tpfx, i, lenvarname, tpfx, i) - x.linef("if %sb%s { break }", tpfx, i) - x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i) - x.line("}") - x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) -} - -func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { - // if container is map - i := x.varsfx() - x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i) - x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs) - x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()") - x.linef("if %sl%s == 0 {", genTempVarPfx, i) - x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) - if genUseOneFunctionForDecStructMap { - x.line("} else { ") - x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i) - } else { - x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ") - x.line("x.codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)") - x.line("} else {") - x.line("x.codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)") - } - x.line("}") - - // else if container is array - x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs) - x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()") - x.linef("if %sl%s == 0 {", genTempVarPfx, i) - x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) - x.line("} else { ") - x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i) - x.line("}") - // else panic - x.line("} else { ") - x.line("panic(codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + ")") - x.line("} ") -} - -// -------- - -type genV struct { - // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice - MapKey string - Elem string - Primitive string - Size int -} - -func (x *genRunner) newGenV(t reflect.Type) (v genV) { - switch t.Kind() { - case reflect.Slice, reflect.Array: - te := t.Elem() - v.Elem = x.genTypeName(te) - v.Size = int(te.Size()) - case reflect.Map: - te, tk := t.Elem(), t.Key() - v.Elem = x.genTypeName(te) - v.MapKey = x.genTypeName(tk) - v.Size = int(te.Size() + tk.Size()) - default: - panic("unexpected type for newGenV. Requires map or slice type") - } - return -} - -func (x *genV) MethodNamePfx(prefix string, prim bool) string { - var name []byte - if prefix != "" { - name = append(name, prefix...) - } - if prim { - name = append(name, genTitleCaseName(x.Primitive)...) - } else { - if x.MapKey == "" { - name = append(name, "Slice"...) - } else { - name = append(name, "Map"...) - name = append(name, genTitleCaseName(x.MapKey)...) - } - name = append(name, genTitleCaseName(x.Elem)...) - } - return string(name) - -} - -var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" - -// genImportPath returns import path of a non-predeclared named typed, or an empty string otherwise. -// -// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled, -// where PkgPath returns the full path, including the vendoring pre-fix that should have been stripped. -// We strip it here. -func genImportPath(t reflect.Type) (s string) { - s = t.PkgPath() - if genCheckVendor { - // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later. - // if s contains /vendor/ OR startsWith vendor/, then return everything after it. 
- const vendorStart = "vendor/" - const vendorInline = "/vendor/" - if i := strings.LastIndex(s, vendorInline); i >= 0 { - s = s[i+len(vendorInline):] - } else if strings.HasPrefix(s, vendorStart) { - s = s[len(vendorStart):] - } - } - return -} - -func genNonPtr(t reflect.Type) reflect.Type { - for t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t -} - -func genTitleCaseName(s string) string { - switch s { - case "interface{}": - return "Intf" - default: - return strings.ToUpper(s[0:1]) + s[1:] - } -} - -func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) { - var ptrPfx string - for t.Kind() == reflect.Ptr { - ptrPfx += "Ptrto" - t = t.Elem() - } - tstr := t.String() - if tn := t.Name(); tn != "" { - if tRef != nil && genImportPath(t) == genImportPath(tRef) { - return ptrPfx + tn - } else { - if genQNameRegex.MatchString(tstr) { - return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } - } - switch t.Kind() { - case reflect.Map: - return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef) - case reflect.Slice: - return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef) - case reflect.Array: - return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef) - case reflect.Chan: - var cx string - switch t.ChanDir() { - case reflect.SendDir: - cx = "ChanSend" - case reflect.RecvDir: - cx = "ChanRecv" - default: - cx = "Chan" - } - return ptrPfx + cx + genMethodNameT(t.Elem(), tRef) - default: - if t == intfTyp { - return ptrPfx + "Interface" - } else { - if tRef != nil && genImportPath(t) == genImportPath(tRef) { - if t.Name() != "" { - return ptrPfx + t.Name() - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } else { - // best way to get the package name inclusive - // return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr)) - if t.Name() != "" && genQNameRegex.MatchString(tstr) { - return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } - } - } -} - -// genCustomNameForType base64encodes the t.String() value in such a way -// that it can be used within a function name. 
-func genCustomTypeName(tstr string) string { - len2 := genBase64enc.EncodedLen(len(tstr)) - bufx := make([]byte, len2) - genBase64enc.Encode(bufx, []byte(tstr)) - for i := len2 - 1; i >= 0; i-- { - if bufx[i] == '=' { - len2-- - } else { - break - } - } - return string(bufx[:len2]) -} - -func genIsImmutable(t reflect.Type) (v bool) { - return isImmutableKind(t.Kind()) -} - -type genInternal struct { - Values []genV - Unsafe bool -} - -func (x genInternal) FastpathLen() (l int) { - for _, v := range x.Values { - if v.Primitive == "" { - l++ - } - } - return -} - -func genInternalZeroValue(s string) string { - switch s { - case "interface{}": - return "nil" - case "bool": - return "false" - case "string": - return `""` - default: - return "0" - } -} - -func genInternalEncCommandAsString(s string, vname string) string { - switch s { - case "uint", "uint8", "uint16", "uint32", "uint64": - return "ee.EncodeUint(uint64(" + vname + "))" - case "int", "int8", "int16", "int32", "int64": - return "ee.EncodeInt(int64(" + vname + "))" - case "string": - return "ee.EncodeString(c_UTF8, " + vname + ")" - case "float32": - return "ee.EncodeFloat32(" + vname + ")" - case "float64": - return "ee.EncodeFloat64(" + vname + ")" - case "bool": - return "ee.EncodeBool(" + vname + ")" - case "symbol": - return "ee.EncodeSymbol(" + vname + ")" - default: - return "e.encode(" + vname + ")" - } -} - -func genInternalDecCommandAsString(s string) string { - switch s { - case "uint": - return "uint(dd.DecodeUint(uintBitsize))" - case "uint8": - return "uint8(dd.DecodeUint(8))" - case "uint16": - return "uint16(dd.DecodeUint(16))" - case "uint32": - return "uint32(dd.DecodeUint(32))" - case "uint64": - return "dd.DecodeUint(64)" - case "uintptr": - return "uintptr(dd.DecodeUint(uintBitsize))" - case "int": - return "int(dd.DecodeInt(intBitsize))" - case "int8": - return "int8(dd.DecodeInt(8))" - case "int16": - return "int16(dd.DecodeInt(16))" - case "int32": - return "int32(dd.DecodeInt(32))" - case "int64": - return "dd.DecodeInt(64)" - - case "string": - return "dd.DecodeString()" - case "float32": - return "float32(dd.DecodeFloat(true))" - case "float64": - return "dd.DecodeFloat(false)" - case "bool": - return "dd.DecodeBool()" - default: - panic(errors.New("gen internal: unknown type for decode: " + s)) - } -} - -func genInternalSortType(s string, elem bool) string { - for _, v := range [...]string{"int", "uint", "float", "bool", "string"} { - if strings.HasPrefix(s, v) { - if elem { - if v == "int" || v == "uint" || v == "float" { - return v + "64" - } else { - return v - } - } - return v + "Slice" - } - } - panic("sorttype: unexpected type: " + s) -} - -// var genInternalMu sync.Mutex -var genInternalV genInternal -var genInternalTmplFuncs template.FuncMap -var genInternalOnce sync.Once - -func genInternalInit() { - types := [...]string{ - "interface{}", - "string", - "float32", - "float64", - "uint", - "uint8", - "uint16", - "uint32", - "uint64", - "uintptr", - "int", - "int8", - "int16", - "int32", - "int64", - "bool", - } - // keep as slice, so it is in specific iteration order. 
- // Initial order was uint64, string, interface{}, int, int64 - mapvaltypes := [...]string{ - "interface{}", - "string", - "uint", - "uint8", - "uint16", - "uint32", - "uint64", - "uintptr", - "int", - "int8", - "int16", - "int32", - "int64", - "float32", - "float64", - "bool", - } - wordSizeBytes := int(intBitsize) / 8 - - mapvaltypes2 := map[string]int{ - "interface{}": 2 * wordSizeBytes, - "string": 2 * wordSizeBytes, - "uint": 1 * wordSizeBytes, - "uint8": 1, - "uint16": 2, - "uint32": 4, - "uint64": 8, - "uintptr": 1 * wordSizeBytes, - "int": 1 * wordSizeBytes, - "int8": 1, - "int16": 2, - "int32": 4, - "int64": 8, - "float32": 4, - "float64": 8, - "bool": 1, - } - var gt genInternal - - // For each slice or map type, there must be a (symetrical) Encode and Decode fast-path function - for _, s := range types { - gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) - if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. - gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) - } - if _, ok := mapvaltypes2[s]; !ok { - gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]}) - } - for _, ms := range mapvaltypes { - gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]}) - } - } - - funcs := make(template.FuncMap) - // funcs["haspfx"] = strings.HasPrefix - funcs["encmd"] = genInternalEncCommandAsString - funcs["decmd"] = genInternalDecCommandAsString - funcs["zerocmd"] = genInternalZeroValue - funcs["hasprefix"] = strings.HasPrefix - funcs["sorttype"] = genInternalSortType - - genInternalV = gt - genInternalTmplFuncs = funcs -} - -// genInternalGoFile is used to generate source files from templates. -// It is run by the program author alone. -// Unfortunately, it has to be exported so that it can be called from a command line tool. -// *** DO NOT USE *** -func genInternalGoFile(r io.Reader, w io.Writer, safe bool) (err error) { - genInternalOnce.Do(genInternalInit) - - gt := genInternalV - gt.Unsafe = !safe - - t := template.New("").Funcs(genInternalTmplFuncs) - - tmplstr, err := ioutil.ReadAll(r) - if err != nil { - return - } - - if t, err = t.Parse(string(tmplstr)); err != nil { - return - } - - var out bytes.Buffer - err = t.Execute(&out, gt) - if err != nil { - return - } - - bout, err := format.Source(out.Bytes()) - if err != nil { - w.Write(out.Bytes()) // write out if error, so we can still see. - // w.Write(bout) // write out if error, as much as possible, so we can still see. - return - } - w.Write(bout) - return -} diff --git a/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go deleted file mode 100644 index 560014ae3..000000000 --- a/vendor/github.com/ugorji/go/codec/helper.go +++ /dev/null @@ -1,1129 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// Contains code shared by both encode and decode. - -// Some shared ideas around encoding/decoding -// ------------------------------------------ -// -// If an interface{} is passed, we first do a type assertion to see if it is -// a primitive type or a map/slice of primitive types, and use a fastpath to handle it. -// -// If we start with a reflect.Value, we are already in reflect.Value land and -// will try to grab the function for the underlying Type and directly call that function. 
-// This is more performant than calling reflect.Value.Interface(). -// -// This still helps us bypass many layers of reflection, and give best performance. -// -// Containers -// ------------ -// Containers in the stream are either associative arrays (key-value pairs) or -// regular arrays (indexed by incrementing integers). -// -// Some streams support indefinite-length containers, and use a breaking -// byte-sequence to denote that the container has come to an end. -// -// Some streams also are text-based, and use explicit separators to denote the -// end/beginning of different values. -// -// During encode, we use a high-level condition to determine how to iterate through -// the container. That decision is based on whether the container is text-based (with -// separators) or binary (without separators). If binary, we do not even call the -// encoding of separators. -// -// During decode, we use a different high-level condition to determine how to iterate -// through the containers. That decision is based on whether the stream contained -// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that -// it has to be binary, and we do not even try to read separators. -// -// The only codec that may suffer (slightly) is cbor, and only when decoding indefinite-length. -// It may suffer because we treat it like a text-based codec, and read separators. -// However, this read is a no-op and the cost is insignificant. -// -// Philosophy -// ------------ -// On decode, this codec will update containers appropriately: -// - If struct, update fields from stream into fields of struct. -// If field in stream not found in struct, handle appropriately (based on option). -// If a struct field has no corresponding value in the stream, leave it AS IS. -// If nil in stream, set value to nil/zero value. -// - If map, update map from stream. -// If the stream value is NIL, set the map to nil. -// - if slice, try to update up to length of array in stream. -// if container len is less than stream array length, -// and container cannot be expanded, handled (based on option). -// This means you can decode 4-element stream array into 1-element array. -// -// ------------------------------------ -// On encode, user can specify omitEmpty. This means that the value will be omitted -// if the zero value. The problem may occur during decode, where omitted values do not affect -// the value being decoded into. This means that if decoding into a struct with an -// int field with current value=5, and the field is omitted in the stream, then after -// decoding, the value will still be 5 (not 0). -// omitEmpty only works if you guarantee that you always decode into zero-values. -// -// ------------------------------------ -// We could have truncated a map to remove keys not available in the stream, -// or set values in the struct which are not in the stream to their zero values. -// We decided against it because there is no efficient way to do it. -// We may introduce it as an option later. -// However, that will require enabling it for both runtime and code generation modes. -// -// To support truncate, we need to do 2 passes over the container: -// map -// - first collect all keys (e.g. 
in k1) -// - for each key in stream, mark k1 that the key should not be removed -// - after updating map, do second pass and call delete for all keys in k1 which are not marked -// struct: -// - for each field, track the *typeInfo s1 -// - iterate through all s1, and for each one not marked, set value to zero -// - this involves checking the possible anonymous fields which are nil ptrs. -// too much work. -// -// ------------------------------------------ -// Error Handling is done within the library using panic. -// -// This way, the code doesn't have to keep checking if an error has happened, -// and we don't have to keep sending the error value along with each call -// or storing it in the En|Decoder and checking it constantly along the way. -// -// The disadvantage is that small functions which use panics cannot be inlined. -// The code accounts for that by only using panics behind an interface; -// since interface calls cannot be inlined, this is irrelevant. -// -// We considered storing the error is En|Decoder. -// - once it has its err field set, it cannot be used again. -// - panicing will be optional, controlled by const flag. -// - code should always check error first and return early. -// We eventually decided against it as it makes the code clumsier to always -// check for these error conditions. - -import ( - "bytes" - "encoding" - "encoding/binary" - "errors" - "fmt" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" -) - -const ( - scratchByteArrayLen = 32 - initCollectionCap = 32 // 32 is defensive. 16 is preferred. - - // Support encoding.(Binary|Text)(Unm|M)arshaler. - // This constant flag will enable or disable it. - supportMarshalInterfaces = true - - // Each Encoder or Decoder uses a cache of functions based on conditionals, - // so that the conditionals are not run every time. - // - // Either a map or a slice is used to keep track of the functions. - // The map is more natural, but has a higher cost than a slice/array. - // This flag (useMapForCodecCache) controls which is used. - // - // From benchmarks, slices with linear search perform better with < 32 entries. - // We have typically seen a high threshold of about 24 entries. - useMapForCodecCache = false - - // for debugging, set this to false, to catch panic traces. - // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. - recoverPanicToErr = true - - // Fast path functions try to create a fast path encode or decode implementation - // for common maps and slices, by by-passing reflection altogether. - fastpathEnabled = true - - // if checkStructForEmptyValue, check structs fields to see if an empty value. - // This could be an expensive call, so possibly disable it. - checkStructForEmptyValue = false - - // if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue - derefForIsEmptyValue = false - - // if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first. - // Only concern is that, if the slice already contained some garbage, we will decode into that garbage. - // The chances of this are slim, so leave this "optimization". - // TODO: should this be true, to ensure that we always decode into a "zero" "empty" value? 
- resetSliceElemToZeroValue bool = false -) - -var oneByteArr = [1]byte{0} -var zeroByteSlice = oneByteArr[:0:0] - -type charEncoding uint8 - -const ( - c_RAW charEncoding = iota - c_UTF8 - c_UTF16LE - c_UTF16BE - c_UTF32LE - c_UTF32BE -) - -// valueType is the stream type -type valueType uint8 - -const ( - valueTypeUnset valueType = iota - valueTypeNil - valueTypeInt - valueTypeUint - valueTypeFloat - valueTypeBool - valueTypeString - valueTypeSymbol - valueTypeBytes - valueTypeMap - valueTypeArray - valueTypeTimestamp - valueTypeExt - - // valueTypeInvalid = 0xff -) - -type seqType uint8 - -const ( - _ seqType = iota - seqTypeArray - seqTypeSlice - seqTypeChan -) - -// note that containerMapStart and containerArraySend are not sent. -// This is because the ReadXXXStart and EncodeXXXStart already does these. -type containerState uint8 - -const ( - _ containerState = iota - - containerMapStart // slot left open, since Driver method already covers it - containerMapKey - containerMapValue - containerMapEnd - containerArrayStart // slot left open, since Driver methods already cover it - containerArrayElem - containerArrayEnd -) - -type containerStateRecv interface { - sendContainerState(containerState) -} - -// mirror json.Marshaler and json.Unmarshaler here, -// so we don't import the encoding/json package -type jsonMarshaler interface { - MarshalJSON() ([]byte, error) -} -type jsonUnmarshaler interface { - UnmarshalJSON([]byte) error -} - -var ( - bigen = binary.BigEndian - structInfoFieldName = "_struct" - - mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) - mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) - intfSliceTyp = reflect.TypeOf([]interface{}(nil)) - intfTyp = intfSliceTyp.Elem() - - stringTyp = reflect.TypeOf("") - timeTyp = reflect.TypeOf(time.Time{}) - rawExtTyp = reflect.TypeOf(RawExt{}) - uint8SliceTyp = reflect.TypeOf([]uint8(nil)) - - mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() - - binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() - binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() - - textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() - - jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() - jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() - - selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() - - uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() - rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() - intfTypId = reflect.ValueOf(intfTyp).Pointer() - timeTypId = reflect.ValueOf(timeTyp).Pointer() - stringTypId = reflect.ValueOf(stringTyp).Pointer() - - mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() - mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() - intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() - // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() - - intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) - uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) - - bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} - bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} - - chkOvf checkOverflow - - noFieldNameToStructFieldInfoErr = errors.New("no field name passed to parseStructFieldInfo") -) - -var defTypeInfos = NewTypeInfos([]string{"codec", "json"}) - -// Selfer defines methods by which a value can encode or decode itself. 
-// -// Any type which implements Selfer will be able to encode or decode itself. -// Consequently, during (en|de)code, this takes precedence over -// (text|binary)(M|Unm)arshal or extension support. -type Selfer interface { - CodecEncodeSelf(*Encoder) - CodecDecodeSelf(*Decoder) -} - -// MapBySlice represents a slice which should be encoded as a map in the stream. -// The slice contains a sequence of key-value pairs. -// This affords storing a map in a specific sequence in the stream. -// -// The support of MapBySlice affords the following: -// - A slice type which implements MapBySlice will be encoded as a map -// - A slice can be decoded from a map in the stream -type MapBySlice interface { - MapBySlice() -} - -// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. -// -// BasicHandle encapsulates the common options and extension functions. -type BasicHandle struct { - // TypeInfos is used to get the type info for any type. - // - // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json - TypeInfos *TypeInfos - - extHandle - EncodeOptions - DecodeOptions -} - -func (x *BasicHandle) getBasicHandle() *BasicHandle { - return x -} - -func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - if x.TypeInfos != nil { - return x.TypeInfos.get(rtid, rt) - } - return defTypeInfos.get(rtid, rt) -} - -// Handle is the interface for a specific encoding format. -// -// Typically, a Handle is pre-configured before first time use, -// and not modified while in use. Such a pre-configured Handle -// is safe for concurrent access. -type Handle interface { - getBasicHandle() *BasicHandle - newEncDriver(w *Encoder) encDriver - newDecDriver(r *Decoder) decDriver - isBinary() bool -} - -// RawExt represents raw unprocessed extension data. -// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag. -// -// Only one of Data or Value is nil. If Data is nil, then the content of the RawExt is in the Value. -type RawExt struct { - Tag uint64 - // Data is the []byte which represents the raw ext. If Data is nil, ext is exposed in Value. - // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types - Data []byte - // Value represents the extension, if Data is nil. - // Value is used by codecs (e.g. cbor) which use the format to do custom serialization of the types. - Value interface{} -} - -// BytesExt handles custom (de)serialization of types to/from []byte. -// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. -type BytesExt interface { - // WriteExt converts a value to a []byte. - // - // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. - WriteExt(v interface{}) []byte - - // ReadExt updates a value from a []byte. - ReadExt(dst interface{}, src []byte) -} - -// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. -// The Encoder or Decoder will then handle the further (de)serialization of that known type. -// -// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. -type InterfaceExt interface { - // ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64. - // - // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. 
- ConvertExt(v interface{}) interface{} - - // UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time. - UpdateExt(dst interface{}, src interface{}) -} - -// Ext handles custom (de)serialization of custom types / extensions. -type Ext interface { - BytesExt - InterfaceExt -} - -// addExtWrapper is a wrapper implementation to support former AddExt exported method. -type addExtWrapper struct { - encFn func(reflect.Value) ([]byte, error) - decFn func(reflect.Value, []byte) error -} - -func (x addExtWrapper) WriteExt(v interface{}) []byte { - bs, err := x.encFn(reflect.ValueOf(v)) - if err != nil { - panic(err) - } - return bs -} - -func (x addExtWrapper) ReadExt(v interface{}, bs []byte) { - if err := x.decFn(reflect.ValueOf(v), bs); err != nil { - panic(err) - } -} - -func (x addExtWrapper) ConvertExt(v interface{}) interface{} { - return x.WriteExt(v) -} - -func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) { - x.ReadExt(dest, v.([]byte)) -} - -type setExtWrapper struct { - b BytesExt - i InterfaceExt -} - -func (x *setExtWrapper) WriteExt(v interface{}) []byte { - if x.b == nil { - panic("BytesExt.WriteExt is not supported") - } - return x.b.WriteExt(v) -} - -func (x *setExtWrapper) ReadExt(v interface{}, bs []byte) { - if x.b == nil { - panic("BytesExt.WriteExt is not supported") - - } - x.b.ReadExt(v, bs) -} - -func (x *setExtWrapper) ConvertExt(v interface{}) interface{} { - if x.i == nil { - panic("InterfaceExt.ConvertExt is not supported") - - } - return x.i.ConvertExt(v) -} - -func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) { - if x.i == nil { - panic("InterfaceExxt.UpdateExt is not supported") - - } - x.i.UpdateExt(dest, v) -} - -// type errorString string -// func (x errorString) Error() string { return string(x) } - -type binaryEncodingType struct{} - -func (_ binaryEncodingType) isBinary() bool { return true } - -type textEncodingType struct{} - -func (_ textEncodingType) isBinary() bool { return false } - -// noBuiltInTypes is embedded into many types which do not support builtins -// e.g. msgpack, simple, cbor. -type noBuiltInTypes struct{} - -func (_ noBuiltInTypes) IsBuiltinType(rt uintptr) bool { return false } -func (_ noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {} -func (_ noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} - -type noStreamingCodec struct{} - -func (_ noStreamingCodec) CheckBreak() bool { return false } - -// bigenHelper. -// Users must already slice the x completely, because we will not reslice. -type bigenHelper struct { - x []byte // must be correctly sliced to appropriate len. slicing is a cost. - w encWriter -} - -func (z bigenHelper) writeUint16(v uint16) { - bigen.PutUint16(z.x, v) - z.w.writeb(z.x) -} - -func (z bigenHelper) writeUint32(v uint32) { - bigen.PutUint32(z.x, v) - z.w.writeb(z.x) -} - -func (z bigenHelper) writeUint64(v uint64) { - bigen.PutUint64(z.x, v) - z.w.writeb(z.x) -} - -type extTypeTagFn struct { - rtid uintptr - rt reflect.Type - tag uint64 - ext Ext -} - -type extHandle []extTypeTagFn - -// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. -// -// AddExt registes an encode and decode function for a reflect.Type. -// AddExt internally calls SetExt. -// To deregister an Ext, call AddExt with nil encfn and/or nil decfn. 
-func (o *extHandle) AddExt( - rt reflect.Type, tag byte, - encfn func(reflect.Value) ([]byte, error), decfn func(reflect.Value, []byte) error, -) (err error) { - if encfn == nil || decfn == nil { - return o.SetExt(rt, uint64(tag), nil) - } - return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn}) -} - -// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. -// -// Note that the type must be a named type, and specifically not -// a pointer or Interface. An error is returned if that is not honored. -// -// To Deregister an ext, call SetExt with nil Ext -func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { - // o is a pointer, because we may need to initialize it - if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { - err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T", - reflect.Zero(rt).Interface()) - return - } - - rtid := reflect.ValueOf(rt).Pointer() - for _, v := range *o { - if v.rtid == rtid { - v.tag, v.ext = tag, ext - return - } - } - - if *o == nil { - *o = make([]extTypeTagFn, 0, 4) - } - *o = append(*o, extTypeTagFn{rtid, rt, tag, ext}) - return -} - -func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { - var v *extTypeTagFn - for i := range o { - v = &o[i] - if v.rtid == rtid { - return v - } - } - return nil -} - -func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn { - var v *extTypeTagFn - for i := range o { - v = &o[i] - if v.tag == tag { - return v - } - } - return nil -} - -type structFieldInfo struct { - encName string // encode name - - // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. - - is []int // (recursive/embedded) field index in struct - i int16 // field index in struct - omitEmpty bool - toArray bool // if field is _struct, is the toArray set? -} - -// func (si *structFieldInfo) isZero() bool { -// return si.encName == "" && len(si.is) == 0 && si.i == 0 && !si.omitEmpty && !si.toArray -// } - -// rv returns the field of the struct. 
-// If anonymous, it returns an Invalid -func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value) { - if si.i != -1 { - v = v.Field(int(si.i)) - return v - } - // replicate FieldByIndex - for _, x := range si.is { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - if !update { - return - } - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = v.Field(x) - } - return v -} - -func (si *structFieldInfo) setToZeroValue(v reflect.Value) { - if si.i != -1 { - v = v.Field(int(si.i)) - v.Set(reflect.Zero(v.Type())) - // v.Set(reflect.New(v.Type()).Elem()) - // v.Set(reflect.New(v.Type())) - } else { - // replicate FieldByIndex - for _, x := range si.is { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - return - } - v = v.Elem() - } - v = v.Field(x) - } - v.Set(reflect.Zero(v.Type())) - } -} - -func parseStructFieldInfo(fname string, stag string) *structFieldInfo { - // if fname == "" { - // panic(noFieldNameToStructFieldInfoErr) - // } - si := structFieldInfo{ - encName: fname, - } - - if stag != "" { - for i, s := range strings.Split(stag, ",") { - if i == 0 { - if s != "" { - si.encName = s - } - } else { - if s == "omitempty" { - si.omitEmpty = true - } else if s == "toarray" { - si.toArray = true - } - } - } - } - // si.encNameBs = []byte(si.encName) - return &si -} - -type sfiSortedByEncName []*structFieldInfo - -func (p sfiSortedByEncName) Len() int { - return len(p) -} - -func (p sfiSortedByEncName) Less(i, j int) bool { - return p[i].encName < p[j].encName -} - -func (p sfiSortedByEncName) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -// typeInfo keeps information about each type referenced in the encode/decode sequence. -// -// During an encode/decode sequence, we work as below: -// - If base is a built in type, en/decode base value -// - If base is registered as an extension, en/decode base value -// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method -// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method -// - Else decode appropriately based on the reflect.Kind -type typeInfo struct { - sfi []*structFieldInfo // sorted. Used when enc/dec struct to map. - sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. - - rt reflect.Type - rtid uintptr - - numMeth uint16 // number of methods - - // baseId gives pointer to the base reflect.Type, after deferencing - // the pointers. E.g. base type of ***time.Time is time.Time. 
- base reflect.Type - baseId uintptr - baseIndir int8 // number of indirections to get to base - - mbs bool // base type (T or *T) is a MapBySlice - - bm bool // base type (T or *T) is a binaryMarshaler - bunm bool // base type (T or *T) is a binaryUnmarshaler - bmIndir int8 // number of indirections to get to binaryMarshaler type - bunmIndir int8 // number of indirections to get to binaryUnmarshaler type - - tm bool // base type (T or *T) is a textMarshaler - tunm bool // base type (T or *T) is a textUnmarshaler - tmIndir int8 // number of indirections to get to textMarshaler type - tunmIndir int8 // number of indirections to get to textUnmarshaler type - - jm bool // base type (T or *T) is a jsonMarshaler - junm bool // base type (T or *T) is a jsonUnmarshaler - jmIndir int8 // number of indirections to get to jsonMarshaler type - junmIndir int8 // number of indirections to get to jsonUnmarshaler type - - cs bool // base type (T or *T) is a Selfer - csIndir int8 // number of indirections to get to Selfer type - - toArray bool // whether this (struct) type should be encoded as an array -} - -func (ti *typeInfo) indexForEncName(name string) int { - //tisfi := ti.sfi - const binarySearchThreshold = 16 - if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { - // linear search. faster than binary search in my testing up to 16-field structs. - for i, si := range ti.sfi { - if si.encName == name { - return i - } - } - } else { - // binary search. adapted from sort/search.go. - h, i, j := 0, 0, sfilen - for i < j { - h = i + (j-i)/2 - if ti.sfi[h].encName < name { - i = h + 1 - } else { - j = h - } - } - if i < sfilen && ti.sfi[i].encName == name { - return i - } - } - return -1 -} - -// TypeInfos caches typeInfo for each type on first inspection. -// -// It is configured with a set of tag keys, which are used to get -// configuration for the type. -type TypeInfos struct { - infos map[uintptr]*typeInfo - mu sync.RWMutex - tags []string -} - -// NewTypeInfos creates a TypeInfos given a set of struct tags keys. -// -// This allows users customize the struct tag keys which contain configuration -// of their types. -func NewTypeInfos(tags []string) *TypeInfos { - return &TypeInfos{tags: tags, infos: make(map[uintptr]*typeInfo, 64)} -} - -func (x *TypeInfos) structTag(t reflect.StructTag) (s string) { - // check for tags: codec, json, in that order. - // this allows seamless support for many configured structs. - for _, x := range x.tags { - s = t.Get(x) - if s != "" { - return s - } - } - return -} - -func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - var ok bool - x.mu.RLock() - pti, ok = x.infos[rtid] - x.mu.RUnlock() - if ok { - return - } - - // do not hold lock while computing this. - // it may lead to duplication, but that's ok. 
- ti := typeInfo{rt: rt, rtid: rtid} - ti.numMeth = uint16(rt.NumMethod()) - - var indir int8 - if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { - ti.bm, ti.bmIndir = true, indir - } - if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { - ti.bunm, ti.bunmIndir = true, indir - } - if ok, indir = implementsIntf(rt, textMarshalerTyp); ok { - ti.tm, ti.tmIndir = true, indir - } - if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok { - ti.tunm, ti.tunmIndir = true, indir - } - if ok, indir = implementsIntf(rt, jsonMarshalerTyp); ok { - ti.jm, ti.jmIndir = true, indir - } - if ok, indir = implementsIntf(rt, jsonUnmarshalerTyp); ok { - ti.junm, ti.junmIndir = true, indir - } - if ok, indir = implementsIntf(rt, selferTyp); ok { - ti.cs, ti.csIndir = true, indir - } - if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { - ti.mbs = true - } - - pt := rt - var ptIndir int8 - // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } - for pt.Kind() == reflect.Ptr { - pt = pt.Elem() - ptIndir++ - } - if ptIndir == 0 { - ti.base = rt - ti.baseId = rtid - } else { - ti.base = pt - ti.baseId = reflect.ValueOf(pt).Pointer() - ti.baseIndir = ptIndir - } - - if rt.Kind() == reflect.Struct { - var siInfo *structFieldInfo - if f, ok := rt.FieldByName(structInfoFieldName); ok { - siInfo = parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag)) - ti.toArray = siInfo.toArray - } - sfip := make([]*structFieldInfo, 0, rt.NumField()) - x.rget(rt, nil, make(map[string]bool, 16), &sfip, siInfo) - - ti.sfip = make([]*structFieldInfo, len(sfip)) - ti.sfi = make([]*structFieldInfo, len(sfip)) - copy(ti.sfip, sfip) - sort.Sort(sfiSortedByEncName(sfip)) - copy(ti.sfi, sfip) - } - // sfi = sfip - - x.mu.Lock() - if pti, ok = x.infos[rtid]; !ok { - pti = &ti - x.infos[rtid] = pti - } - x.mu.Unlock() - return -} - -func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, - sfi *[]*structFieldInfo, siInfo *structFieldInfo, -) { - for j := 0; j < rt.NumField(); j++ { - f := rt.Field(j) - fkind := f.Type.Kind() - // skip if a func type, or is unexported, or structTag value == "-" - if fkind == reflect.Func { - continue - } - // if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) { - if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded - continue - } - stag := x.structTag(f.Tag) - if stag == "-" { - continue - } - var si *structFieldInfo - // if anonymous and no struct tag (or it's blank), and a struct (or pointer to struct), inline it. - if f.Anonymous && fkind != reflect.Interface { - doInline := stag == "" - if !doInline { - si = parseStructFieldInfo("", stag) - doInline = si.encName == "" - // doInline = si.isZero() - } - if doInline { - ft := f.Type - for ft.Kind() == reflect.Ptr { - ft = ft.Elem() - } - if ft.Kind() == reflect.Struct { - indexstack2 := make([]int, len(indexstack)+1, len(indexstack)+4) - copy(indexstack2, indexstack) - indexstack2[len(indexstack)] = j - // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - x.rget(ft, indexstack2, fnameToHastag, sfi, siInfo) - continue - } - } - } - - // after the anonymous dance: if an unexported field, skip - if f.PkgPath != "" { // unexported - continue - } - - // do not let fields with same name in embedded structs override field at higher level. 
- // this must be done after anonymous check, to allow anonymous field - // still include their child fields - if _, ok := fnameToHastag[f.Name]; ok { - continue - } - if f.Name == "" { - panic(noFieldNameToStructFieldInfoErr) - } - if si == nil { - si = parseStructFieldInfo(f.Name, stag) - } else if si.encName == "" { - si.encName = f.Name - } - // si.ikind = int(f.Type.Kind()) - if len(indexstack) == 0 { - si.i = int16(j) - } else { - si.i = -1 - si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - } - - if siInfo != nil { - if siInfo.omitEmpty { - si.omitEmpty = true - } - } - *sfi = append(*sfi, si) - fnameToHastag[f.Name] = stag != "" - } -} - -func panicToErr(err *error) { - if recoverPanicToErr { - if x := recover(); x != nil { - //debug.PrintStack() - panicValToErr(x, err) - } - } -} - -// func doPanic(tag string, format string, params ...interface{}) { -// params2 := make([]interface{}, len(params)+1) -// params2[0] = tag -// copy(params2[1:], params) -// panic(fmt.Errorf("%s: "+format, params2...)) -// } - -func isImmutableKind(k reflect.Kind) (v bool) { - return false || - k == reflect.Int || - k == reflect.Int8 || - k == reflect.Int16 || - k == reflect.Int32 || - k == reflect.Int64 || - k == reflect.Uint || - k == reflect.Uint8 || - k == reflect.Uint16 || - k == reflect.Uint32 || - k == reflect.Uint64 || - k == reflect.Uintptr || - k == reflect.Float32 || - k == reflect.Float64 || - k == reflect.Bool || - k == reflect.String -} - -// these functions must be inlinable, and not call anybody -type checkOverflow struct{} - -func (_ checkOverflow) Float32(f float64) (overflow bool) { - if f < 0 { - f = -f - } - return math.MaxFloat32 < f && f <= math.MaxFloat64 -} - -func (_ checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) { - if bitsize == 0 || bitsize >= 64 || v == 0 { - return - } - if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { - overflow = true - } - return -} - -func (_ checkOverflow) Int(v int64, bitsize uint8) (overflow bool) { - if bitsize == 0 || bitsize >= 64 || v == 0 { - return - } - if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { - overflow = true - } - return -} - -func (_ checkOverflow) SignedInt(v uint64) (i int64, overflow bool) { - //e.g. 
-127 to 128 for int8 - pos := (v >> 63) == 0 - ui2 := v & 0x7fffffffffffffff - if pos { - if ui2 > math.MaxInt64 { - overflow = true - return - } - } else { - if ui2 > math.MaxInt64-1 { - overflow = true - return - } - } - i = int64(v) - return -} - -// ------------------ SORT ----------------- - -func isNaN(f float64) bool { return f != f } - -// ----------------------- - -type intSlice []int64 -type uintSlice []uint64 -type floatSlice []float64 -type boolSlice []bool -type stringSlice []string -type bytesSlice [][]byte - -func (p intSlice) Len() int { return len(p) } -func (p intSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p intSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p uintSlice) Len() int { return len(p) } -func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p uintSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p floatSlice) Len() int { return len(p) } -func (p floatSlice) Less(i, j int) bool { - return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j]) -} -func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p stringSlice) Len() int { return len(p) } -func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p stringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p bytesSlice) Len() int { return len(p) } -func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 } -func (p bytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p boolSlice) Len() int { return len(p) } -func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] } -func (p boolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// --------------------- - -type intRv struct { - v int64 - r reflect.Value -} -type intRvSlice []intRv -type uintRv struct { - v uint64 - r reflect.Value -} -type uintRvSlice []uintRv -type floatRv struct { - v float64 - r reflect.Value -} -type floatRvSlice []floatRv -type boolRv struct { - v bool - r reflect.Value -} -type boolRvSlice []boolRv -type stringRv struct { - v string - r reflect.Value -} -type stringRvSlice []stringRv -type bytesRv struct { - v []byte - r reflect.Value -} -type bytesRvSlice []bytesRv - -func (p intRvSlice) Len() int { return len(p) } -func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } -func (p intRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p uintRvSlice) Len() int { return len(p) } -func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } -func (p uintRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p floatRvSlice) Len() int { return len(p) } -func (p floatRvSlice) Less(i, j int) bool { - return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v) -} -func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p stringRvSlice) Len() int { return len(p) } -func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } -func (p stringRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p bytesRvSlice) Len() int { return len(p) } -func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } -func (p bytesRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p boolRvSlice) Len() int { return len(p) } -func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v } -func (p boolRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// ----------------- - -type bytesI struct { - v []byte - i interface{} -} - -type bytesISlice []bytesI - -func (p bytesISlice) Len() int { return len(p) } -func (p bytesISlice) Less(i, j int) 
bool { return bytes.Compare(p[i].v, p[j].v) == -1 } -func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/ugorji/go/codec/helper_internal.go deleted file mode 100644 index dea981fbb..000000000 --- a/vendor/github.com/ugorji/go/codec/helper_internal.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// All non-std package dependencies live in this file, -// so porting to different environment is easy (just update functions). - -import ( - "errors" - "fmt" - "math" - "reflect" -) - -func panicValToErr(panicVal interface{}, err *error) { - if panicVal == nil { - return - } - // case nil - switch xerr := panicVal.(type) { - case error: - *err = xerr - case string: - *err = errors.New(xerr) - default: - *err = fmt.Errorf("%v", panicVal) - } - return -} - -func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { - switch v.Kind() { - case reflect.Invalid: - return true - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if deref { - if v.IsNil() { - return true - } - return hIsEmptyValue(v.Elem(), deref, checkStruct) - } else { - return v.IsNil() - } - case reflect.Struct: - if !checkStruct { - return false - } - // return true if all fields are empty. else return false. - // we cannot use equality check, because some fields may be maps/slices/etc - // and consequently the structs are not comparable. - // return v.Interface() == reflect.Zero(v.Type()).Interface() - for i, n := 0, v.NumField(); i < n; i++ { - if !hIsEmptyValue(v.Field(i), deref, checkStruct) { - return false - } - } - return true - } - return false -} - -func isEmptyValue(v reflect.Value) bool { - return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue) -} - -func pruneSignExt(v []byte, pos bool) (n int) { - if len(v) < 2 { - } else if pos && v[0] == 0 { - for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { - } - } else if !pos && v[0] == 0xff { - for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { - } - } - return -} - -func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { - if typ == nil { - return - } - rt := typ - // The type might be a pointer and we need to keep - // dereferencing to the base type until we find an implementation. - for { - if rt.Implements(iTyp) { - return true, indir - } - if p := rt; p.Kind() == reflect.Ptr { - indir++ - if indir >= math.MaxInt8 { // insane number of indirections - return false, 0 - } - rt = p.Elem() - continue - } - break - } - // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. - if typ.Kind() != reflect.Ptr { - // Not a pointer, but does the pointer work? - if reflect.PtrTo(typ).Implements(iTyp) { - return true, -1 - } - } - return false, 0 -} - -// validate that this function is correct ... 
-// culled from OGRE (Object-Oriented Graphics Rendering Engine) -// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html) -func halfFloatToFloatBits(yy uint16) (d uint32) { - y := uint32(yy) - s := (y >> 15) & 0x01 - e := (y >> 10) & 0x1f - m := y & 0x03ff - - if e == 0 { - if m == 0 { // plu or minus 0 - return s << 31 - } else { // Denormalized number -- renormalize it - for (m & 0x00000400) == 0 { - m <<= 1 - e -= 1 - } - e += 1 - const zz uint32 = 0x0400 - m &= ^zz - } - } else if e == 31 { - if m == 0 { // Inf - return (s << 31) | 0x7f800000 - } else { // NaN - return (s << 31) | 0x7f800000 | (m << 13) - } - } - e = e + (127 - 15) - m = m << 13 - return (s << 31) | (e << 23) | m -} - -// GrowCap will return a new capacity for a slice, given the following: -// - oldCap: current capacity -// - unit: in-memory size of an element -// - num: number of elements to add -func growCap(oldCap, unit, num int) (newCap int) { - // appendslice logic (if cap < 1024, *2, else *1.25): - // leads to many copy calls, especially when copying bytes. - // bytes.Buffer model (2*cap + n): much better for bytes. - // smarter way is to take the byte-size of the appended element(type) into account - - // maintain 3 thresholds: - // t1: if cap <= t1, newcap = 2x - // t2: if cap <= t2, newcap = 1.75x - // t3: if cap <= t3, newcap = 1.5x - // else newcap = 1.25x - // - // t1, t2, t3 >= 1024 always. - // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same) - // - // With this, appending for bytes increase by: - // 100% up to 4K - // 75% up to 8K - // 50% up to 16K - // 25% beyond that - - // unit can be 0 e.g. for struct{}{}; handle that appropriately - var t1, t2, t3 int // thresholds - if unit <= 1 { - t1, t2, t3 = 4*1024, 8*1024, 16*1024 - } else if unit < 16 { - t3 = 16 / unit * 1024 - t1 = t3 * 1 / 4 - t2 = t3 * 2 / 4 - } else { - t1, t2, t3 = 1024, 1024, 1024 - } - - var x int // temporary variable - - // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively - if oldCap <= t1 { // [0,t1] - x = 8 - } else if oldCap > t3 { // (t3,infinity] - x = 5 - } else if oldCap <= t2 { // (t1,t2] - x = 7 - } else { // (t2,t3] - x = 6 - } - newCap = x * oldCap / 4 - - if num > 0 { - newCap += num - } - - // ensure newCap is a multiple of 64 (if it is > 64) or 16. - if newCap > 64 { - if x = newCap % 64; x != 0 { - x = newCap / 64 - newCap = 64 * (x + 1) - } - } else { - if x = newCap % 16; x != 0 { - x = newCap / 16 - newCap = 16 * (x + 1) - } - } - return -} - -func expandSliceValue(s reflect.Value, num int) reflect.Value { - if num <= 0 { - return s - } - l0 := s.Len() - l1 := l0 + num // new slice length - if l1 < l0 { - panic("ExpandSlice: slice overflow") - } - c0 := s.Cap() - if l1 <= c0 { - return s.Slice(0, l1) - } - st := s.Type() - c1 := growCap(c0, int(st.Elem().Size()), num) - s2 := reflect.MakeSlice(st, l1, c1) - // println("expandslicevalue: cap-old: ", c0, ", cap-new: ", c1, ", len-new: ", l1) - reflect.Copy(s2, s) - return s2 -} diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go deleted file mode 100644 index 7c2ffc0fd..000000000 --- a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go +++ /dev/null @@ -1,20 +0,0 @@ -//+build !unsafe - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
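
// Illustrative sketch (not part of the patch): the growth policy documented in
// growCap above, specialized to unit size 1 (bytes), where the thresholds are
// 4K/8K/16K and the multiplier steps down from 2x to 1.75x, 1.5x and 1.25x,
// with the result rounded up to a multiple of 64. growBytesCap is a simplified
// stand-in, not the vendored function.
package main

import "fmt"

func growBytesCap(oldCap, num int) int {
	var x int
	switch {
	case oldCap <= 4*1024:
		x = 8 // 2x
	case oldCap > 16*1024:
		x = 5 // 1.25x
	case oldCap <= 8*1024:
		x = 7 // 1.75x
	default:
		x = 6 // 1.5x
	}
	newCap := x*oldCap/4 + num
	if r := newCap % 64; newCap > 64 && r != 0 {
		newCap += 64 - r // round up to a multiple of 64
	}
	return newCap
}

func main() {
	for _, c := range []int{1024, 4096, 8192, 32 * 1024} {
		fmt.Println(c, "->", growBytesCap(c, 1))
	}
}
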
- -package codec - -// stringView returns a view of the []byte as a string. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. -func stringView(v []byte) string { - return string(v) -} - -// bytesView returns a view of the string as a []byte. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. -func bytesView(v string) []byte { - return []byte(v) -} diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go deleted file mode 100644 index 373b2b102..000000000 --- a/vendor/github.com/ugorji/go/codec/helper_unsafe.go +++ /dev/null @@ -1,45 +0,0 @@ -//+build unsafe - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "unsafe" -) - -// This file has unsafe variants of some helper methods. - -type unsafeString struct { - Data uintptr - Len int -} - -type unsafeBytes struct { - Data uintptr - Len int - Cap int -} - -// stringView returns a view of the []byte as a string. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. -func stringView(v []byte) string { - if len(v) == 0 { - return "" - } - x := unsafeString{uintptr(unsafe.Pointer(&v[0])), len(v)} - return *(*string)(unsafe.Pointer(&x)) -} - -// bytesView returns a view of the string as a []byte. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. -func bytesView(v string) []byte { - if len(v) == 0 { - return zeroByteSlice - } - x := unsafeBytes{uintptr(unsafe.Pointer(&v)), len(v), len(v)} - return *(*[]byte)(unsafe.Pointer(&x)) -} diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go deleted file mode 100644 index a18a5f706..000000000 --- a/vendor/github.com/ugorji/go/codec/json.go +++ /dev/null @@ -1,1072 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// By default, this json support uses base64 encoding for bytes, because you cannot -// store and read any arbitrary string in json (only unicode). -// However, the user can configre how to encode/decode bytes. -// -// This library specifically supports UTF-8 for encoding and decoding only. -// -// Note that the library will happily encode/decode things which are not valid -// json e.g. a map[int64]string. We do it for consistency. With valid json, -// we will encode and decode appropriately. -// Users can specify their map type if necessary to force it. -// -// Note: -// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently. -// We implement it here. -// - Also, strconv.ParseXXX for floats and integers -// - only works on strings resulting in unnecessary allocation and []byte-string conversion. -// - it does a lot of redundant checks, because json numbers are simpler that what it supports. -// - We parse numbers (floats and integers) directly here. -// We only delegate parsing floats if it is a hairy float which could cause a loss of precision. -// In that case, we delegate to strconv.ParseFloat. -// -// Note: -// - encode does not beautify. 
There is no whitespace when encoding. -// - rpc calls which take single integer arguments or write single numeric arguments will need care. - -// Top-level methods of json(End|Dec)Driver (which are implementations of (en|de)cDriver -// MUST not call one-another. - -import ( - "bytes" - "encoding/base64" - "fmt" - "reflect" - "strconv" - "unicode/utf16" - "unicode/utf8" -) - -//-------------------------------- - -var jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'} - -var jsonFloat64Pow10 = [...]float64{ - 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, - 1e20, 1e21, 1e22, -} - -var jsonUint64Pow10 = [...]uint64{ - 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, -} - -const ( - // jsonUnreadAfterDecNum controls whether we unread after decoding a number. - // - // instead of unreading, just update d.tok (iff it's not a whitespace char) - // However, doing this means that we may HOLD onto some data which belongs to another stream. - // Thus, it is safest to unread the data when done. - // keep behind a constant flag for now. - jsonUnreadAfterDecNum = true - - // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: - // - If we see first character of null, false or true, - // do not validate subsequent characters. - // - e.g. if we see a n, assume null and skip next 3 characters, - // and do not validate they are ull. - // P.S. Do not expect a significant decoding boost from this. - jsonValidateSymbols = true - - // if jsonTruncateMantissa, truncate mantissa if trailing 0's. - // This is important because it could allow some floats to be decoded without - // deferring to strconv.ParseFloat. - jsonTruncateMantissa = true - - // if mantissa >= jsonNumUintCutoff before multiplying by 10, this is an overflow - jsonNumUintCutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base) - - // if mantissa >= jsonNumUintMaxVal, this is an overflow - jsonNumUintMaxVal = 1<= slen { - e.bs = e.bs[:slen] - } else { - e.bs = make([]byte, slen) - } - base64.StdEncoding.Encode(e.bs, v) - e.w.writen1('"') - e.w.writeb(e.bs) - e.w.writen1('"') - } else { - // e.EncodeString(c, string(v)) - e.quoteStr(stringView(v)) - } -} - -func (e *jsonEncDriver) EncodeAsis(v []byte) { - e.w.writeb(v) -} - -func (e *jsonEncDriver) quoteStr(s string) { - // adapted from std pkg encoding/json - const hex = "0123456789abcdef" - w := e.w - w.writen1('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - w.writestr(s[start:i]) - } - switch b { - case '\\', '"': - w.writen2('\\', b) - case '\n': - w.writen2('\\', 'n') - case '\r': - w.writen2('\\', 'r') - case '\b': - w.writen2('\\', 'b') - case '\f': - w.writen2('\\', 'f') - case '\t': - w.writen2('\\', 't') - default: - // encode all bytes < 0x20 (except \r, \n). - // also encode < > & to prevent security holes when served to some browsers. - w.writestr(`\u00`) - w.writen2(hex[b>>4], hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - w.writestr(s[start:i]) - } - w.writestr(`\ufffd`) - i += size - start = i - continue - } - // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR. - // Both technically valid JSON, but bomb on JSONP, so fix here. 
- if c == '\u2028' || c == '\u2029' { - if start < i { - w.writestr(s[start:i]) - } - w.writestr(`\u202`) - w.writen1(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - w.writestr(s[start:]) - } - w.writen1('"') -} - -//-------------------------------- - -type jsonNum struct { - // bytes []byte // may have [+-.eE0-9] - mantissa uint64 // where mantissa ends, and maybe dot begins. - exponent int16 // exponent value. - manOverflow bool - neg bool // started with -. No initial sign in the bytes above. - dot bool // has dot - explicitExponent bool // explicit exponent -} - -func (x *jsonNum) reset() { - x.manOverflow = false - x.neg = false - x.dot = false - x.explicitExponent = false - x.mantissa = 0 - x.exponent = 0 -} - -// uintExp is called only if exponent > 0. -func (x *jsonNum) uintExp() (n uint64, overflow bool) { - n = x.mantissa - e := x.exponent - if e >= int16(len(jsonUint64Pow10)) { - overflow = true - return - } - n *= jsonUint64Pow10[e] - if n < x.mantissa || n > jsonNumUintMaxVal { - overflow = true - return - } - return - // for i := int16(0); i < e; i++ { - // if n >= jsonNumUintCutoff { - // overflow = true - // return - // } - // n *= 10 - // } - // return -} - -// these constants are only used withn floatVal. -// They are brought out, so that floatVal can be inlined. -const ( - jsonUint64MantissaBits = 52 - jsonMaxExponent = int16(len(jsonFloat64Pow10)) - 1 -) - -func (x *jsonNum) floatVal() (f float64, parseUsingStrConv bool) { - // We do not want to lose precision. - // Consequently, we will delegate to strconv.ParseFloat if any of the following happen: - // - There are more digits than in math.MaxUint64: 18446744073709551615 (20 digits) - // We expect up to 99.... (19 digits) - // - The mantissa cannot fit into a 52 bits of uint64 - // - The exponent is beyond our scope ie beyong 22. - parseUsingStrConv = x.manOverflow || - x.exponent > jsonMaxExponent || - (x.exponent < 0 && -(x.exponent) > jsonMaxExponent) || - x.mantissa>>jsonUint64MantissaBits != 0 - - if parseUsingStrConv { - return - } - - // all good. so handle parse here. - f = float64(x.mantissa) - // fmt.Printf(".Float: uint64 value: %v, float: %v\n", m, f) - if x.neg { - f = -f - } - if x.exponent > 0 { - f *= jsonFloat64Pow10[x.exponent] - } else if x.exponent < 0 { - f /= jsonFloat64Pow10[-x.exponent] - } - return -} - -type jsonDecDriver struct { - noBuiltInTypes - d *Decoder - h *JsonHandle - r decReader - - c containerState - // tok is used to store the token read right after skipWhiteSpace. - tok uint8 - - bstr [8]byte // scratch used for string \UXXX parsing - b [64]byte // scratch, used for parsing strings or numbers - b2 [64]byte // scratch, used only for decodeBytes (after base64) - bs []byte // scratch. Initialized from b. Used for parsing strings or numbers. - - se setExtWrapper - - n jsonNum -} - -func jsonIsWS(b byte) bool { - return b == ' ' || b == '\t' || b == '\r' || b == '\n' -} - -// // This will skip whitespace characters and return the next byte to read. -// // The next byte determines what the value will be one of. -// func (d *jsonDecDriver) skipWhitespace() { -// // fast-path: do not enter loop. Just check first (in case no whitespace). 
-// b := d.r.readn1() -// if jsonIsWS(b) { -// r := d.r -// for b = r.readn1(); jsonIsWS(b); b = r.readn1() { -// } -// } -// d.tok = b -// } - -func (d *jsonDecDriver) uncacheRead() { - if d.tok != 0 { - d.r.unreadn1() - d.tok = 0 - } -} - -func (d *jsonDecDriver) sendContainerState(c containerState) { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - var xc uint8 // char expected - if c == containerMapKey { - if d.c != containerMapStart { - xc = ',' - } - } else if c == containerMapValue { - xc = ':' - } else if c == containerMapEnd { - xc = '}' - } else if c == containerArrayElem { - if d.c != containerArrayStart { - xc = ',' - } - } else if c == containerArrayEnd { - xc = ']' - } - if xc != 0 { - if d.tok != xc { - d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - } - d.c = c -} - -func (d *jsonDecDriver) CheckBreak() bool { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok == '}' || d.tok == ']' { - // d.tok = 0 // only checking, not consuming - return true - } - return false -} - -func (d *jsonDecDriver) readStrIdx(fromIdx, toIdx uint8) { - bs := d.r.readx(int(toIdx - fromIdx)) - d.tok = 0 - if jsonValidateSymbols { - if !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) { - d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:toIdx], bs) - return - } - } -} - -func (d *jsonDecDriver) TryDecodeAsNil() bool { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok == 'n' { - d.readStrIdx(10, 13) // ull - return true - } - return false -} - -func (d *jsonDecDriver) DecodeBool() bool { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok == 'f' { - d.readStrIdx(5, 9) // alse - return false - } - if d.tok == 't' { - d.readStrIdx(1, 4) // rue - return true - } - d.d.errorf("json: decode bool: got first char %c", d.tok) - return false // "unreachable" -} - -func (d *jsonDecDriver) ReadMapStart() int { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok != '{' { - d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok) - } - d.tok = 0 - d.c = containerMapStart - return -1 -} - -func (d *jsonDecDriver) ReadArrayStart() int { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok != '[' { - d.d.errorf("json: expect char '%c' but got char '%c'", '[', d.tok) - } - d.tok = 0 - d.c = containerArrayStart - return -1 -} - -func (d *jsonDecDriver) ContainerType() (vt valueType) { - // check container type by checking the first char - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if b := d.tok; b == '{' { - return valueTypeMap - } else if b == '[' { - return valueTypeArray - } else if b == 'n' { - return valueTypeNil - } else if b == '"' { - return valueTypeString - } - return valueTypeUnset - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - // return false // "unreachable" -} - -func (d *jsonDecDriver) decNum(storeBytes bool) { - // If it is has a . or an e|E, decode as a float; else decode as an int. 
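
// Illustrative sketch (not part of the patch): the fast-path decision the json
// number decoding above makes -- a float is computed directly from the parsed
// mantissa and exponent only when no precision can be lost (mantissa fits in
// 52 bits, |exponent| <= 22, since powers of ten up to 1e22 are exact in
// float64), otherwise parsing falls back to strconv.ParseFloat. fastFloat is a
// simplified stand-in, not the vendored floatVal.
package main

import (
	"fmt"
	"math"
	"strconv"
)

func fastFloat(mantissa uint64, exp int, raw string) (float64, error) {
	if mantissa>>52 != 0 || exp > 22 || exp < -22 {
		return strconv.ParseFloat(raw, 64) // slow path: exact parsing of the raw bytes
	}
	f := float64(mantissa)
	if exp > 0 {
		f *= math.Pow10(exp)
	} else {
		f /= math.Pow10(-exp)
	}
	return f, nil
}

func main() {
	f, _ := fastFloat(1234, -2, "12.34") // 1234 * 10^-2, handled on the fast path
	fmt.Println(f)                       // 12.34
}
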
- if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - b := d.tok - if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) { - d.d.errorf("json: decNum: got first char '%c'", b) - return - } - d.tok = 0 - - const cutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base) - const jsonNumUintMaxVal = 1<= jsonNumUintCutoff { - n.manOverflow = true - break - } - v := uint64(b - '0') - n.mantissa *= 10 - if v != 0 { - n1 := n.mantissa + v - if n1 < n.mantissa || n1 > jsonNumUintMaxVal { - n.manOverflow = true // n+v overflows - break - } - n.mantissa = n1 - } - case 6: - state = 7 - fallthrough - case 7: - if !(b == '0' && e == 0) { - e = e*10 + int16(b-'0') - } - default: - break LOOP - } - default: - break LOOP - } - if storeBytes { - d.bs = append(d.bs, b) - } - b, eof = r.readn1eof() - } - - if jsonTruncateMantissa && n.mantissa != 0 { - for n.mantissa%10 == 0 { - n.mantissa /= 10 - n.exponent++ - } - } - - if e != 0 { - if eNeg { - n.exponent -= e - } else { - n.exponent += e - } - } - - // d.n = n - - if !eof { - if jsonUnreadAfterDecNum { - r.unreadn1() - } else { - if !jsonIsWS(b) { - d.tok = b - } - } - } - // fmt.Printf("1: n: bytes: %s, neg: %v, dot: %v, exponent: %v, mantissaEndIndex: %v\n", - // n.bytes, n.neg, n.dot, n.exponent, n.mantissaEndIndex) - return -} - -func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) { - d.decNum(false) - n := &d.n - if n.manOverflow { - d.d.errorf("json: overflow integer after: %v", n.mantissa) - return - } - var u uint64 - if n.exponent == 0 { - u = n.mantissa - } else if n.exponent < 0 { - d.d.errorf("json: fractional integer") - return - } else if n.exponent > 0 { - var overflow bool - if u, overflow = n.uintExp(); overflow { - d.d.errorf("json: overflow integer") - return - } - } - i = int64(u) - if n.neg { - i = -i - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs) - return - } - // fmt.Printf("DecodeInt: %v\n", i) - return -} - -// floatVal MUST only be called after a decNum, as d.bs now contains the bytes of the number -func (d *jsonDecDriver) floatVal() (f float64) { - f, useStrConv := d.n.floatVal() - if useStrConv { - var err error - if f, err = strconv.ParseFloat(stringView(d.bs), 64); err != nil { - panic(fmt.Errorf("parse float: %s, %v", d.bs, err)) - } - if d.n.neg { - f = -f - } - } - return -} - -func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) { - d.decNum(false) - n := &d.n - if n.neg { - d.d.errorf("json: unsigned integer cannot be negative") - return - } - if n.manOverflow { - d.d.errorf("json: overflow integer after: %v", n.mantissa) - return - } - if n.exponent == 0 { - u = n.mantissa - } else if n.exponent < 0 { - d.d.errorf("json: fractional integer") - return - } else if n.exponent > 0 { - var overflow bool - if u, overflow = n.uintExp(); overflow { - d.d.errorf("json: overflow integer") - return - } - } - if chkOvf.Uint(u, bitsize) { - d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs) - return - } - // fmt.Printf("DecodeUint: %v\n", u) - return -} - -func (d *jsonDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - d.decNum(true) - f = d.floatVal() - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("json: overflow float32: %v, %s", f, d.bs) - return - } - return -} - -func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if ext == nil { - re := rv.(*RawExt) - re.Tag = xtag - d.d.decode(&re.Value) - } else { - var v interface{} - d.d.decode(&v) - 
ext.UpdateExt(rv, v) - } - return -} - -func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. - if !isstring && d.se.i != nil { - bsOut = bs - d.DecodeExt(&bsOut, 0, &d.se) - return - } - d.appendStringAsBytes() - // if isstring, then just return the bytes, even if it is using the scratch buffer. - // the bytes will be converted to a string as needed. - if isstring { - return d.bs - } - bs0 := d.bs - slen := base64.StdEncoding.DecodedLen(len(bs0)) - if slen <= cap(bs) { - bsOut = bs[:slen] - } else if zerocopy && slen <= cap(d.b2) { - bsOut = d.b2[:slen] - } else { - bsOut = make([]byte, slen) - } - slen2, err := base64.StdEncoding.Decode(bsOut, bs0) - if err != nil { - d.d.errorf("json: error decoding base64 binary '%s': %v", bs0, err) - return nil - } - if slen != slen2 { - bsOut = bsOut[:slen2] - } - return -} - -func (d *jsonDecDriver) DecodeString() (s string) { - d.appendStringAsBytes() - // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key - if d.c == containerMapKey { - return d.d.string(d.bs) - } - return string(d.bs) -} - -func (d *jsonDecDriver) appendStringAsBytes() { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok != '"' { - d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok) - } - d.tok = 0 - - v := d.bs[:0] - var c uint8 - r := d.r - for { - c = r.readn1() - if c == '"' { - break - } else if c == '\\' { - c = r.readn1() - switch c { - case '"', '\\', '/', '\'': - v = append(v, c) - case 'b': - v = append(v, '\b') - case 'f': - v = append(v, '\f') - case 'n': - v = append(v, '\n') - case 'r': - v = append(v, '\r') - case 't': - v = append(v, '\t') - case 'u': - rr := d.jsonU4(false) - // fmt.Printf("$$$$$$$$$: is surrogate: %v\n", utf16.IsSurrogate(rr)) - if utf16.IsSurrogate(rr) { - rr = utf16.DecodeRune(rr, d.jsonU4(true)) - } - w2 := utf8.EncodeRune(d.bstr[:], rr) - v = append(v, d.bstr[:w2]...) - default: - d.d.errorf("json: unsupported escaped value: %c", c) - } - } else { - v = append(v, c) - } - } - d.bs = v -} - -func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune { - r := d.r - if checkSlashU && !(r.readn1() == '\\' && r.readn1() == 'u') { - d.d.errorf(`json: unquoteStr: invalid unicode sequence. Expecting \u`) - return 0 - } - // u, _ := strconv.ParseUint(string(d.bstr[:4]), 16, 64) - var u uint32 - for i := 0; i < 4; i++ { - v := r.readn1() - if '0' <= v && v <= '9' { - v = v - '0' - } else if 'a' <= v && v <= 'z' { - v = v - 'a' + 10 - } else if 'A' <= v && v <= 'Z' { - v = v - 'A' + 10 - } else { - d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, v) - return 0 - } - u = u*16 + uint32(v) - } - return rune(u) -} - -func (d *jsonDecDriver) DecodeNaked() { - z := &d.d.n - // var decodeFurther bool - - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - switch d.tok { - case 'n': - d.readStrIdx(10, 13) // ull - z.v = valueTypeNil - case 'f': - d.readStrIdx(5, 9) // alse - z.v = valueTypeBool - z.b = false - case 't': - d.readStrIdx(1, 4) // rue - z.v = valueTypeBool - z.b = true - case '{': - z.v = valueTypeMap - // d.tok = 0 // don't consume. kInterfaceNaked will call ReadMapStart - // decodeFurther = true - case '[': - z.v = valueTypeArray - // d.tok = 0 // don't consume. 
kInterfaceNaked will call ReadArrayStart - // decodeFurther = true - case '"': - z.v = valueTypeString - z.s = d.DecodeString() - default: // number - d.decNum(true) - n := &d.n - // if the string had a any of [.eE], then decode as float. - switch { - case n.explicitExponent, n.dot, n.exponent < 0, n.manOverflow: - z.v = valueTypeFloat - z.f = d.floatVal() - case n.exponent == 0: - u := n.mantissa - switch { - case n.neg: - z.v = valueTypeInt - z.i = -int64(u) - case d.h.SignedInteger: - z.v = valueTypeInt - z.i = int64(u) - default: - z.v = valueTypeUint - z.u = u - } - default: - u, overflow := n.uintExp() - switch { - case overflow: - z.v = valueTypeFloat - z.f = d.floatVal() - case n.neg: - z.v = valueTypeInt - z.i = -int64(u) - case d.h.SignedInteger: - z.v = valueTypeInt - z.i = int64(u) - default: - z.v = valueTypeUint - z.u = u - } - } - // fmt.Printf("DecodeNaked: Number: %T, %v\n", v, v) - } - // if decodeFurther { - // d.s.sc.retryRead() - // } - return -} - -//---------------------- - -// JsonHandle is a handle for JSON encoding format. -// -// Json is comprehensively supported: -// - decodes numbers into interface{} as int, uint or float64 -// - configurable way to encode/decode []byte . -// by default, encodes and decodes []byte using base64 Std Encoding -// - UTF-8 support for encoding and decoding -// -// It has better performance than the json library in the standard library, -// by leveraging the performance improvements of the codec library and -// minimizing allocations. -// -// In addition, it doesn't read more bytes than necessary during a decode, which allows -// reading multiple values from a stream containing json and non-json content. -// For example, a user can read a json value, then a cbor value, then a msgpack value, -// all from the same stream in sequence. -type JsonHandle struct { - textEncodingType - BasicHandle - // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. - // If not configured, raw bytes are encoded to/from base64 text. - RawBytesExt InterfaceExt -} - -func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{i: ext}) -} - -func (h *JsonHandle) newEncDriver(e *Encoder) encDriver { - hd := jsonEncDriver{e: e, w: e.w, h: h} - hd.bs = hd.b[:0] - hd.se.i = h.RawBytesExt - return &hd -} - -func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { - // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} - hd := jsonDecDriver{d: d, r: d.r, h: h} - hd.bs = hd.b[:0] - hd.se.i = h.RawBytesExt - return &hd -} - -func (e *jsonEncDriver) reset() { - e.w = e.e.w -} - -func (d *jsonDecDriver) reset() { - d.r = d.d.r -} - -var jsonEncodeTerminate = []byte{' '} - -func (h *JsonHandle) rpcEncodeTerminate() []byte { - return jsonEncodeTerminate -} - -var _ decDriver = (*jsonDecDriver)(nil) -var _ encDriver = (*jsonEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go deleted file mode 100644 index 5eb4c9636..000000000 --- a/vendor/github.com/ugorji/go/codec/msgpack.go +++ /dev/null @@ -1,844 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -/* -MSGPACK - -Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. -We need to maintain compatibility with it and how it encodes integer values -without caring about the type. 
- -For compatibility with behaviour of msgpack-c reference implementation: - - Go intX (>0) and uintX - IS ENCODED AS - msgpack +ve fixnum, unsigned - - Go intX (<0) - IS ENCODED AS - msgpack -ve fixnum, signed - -*/ -package codec - -import ( - "fmt" - "io" - "math" - "net/rpc" - "reflect" -) - -const ( - mpPosFixNumMin byte = 0x00 - mpPosFixNumMax = 0x7f - mpFixMapMin = 0x80 - mpFixMapMax = 0x8f - mpFixArrayMin = 0x90 - mpFixArrayMax = 0x9f - mpFixStrMin = 0xa0 - mpFixStrMax = 0xbf - mpNil = 0xc0 - _ = 0xc1 - mpFalse = 0xc2 - mpTrue = 0xc3 - mpFloat = 0xca - mpDouble = 0xcb - mpUint8 = 0xcc - mpUint16 = 0xcd - mpUint32 = 0xce - mpUint64 = 0xcf - mpInt8 = 0xd0 - mpInt16 = 0xd1 - mpInt32 = 0xd2 - mpInt64 = 0xd3 - - // extensions below - mpBin8 = 0xc4 - mpBin16 = 0xc5 - mpBin32 = 0xc6 - mpExt8 = 0xc7 - mpExt16 = 0xc8 - mpExt32 = 0xc9 - mpFixExt1 = 0xd4 - mpFixExt2 = 0xd5 - mpFixExt4 = 0xd6 - mpFixExt8 = 0xd7 - mpFixExt16 = 0xd8 - - mpStr8 = 0xd9 // new - mpStr16 = 0xda - mpStr32 = 0xdb - - mpArray16 = 0xdc - mpArray32 = 0xdd - - mpMap16 = 0xde - mpMap32 = 0xdf - - mpNegFixNumMin = 0xe0 - mpNegFixNumMax = 0xff -) - -// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec -// that the backend RPC service takes multiple arguments, which have been arranged -// in sequence in the slice. -// -// The Codec then passes it AS-IS to the rpc service (without wrapping it in an -// array of 1 element). -type MsgpackSpecRpcMultiArgs []interface{} - -// A MsgpackContainer type specifies the different types of msgpackContainers. -type msgpackContainerType struct { - fixCutoff int - bFixMin, b8, b16, b32 byte - hasFixMin, has8, has8Always bool -} - -var ( - msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} - msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} - msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} - msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} -) - -//--------------------------------------------- - -type msgpackEncDriver struct { - noBuiltInTypes - encNoSeparator - e *Encoder - w encWriter - h *MsgpackHandle - x [8]byte -} - -func (e *msgpackEncDriver) EncodeNil() { - e.w.writen1(mpNil) -} - -func (e *msgpackEncDriver) EncodeInt(i int64) { - if i >= 0 { - e.EncodeUint(uint64(i)) - } else if i >= -32 { - e.w.writen1(byte(i)) - } else if i >= math.MinInt8 { - e.w.writen2(mpInt8, byte(i)) - } else if i >= math.MinInt16 { - e.w.writen1(mpInt16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) - } else if i >= math.MinInt32 { - e.w.writen1(mpInt32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) - } else { - e.w.writen1(mpInt64) - bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) - } -} - -func (e *msgpackEncDriver) EncodeUint(i uint64) { - if i <= math.MaxInt8 { - e.w.writen1(byte(i)) - } else if i <= math.MaxUint8 { - e.w.writen2(mpUint8, byte(i)) - } else if i <= math.MaxUint16 { - e.w.writen1(mpUint16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) - } else if i <= math.MaxUint32 { - e.w.writen1(mpUint32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) - } else { - e.w.writen1(mpUint64) - bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) - } -} - -func (e *msgpackEncDriver) EncodeBool(b bool) { - if b { - e.w.writen1(mpTrue) - } else { - e.w.writen1(mpFalse) - } -} - -func (e *msgpackEncDriver) EncodeFloat32(f float32) { - e.w.writen1(mpFloat) - 
bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) -} - -func (e *msgpackEncDriver) EncodeFloat64(f float64) { - e.w.writen1(mpDouble) - bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) -} - -func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { - bs := ext.WriteExt(v) - if bs == nil { - e.EncodeNil() - return - } - if e.h.WriteExt { - e.encodeExtPreamble(uint8(xtag), len(bs)) - e.w.writeb(bs) - } else { - e.EncodeStringBytes(c_RAW, bs) - } -} - -func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { - e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.w.writeb(re.Data) -} - -func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { - if l == 1 { - e.w.writen2(mpFixExt1, xtag) - } else if l == 2 { - e.w.writen2(mpFixExt2, xtag) - } else if l == 4 { - e.w.writen2(mpFixExt4, xtag) - } else if l == 8 { - e.w.writen2(mpFixExt8, xtag) - } else if l == 16 { - e.w.writen2(mpFixExt16, xtag) - } else if l < 256 { - e.w.writen2(mpExt8, byte(l)) - e.w.writen1(xtag) - } else if l < 65536 { - e.w.writen1(mpExt16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) - e.w.writen1(xtag) - } else { - e.w.writen1(mpExt32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) - e.w.writen1(xtag) - } -} - -func (e *msgpackEncDriver) EncodeArrayStart(length int) { - e.writeContainerLen(msgpackContainerList, length) -} - -func (e *msgpackEncDriver) EncodeMapStart(length int) { - e.writeContainerLen(msgpackContainerMap, length) -} - -func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(s)) - } else { - e.writeContainerLen(msgpackContainerStr, len(s)) - } - if len(s) > 0 { - e.w.writestr(s) - } -} - -func (e *msgpackEncDriver) EncodeSymbol(v string) { - e.EncodeString(c_UTF8, v) -} - -func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(bs)) - } else { - e.writeContainerLen(msgpackContainerStr, len(bs)) - } - if len(bs) > 0 { - e.w.writeb(bs) - } -} - -func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { - if ct.hasFixMin && l < ct.fixCutoff { - e.w.writen1(ct.bFixMin | byte(l)) - } else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) { - e.w.writen2(ct.b8, uint8(l)) - } else if l < 65536 { - e.w.writen1(ct.b16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) - } else { - e.w.writen1(ct.b32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) - } -} - -//--------------------------------------------- - -type msgpackDecDriver struct { - d *Decoder - r decReader // *Decoder decReader decReaderT - h *MsgpackHandle - b [scratchByteArrayLen]byte - bd byte - bdRead bool - br bool // bytes reader - noBuiltInTypes - noStreamingCodec - decNoSeparator -} - -// Note: This returns either a primitive (int, bool, etc) for non-containers, -// or a containerType, or a specific type denoting nil or extension. -// It is called when a nil interface{} is passed, leaving it up to the DecDriver -// to introspect the stream and decide how best to decode. -// It deciphers the value by looking at the stream first. 
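
// Illustrative sketch (not part of the patch): the integer layout the msgpack
// driver above follows per the msgpack spec -- small non-negative values become
// positive fixnums (0x00..0x7f), values in [-32,-1] become negative fixnums
// (0xe0..0xff), and larger values get a type byte plus a big-endian payload.
// encodeInt is a simplified stand-in: it skips the uint8/int8/int16/int32
// forms the real driver would choose for mid-range values.
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func encodeInt(i int64) []byte {
	switch {
	case i >= 0 && i <= math.MaxInt8:
		return []byte{byte(i)} // positive fixnum
	case i < 0 && i >= -32:
		return []byte{byte(i)} // negative fixnum
	case i >= 0 && i <= math.MaxUint16:
		b := []byte{0xcd, 0, 0} // uint16 marker + big-endian value
		binary.BigEndian.PutUint16(b[1:], uint16(i))
		return b
	default:
		b := []byte{0xd3, 0, 0, 0, 0, 0, 0, 0, 0} // int64 marker + big-endian value
		binary.BigEndian.PutUint64(b[1:], uint64(i))
		return b
	}
}

func main() {
	fmt.Printf("% x\n", encodeInt(7))   // 07
	fmt.Printf("% x\n", encodeInt(-3))  // fd
	fmt.Printf("% x\n", encodeInt(300)) // cd 01 2c
}
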
-func (d *msgpackDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - bd := d.bd - n := &d.d.n - var decodeFurther bool - - switch bd { - case mpNil: - n.v = valueTypeNil - d.bdRead = false - case mpFalse: - n.v = valueTypeBool - n.b = false - case mpTrue: - n.v = valueTypeBool - n.b = true - - case mpFloat: - n.v = valueTypeFloat - n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - case mpDouble: - n.v = valueTypeFloat - n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - - case mpUint8: - n.v = valueTypeUint - n.u = uint64(d.r.readn1()) - case mpUint16: - n.v = valueTypeUint - n.u = uint64(bigen.Uint16(d.r.readx(2))) - case mpUint32: - n.v = valueTypeUint - n.u = uint64(bigen.Uint32(d.r.readx(4))) - case mpUint64: - n.v = valueTypeUint - n.u = uint64(bigen.Uint64(d.r.readx(8))) - - case mpInt8: - n.v = valueTypeInt - n.i = int64(int8(d.r.readn1())) - case mpInt16: - n.v = valueTypeInt - n.i = int64(int16(bigen.Uint16(d.r.readx(2)))) - case mpInt32: - n.v = valueTypeInt - n.i = int64(int32(bigen.Uint32(d.r.readx(4)))) - case mpInt64: - n.v = valueTypeInt - n.i = int64(int64(bigen.Uint64(d.r.readx(8)))) - - default: - switch { - case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: - // positive fixnum (always signed) - n.v = valueTypeInt - n.i = int64(int8(bd)) - case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: - // negative fixnum - n.v = valueTypeInt - n.i = int64(int8(bd)) - case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - if d.h.RawToString { - n.v = valueTypeString - n.s = d.DecodeString() - } else { - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - } - case bd == mpBin8, bd == mpBin16, bd == mpBin32: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - n.v = valueTypeArray - decodeFurther = true - case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - n.v = valueTypeMap - decodeFurther = true - case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: - n.v = valueTypeExt - clen := d.readExtLen() - n.u = uint64(d.r.readn1()) - n.l = d.r.readx(clen) - default: - d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) - } - } - if !decodeFurther { - d.bdRead = false - } - if n.v == valueTypeUint && d.h.SignedInteger { - n.v = valueTypeInt - n.i = int64(n.v) - } - return -} - -// int can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) { - if !d.bdRead { - d.readNextBd() - } - switch d.bd { - case mpUint8: - i = int64(uint64(d.r.readn1())) - case mpUint16: - i = int64(uint64(bigen.Uint16(d.r.readx(2)))) - case mpUint32: - i = int64(uint64(bigen.Uint32(d.r.readx(4)))) - case mpUint64: - i = int64(bigen.Uint64(d.r.readx(8))) - case mpInt8: - i = int64(int8(d.r.readn1())) - case mpInt16: - i = int64(int16(bigen.Uint16(d.r.readx(2)))) - case mpInt32: - i = int64(int32(bigen.Uint32(d.r.readx(4)))) - case mpInt64: - i = int64(bigen.Uint64(d.r.readx(8))) - default: - switch { - case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: - i = int64(int8(d.bd)) - case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - i = int64(int8(d.bd)) - default: - d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) - return - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (i << (64 - bitsize)) >> 
(64 - bitsize); i != trunc { - d.d.errorf("Overflow int value: %v", i) - return - } - } - d.bdRead = false - return -} - -// uint can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - if !d.bdRead { - d.readNextBd() - } - switch d.bd { - case mpUint8: - ui = uint64(d.r.readn1()) - case mpUint16: - ui = uint64(bigen.Uint16(d.r.readx(2))) - case mpUint32: - ui = uint64(bigen.Uint32(d.r.readx(4))) - case mpUint64: - ui = bigen.Uint64(d.r.readx(8)) - case mpInt8: - if i := int64(int8(d.r.readn1())); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - case mpInt16: - if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - case mpInt32: - if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - case mpInt64: - if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - default: - switch { - case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: - ui = uint64(d.bd) - case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd)) - return - default: - d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) - return - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { - d.d.errorf("Overflow uint value: %v", ui) - return - } - } - d.bdRead = false - return -} - -// float can either be decoded from msgpack type: float, double or intX -func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == mpFloat { - f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - } else if d.bd == mpDouble { - f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - } else { - f = float64(d.DecodeInt(0)) - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("msgpack: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool, fixnum 0 or 1. -func (d *msgpackDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == mpFalse || d.bd == 0 { - // b = false - } else if d.bd == mpTrue || d.bd == 1 { - b = true - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - var clen int - // ignore isstring. Expect that the bytes may be found from msgpackContainerStr or msgpackContainerBin - if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { - clen = d.readContainerLen(msgpackContainerBin) - } else { - clen = d.readContainerLen(msgpackContainerStr) - } - // println("DecodeBytes: clen: ", clen) - d.bdRead = false - // bytes may be nil, so handle it. if nil, clen=-1. 
- if clen < 0 { - return nil - } - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, bs) -} - -func (d *msgpackDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.b[:], true, true)) -} - -func (d *msgpackDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.bdRead = true -} - -func (d *msgpackDecDriver) ContainerType() (vt valueType) { - bd := d.bd - if bd == mpNil { - return valueTypeNil - } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 || - (!d.h.RawToString && - (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) { - return valueTypeBytes - } else if d.h.RawToString && - (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) { - return valueTypeString - } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { - return valueTypeArray - } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { - return valueTypeMap - } else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - } - return valueTypeUnset -} - -func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == mpNil { - d.bdRead = false - v = true - } - return -} - -func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { - bd := d.bd - if bd == mpNil { - clen = -1 // to represent nil - } else if bd == ct.b8 { - clen = int(d.r.readn1()) - } else if bd == ct.b16 { - clen = int(bigen.Uint16(d.r.readx(2))) - } else if bd == ct.b32 { - clen = int(bigen.Uint32(d.r.readx(4))) - } else if (ct.bFixMin & bd) == ct.bFixMin { - clen = int(ct.bFixMin ^ bd) - } else { - d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) - return - } - d.bdRead = false - return -} - -func (d *msgpackDecDriver) ReadMapStart() int { - return d.readContainerLen(msgpackContainerMap) -} - -func (d *msgpackDecDriver) ReadArrayStart() int { - return d.readContainerLen(msgpackContainerList) -} - -func (d *msgpackDecDriver) readExtLen() (clen int) { - switch d.bd { - case mpNil: - clen = -1 // to represent nil - case mpFixExt1: - clen = 1 - case mpFixExt2: - clen = 2 - case mpFixExt4: - clen = 4 - case mpFixExt8: - clen = 8 - case mpFixExt16: - clen = 16 - case mpExt8: - clen = int(d.r.readn1()) - case mpExt16: - clen = int(bigen.Uint16(d.r.readx(2))) - case mpExt32: - clen = int(bigen.Uint32(d.r.readx(4))) - default: - d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) - return - } - return -} - -func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if xtag > 0xff { - d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) - return - } - realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag = uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) - } else { - ext.ReadExt(rv, xbs) - } - return -} - -func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - if !d.bdRead { - d.readNextBd() - } - xbd := d.bd - if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { - xbs = d.DecodeBytes(nil, false, true) - } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || - (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { - xbs = d.DecodeBytes(nil, true, true) - } else { - clen := d.readExtLen() - xtag = d.r.readn1() - if verifyTag && xtag 
!= tag { - d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - return - } - xbs = d.r.readx(clen) - } - d.bdRead = false - return -} - -//-------------------------------------------------- - -//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. -type MsgpackHandle struct { - BasicHandle - - // RawToString controls how raw bytes are decoded into a nil interface{}. - RawToString bool - - // WriteExt flag supports encoding configured extensions with extension tags. - // It also controls whether other elements of the new spec are encoded (ie Str8). - // - // With WriteExt=false, configured extensions are serialized as raw bytes - // and Str8 is not encoded. - // - // A stream can still be decoded into a typed value, provided an appropriate value - // is provided, but the type cannot be inferred from the stream. If no appropriate - // type is provided (e.g. decoding into a nil interface{}), you get back - // a []byte or string based on the setting of RawToString. - WriteExt bool - binaryEncodingType -} - -func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{b: ext}) -} - -func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { - return &msgpackEncDriver{e: e, w: e.w, h: h} -} - -func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { - return &msgpackDecDriver{d: d, r: d.r, h: h, br: d.bytes} -} - -func (e *msgpackEncDriver) reset() { - e.w = e.e.w -} - -func (d *msgpackDecDriver) reset() { - d.r = d.d.r -} - -//-------------------------------------------------- - -type msgpackSpecRpcCodec struct { - rpcCodec -} - -// /////////////// Spec RPC Codec /////////////////// -func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - // WriteRequest can write to both a Go service, and other services that do - // not abide by the 1 argument rule of a Go service. - // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs - var bodyArr []interface{} - if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { - bodyArr = ([]interface{})(m) - } else { - bodyArr = []interface{}{body} - } - r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} - return c.write(r2, nil, false, true) -} - -func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - var moe interface{} - if r.Error != "" { - moe = r.Error - } - if moe != nil && body != nil { - body = nil - } - r2 := []interface{}{1, uint32(r.Seq), moe, body} - return c.write(r2, nil, false, true) -} - -func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { - return c.parseCustomHeader(1, &r.Seq, &r.Error) -} - -func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { - return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) -} - -func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { - if body == nil { // read and discard - return c.read(nil) - } - bodyArr := []interface{}{body} - return c.read(&bodyArr) -} - -func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { - - if c.isClosed() { - return io.EOF - } - - // We read the response header by hand - // so that the body can be decoded on its own from the stream at a later time. - - const fia byte = 0x94 //four item array descriptor value - // Not sure why the panic of EOF is swallowed above. 
- // if bs1 := c.dec.r.readn1(); bs1 != fia { - // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1) - // return - // } - var b byte - b, err = c.br.ReadByte() - if err != nil { - return - } - if b != fia { - err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) - return - } - - if err = c.read(&b); err != nil { - return - } - if b != expectTypeByte { - err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) - return - } - if err = c.read(msgid); err != nil { - return - } - if err = c.read(methodOrError); err != nil { - return - } - return -} - -//-------------------------------------------------- - -// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol -// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md -type msgpackSpecRpc struct{} - -// MsgpackSpecRpc implements Rpc using the communication protocol defined in -// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. -var MsgpackSpecRpc msgpackSpecRpc - -func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} -} - -func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} -} - -var _ decDriver = (*msgpackDecDriver)(nil) -var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/noop.go b/vendor/github.com/ugorji/go/codec/noop.go deleted file mode 100644 index cfee3d084..000000000 --- a/vendor/github.com/ugorji/go/codec/noop.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "math/rand" - "time" -) - -// NoopHandle returns a no-op handle. It basically does nothing. -// It is only useful for benchmarking, as it gives an idea of the -// overhead from the codec framework. -// -// LIBRARY USERS: *** DO NOT USE *** -func NoopHandle(slen int) *noopHandle { - h := noopHandle{} - h.rand = rand.New(rand.NewSource(time.Now().UnixNano())) - h.B = make([][]byte, slen) - h.S = make([]string, slen) - for i := 0; i < len(h.S); i++ { - b := make([]byte, i+1) - for j := 0; j < len(b); j++ { - b[j] = 'a' + byte(i) - } - h.B[i] = b - h.S[i] = string(b) - } - return &h -} - -// noopHandle does nothing. -// It is used to simulate the overhead of the codec framework. -type noopHandle struct { - BasicHandle - binaryEncodingType - noopDrv // noopDrv is unexported here, so we can get a copy of it when needed. -} - -type noopDrv struct { - d *Decoder - e *Encoder - i int - S []string - B [][]byte - mks []bool // stack. if map (true), else if array (false) - mk bool // top of stack. what container are we on? map or array? - ct valueType // last response for IsContainerType. 
- cb int // counter for ContainerType - rand *rand.Rand -} - -func (h *noopDrv) r(v int) int { return h.rand.Intn(v) } -func (h *noopDrv) m(v int) int { h.i++; return h.i % v } - -func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h } -func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h } - -func (h *noopDrv) reset() {} -func (h *noopDrv) uncacheRead() {} - -// --- encDriver - -// stack functions (for map and array) -func (h *noopDrv) start(b bool) { - // println("start", len(h.mks)+1) - h.mks = append(h.mks, b) - h.mk = b -} -func (h *noopDrv) end() { - // println("end: ", len(h.mks)-1) - h.mks = h.mks[:len(h.mks)-1] - if len(h.mks) > 0 { - h.mk = h.mks[len(h.mks)-1] - } else { - h.mk = false - } -} - -func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {} -func (h *noopDrv) EncodeNil() {} -func (h *noopDrv) EncodeInt(i int64) {} -func (h *noopDrv) EncodeUint(i uint64) {} -func (h *noopDrv) EncodeBool(b bool) {} -func (h *noopDrv) EncodeFloat32(f float32) {} -func (h *noopDrv) EncodeFloat64(f float64) {} -func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {} -func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) } -func (h *noopDrv) EncodeMapStart(length int) { h.start(false) } -func (h *noopDrv) EncodeEnd() { h.end() } - -func (h *noopDrv) EncodeString(c charEncoding, v string) {} -func (h *noopDrv) EncodeSymbol(v string) {} -func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {} - -func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {} - -// ---- decDriver -func (h *noopDrv) initReadNext() {} -func (h *noopDrv) CheckBreak() bool { return false } -func (h *noopDrv) IsBuiltinType(rt uintptr) bool { return false } -func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{}) {} -func (h *noopDrv) DecodeInt(bitsize uint8) (i int64) { return int64(h.m(15)) } -func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64) { return uint64(h.m(35)) } -func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) } -func (h *noopDrv) DecodeBool() (b bool) { return h.m(2) == 0 } -func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8)] } - -// func (h *noopDrv) DecodeStringAsBytes(bs []byte) []byte { return h.DecodeBytes(bs) } - -func (h *noopDrv) DecodeBytes(bs []byte, isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] } - -func (h *noopDrv) ReadEnd() { h.end() } - -// toggle map/slice -func (h *noopDrv) ReadMapStart() int { h.start(true); return h.m(10) } -func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) } - -func (h *noopDrv) ContainerType() (vt valueType) { - // return h.m(2) == 0 - // handle kStruct, which will bomb is it calls this and doesn't get back a map or array. - // consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2 - // for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs) - // however, every 10th time it is called, we just return something else. 
- var vals = [...]valueType{valueTypeArray, valueTypeMap} - // ------------ TAKE ------------ - // if h.cb%2 == 0 { - // if h.ct == valueTypeMap || h.ct == valueTypeArray { - // } else { - // h.ct = vals[h.m(2)] - // } - // } else if h.cb%5 == 0 { - // h.ct = valueType(h.m(8)) - // } else { - // h.ct = vals[h.m(2)] - // } - // ------------ TAKE ------------ - // if h.cb%16 == 0 { - // h.ct = valueType(h.cb % 8) - // } else { - // h.ct = vals[h.cb%2] - // } - h.ct = vals[h.cb%2] - h.cb++ - return h.ct - - // if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes { - // return h.ct - // } - // return valueTypeUnset - // TODO: may need to tweak this so it works. - // if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap { - // h.cb = !h.cb - // h.ct = vt - // return h.cb - // } - // // go in a loop and check it. - // h.ct = vt - // h.cb = h.m(7) == 0 - // return h.cb -} -func (h *noopDrv) TryDecodeAsNil() bool { - if h.mk { - return false - } else { - return h.m(8) == 0 - } -} -func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 { - return 0 -} - -func (h *noopDrv) DecodeNaked() { - // use h.r (random) not h.m() because h.m() could cause the same value to be given. - var sk int - if h.mk { - // if mapkey, do not support values of nil OR bytes, array, map or rawext - sk = h.r(7) + 1 - } else { - sk = h.r(12) - } - n := &h.d.n - switch sk { - case 0: - n.v = valueTypeNil - case 1: - n.v, n.b = valueTypeBool, false - case 2: - n.v, n.b = valueTypeBool, true - case 3: - n.v, n.i = valueTypeInt, h.DecodeInt(64) - case 4: - n.v, n.u = valueTypeUint, h.DecodeUint(64) - case 5: - n.v, n.f = valueTypeFloat, h.DecodeFloat(true) - case 6: - n.v, n.f = valueTypeFloat, h.DecodeFloat(false) - case 7: - n.v, n.s = valueTypeString, h.DecodeString() - case 8: - n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))] - case 9: - n.v = valueTypeArray - case 10: - n.v = valueTypeMap - default: - n.v = valueTypeExt - n.u = h.DecodeUint(64) - n.l = h.B[h.m(len(h.B))] - } - h.ct = n.v - return -} diff --git a/vendor/github.com/ugorji/go/codec/prebuild.go b/vendor/github.com/ugorji/go/codec/prebuild.go deleted file mode 100644 index 2353263e8..000000000 --- a/vendor/github.com/ugorji/go/codec/prebuild.go +++ /dev/null @@ -1,3 +0,0 @@ -package codec - -//go:generate bash prebuild.sh diff --git a/vendor/github.com/ugorji/go/codec/rpc.go b/vendor/github.com/ugorji/go/codec/rpc.go deleted file mode 100644 index dad53d0c6..000000000 --- a/vendor/github.com/ugorji/go/codec/rpc.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "bufio" - "io" - "net/rpc" - "sync" -) - -// rpcEncodeTerminator allows a handler specify a []byte terminator to send after each Encode. -// -// Some codecs like json need to put a space after each encoded value, to serve as a -// delimiter for things like numbers (else json codec will continue reading till EOF). -type rpcEncodeTerminator interface { - rpcEncodeTerminate() []byte -} - -// Rpc provides a rpc Server or Client Codec for rpc communication. -type Rpc interface { - ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec - ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec -} - -// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer -// used by the rpc connection. 
It accomodates use-cases where the connection -// should be used by rpc and non-rpc functions, e.g. streaming a file after -// sending an rpc response. -type RpcCodecBuffered interface { - BufferedReader() *bufio.Reader - BufferedWriter() *bufio.Writer -} - -// ------------------------------------- - -// rpcCodec defines the struct members and common methods. -type rpcCodec struct { - rwc io.ReadWriteCloser - dec *Decoder - enc *Encoder - bw *bufio.Writer - br *bufio.Reader - mu sync.Mutex - h Handle - - cls bool - clsmu sync.RWMutex -} - -func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { - bw := bufio.NewWriter(conn) - br := bufio.NewReader(conn) - return rpcCodec{ - rwc: conn, - bw: bw, - br: br, - enc: NewEncoder(bw, h), - dec: NewDecoder(br, h), - h: h, - } -} - -func (c *rpcCodec) BufferedReader() *bufio.Reader { - return c.br -} - -func (c *rpcCodec) BufferedWriter() *bufio.Writer { - return c.bw -} - -func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) { - if c.isClosed() { - return io.EOF - } - if err = c.enc.Encode(obj1); err != nil { - return - } - t, tOk := c.h.(rpcEncodeTerminator) - if tOk { - c.bw.Write(t.rpcEncodeTerminate()) - } - if writeObj2 { - if err = c.enc.Encode(obj2); err != nil { - return - } - if tOk { - c.bw.Write(t.rpcEncodeTerminate()) - } - } - if doFlush { - return c.bw.Flush() - } - return -} - -func (c *rpcCodec) read(obj interface{}) (err error) { - if c.isClosed() { - return io.EOF - } - //If nil is passed in, we should still attempt to read content to nowhere. - if obj == nil { - var obj2 interface{} - return c.dec.Decode(&obj2) - } - return c.dec.Decode(obj) -} - -func (c *rpcCodec) isClosed() bool { - c.clsmu.RLock() - x := c.cls - c.clsmu.RUnlock() - return x -} - -func (c *rpcCodec) Close() error { - if c.isClosed() { - return io.EOF - } - c.clsmu.Lock() - c.cls = true - c.clsmu.Unlock() - return c.rwc.Close() -} - -func (c *rpcCodec) ReadResponseBody(body interface{}) error { - return c.read(body) -} - -// ------------------------------------- - -type goRpcCodec struct { - rpcCodec -} - -func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - // Must protect for concurrent access as per API - c.mu.Lock() - defer c.mu.Unlock() - return c.write(r, body, true, true) -} - -func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - c.mu.Lock() - defer c.mu.Unlock() - return c.write(r, body, true, true) -} - -func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { - return c.read(r) -} - -func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error { - return c.read(r) -} - -func (c *goRpcCodec) ReadRequestBody(body interface{}) error { - return c.read(body) -} - -// ------------------------------------- - -// goRpc is the implementation of Rpc that uses the communication protocol -// as defined in net/rpc package. -type goRpc struct{} - -// GoRpc implements Rpc using the communication protocol defined in net/rpc package. -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. 
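For orientation, the GoRpc and MsgpackSpecRpc values removed in this hunk are the package's entry points for plugging a codec Handle into the standard net/rpc machinery. A minimal client-side sketch of that pattern follows; the address and service method are placeholders, not anything defined by this patch:

package main

import (
	"log"
	"net"
	"net/rpc"

	"github.com/ugorji/go/codec"
)

func main() {
	// Dial a hypothetical msgpack-speaking RPC server; the address is illustrative only.
	conn, err := net.Dial("tcp", "localhost:5555")
	if err != nil {
		log.Fatal(err)
	}
	h := &codec.MsgpackHandle{}

	// GoRpc wraps the connection in Encoder/Decoder-backed net/rpc codecs.
	client := rpc.NewClientWithCodec(codec.GoRpc.ClientCodec(conn, h))
	defer client.Close()

	var reply string
	// "Echo.Echo123" is a placeholder service method name.
	if err := client.Call("Echo.Echo123", []string{"A1", "B2", "C3"}, &reply); err != nil {
		log.Fatal(err)
	}
	log.Println(reply)
}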
-var GoRpc goRpc - -func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { - return &goRpcCodec{newRPCCodec(conn, h)} -} - -func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { - return &goRpcCodec{newRPCCodec(conn, h)} -} - -var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/vendor/github.com/ugorji/go/codec/simple.go b/vendor/github.com/ugorji/go/codec/simple.go deleted file mode 100644 index c15049650..000000000 --- a/vendor/github.com/ugorji/go/codec/simple.go +++ /dev/null @@ -1,518 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "math" - "reflect" -) - -const ( - _ uint8 = iota - simpleVdNil = 1 - simpleVdFalse = 2 - simpleVdTrue = 3 - simpleVdFloat32 = 4 - simpleVdFloat64 = 5 - - // each lasts for 4 (ie n, n+1, n+2, n+3) - simpleVdPosInt = 8 - simpleVdNegInt = 12 - - // containers: each lasts for 4 (ie n, n+1, n+2, ... n+7) - simpleVdString = 216 - simpleVdByteArray = 224 - simpleVdArray = 232 - simpleVdMap = 240 - simpleVdExt = 248 -) - -type simpleEncDriver struct { - noBuiltInTypes - encNoSeparator - e *Encoder - h *SimpleHandle - w encWriter - b [8]byte -} - -func (e *simpleEncDriver) EncodeNil() { - e.w.writen1(simpleVdNil) -} - -func (e *simpleEncDriver) EncodeBool(b bool) { - if b { - e.w.writen1(simpleVdTrue) - } else { - e.w.writen1(simpleVdFalse) - } -} - -func (e *simpleEncDriver) EncodeFloat32(f float32) { - e.w.writen1(simpleVdFloat32) - bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) -} - -func (e *simpleEncDriver) EncodeFloat64(f float64) { - e.w.writen1(simpleVdFloat64) - bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f)) -} - -func (e *simpleEncDriver) EncodeInt(v int64) { - if v < 0 { - e.encUint(uint64(-v), simpleVdNegInt) - } else { - e.encUint(uint64(v), simpleVdPosInt) - } -} - -func (e *simpleEncDriver) EncodeUint(v uint64) { - e.encUint(v, simpleVdPosInt) -} - -func (e *simpleEncDriver) encUint(v uint64, bd uint8) { - if v <= math.MaxUint8 { - e.w.writen2(bd, uint8(v)) - } else if v <= math.MaxUint16 { - e.w.writen1(bd + 1) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { - e.w.writen1(bd + 2) - bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) - } else { // if v <= math.MaxUint64 { - e.w.writen1(bd + 3) - bigenHelper{e.b[:8], e.w}.writeUint64(v) - } -} - -func (e *simpleEncDriver) encLen(bd byte, length int) { - if length == 0 { - e.w.writen1(bd) - } else if length <= math.MaxUint8 { - e.w.writen1(bd + 1) - e.w.writen1(uint8(length)) - } else if length <= math.MaxUint16 { - e.w.writen1(bd + 2) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length)) - } else if int64(length) <= math.MaxUint32 { - e.w.writen1(bd + 3) - bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length)) - } else { - e.w.writen1(bd + 4) - bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length)) - } -} - -func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { - bs := ext.WriteExt(rv) - if bs == nil { - e.EncodeNil() - return - } - e.encodeExtPreamble(uint8(xtag), len(bs)) - e.w.writeb(bs) -} - -func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { - e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.w.writeb(re.Data) -} - -func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { - e.encLen(simpleVdExt, length) - e.w.writen1(xtag) -} - -func (e 
*simpleEncDriver) EncodeArrayStart(length int) { - e.encLen(simpleVdArray, length) -} - -func (e *simpleEncDriver) EncodeMapStart(length int) { - e.encLen(simpleVdMap, length) -} - -func (e *simpleEncDriver) EncodeString(c charEncoding, v string) { - e.encLen(simpleVdString, len(v)) - e.w.writestr(v) -} - -func (e *simpleEncDriver) EncodeSymbol(v string) { - e.EncodeString(c_UTF8, v) -} - -func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - e.encLen(simpleVdByteArray, len(v)) - e.w.writeb(v) -} - -//------------------------------------ - -type simpleDecDriver struct { - d *Decoder - h *SimpleHandle - r decReader - bdRead bool - bd byte - br bool // bytes reader - noBuiltInTypes - noStreamingCodec - decNoSeparator - b [scratchByteArrayLen]byte -} - -func (d *simpleDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.bdRead = true -} - -func (d *simpleDecDriver) ContainerType() (vt valueType) { - if d.bd == simpleVdNil { - return valueTypeNil - } else if d.bd == simpleVdByteArray || d.bd == simpleVdByteArray+1 || - d.bd == simpleVdByteArray+2 || d.bd == simpleVdByteArray+3 || d.bd == simpleVdByteArray+4 { - return valueTypeBytes - } else if d.bd == simpleVdString || d.bd == simpleVdString+1 || - d.bd == simpleVdString+2 || d.bd == simpleVdString+3 || d.bd == simpleVdString+4 { - return valueTypeString - } else if d.bd == simpleVdArray || d.bd == simpleVdArray+1 || - d.bd == simpleVdArray+2 || d.bd == simpleVdArray+3 || d.bd == simpleVdArray+4 { - return valueTypeArray - } else if d.bd == simpleVdMap || d.bd == simpleVdMap+1 || - d.bd == simpleVdMap+2 || d.bd == simpleVdMap+3 || d.bd == simpleVdMap+4 { - return valueTypeMap - } else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - } - return valueTypeUnset -} - -func (d *simpleDecDriver) TryDecodeAsNil() bool { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdNil { - d.bdRead = false - return true - } - return false -} - -func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) { - if !d.bdRead { - d.readNextBd() - } - switch d.bd { - case simpleVdPosInt: - ui = uint64(d.r.readn1()) - case simpleVdPosInt + 1: - ui = uint64(bigen.Uint16(d.r.readx(2))) - case simpleVdPosInt + 2: - ui = uint64(bigen.Uint32(d.r.readx(4))) - case simpleVdPosInt + 3: - ui = uint64(bigen.Uint64(d.r.readx(8))) - case simpleVdNegInt: - ui = uint64(d.r.readn1()) - neg = true - case simpleVdNegInt + 1: - ui = uint64(bigen.Uint16(d.r.readx(2))) - neg = true - case simpleVdNegInt + 2: - ui = uint64(bigen.Uint32(d.r.readx(4))) - neg = true - case simpleVdNegInt + 3: - ui = uint64(bigen.Uint64(d.r.readx(8))) - neg = true - default: - d.d.errorf("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) - return - } - // don't do this check, because callers may only want the unsigned value. 
- // if ui > math.MaxInt64 { - // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui) - // return - // } - return -} - -func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) { - ui, neg := d.decCheckInteger() - i, overflow := chkOvf.SignedInt(ui) - if overflow { - d.d.errorf("simple: overflow converting %v to signed integer", ui) - return - } - if neg { - i = -i - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("simple: overflow integer: %v", i) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - ui, neg := d.decCheckInteger() - if neg { - d.d.errorf("Assigning negative signed value to unsigned type") - return - } - if chkOvf.Uint(ui, bitsize) { - d.d.errorf("simple: overflow integer: %v", ui) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdFloat32 { - f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - } else if d.bd == simpleVdFloat64 { - f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - } else { - if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { - f = float64(d.DecodeInt(64)) - } else { - d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd) - return - } - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("msgpack: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). -func (d *simpleDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdTrue { - b = true - } else if d.bd == simpleVdFalse { - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) ReadMapStart() (length int) { - d.bdRead = false - return d.decLen() -} - -func (d *simpleDecDriver) ReadArrayStart() (length int) { - d.bdRead = false - return d.decLen() -} - -func (d *simpleDecDriver) decLen() int { - switch d.bd % 8 { - case 0: - return 0 - case 1: - return int(d.r.readn1()) - case 2: - return int(bigen.Uint16(d.r.readx(2))) - case 3: - ui := uint64(bigen.Uint32(d.r.readx(4))) - if chkOvf.Uint(ui, intBitsize) { - d.d.errorf("simple: overflow integer: %v", ui) - return 0 - } - return int(ui) - case 4: - ui := bigen.Uint64(d.r.readx(8)) - if chkOvf.Uint(ui, intBitsize) { - d.d.errorf("simple: overflow integer: %v", ui) - return 0 - } - return int(ui) - } - d.d.errorf("decLen: Cannot read length: bd%8 must be in range 0..4. 
Got: %d", d.bd%8) - return -1 -} - -func (d *simpleDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.b[:], true, true)) -} - -func (d *simpleDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdNil { - d.bdRead = false - return - } - clen := d.decLen() - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, bs) -} - -func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if xtag > 0xff { - d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) - return - } - realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag = uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) - } else { - ext.ReadExt(rv, xbs) - } - return -} - -func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - if !d.bdRead { - d.readNextBd() - } - switch d.bd { - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - l := d.decLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - return - } - xbs = d.r.readx(l) - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - xbs = d.DecodeBytes(nil, false, true) - default: - d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). Got: 0x%x", d.bd) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - - n := &d.d.n - var decodeFurther bool - - switch d.bd { - case simpleVdNil: - n.v = valueTypeNil - case simpleVdFalse: - n.v = valueTypeBool - n.b = false - case simpleVdTrue: - n.v = valueTypeBool - n.b = true - case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: - if d.h.SignedInteger { - n.v = valueTypeInt - n.i = d.DecodeInt(64) - } else { - n.v = valueTypeUint - n.u = d.DecodeUint(64) - } - case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: - n.v = valueTypeInt - n.i = d.DecodeInt(64) - case simpleVdFloat32: - n.v = valueTypeFloat - n.f = d.DecodeFloat(true) - case simpleVdFloat64: - n.v = valueTypeFloat - n.f = d.DecodeFloat(false) - case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - n.v = valueTypeString - n.s = d.DecodeString() - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - n.v = valueTypeExt - l := d.decLen() - n.u = uint64(d.r.readn1()) - n.l = d.r.readx(l) - case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: - n.v = valueTypeArray - decodeFurther = true - case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - n.v = valueTypeMap - decodeFurther = true - default: - d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) - } - - if !decodeFurther { - d.bdRead = false - } - return -} - -//------------------------------------ - -// SimpleHandle is a Handle for a very simple encoding format. 
-// -// simple is a simplistic codec similar to binc, but not as compact. -// - Encoding of a value is always preceeded by the descriptor byte (bd) -// - True, false, nil are encoded fully in 1 byte (the descriptor) -// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). -// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. -// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) -// - Lenght of containers (strings, bytes, array, map, extensions) -// are encoded in 0, 1, 2, 4 or 8 bytes. -// Zero-length containers have no length encoded. -// For others, the number of bytes is given by pow(2, bd%3) -// - maps are encoded as [bd] [length] [[key][value]]... -// - arrays are encoded as [bd] [length] [value]... -// - extensions are encoded as [bd] [length] [tag] [byte]... -// - strings/bytearrays are encoded as [bd] [length] [byte]... -// -// The full spec will be published soon. -type SimpleHandle struct { - BasicHandle - binaryEncodingType -} - -func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{b: ext}) -} - -func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver { - return &simpleEncDriver{e: e, w: e.w, h: h} -} - -func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver { - return &simpleDecDriver{d: d, r: d.r, h: h, br: d.bytes} -} - -func (e *simpleEncDriver) reset() { - e.w = e.e.w -} - -func (d *simpleDecDriver) reset() { - d.r = d.d.r -} - -var _ decDriver = (*simpleDecDriver)(nil) -var _ encDriver = (*simpleEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/test.py b/vendor/github.com/ugorji/go/codec/test.py deleted file mode 100755 index dfe3b0c9a..000000000 --- a/vendor/github.com/ugorji/go/codec/test.py +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env python - -# This will create golden files in a directory passed to it. -# A Test calls this internally to create the golden files -# So it can process them (so we don't have to checkin the files). 
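To make the SimpleHandle descriptor-byte layout above concrete, here is a small sketch that encodes two values with SimpleHandle and dumps the raw bytes. The expected byte values follow from the encUint/encLen logic shown above and are illustrative, not a normative spec:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	h := &codec.SimpleHandle{}

	// uint64(300): larger than MaxUint8 but within MaxUint16, so encUint writes
	// simpleVdPosInt+1 (0x09) followed by two big-endian bytes 0x01 0x2c.
	var b1 []byte
	if err := codec.NewEncoderBytes(&b1, h).Encode(uint64(300)); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b1) // expected: 09 01 2c

	// "hi": encLen writes simpleVdString+1 (0xd9), the length 0x02, then the bytes.
	var b2 []byte
	if err := codec.NewEncoderBytes(&b2, h).Encode("hi"); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b2) // expected: d9 02 68 69
}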
- -# Ensure msgpack-python and cbor are installed first, using: -# sudo apt-get install python-dev -# sudo apt-get install python-pip -# pip install --user msgpack-python msgpack-rpc-python cbor - -import cbor, msgpack, msgpackrpc, sys, os, threading - -def get_test_data_list(): - # get list with all primitive types, and a combo type - l0 = [ - -8, - -1616, - -32323232, - -6464646464646464, - 192, - 1616, - 32323232, - 6464646464646464, - 192, - -3232.0, - -6464646464.0, - 3232.0, - 6464646464.0, - False, - True, - None, - u"someday", - u"", - u"bytestring", - 1328176922000002000, - -2206187877999998000, - 270, - -2013855847999995777, - #-6795364578871345152, - ] - l1 = [ - { "true": True, - "false": False }, - { "true": "True", - "false": False, - "uint16(1616)": 1616 }, - { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ], - "int32":32323232, "bool": True, - "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", - "SHORT STRING": "1234567890" }, - { True: "true", 8: False, "false": 0 } - ] - - l = [] - l.extend(l0) - l.append(l0) - l.extend(l1) - return l - -def build_test_data(destdir): - l = get_test_data_list() - for i in range(len(l)): - # packer = msgpack.Packer() - serialized = msgpack.dumps(l[i]) - f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb') - f.write(serialized) - f.close() - serialized = cbor.dumps(l[i]) - f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb') - f.write(serialized) - f.close() - -def doRpcServer(port, stopTimeSec): - class EchoHandler(object): - def Echo123(self, msg1, msg2, msg3): - return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3)) - def EchoStruct(self, msg): - return ("%s" % msg) - - addr = msgpackrpc.Address('localhost', port) - server = msgpackrpc.Server(EchoHandler()) - server.listen(addr) - # run thread to stop it after stopTimeSec seconds if > 0 - if stopTimeSec > 0: - def myStopRpcServer(): - server.stop() - t = threading.Timer(stopTimeSec, myStopRpcServer) - t.start() - server.start() - -def doRpcClientToPythonSvc(port): - address = msgpackrpc.Address('localhost', port) - client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("Echo123", "A1", "B2", "C3") - print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) - -def doRpcClientToGoSvc(port): - # print ">>>> port: ", port, " <<<<<" - address = msgpackrpc.Address('localhost', port) - client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) - print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) - -def doMain(args): - if len(args) == 2 and args[0] == "testdata": - build_test_data(args[1]) - elif len(args) == 3 and args[0] == "rpc-server": - doRpcServer(int(args[1]), int(args[2])) - elif len(args) == 2 and args[0] == "rpc-client-python-service": - doRpcClientToPythonSvc(int(args[1])) - elif len(args) == 2 and args[0] == "rpc-client-go-service": - doRpcClientToGoSvc(int(args[1])) - else: - print("Usage: test.py " + - "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") - -if __name__ == "__main__": - doMain(sys.argv[1:]) - diff --git a/vendor/github.com/ugorji/go/codec/time.go b/vendor/github.com/ugorji/go/codec/time.go deleted file mode 100644 index fc4c63e1d..000000000 --- a/vendor/github.com/ugorji/go/codec/time.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. 
-// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "fmt" - "time" -) - -var ( - timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} -) - -type timeExt struct{} - -func (x timeExt) WriteExt(v interface{}) (bs []byte) { - switch v2 := v.(type) { - case time.Time: - bs = encodeTime(v2) - case *time.Time: - bs = encodeTime(*v2) - default: - panic(fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2)) - } - return -} -func (x timeExt) ReadExt(v interface{}, bs []byte) { - tt, err := decodeTime(bs) - if err != nil { - panic(err) - } - *(v.(*time.Time)) = tt -} - -func (x timeExt) ConvertExt(v interface{}) interface{} { - return x.WriteExt(v) -} -func (x timeExt) UpdateExt(v interface{}, src interface{}) { - x.ReadExt(v, src.([]byte)) -} - -// EncodeTime encodes a time.Time as a []byte, including -// information on the instant in time and UTC offset. -// -// Format Description -// -// A timestamp is composed of 3 components: -// -// - secs: signed integer representing seconds since unix epoch -// - nsces: unsigned integer representing fractional seconds as a -// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 -// - tz: signed integer representing timezone offset in minutes east of UTC, -// and a dst (daylight savings time) flag -// -// When encoding a timestamp, the first byte is the descriptor, which -// defines which components are encoded and how many bytes are used to -// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it -// is not encoded in the byte array explicitly*. -// -// Descriptor 8 bits are of the form `A B C DDD EE`: -// A: Is secs component encoded? 1 = true -// B: Is nsecs component encoded? 1 = true -// C: Is tz component encoded? 1 = true -// DDD: Number of extra bytes for secs (range 0-7). -// If A = 1, secs encoded in DDD+1 bytes. -// If A = 0, secs is not encoded, and is assumed to be 0. -// If A = 1, then we need at least 1 byte to encode secs. -// DDD says the number of extra bytes beyond that 1. -// E.g. if DDD=0, then secs is represented in 1 byte. -// if DDD=2, then secs is represented in 3 bytes. -// EE: Number of extra bytes for nsecs (range 0-3). -// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) -// -// Following the descriptor bytes, subsequent bytes are: -// -// secs component encoded in `DDD + 1` bytes (if A == 1) -// nsecs component encoded in `EE + 1` bytes (if B == 1) -// tz component encoded in 2 bytes (if C == 1) -// -// secs and nsecs components are integers encoded in a BigEndian -// 2-complement encoding format. -// -// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to -// Least significant bit 0 are described below: -// -// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). -// Bit 15 = have\_dst: set to 1 if we set the dst flag. -// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. -// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. 
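As a concrete reading of that descriptor layout: a timestamp whose secs component needs 5 bytes, whose nsecs component needs 4 bytes, and which carries a non-UTC zone would use the descriptor 1 1 1 100 11 = 0xf3, followed by 5 bytes of secs, 4 bytes of nsecs and 2 bytes of tz. A tiny sketch that unpacks a descriptor byte the same way decodeTime below does (illustrative only):

package main

import "fmt"

// decodeDesc splits the leading descriptor byte of the timestamp encoding
// described above into its components.
func decodeDesc(bd byte) (hasSecs, hasNsecs, hasTZ bool, secsLen, nsecsLen int) {
	hasSecs = bd&0x80 != 0  // A bit
	hasNsecs = bd&0x40 != 0 // B bit
	hasTZ = bd&0x20 != 0    // C bit
	if hasSecs {
		secsLen = int((bd>>2)&0x07) + 1 // DDD + 1 bytes
	}
	if hasNsecs {
		nsecsLen = int(bd&0x03) + 1 // EE + 1 bytes
	}
	return
}

func main() {
	fmt.Println(decodeDesc(0xf3)) // true true true 5 4
}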
-// -func encodeTime(t time.Time) []byte { - //t := rv.Interface().(time.Time) - tsecs, tnsecs := t.Unix(), t.Nanosecond() - var ( - bd byte - btmp [8]byte - bs [16]byte - i int = 1 - ) - l := t.Location() - if l == time.UTC { - l = nil - } - if tsecs != 0 { - bd = bd | 0x80 - bigen.PutUint64(btmp[:], uint64(tsecs)) - f := pruneSignExt(btmp[:], tsecs >= 0) - bd = bd | (byte(7-f) << 2) - copy(bs[i:], btmp[f:]) - i = i + (8 - f) - } - if tnsecs != 0 { - bd = bd | 0x40 - bigen.PutUint32(btmp[:4], uint32(tnsecs)) - f := pruneSignExt(btmp[:4], true) - bd = bd | byte(3-f) - copy(bs[i:], btmp[f:4]) - i = i + (4 - f) - } - if l != nil { - bd = bd | 0x20 - // Note that Go Libs do not give access to dst flag. - _, zoneOffset := t.Zone() - //zoneName, zoneOffset := t.Zone() - zoneOffset /= 60 - z := uint16(zoneOffset) - bigen.PutUint16(btmp[:2], z) - // clear dst flags - bs[i] = btmp[0] & 0x3f - bs[i+1] = btmp[1] - i = i + 2 - } - bs[0] = bd - return bs[0:i] -} - -// DecodeTime decodes a []byte into a time.Time. -func decodeTime(bs []byte) (tt time.Time, err error) { - bd := bs[0] - var ( - tsec int64 - tnsec uint32 - tz uint16 - i byte = 1 - i2 byte - n byte - ) - if bd&(1<<7) != 0 { - var btmp [8]byte - n = ((bd >> 2) & 0x7) + 1 - i2 = i + n - copy(btmp[8-n:], bs[i:i2]) - //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) - if bs[i]&(1<<7) != 0 { - copy(btmp[0:8-n], bsAll0xff) - //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } - } - i = i2 - tsec = int64(bigen.Uint64(btmp[:])) - } - if bd&(1<<6) != 0 { - var btmp [4]byte - n = (bd & 0x3) + 1 - i2 = i + n - copy(btmp[4-n:], bs[i:i2]) - i = i2 - tnsec = bigen.Uint32(btmp[:]) - } - if bd&(1<<5) == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - return - } - // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. - // However, we need name here, so it can be shown when time is printed. - // Zone name is in form: UTC-08:00. - // Note that Go Libs do not give access to dst flag, so we ignore dst bits - - i2 = i + 2 - tz = bigen.Uint16(bs[i:i2]) - i = i2 - // sign extend sign bit into top 2 MSB (which were dst bits): - if tz&(1<<13) == 0 { // positive - tz = tz & 0x3fff //clear 2 MSBs: dst bits - } else { // negative - tz = tz | 0xc000 //set 2 MSBs: dst bits - //tzname[3] = '-' (TODO: verify. this works here) - } - tzint := int16(tz) - if tzint == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - } else { - // For Go Time, do not use a descriptive timezone. - // It's unnecessary, and makes it harder to do a reflect.DeepEqual. - // The Offset already tells what the offset should be, if not on UTC and unknown zone name. - // var zoneName = timeLocUTCName(tzint) - tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) - } - return -} - -func timeLocUTCName(tzint int16) string { - if tzint == 0 { - return "UTC" - } - var tzname = []byte("UTC+00:00") - //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. - //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first - var tzhr, tzmin int16 - if tzint < 0 { - tzname[3] = '-' // (TODO: verify. 
this works here) - tzhr, tzmin = -tzint/60, (-tzint)%60 - } else { - tzhr, tzmin = tzint/60, tzint%60 - } - tzname[4] = timeDigits[tzhr/10] - tzname[5] = timeDigits[tzhr%10] - tzname[7] = timeDigits[tzmin/10] - tzname[8] = timeDigits[tzmin%10] - return string(tzname) - //return time.FixedZone(string(tzname), int(tzint)*60) -} diff --git a/vendor/golang.org/x/text/cases/cases.go b/vendor/golang.org/x/text/cases/cases.go deleted file mode 100644 index 13b3a5716..000000000 --- a/vendor/golang.org/x/text/cases/cases.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate go run gen.go gen_trieval.go - -// Package cases provides general and language-specific case mappers. -package cases - -import ( - "golang.org/x/text/language" - "golang.org/x/text/transform" -) - -// References: -// - Unicode Reference Manual Chapter 3.13, 4.2, and 5.18. -// - http://www.unicode.org/reports/tr29/ -// - http://www.unicode.org/Public/6.3.0/ucd/CaseFolding.txt -// - http://www.unicode.org/Public/6.3.0/ucd/SpecialCasing.txt -// - http://www.unicode.org/Public/6.3.0/ucd/DerivedCoreProperties.txt -// - http://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakProperty.txt -// - http://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakTest.txt -// - http://userguide.icu-project.org/transforms/casemappings - -// TODO: -// - Case folding -// - Wide and Narrow? -// - Segmenter option for title casing. -// - ASCII fast paths -// - Encode Soft-Dotted property within trie somehow. - -// A Caser transforms given input to a certain case. It implements -// transform.Transformer. -// -// A Caser may be stateful and should therefore not be shared between -// goroutines. -type Caser struct { - t transform.Transformer -} - -// Bytes returns a new byte slice with the result of converting b to the case -// form implemented by c. -func (c Caser) Bytes(b []byte) []byte { - b, _, _ = transform.Bytes(c.t, b) - return b -} - -// String returns a string with the result of transforming s to the case form -// implemented by c. -func (c Caser) String(s string) string { - s, _, _ = transform.String(c.t, s) - return s -} - -// Reset resets the Caser to be reused for new input after a previous call to -// Transform. -func (c Caser) Reset() { c.t.Reset() } - -// Transform implements the Transformer interface and transforms the given input -// to the case form implemented by c. -func (c Caser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - return c.t.Transform(dst, src, atEOF) -} - -// Upper returns a Caser for language-specific uppercasing. -func Upper(t language.Tag, opts ...Option) Caser { - return Caser{makeUpper(t, getOpts(opts...))} -} - -// Lower returns a Caser for language-specific lowercasing. -func Lower(t language.Tag, opts ...Option) Caser { - return Caser{makeLower(t, getOpts(opts...))} -} - -// Title returns a Caser for language-specific title casing. It uses an -// approximation of the default Unicode Word Break algorithm. -func Title(t language.Tag, opts ...Option) Caser { - return Caser{makeTitle(t, getOpts(opts...))} -} - -// Fold returns a Caser that implements Unicode case folding. The returned Caser -// is stateless and safe to use concurrently by multiple goroutines. -// -// Case folding does not normalize the input and may not preserve a normal form. 
-// Use the collate or search package for more convenient and linguistically -// sound comparisons. Use unicode/precis for string comparisons where security -// aspects are a concern. -func Fold(opts ...Option) Caser { - return Caser{makeFold(getOpts(opts...))} -} - -// An Option is used to modify the behavior of a Caser. -type Option func(o *options) - -var ( - // NoLower disables the lowercasing of non-leading letters for a title - // caser. - NoLower Option = noLower - - // Compact omits mappings in case folding for characters that would grow the - // input. (Unimplemented.) - Compact Option = compact -) - -// TODO: option to preserve a normal form, if applicable? - -type options struct { - noLower bool - simple bool - - // TODO: segmenter, max ignorable, alternative versions, etc. - - noFinalSigma bool // Only used for testing. -} - -func getOpts(o ...Option) (res options) { - for _, f := range o { - f(&res) - } - return -} - -func noLower(o *options) { - o.noLower = true -} - -func compact(o *options) { - o.simple = true -} diff --git a/vendor/golang.org/x/text/cases/context.go b/vendor/golang.org/x/text/cases/context.go deleted file mode 100644 index 0d2e497eb..000000000 --- a/vendor/golang.org/x/text/cases/context.go +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cases - -import ( - "golang.org/x/text/transform" -) - -// A context is used for iterating over source bytes, fetching case info and -// writing to a destination buffer. -// -// Casing operations may need more than one rune of context to decide how a rune -// should be cased. Casing implementations should call checkpoint on context -// whenever it is known to be safe to return the runes processed so far. -// -// It is recommended for implementations to not allow for more than 30 case -// ignorables as lookahead (analogous to the limit in norm) and to use state if -// unbounded lookahead is needed for cased runes. -type context struct { - dst, src []byte - atEOF bool - - pDst int // pDst points past the last written rune in dst. - pSrc int // pSrc points to the start of the currently scanned rune. - - // checkpoints safe to return in Transform, where nDst <= pDst and nSrc <= pSrc. - nDst, nSrc int - err error - - sz int // size of current rune - info info // case information of currently scanned rune - - // State preserved across calls to Transform. - isMidWord bool // false if next cased letter needs to be title-cased. -} - -func (c *context) Reset() { - c.isMidWord = false -} - -// ret returns the return values for the Transform method. It checks whether -// there were insufficient bytes in src to complete and introduces an error -// accordingly, if necessary. -func (c *context) ret() (nDst, nSrc int, err error) { - if c.err != nil || c.nSrc == len(c.src) { - return c.nDst, c.nSrc, c.err - } - // This point is only reached by mappers if there was no short destination - // buffer. This means that the source buffer was exhausted and that c.sz was - // set to 0 by next. - if c.atEOF && c.pSrc == len(c.src) { - return c.pDst, c.pSrc, nil - } - return c.nDst, c.nSrc, transform.ErrShortSrc -} - -// checkpoint sets the return value buffer points for Transform to the current -// positions. 
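The exported Caser API deleted just above (Upper, Lower, Title, Fold) is small; typical use is to build a Caser once and apply it with String or Bytes. A short sketch, with the language tags and inputs chosen only for illustration:

package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	upper := cases.Upper(language.Und)
	title := cases.Title(language.English)
	fold := cases.Fold()

	fmt.Println(upper.String("größe"))        // GRÖSSE
	fmt.Println(title.String("hello, world")) // Hello, World
	// Fold is stateless, so it is safe to share across goroutines.
	fmt.Println(fold.String("Straße") == fold.String("STRASSE")) // true
}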
-func (c *context) checkpoint() { - if c.err == nil { - c.nDst, c.nSrc = c.pDst, c.pSrc+c.sz - } -} - -// unreadRune causes the last rune read by next to be reread on the next -// invocation of next. Only one unreadRune may be called after a call to next. -func (c *context) unreadRune() { - c.sz = 0 -} - -func (c *context) next() bool { - c.pSrc += c.sz - if c.pSrc == len(c.src) || c.err != nil { - c.info, c.sz = 0, 0 - return false - } - v, sz := trie.lookup(c.src[c.pSrc:]) - c.info, c.sz = info(v), sz - if c.sz == 0 { - if c.atEOF { - // A zero size means we have an incomplete rune. If we are atEOF, - // this means it is an illegal rune, which we will consume one - // byte at a time. - c.sz = 1 - } else { - c.err = transform.ErrShortSrc - return false - } - } - return true -} - -// writeBytes adds bytes to dst. -func (c *context) writeBytes(b []byte) bool { - if len(c.dst)-c.pDst < len(b) { - c.err = transform.ErrShortDst - return false - } - // This loop is faster than using copy. - for _, ch := range b { - c.dst[c.pDst] = ch - c.pDst++ - } - return true -} - -// writeString writes the given string to dst. -func (c *context) writeString(s string) bool { - if len(c.dst)-c.pDst < len(s) { - c.err = transform.ErrShortDst - return false - } - // This loop is faster than using copy. - for i := 0; i < len(s); i++ { - c.dst[c.pDst] = s[i] - c.pDst++ - } - return true -} - -// copy writes the current rune to dst. -func (c *context) copy() bool { - return c.writeBytes(c.src[c.pSrc : c.pSrc+c.sz]) -} - -// copyXOR copies the current rune to dst and modifies it by applying the XOR -// pattern of the case info. It is the responsibility of the caller to ensure -// that this is a rune with a XOR pattern defined. -func (c *context) copyXOR() bool { - if !c.copy() { - return false - } - if c.info&xorIndexBit == 0 { - // Fast path for 6-bit XOR pattern, which covers most cases. - c.dst[c.pDst-1] ^= byte(c.info >> xorShift) - } else { - // Interpret XOR bits as an index. - // TODO: test performance for unrolling this loop. Verify that we have - // at least two bytes and at most three. - idx := c.info >> xorShift - for p := c.pDst - 1; ; p-- { - c.dst[p] ^= xorData[idx] - idx-- - if xorData[idx] == 0 { - break - } - } - } - return true -} - -// hasPrefix returns true if src[pSrc:] starts with the given string. -func (c *context) hasPrefix(s string) bool { - b := c.src[c.pSrc:] - if len(b) < len(s) { - return false - } - for i, c := range b[:len(s)] { - if c != s[i] { - return false - } - } - return true -} - -// caseType returns an info with only the case bits, normalized to either -// cLower, cUpper, cTitle or cUncased. -func (c *context) caseType() info { - cm := c.info & 0x7 - if cm < 4 { - return cm - } - if cm >= cXORCase { - // xor the last bit of the rune with the case type bits. - b := c.src[c.pSrc+c.sz-1] - return info(b&1) ^ cm&0x3 - } - if cm == cIgnorableCased { - return cLower - } - return cUncased -} - -// lower writes the lowercase version of the current rune to dst. -func lower(c *context) bool { - ct := c.caseType() - if c.info&hasMappingMask == 0 || ct == cLower { - return c.copy() - } - if c.info&exceptionBit == 0 { - return c.copyXOR() - } - e := exceptions[c.info>>exceptionShift:] - offset := 2 + e[0]&lengthMask // size of header + fold string - if nLower := (e[1] >> lengthBits) & lengthMask; nLower != noChange { - return c.writeString(e[offset : offset+nLower]) - } - return c.copy() -} - -// upper writes the uppercase version of the current rune to dst. 
-func upper(c *context) bool { - ct := c.caseType() - if c.info&hasMappingMask == 0 || ct == cUpper { - return c.copy() - } - if c.info&exceptionBit == 0 { - return c.copyXOR() - } - e := exceptions[c.info>>exceptionShift:] - offset := 2 + e[0]&lengthMask // size of header + fold string - // Get length of first special case mapping. - n := (e[1] >> lengthBits) & lengthMask - if ct == cTitle { - // The first special case mapping is for lower. Set n to the second. - if n == noChange { - n = 0 - } - n, e = e[1]&lengthMask, e[n:] - } - if n != noChange { - return c.writeString(e[offset : offset+n]) - } - return c.copy() -} - -// title writes the title case version of the current rune to dst. -func title(c *context) bool { - ct := c.caseType() - if c.info&hasMappingMask == 0 || ct == cTitle { - return c.copy() - } - if c.info&exceptionBit == 0 { - if ct == cLower { - return c.copyXOR() - } - return c.copy() - } - // Get the exception data. - e := exceptions[c.info>>exceptionShift:] - offset := 2 + e[0]&lengthMask // size of header + fold string - - nFirst := (e[1] >> lengthBits) & lengthMask - if nTitle := e[1] & lengthMask; nTitle != noChange { - if nFirst != noChange { - e = e[nFirst:] - } - return c.writeString(e[offset : offset+nTitle]) - } - if ct == cLower && nFirst != noChange { - // Use the uppercase version instead. - return c.writeString(e[offset : offset+nFirst]) - } - // Already in correct case. - return c.copy() -} - -// foldFull writes the foldFull version of the current rune to dst. -func foldFull(c *context) bool { - if c.info&hasMappingMask == 0 { - return c.copy() - } - ct := c.caseType() - if c.info&exceptionBit == 0 { - if ct != cLower || c.info&inverseFoldBit != 0 { - return c.copyXOR() - } - return c.copy() - } - e := exceptions[c.info>>exceptionShift:] - n := e[0] & lengthMask - if n == 0 { - if ct == cLower { - return c.copy() - } - n = (e[1] >> lengthBits) & lengthMask - } - return c.writeString(e[2 : 2+n]) -} diff --git a/vendor/golang.org/x/text/cases/fold.go b/vendor/golang.org/x/text/cases/fold.go deleted file mode 100644 index e95bfa8ea..000000000 --- a/vendor/golang.org/x/text/cases/fold.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cases - -import "golang.org/x/text/transform" - -type caseFolder struct{ transform.NopResetter } - -// caseFolder implements the Transformer interface for doing case folding. -func (t *caseFolder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - c := context{dst: dst, src: src, atEOF: atEOF} - for c.next() { - foldFull(&c) - c.checkpoint() - } - return c.ret() -} - -func makeFold(o options) transform.Transformer { - // TODO: Special case folding, through option Language, Special/Turkic, or - // both. - // TODO: Implement Compact options. - return &caseFolder{} -} diff --git a/vendor/golang.org/x/text/cases/gen.go b/vendor/golang.org/x/text/cases/gen.go deleted file mode 100644 index f46170fca..000000000 --- a/vendor/golang.org/x/text/cases/gen.go +++ /dev/null @@ -1,831 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// This program generates the trie for casing operations. The Unicode casing -// algorithm requires the lookup of various properties and mappings for each -// rune. 
The table generated by this generator combines several of the most -// frequently used of these into a single trie so that they can be accessed -// with a single lookup. -package main - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "log" - "reflect" - "strconv" - "strings" - "unicode" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" - "golang.org/x/text/unicode/norm" -) - -func main() { - gen.Init() - genTables() - genTablesTest() - gen.Repackage("gen_trieval.go", "trieval.go", "cases") -} - -// runeInfo contains all information for a rune that we care about for casing -// operations. -type runeInfo struct { - Rune rune - - entry info // trie value for this rune. - - CaseMode info - - // Simple case mappings. - Simple [1 + maxCaseMode][]rune - - // Special casing - HasSpecial bool - Conditional bool - Special [1 + maxCaseMode][]rune - - // Folding - FoldSimple rune - FoldSpecial rune - FoldFull []rune - - // TODO: FC_NFKC, or equivalent data. - - // Properties - SoftDotted bool - CaseIgnorable bool - Cased bool - DecomposeGreek bool - BreakType string - BreakCat breakCategory - - // We care mostly about 0, Above, and IotaSubscript. - CCC byte -} - -type breakCategory int - -const ( - breakBreak breakCategory = iota - breakLetter - breakIgnored -) - -// mapping returns the case mapping for the given case type. -func (r *runeInfo) mapping(c info) string { - if r.HasSpecial { - return string(r.Special[c]) - } - if len(r.Simple[c]) != 0 { - return string(r.Simple[c]) - } - return string(r.Rune) -} - -func parse(file string, f func(p *ucd.Parser)) { - ucd.Parse(gen.OpenUCDFile(file), f) -} - -func parseUCD() []runeInfo { - chars := make([]runeInfo, unicode.MaxRune) - - get := func(r rune) *runeInfo { - c := &chars[r] - c.Rune = r - return c - } - - parse("UnicodeData.txt", func(p *ucd.Parser) { - ri := get(p.Rune(0)) - ri.CCC = byte(p.Int(ucd.CanonicalCombiningClass)) - ri.Simple[cLower] = p.Runes(ucd.SimpleLowercaseMapping) - ri.Simple[cUpper] = p.Runes(ucd.SimpleUppercaseMapping) - ri.Simple[cTitle] = p.Runes(ucd.SimpleTitlecaseMapping) - if p.String(ucd.GeneralCategory) == "Lt" { - ri.CaseMode = cTitle - } - }) - - // ; - parse("PropList.txt", func(p *ucd.Parser) { - if p.String(1) == "Soft_Dotted" { - chars[p.Rune(0)].SoftDotted = true - } - }) - - // ; - parse("DerivedCoreProperties.txt", func(p *ucd.Parser) { - ri := get(p.Rune(0)) - switch p.String(1) { - case "Case_Ignorable": - ri.CaseIgnorable = true - case "Cased": - ri.Cased = true - case "Lowercase": - ri.CaseMode = cLower - case "Uppercase": - ri.CaseMode = cUpper - } - }) - - // ; ; ; <upper> ; (<condition_list> ;)? - parse("SpecialCasing.txt", func(p *ucd.Parser) { - // We drop all conditional special casing and deal with them manually in - // the language-specific case mappers. Rune 0x03A3 is the only one with - // a conditional formatting that is not language-specific. However, - // dealing with this letter is tricky, especially in a streaming - // context, so we deal with it in the Caser for Greek specifically. - ri := get(p.Rune(0)) - if p.String(4) == "" { - ri.HasSpecial = true - ri.Special[cLower] = p.Runes(1) - ri.Special[cTitle] = p.Runes(2) - ri.Special[cUpper] = p.Runes(3) - } else { - ri.Conditional = true - } - }) - - // TODO: Use text breaking according to UAX #29. 
- // <code>; <word break type> - parse("auxiliary/WordBreakProperty.txt", func(p *ucd.Parser) { - ri := get(p.Rune(0)) - ri.BreakType = p.String(1) - - // We collapse the word breaking properties onto the categories we need. - switch p.String(1) { // TODO: officially we need to canonicalize. - case "Format", "MidLetter", "MidNumLet", "Single_Quote": - ri.BreakCat = breakIgnored - case "ALetter", "Hebrew_Letter", "Numeric", "Extend", "ExtendNumLet": - ri.BreakCat = breakLetter - } - }) - - // <code>; <type>; <mapping> - parse("CaseFolding.txt", func(p *ucd.Parser) { - ri := get(p.Rune(0)) - switch p.String(1) { - case "C": - ri.FoldSimple = p.Rune(2) - ri.FoldFull = p.Runes(2) - case "S": - ri.FoldSimple = p.Rune(2) - case "T": - ri.FoldSpecial = p.Rune(2) - case "F": - ri.FoldFull = p.Runes(2) - default: - log.Fatalf("%U: unknown type: %s", p.Rune(0), p.String(1)) - } - }) - - return chars -} - -func genTables() { - chars := parseUCD() - verifyProperties(chars) - - t := triegen.NewTrie("case") - for i := range chars { - c := &chars[i] - makeEntry(c) - t.Insert(rune(i), uint64(c.entry)) - } - - w := gen.NewCodeWriter() - defer w.WriteGoFile("tables.go", "cases") - - gen.WriteUnicodeVersion(w) - - // TODO: write CLDR version after adding a mechanism to detect that the - // tables on which the manually created locale-sensitive casing code is - // based hasn't changed. - - w.WriteVar("xorData", string(xorData)) - w.WriteVar("exceptions", string(exceptionData)) - - sz, err := t.Gen(w, triegen.Compact(&sparseCompacter{})) - if err != nil { - log.Fatal(err) - } - w.Size += sz -} - -func makeEntry(ri *runeInfo) { - if ri.CaseIgnorable { - if ri.Cased { - ri.entry = cIgnorableCased - } else { - ri.entry = cIgnorableUncased - } - } else { - ri.entry = ri.CaseMode - } - - // TODO: handle soft-dotted. - - ccc := cccOther - switch ri.CCC { - case 0: // Not_Reordered - ccc = cccZero - case above: // Above - ccc = cccAbove - } - if ri.BreakCat == breakBreak { - ccc = cccBreak - } - - ri.entry |= ccc - - if ri.CaseMode == cUncased { - return - } - - // Need to do something special. - if ri.CaseMode == cTitle || ri.HasSpecial || ri.mapping(cTitle) != ri.mapping(cUpper) { - makeException(ri) - return - } - if f := string(ri.FoldFull); len(f) > 0 && f != ri.mapping(cUpper) && f != ri.mapping(cLower) { - makeException(ri) - return - } - - // Rune is either lowercase or uppercase. - - orig := string(ri.Rune) - mapped := "" - if ri.CaseMode == cUpper { - mapped = ri.mapping(cLower) - } else { - mapped = ri.mapping(cUpper) - } - - if len(orig) != len(mapped) { - makeException(ri) - return - } - - if string(ri.FoldFull) == ri.mapping(cUpper) { - ri.entry |= inverseFoldBit - } - - n := len(orig) - - // Create per-byte XOR mask. - var b []byte - for i := 0; i < n; i++ { - b = append(b, orig[i]^mapped[i]) - } - - // Remove leading 0 bytes, but keep at least one byte. - for ; len(b) > 1 && b[0] == 0; b = b[1:] { - } - - if len(b) == 1 && b[0]&0xc0 == 0 { - ri.entry |= info(b[0]) << xorShift - return - } - - key := string(b) - x, ok := xorCache[key] - if !ok { - xorData = append(xorData, 0) // for detecting start of sequence - xorData = append(xorData, b...) - - x = len(xorData) - 1 - xorCache[key] = x - } - ri.entry |= info(x<<xorShift) | xorIndexBit -} - -var xorCache = map[string]int{} - -// xorData contains byte-wise XOR data for the least significant bytes of a -// UTF-8 encoded rune. An index points to the last byte. The sequence starts -// with a zero terminator. 
-var xorData = []byte{} - -// See the comments in gen_trieval.go re "the exceptions slice". -var exceptionData = []byte{0} - -// makeException encodes case mappings that cannot be expressed in a simple -// XOR diff. -func makeException(ri *runeInfo) { - ccc := ri.entry & cccMask - // Set exception bit and retain case type. - ri.entry &= 0x0007 - ri.entry |= exceptionBit - - if len(exceptionData) >= 1<<numExceptionBits { - log.Fatalf("%U:exceptionData too large %x > %d bits", ri.Rune, len(exceptionData), numExceptionBits) - } - - // Set the offset in the exceptionData array. - ri.entry |= info(len(exceptionData) << exceptionShift) - - orig := string(ri.Rune) - tc := ri.mapping(cTitle) - uc := ri.mapping(cUpper) - lc := ri.mapping(cLower) - ff := string(ri.FoldFull) - - // addString sets the length of a string and adds it to the expansions array. - addString := func(s string, b *byte) { - if len(s) == 0 { - // Zero-length mappings exist, but only for conditional casing, - // which we are representing outside of this table. - log.Fatalf("%U: has zero-length mapping.", ri.Rune) - } - *b <<= 3 - if s != orig { - n := len(s) - if n > 7 { - log.Fatalf("%U: mapping larger than 7 (%d)", ri.Rune, n) - } - *b |= byte(n) - exceptionData = append(exceptionData, s...) - } - } - - // byte 0: - exceptionData = append(exceptionData, byte(ccc)|byte(len(ff))) - - // byte 1: - p := len(exceptionData) - exceptionData = append(exceptionData, 0) - - if len(ff) > 7 { // May be zero-length. - log.Fatalf("%U: fold string larger than 7 (%d)", ri.Rune, len(ff)) - } - exceptionData = append(exceptionData, ff...) - ct := ri.CaseMode - if ct != cLower { - addString(lc, &exceptionData[p]) - } - if ct != cUpper { - addString(uc, &exceptionData[p]) - } - if ct != cTitle { - // If title is the same as upper, we set it to the original string so - // that it will be marked as not present. This implies title case is - // the same as upper case. - if tc == uc { - tc = orig - } - addString(tc, &exceptionData[p]) - } -} - -// sparseCompacter is a trie value block Compacter. There are many cases where -// successive runes alternate between lower- and upper-case. This Compacter -// exploits this by adding a special case type where the case value is obtained -// from or-ing it with the least-significant bit of the rune, creating large -// ranges of equal case values that compress well. -type sparseCompacter struct { - sparseBlocks [][]uint16 - sparseOffsets []uint16 - sparseCount int -} - -// makeSparse returns the number of elements that compact block would contain -// as well as the modified values. -func makeSparse(vals []uint64) ([]uint16, int) { - // Copy the values. - values := make([]uint16, len(vals)) - for i, v := range vals { - values[i] = uint16(v) - } - - alt := func(i int, v uint16) uint16 { - if cm := info(v & fullCasedMask); cm == cUpper || cm == cLower { - // Convert cLower or cUpper to cXORCase value, which has the form 11x. - xor := v - xor &^= 1 - xor |= uint16(i&1) ^ (v & 1) - xor |= 0x4 - return xor - } - return v - } - - var count int - var previous uint16 - for i, v := range values { - if v != 0 { - // Try if the unmodified value is equal to the previous. - if v == previous { - continue - } - - // Try if the xor-ed value is equal to the previous value. - a := alt(i, v) - if a == previous { - values[i] = a - continue - } - - // This is a new value. - count++ - - // Use the xor-ed value if it will be identical to the next value. 
- if p := i + 1; p < len(values) && alt(p, values[p]) == a { - values[i] = a - v = a - } - } - previous = v - } - return values, count -} - -func (s *sparseCompacter) Size(v []uint64) (int, bool) { - _, n := makeSparse(v) - - // We limit using this method to having 16 entries. - if n > 16 { - return 0, false - } - - return 2 + int(reflect.TypeOf(valueRange{}).Size())*n, true -} - -func (s *sparseCompacter) Store(v []uint64) uint32 { - h := uint32(len(s.sparseOffsets)) - values, sz := makeSparse(v) - s.sparseBlocks = append(s.sparseBlocks, values) - s.sparseOffsets = append(s.sparseOffsets, uint16(s.sparseCount)) - s.sparseCount += sz - return h -} - -func (s *sparseCompacter) Handler() string { - // The sparse global variable and its lookup method is defined in gen_trieval.go. - return "sparse.lookup" -} - -func (s *sparseCompacter) Print(w io.Writer) (retErr error) { - p := func(format string, args ...interface{}) { - _, err := fmt.Fprintf(w, format, args...) - if retErr == nil && err != nil { - retErr = err - } - } - - ls := len(s.sparseBlocks) - if ls == len(s.sparseOffsets) { - s.sparseOffsets = append(s.sparseOffsets, uint16(s.sparseCount)) - } - p("// sparseOffsets: %d entries, %d bytes\n", ls+1, (ls+1)*2) - p("var sparseOffsets = %#v\n\n", s.sparseOffsets) - - ns := s.sparseCount - p("// sparseValues: %d entries, %d bytes\n", ns, ns*4) - p("var sparseValues = [%d]valueRange {", ns) - for i, values := range s.sparseBlocks { - p("\n// Block %#x, offset %#x", i, s.sparseOffsets[i]) - var v uint16 - for i, nv := range values { - if nv != v { - if v != 0 { - p(",hi:%#02x},", 0x80+i-1) - } - if nv != 0 { - p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) - } - } - v = nv - } - if v != 0 { - p(",hi:%#02x},", 0x80+len(values)-1) - } - } - p("\n}\n\n") - return -} - -// verifyProperties that properties of the runes that are relied upon in the -// implementation. Each property is marked with an identifier that is referred -// to in the places where it is used. -func verifyProperties(chars []runeInfo) { - for i, c := range chars { - r := rune(i) - - // Rune properties. - - // A.1: modifier never changes on lowercase. [ltLower] - if c.CCC > 0 && unicode.ToLower(r) != r { - log.Fatalf("%U: non-starter changes when lowercased", r) - } - - // A.2: properties of decompositions starting with I or J. [ltLower] - d := norm.NFD.PropertiesString(string(r)).Decomposition() - if len(d) > 0 { - if d[0] == 'I' || d[0] == 'J' { - // A.2.1: we expect at least an ASCII character and a modifier. - if len(d) < 3 { - log.Fatalf("%U: length of decomposition was %d; want >= 3", r, len(d)) - } - - // All subsequent runes are modifiers and all have the same CCC. - runes := []rune(string(d[1:])) - ccc := chars[runes[0]].CCC - - for _, mr := range runes[1:] { - mc := chars[mr] - - // A.2.2: all modifiers have a CCC of Above or less. - if ccc == 0 || ccc > above { - log.Fatalf("%U: CCC of successive rune (%U) was %d; want (0,230]", r, mr, ccc) - } - - // A.2.3: a sequence of modifiers all have the same CCC. - if mc.CCC != ccc { - log.Fatalf("%U: CCC of follow-up modifier (%U) was %d; want %d", r, mr, mc.CCC, ccc) - } - - // A.2.4: for each trailing r, r in [0x300, 0x311] <=> CCC == Above. - if (ccc == above) != (0x300 <= mr && mr <= 0x311) { - log.Fatalf("%U: modifier %U in [U+0300, U+0311] != ccc(%U) == 230", r, mr, mr) - } - - if i += len(string(mr)); i >= len(d) { - break - } - } - } - } - - // A.3: no U+0307 in decomposition of Soft-Dotted rune. 
[ltUpper] - if unicode.Is(unicode.Soft_Dotted, r) && strings.Contains(string(d), "\u0307") { - log.Fatalf("%U: decomposition of soft-dotted rune may not contain U+0307", r) - } - - // A.4: only rune U+0345 may be of CCC Iota_Subscript. [elUpper] - if c.CCC == iotaSubscript && r != 0x0345 { - log.Fatalf("%U: only rune U+0345 may have CCC Iota_Subscript", r) - } - - // A.5: soft-dotted runes do not have exceptions. - if c.SoftDotted && c.entry&exceptionBit != 0 { - log.Fatalf("%U: soft-dotted has exception", r) - } - - // A.6: Greek decomposition. [elUpper] - if unicode.Is(unicode.Greek, r) { - if b := norm.NFD.PropertiesString(string(r)).Decomposition(); b != nil { - runes := []rune(string(b)) - // A.6.1: If a Greek rune decomposes and the first rune of the - // decomposition is greater than U+00FF, the rune is always - // great and not a modifier. - if f := runes[0]; unicode.IsMark(f) || f > 0xFF && !unicode.Is(unicode.Greek, f) { - log.Fatalf("%U: expeced first rune of Greek decomposition to be letter, found %U", r, f) - } - // A.6.2: Any follow-up rune in a Greek decomposition is a - // modifier of which the first should be gobbled in - // decomposition. - for _, m := range runes[1:] { - switch m { - case 0x0313, 0x0314, 0x0301, 0x0300, 0x0306, 0x0342, 0x0308, 0x0304, 0x345: - default: - log.Fatalf("%U: modifier %U is outside of expeced Greek modifier set", r, m) - } - } - } - } - - // Breaking properties. - - // B.1: all runes with CCC > 0 are of break type Extend. - if c.CCC > 0 && c.BreakType != "Extend" { - log.Fatalf("%U: CCC == %d, but got break type %s; want Extend", r, c.CCC, c.BreakType) - } - - // B.2: all cased runes with c.CCC == 0 are of break type ALetter. - if c.CCC == 0 && c.Cased && c.BreakType != "ALetter" { - log.Fatalf("%U: cased, but got break type %s; want ALetter", r, c.BreakType) - } - - // B.3: letter category. - if c.CCC == 0 && c.BreakCat != breakBreak && !c.CaseIgnorable { - if c.BreakCat != breakLetter { - log.Fatalf("%U: check for letter break type gave %d; want %d", r, c.BreakCat, breakLetter) - } - } - } -} - -func genTablesTest() { - w := &bytes.Buffer{} - - fmt.Fprintln(w, "var (") - printProperties(w, "DerivedCoreProperties.txt", "Case_Ignorable", verifyIgnore) - - // We discard the output as we know we have perfect functions. We run them - // just to verify the properties are correct. - n := printProperties(ioutil.Discard, "DerivedCoreProperties.txt", "Cased", verifyCased) - n += printProperties(ioutil.Discard, "DerivedCoreProperties.txt", "Lowercase", verifyLower) - n += printProperties(ioutil.Discard, "DerivedCoreProperties.txt", "Uppercase", verifyUpper) - if n > 0 { - log.Fatalf("One of the discarded properties does not have a perfect filter.") - } - - // <code>; <lower> ; <title> ; <upper> ; (<condition_list> ;)? - fmt.Fprintln(w, "\tspecial = map[rune]struct{ toLower, toTitle, toUpper string }{") - parse("SpecialCasing.txt", func(p *ucd.Parser) { - // Skip conditional entries. 
- if p.String(4) != "" { - return - } - r := p.Rune(0) - fmt.Fprintf(w, "\t\t0x%04x: {%q, %q, %q},\n", - r, string(p.Runes(1)), string(p.Runes(2)), string(p.Runes(3))) - }) - fmt.Fprint(w, "\t}\n\n") - - // <code>; <type>; <runes> - table := map[rune]struct{ simple, full, special string }{} - parse("CaseFolding.txt", func(p *ucd.Parser) { - r := p.Rune(0) - t := p.String(1) - v := string(p.Runes(2)) - if t != "T" && v == string(unicode.ToLower(r)) { - return - } - x := table[r] - switch t { - case "C": - x.full = v - x.simple = v - case "S": - x.simple = v - case "F": - x.full = v - case "T": - x.special = v - } - table[r] = x - }) - fmt.Fprintln(w, "\tfoldMap = map[rune]struct{ simple, full, special string }{") - for r := rune(0); r < 0x10FFFF; r++ { - x, ok := table[r] - if !ok { - continue - } - fmt.Fprintf(w, "\t\t0x%04x: {%q, %q, %q},\n", r, x.simple, x.full, x.special) - } - fmt.Fprint(w, "\t}\n\n") - - // Break property - notBreak := map[rune]bool{} - parse("auxiliary/WordBreakProperty.txt", func(p *ucd.Parser) { - switch p.String(1) { - case "Extend", "Format", "MidLetter", "MidNumLet", "Single_Quote", - "ALetter", "Hebrew_Letter", "Numeric", "ExtendNumLet": - notBreak[p.Rune(0)] = true - } - }) - - fmt.Fprintln(w, "\tbreakProp = []struct{ lo, hi rune }{") - inBreak := false - for r := rune(0); r <= lastRuneForTesting; r++ { - if isBreak := !notBreak[r]; isBreak != inBreak { - if isBreak { - fmt.Fprintf(w, "\t\t{0x%x, ", r) - } else { - fmt.Fprintf(w, "0x%x},\n", r-1) - } - inBreak = isBreak - } - } - if inBreak { - fmt.Fprintf(w, "0x%x},\n", lastRuneForTesting) - } - fmt.Fprint(w, "\t}\n\n") - - // Word break test - // Filter out all samples that do not contain cased characters. - cased := map[rune]bool{} - parse("DerivedCoreProperties.txt", func(p *ucd.Parser) { - if p.String(1) == "Cased" { - cased[p.Rune(0)] = true - } - }) - - fmt.Fprintln(w, "\tbreakTest = []string{") - parse("auxiliary/WordBreakTest.txt", func(p *ucd.Parser) { - c := strings.Split(p.String(0), " ") - - const sep = '|' - numCased := 0 - test := "" - for ; len(c) >= 2; c = c[2:] { - if c[0] == "÷" && test != "" { - test += string(sep) - } - i, err := strconv.ParseUint(c[1], 16, 32) - r := rune(i) - if err != nil { - log.Fatalf("Invalid rune %q.", c[1]) - } - if r == sep { - log.Fatalf("Separator %q not allowed in test data. Pick another one.", sep) - } - if cased[r] { - numCased++ - } - test += string(r) - } - if numCased > 1 { - fmt.Fprintf(w, "\t\t%q,\n", test) - } - }) - fmt.Fprintln(w, "\t}") - - fmt.Fprintln(w, ")") - - gen.WriteGoFile("tables_test.go", "cases", w.Bytes()) -} - -// These functions are just used for verification that their definition have not -// changed in the Unicode Standard. - -func verifyCased(r rune) bool { - return verifyLower(r) || verifyUpper(r) || unicode.IsTitle(r) -} - -func verifyLower(r rune) bool { - return unicode.IsLower(r) || unicode.Is(unicode.Other_Lowercase, r) -} - -func verifyUpper(r rune) bool { - return unicode.IsUpper(r) || unicode.Is(unicode.Other_Uppercase, r) -} - -// verifyIgnore is an approximation of the Case_Ignorable property using the -// core unicode package. It is used to reduce the size of the test data. -func verifyIgnore(r rune) bool { - props := []*unicode.RangeTable{ - unicode.Mn, - unicode.Me, - unicode.Cf, - unicode.Lm, - unicode.Sk, - } - for _, p := range props { - if unicode.Is(p, r) { - return true - } - } - return false -} - -// printProperties prints tables of rune properties from the given UCD file. 
-// A filter func f can be given to exclude certain values. A rune r will have -// the indicated property if it is in the generated table or if f(r). -func printProperties(w io.Writer, file, property string, f func(r rune) bool) int { - verify := map[rune]bool{} - n := 0 - varNameParts := strings.Split(property, "_") - varNameParts[0] = strings.ToLower(varNameParts[0]) - fmt.Fprintf(w, "\t%s = map[rune]bool{\n", strings.Join(varNameParts, "")) - parse(file, func(p *ucd.Parser) { - if p.String(1) == property { - r := p.Rune(0) - verify[r] = true - if !f(r) { - n++ - fmt.Fprintf(w, "\t\t0x%.4x: true,\n", r) - } - } - }) - fmt.Fprint(w, "\t}\n\n") - - // Verify that f is correct, that is, it represents a subset of the property. - for r := rune(0); r <= lastRuneForTesting; r++ { - if !verify[r] && f(r) { - log.Fatalf("Incorrect filter func for property %q.", property) - } - } - return n -} - -// The newCaseTrie, sparseValues and sparseOffsets definitions below are -// placeholders referred to by gen_trieval.go. The real definitions are -// generated by this program and written to tables.go. - -func newCaseTrie(int) int { return 0 } - -var ( - sparseValues [0]valueRange - sparseOffsets [0]uint16 -) diff --git a/vendor/golang.org/x/text/cases/gen_trieval.go b/vendor/golang.org/x/text/cases/gen_trieval.go deleted file mode 100644 index cf1a99d5e..000000000 --- a/vendor/golang.org/x/text/cases/gen_trieval.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This file contains definitions for interpreting the trie value of the case -// trie generated by "go run gen*.go". It is shared by both the generator -// program and the resultant package. Sharing is achieved by the generator -// copying gen_trieval.go to trieval.go and changing what's above this comment. - -// info holds case information for a single rune. It is the value returned -// by a trie lookup. Most mapping information can be stored in a single 16-bit -// value. If not, for example when a rune is mapped to multiple runes, the value -// stores some basic case data and an index into an array with additional data. -// -// The per-rune values have the following format: -// -// if (exception) { -// 15..5 unsigned exception index -// 4 unused -// } else { -// 15..8 XOR pattern or index to XOR pattern for case mapping -// Only 13..8 are used for XOR patterns. -// 7 inverseFold (fold to upper, not to lower) -// 6 index: interpret the XOR pattern as an index -// 5..4 CCC: zero (normal or break), above or other -// } -// 3 exception: interpret this value as an exception index -// (TODO: is this bit necessary? Probably implied from case mode.) -// 2..0 case mode -// -// For the non-exceptional cases, a rune must be either uncased, lowercase or -// uppercase. If the rune is cased, the XOR pattern maps either a lowercase -// rune to uppercase or an uppercase rune to lowercase (applied to the 10 -// least-significant bits of the rune). -// -// See the definitions below for a more detailed description of the various -// bits. -type info uint16 - -const ( - casedMask = 0x0003 - fullCasedMask = 0x0007 - ignorableMask = 0x0006 - ignorableValue = 0x0004 - - inverseFoldBit = 1 << 7 - - exceptionBit = 1 << 3 - exceptionShift = 5 - numExceptionBits = 11 - - xorIndexBit = 1 << 6 - xorShift = 8 - - // There is no mapping if all xor bits and the exception bit are zero. 
- hasMappingMask = 0xffc0 | exceptionBit -) - -// The case mode bits encodes the case type of a rune. This includes uncased, -// title, upper and lower case and case ignorable. (For a definition of these -// terms see Chapter 3 of The Unicode Standard Core Specification.) In some rare -// cases, a rune can be both cased and case-ignorable. This is encoded by -// cIgnorableCased. A rune of this type is always lower case. Some runes are -// cased while not having a mapping. -// -// A common pattern for scripts in the Unicode standard is for upper and lower -// case runes to alternate for increasing rune values (e.g. the accented Latin -// ranges starting from U+0100 and U+1E00 among others and some Cyrillic -// characters). We use this property by defining a cXORCase mode, where the case -// mode (always upper or lower case) is derived from the rune value. As the XOR -// pattern for case mappings is often identical for successive runes, using -// cXORCase can result in large series of identical trie values. This, in turn, -// allows us to better compress the trie blocks. -const ( - cUncased info = iota // 000 - cTitle // 001 - cLower // 010 - cUpper // 011 - cIgnorableUncased // 100 - cIgnorableCased // 101 // lower case if mappings exist - cXORCase // 11x // case is cLower | ((rune&1) ^ x) - - maxCaseMode = cUpper -) - -func (c info) isCased() bool { - return c&casedMask != 0 -} - -func (c info) isCaseIgnorable() bool { - return c&ignorableMask == ignorableValue -} - -func (c info) isCaseIgnorableAndNonBreakStarter() bool { - return c&(fullCasedMask|cccMask) == (ignorableValue | cccZero) -} - -func (c info) isNotCasedAndNotCaseIgnorable() bool { - return c&fullCasedMask == 0 -} - -func (c info) isCaseIgnorableAndNotCased() bool { - return c&fullCasedMask == cIgnorableUncased -} - -// The case mapping implementation will need to know about various Canonical -// Combining Class (CCC) values. We encode two of these in the trie value: -// cccZero (0) and cccAbove (230). If the value is cccOther, it means that -// CCC(r) > 0, but not 230. A value of cccBreak means that CCC(r) == 0 and that -// the rune also has the break category Break (see below). -const ( - cccBreak info = iota << 4 - cccZero - cccAbove - cccOther - - cccMask = cccBreak | cccZero | cccAbove | cccOther -) - -const ( - starter = 0 - above = 230 - iotaSubscript = 240 -) - -// The exceptions slice holds data that does not fit in a normal info entry. -// The entry is pointed to by the exception index in an entry. It has the -// following format: -// -// Header -// byte 0: -// 7..6 unused -// 5..4 CCC type (same bits as entry) -// 3 unused -// 2..0 length of fold -// -// byte 1: -// 7..6 unused -// 5..3 length of 1st mapping of case type -// 2..0 length of 2nd mapping of case type -// -// case 1st 2nd -// lower -> upper, title -// upper -> lower, title -// title -> lower, upper -// -// Lengths with the value 0x7 indicate no value and implies no change. -// A length of 0 indicates a mapping to zero-length string. -// -// Body bytes: -// case folding bytes -// lowercase mapping bytes -// uppercase mapping bytes -// titlecase mapping bytes -// closure mapping bytes (for NFKC_Casefold). (TODO) -// -// Fallbacks: -// missing fold -> lower -// missing title -> upper -// all missing -> original rune -// -// exceptions starts with a dummy byte to enforce that there is no zero index -// value. -const ( - lengthMask = 0x07 - lengthBits = 3 - noChange = 0 -) - -// References to generated trie. 
- -var trie = newCaseTrie(0) - -var sparse = sparseBlocks{ - values: sparseValues[:], - offsets: sparseOffsets[:], -} - -// Sparse block lookup code. - -// valueRange is an entry in a sparse block. -type valueRange struct { - value uint16 - lo, hi byte -} - -type sparseBlocks struct { - values []valueRange - offsets []uint16 -} - -// lookup returns the value from values block n for byte b using binary search. -func (s *sparseBlocks) lookup(n uint32, b byte) uint16 { - lo := s.offsets[n] - hi := s.offsets[n+1] - for lo < hi { - m := lo + (hi-lo)/2 - r := s.values[m] - if r.lo <= b && b <= r.hi { - return r.value - } - if b < r.lo { - hi = m - } else { - lo = m + 1 - } - } - return 0 -} - -// lastRuneForTesting is the last rune used for testing. Everything after this -// is boring. -const lastRuneForTesting = rune(0x1FFFF) diff --git a/vendor/golang.org/x/text/cases/info.go b/vendor/golang.org/x/text/cases/info.go deleted file mode 100644 index 669d7ae1f..000000000 --- a/vendor/golang.org/x/text/cases/info.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cases - -func (c info) cccVal() info { - if c&exceptionBit != 0 { - return info(exceptions[c>>exceptionShift]) & cccMask - } - return c & cccMask -} - -func (c info) cccType() info { - ccc := c.cccVal() - if ccc <= cccZero { - return cccZero - } - return ccc -} - -// TODO: Implement full Unicode breaking algorithm: -// 1) Implement breaking in separate package. -// 2) Use the breaker here. -// 3) Compare table size and performance of using the more generic breaker. -// -// Note that we can extend the current algorithm to be much more accurate. This -// only makes sense, though, if the performance and/or space penalty of using -// the generic breaker is big. Extra data will only be needed for non-cased -// runes, which means there are sufficient bits left in the caseType. -// Also note that the standard breaking algorithm doesn't always make sense -// for title casing. For example, a4a -> A4a, but a"4a -> A"4A (where " stands -// for modifier \u0308). -// ICU prohibits breaking in such cases as well. - -// For the purpose of title casing we use an approximation of the Unicode Word -// Breaking algorithm defined in Annex #29: -// http://www.unicode.org/reports/tr29/#Default_Grapheme_Cluster_Table. -// -// For our approximation, we group the Word Break types into the following -// categories, with associated rules: -// -// 1) Letter: -// ALetter, Hebrew_Letter, Numeric, ExtendNumLet, Extend. -// Rule: Never break between consecutive runes of this category. -// -// 2) Mid: -// Format, MidLetter, MidNumLet, Single_Quote. -// (Cf. case-ignorable: MidLetter, MidNumLet or cat is Mn, Me, Cf, Lm or Sk). -// Rule: Don't break between Letter and Mid, but break between two Mids. -// -// 3) Break: -// Any other category, including NewLine, CR, LF and Double_Quote. These -// categories should always result in a break between two cased letters. -// Rule: Always break. -// -// Note 1: the Katakana and MidNum categories can, in esoteric cases, result in -// preventing a break between two cased letters. For now we will ignore this -// (e.g. [ALetter] [ExtendNumLet] [Katakana] [ExtendNumLet] [ALetter] and -// [ALetter] [Numeric] [MidNum] [Numeric] [ALetter].) -// -// Note 2: the rule for Mid is very approximate, but works in most cases. 
To -// improve, we could store the categories in the trie value and use a FA to -// manage breaks. See TODO comment above. -// -// Note 3: according to the spec, it is possible for the Extend category to -// introduce breaks between other categories grouped in Letter. However, this -// is undesirable for our purposes. ICU prevents breaks in such cases as well. - -// isBreak returns whether this rune should introduce a break. -func (c info) isBreak() bool { - return c.cccVal() == cccBreak -} - -// isLetter returns whether the rune is of break type ALetter, Hebrew_Letter, -// Numeric, ExtendNumLet, or Extend. -func (c info) isLetter() bool { - ccc := c.cccVal() - if ccc == cccZero { - return !c.isCaseIgnorable() - } - return ccc != cccBreak -} diff --git a/vendor/golang.org/x/text/cases/map.go b/vendor/golang.org/x/text/cases/map.go deleted file mode 100644 index f2a8e96d0..000000000 --- a/vendor/golang.org/x/text/cases/map.go +++ /dev/null @@ -1,599 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cases - -// This file contains the definitions of case mappings for all supported -// languages. The rules for the language-specific tailorings were taken and -// modified from the CLDR transform definitions in common/transforms. - -import ( - "strings" - "unicode" - "unicode/utf8" - - "golang.org/x/text/language" - "golang.org/x/text/transform" - "golang.org/x/text/unicode/norm" -) - -// A mapFunc takes a context set to the current rune and writes the mapped -// version to the same context. It may advance the context to the next rune. It -// returns whether a checkpoint is possible: whether the pDst bytes written to -// dst so far won't need changing as we see more source bytes. -type mapFunc func(*context) bool - -// maxIgnorable defines the maximum number of ignorables to consider for -// lookahead operations. -const maxIgnorable = 30 - -// supported lists the language tags for which we have tailorings. -const supported = "und af az el lt nl tr" - -func init() { - tags := []language.Tag{} - for _, s := range strings.Split(supported, " ") { - tags = append(tags, language.MustParse(s)) - } - matcher = language.NewMatcher(tags) - Supported = language.NewCoverage(tags) -} - -var ( - matcher language.Matcher - - Supported language.Coverage - - // We keep the following lists separate, instead of having a single per- - // language struct, to give the compiler a chance to remove unused code. - - // Some uppercase mappers are stateless, so we can precompute the - // Transformers and save a bit on runtime allocations. 
- upperFunc = []mapFunc{ - nil, // und - nil, // af - aztrUpper(upper), // az - elUpper, // el - ltUpper(upper), // lt - nil, // nl - aztrUpper(upper), // tr - } - - undUpper transform.Transformer = &undUpperCaser{} - - lowerFunc = []mapFunc{ - lower, // und - lower, // af - aztrLower, // az - lower, // el - ltLower, // lt - lower, // nl - aztrLower, // tr - } - - titleInfos = []struct { - title, lower mapFunc - rewrite func(*context) - }{ - {title, lower, nil}, // und - {title, lower, afnlRewrite}, // af - {aztrUpper(title), aztrLower, nil}, // az - {title, lower, nil}, // el - {ltUpper(title), ltLower, nil}, // lt - {nlTitle, lower, afnlRewrite}, // nl - {aztrUpper(title), aztrLower, nil}, // tr - } -) - -func makeUpper(t language.Tag, o options) transform.Transformer { - _, i, _ := matcher.Match(t) - f := upperFunc[i] - if f == nil { - return undUpper - } - return &simpleCaser{f: f} -} - -func makeLower(t language.Tag, o options) transform.Transformer { - _, i, _ := matcher.Match(t) - f := lowerFunc[i] - if o.noFinalSigma { - return &simpleCaser{f: f} - } - return &lowerCaser{ - first: f, - midWord: finalSigma(f), - } -} - -func makeTitle(t language.Tag, o options) transform.Transformer { - _, i, _ := matcher.Match(t) - x := &titleInfos[i] - lower := x.lower - if o.noLower { - lower = (*context).copy - } else if !o.noFinalSigma { - lower = finalSigma(lower) - } - return &titleCaser{ - title: x.title, - lower: lower, - rewrite: x.rewrite, - } -} - -// TODO: consider a similar special case for the fast majority lower case. This -// is a bit more involved so will require some more precise benchmarking to -// justify it. - -type undUpperCaser struct{ transform.NopResetter } - -// undUpperCaser implements the Transformer interface for doing an upper case -// mapping for the root locale (und). It eliminates the need for an allocation -// as it prevents escaping by not using function pointers. -func (t *undUpperCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - c := context{dst: dst, src: src, atEOF: atEOF} - for c.next() { - upper(&c) - c.checkpoint() - } - return c.ret() -} - -type simpleCaser struct { - context - f mapFunc -} - -// simpleCaser implements the Transformer interface for doing a case operation -// on a rune-by-rune basis. -func (t *simpleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - t.context = context{dst: dst, src: src, atEOF: atEOF} - c := &t.context - for c.next() && t.f(c) { - c.checkpoint() - } - return c.ret() -} - -// lowerCaser implements the Transformer interface. The default Unicode lower -// casing requires different treatment for the first and subsequent characters -// of a word, most notably to handle the Greek final Sigma. -type lowerCaser struct { - context - - first, midWord mapFunc -} - -func (t *lowerCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - t.context = context{dst: dst, src: src, atEOF: atEOF} - c := &t.context - - for isInterWord := true; c.next(); { - if isInterWord { - if c.info.isCased() { - if !t.first(c) { - break - } - isInterWord = false - } else if !c.copy() { - break - } - } else { - if c.info.isNotCasedAndNotCaseIgnorable() { - if !c.copy() { - break - } - isInterWord = true - } else if !t.midWord(c) { - break - } - } - c.checkpoint() - } - return c.ret() -} - -// titleCaser implements the Transformer interface. Title casing algorithms -// distinguish between the first letter of a word and subsequent letters of the -// same word. 
It uses state to avoid requiring a potentially infinite lookahead. -type titleCaser struct { - context - - // rune mappings used by the actual casing algorithms. - title, lower mapFunc - - rewrite func(*context) -} - -// Transform implements the standard Unicode title case algorithm as defined in -// Chapter 3 of The Unicode Standard: -// toTitlecase(X): Find the word boundaries in X according to Unicode Standard -// Annex #29, "Unicode Text Segmentation." For each word boundary, find the -// first cased character F following the word boundary. If F exists, map F to -// Titlecase_Mapping(F); then map all characters C between F and the following -// word boundary to Lowercase_Mapping(C). -func (t *titleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - t.context = context{dst: dst, src: src, atEOF: atEOF, isMidWord: t.isMidWord} - c := &t.context - - if !c.next() { - return c.ret() - } - - for { - p := c.info - if t.rewrite != nil { - t.rewrite(c) - } - - wasMid := p.isCaseIgnorableAndNonBreakStarter() - // Break out of this loop on failure to ensure we do not modify the - // state incorrectly. - if p.isCased() && !p.isCaseIgnorableAndNotCased() { - if !c.isMidWord { - if !t.title(c) { - break - } - c.isMidWord = true - } else if !t.lower(c) { - break - } - } else if !c.copy() { - break - } - - // TODO: make this an "else if" if we can prove that no rune that does - // not match the first condition of the if statement can be a break. - if p.isBreak() { - c.isMidWord = false - } - - // As we save the state of the transformer, it is safe to call - // checkpoint after any successful write. - c.checkpoint() - - if !c.next() { - break - } - if wasMid && c.info.isCaseIgnorableAndNonBreakStarter() { - c.isMidWord = false - } - } - return c.ret() -} - -// finalSigma adds Greek final Sigma handing to another casing function. It -// determines whether a lowercased sigma should be σ or ς, by looking ahead for -// case-ignorables and a cased letters. -func finalSigma(f mapFunc) mapFunc { - return func(c *context) bool { - // ::NFD(); - // # 03A3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK CAPITAL LETTER SIGMA - // Σ } [:case-ignorable:]* [:cased:] → σ; - // [:cased:] [:case-ignorable:]* { Σ → ς; - // ::Any-Lower; - // ::NFC(); - - if !c.hasPrefix("Σ") { - return f(c) - } - - p := c.pDst - c.writeString("ς") - // We need to do one more iteration after maxIgnorable, as a cased - // letter is not an ignorable and may modify the result. - for i := 0; i < maxIgnorable+1; i++ { - if !c.next() { - return false - } - if !c.info.isCaseIgnorable() { - if c.info.isCased() { - // p+1 is guaranteed to be in bounds: if writing ς was - // successful, p+1 will contain the second byte of ς. If not, - // this function will have returned after c.next returned false. - c.dst[p+1]++ // ς → σ - } - c.unreadRune() - return true - } - // A case ignorable may also introduce a word break, so we may need - // to continue searching even after detecting a break. - c.isMidWord = c.isMidWord && !c.info.isBreak() - c.copy() - } - return true - } -} - -// elUpper implements Greek upper casing, which entails removing a predefined -// set of non-blocked modifiers. Note that these accents should not be removed -// for title casing! -// Example: "Οδός" -> "ΟΔΟΣ". -func elUpper(c *context) bool { - // From CLDR: - // [:Greek:] [^[:ccc=Not_Reordered:][:ccc=Above:]]*? { [\u0313\u0314\u0301\u0300\u0306\u0342\u0308\u0304] → ; - // [:Greek:] [^[:ccc=Not_Reordered:][:ccc=Iota_Subscript:]]*? 
{ \u0345 → ; - - r, _ := utf8.DecodeRune(c.src[c.pSrc:]) - oldPDst := c.pDst - if !upper(c) { - return false - } - if !unicode.Is(unicode.Greek, r) { - return true - } - i := 0 - // Take the properties of the uppercased rune that is already written to the - // destination. This saves us the trouble of having to uppercase the - // decomposed rune again. - if b := norm.NFD.Properties(c.dst[oldPDst:]).Decomposition(); b != nil { - // Restore the destination position and process the decomposed rune. - r, sz := utf8.DecodeRune(b) - if r <= 0xFF { // See A.6.1 - return true - } - c.pDst = oldPDst - // Insert the first rune and ignore the modifiers. See A.6.2. - c.writeBytes(b[:sz]) - i = len(b[sz:]) / 2 // Greek modifiers are always of length 2. - } - - for ; i < maxIgnorable && c.next(); i++ { - switch r, _ := utf8.DecodeRune(c.src[c.pSrc:]); r { - // Above and Iota Subscript - case 0x0300, // U+0300 COMBINING GRAVE ACCENT - 0x0301, // U+0301 COMBINING ACUTE ACCENT - 0x0304, // U+0304 COMBINING MACRON - 0x0306, // U+0306 COMBINING BREVE - 0x0308, // U+0308 COMBINING DIAERESIS - 0x0313, // U+0313 COMBINING COMMA ABOVE - 0x0314, // U+0314 COMBINING REVERSED COMMA ABOVE - 0x0342, // U+0342 COMBINING GREEK PERISPOMENI - 0x0345: // U+0345 COMBINING GREEK YPOGEGRAMMENI - // No-op. Gobble the modifier. - - default: - switch v, _ := trie.lookup(c.src[c.pSrc:]); info(v).cccType() { - case cccZero: - c.unreadRune() - return true - - // We don't need to test for IotaSubscript as the only rune that - // qualifies (U+0345) was already excluded in the switch statement - // above. See A.4. - - case cccAbove: - return c.copy() - default: - // Some other modifier. We're still allowed to gobble Greek - // modifiers after this. - c.copy() - } - } - } - return i == maxIgnorable -} - -func ltLower(c *context) bool { - // From CLDR: - // # Introduce an explicit dot above when lowercasing capital I's and J's - // # whenever there are more accents above. - // # (of the accents used in Lithuanian: grave, acute, tilde above, and ogonek) - // # 0049; 0069 0307; 0049; 0049; lt More_Above; # LATIN CAPITAL LETTER I - // # 004A; 006A 0307; 004A; 004A; lt More_Above; # LATIN CAPITAL LETTER J - // # 012E; 012F 0307; 012E; 012E; lt More_Above; # LATIN CAPITAL LETTER I WITH OGONEK - // # 00CC; 0069 0307 0300; 00CC; 00CC; lt; # LATIN CAPITAL LETTER I WITH GRAVE - // # 00CD; 0069 0307 0301; 00CD; 00CD; lt; # LATIN CAPITAL LETTER I WITH ACUTE - // # 0128; 0069 0307 0303; 0128; 0128; lt; # LATIN CAPITAL LETTER I WITH TILDE - // ::NFD(); - // I } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0307; - // J } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → j \u0307; - // Į } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → į \u0307; - // Ì → i \u0307 \u0300; - // Í → i \u0307 \u0301; - // Ĩ → i \u0307 \u0303; - // ::Any-Lower(); - // ::NFC(); - - i := 0 - if r := c.src[c.pSrc]; r < utf8.RuneSelf { - lower(c) - if r != 'I' && r != 'J' { - return true - } - } else { - p := norm.NFD.Properties(c.src[c.pSrc:]) - if d := p.Decomposition(); len(d) >= 3 && (d[0] == 'I' || d[0] == 'J') { - // UTF-8 optimization: the decomposition will only have an above - // modifier if the last rune of the decomposition is in [U+300-U+311]. - // In all other cases, a decomposition starting with I is always - // an I followed by modifiers that are not cased themselves. See A.2. - if d[1] == 0xCC && d[2] <= 0x91 { // A.2.4. 
- if !c.writeBytes(d[:1]) { - return false - } - c.dst[c.pDst-1] += 'a' - 'A' // lower - - // Assumption: modifier never changes on lowercase. See A.1. - // Assumption: all modifiers added have CCC = Above. See A.2.3. - return c.writeString("\u0307") && c.writeBytes(d[1:]) - } - // In all other cases the additional modifiers will have a CCC - // that is less than 230 (Above). We will insert the U+0307, if - // needed, after these modifiers so that a string in FCD form - // will remain so. See A.2.2. - lower(c) - i = 1 - } else { - return lower(c) - } - } - - for ; i < maxIgnorable && c.next(); i++ { - switch c.info.cccType() { - case cccZero: - c.unreadRune() - return true - case cccAbove: - return c.writeString("\u0307") && c.copy() // See A.1. - default: - c.copy() // See A.1. - } - } - return i == maxIgnorable -} - -func ltUpper(f mapFunc) mapFunc { - return func(c *context) bool { - // From CLDR: - // ::NFD(); - // [:Soft_Dotted:] [^[:ccc=Not_Reordered:][:ccc=Above:]]* { \u0307 → ; - // ::Any-Upper(); - // ::NFC(); - - // TODO: See A.5. A soft-dotted rune never has an exception. This would - // allow us to overload the exception bit and encode this property in - // info. Need to measure performance impact of this. - r, _ := utf8.DecodeRune(c.src[c.pSrc:]) - oldPDst := c.pDst - if !f(c) { - return false - } - if !unicode.Is(unicode.Soft_Dotted, r) { - return true - } - - // We don't need to do an NFD normalization, as a soft-dotted rune never - // contains U+0307. See A.3. - - i := 0 - for ; i < maxIgnorable && c.next(); i++ { - switch c.info.cccType() { - case cccZero: - c.unreadRune() - return true - case cccAbove: - if c.hasPrefix("\u0307") { - // We don't do a full NFC, but rather combine runes for - // some of the common cases. (Returning NFC or - // preserving normal form is neither a requirement nor - // a possibility anyway). - if !c.next() { - return false - } - if c.dst[oldPDst] == 'I' && c.pDst == oldPDst+1 && c.src[c.pSrc] == 0xcc { - s := "" - switch c.src[c.pSrc+1] { - case 0x80: // U+0300 COMBINING GRAVE ACCENT - s = "\u00cc" // U+00CC LATIN CAPITAL LETTER I WITH GRAVE - case 0x81: // U+0301 COMBINING ACUTE ACCENT - s = "\u00cd" // U+00CD LATIN CAPITAL LETTER I WITH ACUTE - case 0x83: // U+0303 COMBINING TILDE - s = "\u0128" // U+0128 LATIN CAPITAL LETTER I WITH TILDE - case 0x88: // U+0308 COMBINING DIAERESIS - s = "\u00cf" // U+00CF LATIN CAPITAL LETTER I WITH DIAERESIS - default: - } - if s != "" { - c.pDst = oldPDst - return c.writeString(s) - } - } - } - return c.copy() - default: - c.copy() - } - } - return i == maxIgnorable - } -} - -func aztrUpper(f mapFunc) mapFunc { - return func(c *context) bool { - // i→İ; - if c.src[c.pSrc] == 'i' { - return c.writeString("İ") - } - return f(c) - } -} - -func aztrLower(c *context) (done bool) { - // From CLDR: - // # I and i-dotless; I-dot and i are case pairs in Turkish and Azeri - // # 0130; 0069; 0130; 0130; tr; # LATIN CAPITAL LETTER I WITH DOT ABOVE - // İ→i; - // # When lowercasing, remove dot_above in the sequence I + dot_above, which will turn into i. - // # This matches the behavior of the canonically equivalent I-dot_above - // # 0307; ; 0307; 0307; tr After_I; # COMBINING DOT ABOVE - // # When lowercasing, unless an I is before a dot_above, it turns into a dotless i. 
- // # 0049; 0131; 0049; 0049; tr Not_Before_Dot; # LATIN CAPITAL LETTER I - // I([^[:ccc=Not_Reordered:][:ccc=Above:]]*)\u0307 → i$1 ; - // I→ı ; - // ::Any-Lower(); - if c.hasPrefix("\u0130") { // İ - return c.writeString("i") - } - if c.src[c.pSrc] != 'I' { - return lower(c) - } - - // We ignore the lower-case I for now, but insert it later when we know - // which form we need. - start := c.pSrc + c.sz - - i := 0 -Loop: - // We check for up to n ignorables before \u0307. As \u0307 is an - // ignorable as well, n is maxIgnorable-1. - for ; i < maxIgnorable && c.next(); i++ { - switch c.info.cccType() { - case cccAbove: - if c.hasPrefix("\u0307") { - return c.writeString("i") && c.writeBytes(c.src[start:c.pSrc]) // ignore U+0307 - } - done = true - break Loop - case cccZero: - c.unreadRune() - done = true - break Loop - default: - // We'll write this rune after we know which starter to use. - } - } - if i == maxIgnorable { - done = true - } - return c.writeString("ı") && c.writeBytes(c.src[start:c.pSrc+c.sz]) && done -} - -func nlTitle(c *context) bool { - // From CLDR: - // # Special titlecasing for Dutch initial "ij". - // ::Any-Title(); - // # Fix up Ij at the beginning of a "word" (per Any-Title, notUAX #29) - // [:^WB=ALetter:] [:WB=Extend:]* [[:WB=MidLetter:][:WB=MidNumLet:]]? { Ij } → IJ ; - if c.src[c.pSrc] != 'I' && c.src[c.pSrc] != 'i' { - return title(c) - } - - if !c.writeString("I") || !c.next() { - return false - } - if c.src[c.pSrc] == 'j' || c.src[c.pSrc] == 'J' { - return c.writeString("J") - } - c.unreadRune() - return true -} - -// Not part of CLDR, but see http://unicode.org/cldr/trac/ticket/7078. -func afnlRewrite(c *context) { - if c.hasPrefix("'") || c.hasPrefix("’") { - c.isMidWord = true - } -} diff --git a/vendor/golang.org/x/text/cases/tables.go b/vendor/golang.org/x/text/cases/tables.go deleted file mode 100644 index 4257e57ae..000000000 --- a/vendor/golang.org/x/text/cases/tables.go +++ /dev/null @@ -1,2213 +0,0 @@ -// This file was generated by go generate; DO NOT EDIT - -package cases - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "9.0.0" - -var xorData string = "" + // Size: 185 bytes - "\x00\x06\x07\x00\x01?\x00\x0f\x03\x00\x0f\x12\x00\x0f\x1f\x00\x0f\x1d" + - "\x00\x01\x13\x00\x0f\x16\x00\x0f\x0b\x00\x0f3\x00\x0f7\x00\x01#\x00\x0f?" 
+ - "\x00\x0e'\x00\x0f/\x00\x0e>\x00\x0f*\x00\x0c&\x00\x0c*\x00\x0c;\x00\x0c9" + - "\x00\x0c%\x00\x01\x08\x00\x03\x0d\x00\x03\x09\x00\x02\x06\x00\x02\x02" + - "\x00\x02\x0c\x00\x01\x00\x00\x01\x03\x00\x01\x01\x00\x01 \x00\x01\x0c" + - "\x00\x01\x10\x00\x03\x10\x00\x036 \x00\x037 \x00\x0b#\x10\x00\x0b 0\x00" + - "\x0b!\x10\x00\x0b!0\x00\x0b(\x04\x00\x03\x04\x1e\x00\x03\x0a\x00\x02:" + - "\x00\x02>\x00\x02,\x00\x02\x00\x00\x02\x10\x00\x01<\x00\x01&\x00\x01*" + - "\x00\x01.\x00\x010\x003 \x00\x01\x18\x00\x01(\x00\x01\x1e\x00\x01\x22" - -var exceptions string = "" + // Size: 1852 bytes - "\x00\x12\x10μΜ\x12\x12ssSSSs\x13\x18i̇i̇\x10\x08I\x13\x18ʼnʼN\x11\x08sS" + - "\x12\x12dždžDž\x12\x12dždžDŽ\x10\x12DŽDž\x12\x12ljljLj\x12\x12ljljLJ\x10\x12LJLj\x12\x12" + - "njnjNj\x12\x12njnjNJ\x10\x12NJNj\x13\x18ǰJ̌\x12\x12dzdzDz\x12\x12dzdzDZ\x10\x12DZDz" + - "\x13\x18ⱥⱥ\x13\x18ⱦⱦ\x10\x18Ȿ\x10\x18Ɀ\x10\x18Ɐ\x10\x18Ɑ\x10\x18Ɒ\x10" + - "\x18Ɜ\x10\x18Ɡ\x10\x18Ɥ\x10\x18Ɦ\x10\x18Ɪ\x10\x18Ɫ\x10\x18Ɬ\x10\x18Ɱ\x10" + - "\x18Ɽ\x10\x18Ʇ\x10\x18Ʝ\x10\x18Ʞ2\x10ιΙ\x160ΐΪ́\x160ΰΫ́\x12\x10σΣ" + - "\x12\x10βΒ\x12\x10θΘ\x12\x10φΦ\x12\x10πΠ\x12\x10κΚ\x12\x10ρΡ\x12\x10εΕ" + - "\x14$եւԵՒԵւ\x12\x10вВ\x12\x10дД\x12\x10оО\x12\x10сС\x12\x10тТ\x12\x10тТ" + - "\x12\x10ъЪ\x12\x10ѣѢ\x13\x18ꙋꙊ\x13\x18ẖH̱\x13\x18ẗT̈\x13\x18ẘW̊\x13" + - "\x18ẙY̊\x13\x18aʾAʾ\x13\x18ṡṠ\x12\x10ssß\x14 ὐΥ̓\x160ὒΥ̓̀\x160ὔΥ̓́" + - "\x160ὖΥ̓͂\x15+ἀιἈΙᾈ\x15+ἁιἉΙᾉ\x15+ἂιἊΙᾊ\x15+ἃιἋΙᾋ\x15+ἄιἌΙᾌ\x15+ἅιἍΙᾍ" + - "\x15+ἆιἎΙᾎ\x15+ἇιἏΙᾏ\x15\x1dἀιᾀἈΙ\x15\x1dἁιᾁἉΙ\x15\x1dἂιᾂἊΙ\x15\x1dἃιᾃἋΙ" + - "\x15\x1dἄιᾄἌΙ\x15\x1dἅιᾅἍΙ\x15\x1dἆιᾆἎΙ\x15\x1dἇιᾇἏΙ\x15+ἠιἨΙᾘ\x15+ἡιἩΙᾙ" + - "\x15+ἢιἪΙᾚ\x15+ἣιἫΙᾛ\x15+ἤιἬΙᾜ\x15+ἥιἭΙᾝ\x15+ἦιἮΙᾞ\x15+ἧιἯΙᾟ\x15\x1dἠιᾐἨ" + - "Ι\x15\x1dἡιᾑἩΙ\x15\x1dἢιᾒἪΙ\x15\x1dἣιᾓἫΙ\x15\x1dἤιᾔἬΙ\x15\x1dἥιᾕἭΙ\x15" + - "\x1dἦιᾖἮΙ\x15\x1dἧιᾗἯΙ\x15+ὠιὨΙᾨ\x15+ὡιὩΙᾩ\x15+ὢιὪΙᾪ\x15+ὣιὫΙᾫ\x15+ὤιὬΙᾬ" + - "\x15+ὥιὭΙᾭ\x15+ὦιὮΙᾮ\x15+ὧιὯΙᾯ\x15\x1dὠιᾠὨΙ\x15\x1dὡιᾡὩΙ\x15\x1dὢιᾢὪΙ" + - "\x15\x1dὣιᾣὫΙ\x15\x1dὤιᾤὬΙ\x15\x1dὥιᾥὭΙ\x15\x1dὦιᾦὮΙ\x15\x1dὧιᾧὯΙ\x15-ὰι" + - "ᾺΙᾺͅ\x14#αιΑΙᾼ\x14$άιΆΙΆͅ\x14 ᾶΑ͂\x166ᾶιΑ͂Ιᾼ͂\x14\x1cαιᾳΑΙ\x12\x10ι" + - "Ι\x15-ὴιῊΙῊͅ\x14#ηιΗΙῌ\x14$ήιΉΙΉͅ\x14 ῆΗ͂\x166ῆιΗ͂Ιῌ͂\x14\x1cηιῃΗΙ" + - "\x160ῒΪ̀\x160ΐΪ́\x14 ῖΙ͂\x160ῗΪ͂\x160ῢΫ̀\x160ΰΫ́\x14 ῤΡ" + - "̓\x14 ῦΥ͂\x160ῧΫ͂\x15-ὼιῺΙῺͅ\x14#ωιΩΙῼ\x14$ώιΏΙΏͅ\x14 ῶΩ͂\x166ῶιΩ" + - "͂Ιῼ͂\x14\x1cωιῳΩΙ\x12\x10ωω\x11\x08kk\x12\x10åå\x12\x10ɫɫ\x12\x10ɽɽ" + - "\x10\x10Ⱥ\x10\x10Ⱦ\x12\x10ɑɑ\x12\x10ɱɱ\x12\x10ɐɐ\x12\x10ɒɒ\x12\x10ȿȿ\x12" + - "\x10ɀɀ\x12\x10ɥɥ\x12\x10ɦɦ\x12\x10ɜɜ\x12\x10ɡɡ\x12\x10ɬɬ\x12\x10ɪɪ\x12" + - "\x10ʞʞ\x12\x10ʇʇ\x12\x10ʝʝ\x12\x12ffFFFf\x12\x12fiFIFi\x12\x12flFLFl\x13" + - "\x1bffiFFIFfi\x13\x1bfflFFLFfl\x12\x12stSTSt\x12\x12stSTSt\x14$մնՄՆՄն" + - "\x14$մեՄԵՄե\x14$միՄԻՄի\x14$վնՎՆՎն\x14$մխՄԽՄխ" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *caseTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return caseValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := caseIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. 
- } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := caseIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = caseIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := caseIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = caseIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = caseIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *caseTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return caseValues[c0] - } - i := caseIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = caseIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = caseIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *caseTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return caseValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := caseIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := caseIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = caseIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := caseIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = caseIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = caseIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *caseTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return caseValues[c0] - } - i := caseIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = caseIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = caseIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// caseTrie. Total size: 11750 bytes (11.47 KiB). Checksum: 9ec26581a3006e0d. -type caseTrie struct{} - -func newCaseTrie(i int) *caseTrie { - return &caseTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *caseTrie) lookupValue(n uint32, b byte) uint16 { - switch { - case n < 18: - return uint16(caseValues[n<<6+uint32(b)]) - default: - n -= 18 - return uint16(sparse.lookup(n, b)) - } -} - -// caseValues: 20 blocks, 1280 entries, 2560 bytes -// The third block is the zero block. -var caseValues = [1280]uint16{ - // Block 0x0, offset 0x0 - 0x27: 0x0014, - 0x2e: 0x0014, - 0x30: 0x0010, 0x31: 0x0010, 0x32: 0x0010, 0x33: 0x0010, 0x34: 0x0010, 0x35: 0x0010, - 0x36: 0x0010, 0x37: 0x0010, 0x38: 0x0010, 0x39: 0x0010, 0x3a: 0x0014, - // Block 0x1, offset 0x40 - 0x41: 0x2013, 0x42: 0x2013, 0x43: 0x2013, 0x44: 0x2013, 0x45: 0x2013, - 0x46: 0x2013, 0x47: 0x2013, 0x48: 0x2013, 0x49: 0x2013, 0x4a: 0x2013, 0x4b: 0x2013, - 0x4c: 0x2013, 0x4d: 0x2013, 0x4e: 0x2013, 0x4f: 0x2013, 0x50: 0x2013, 0x51: 0x2013, - 0x52: 0x2013, 0x53: 0x2013, 0x54: 0x2013, 0x55: 0x2013, 0x56: 0x2013, 0x57: 0x2013, - 0x58: 0x2013, 0x59: 0x2013, 0x5a: 0x2013, - 0x5e: 0x0004, 0x5f: 0x0010, 0x60: 0x0004, 0x61: 0x2012, 0x62: 0x2012, 0x63: 0x2012, - 0x64: 0x2012, 0x65: 0x2012, 0x66: 0x2012, 0x67: 0x2012, 0x68: 0x2012, 0x69: 0x2012, - 0x6a: 0x2012, 0x6b: 0x2012, 0x6c: 0x2012, 0x6d: 0x2012, 0x6e: 0x2012, 0x6f: 0x2012, - 0x70: 0x2012, 0x71: 0x2012, 0x72: 0x2012, 0x73: 0x2012, 0x74: 0x2012, 0x75: 0x2012, - 0x76: 0x2012, 0x77: 0x2012, 0x78: 0x2012, 0x79: 0x2012, 0x7a: 0x2012, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc0: 0x0852, 0xc1: 0x0b53, 0xc2: 0x0113, 0xc3: 0x0112, 0xc4: 0x0113, 0xc5: 0x0112, - 0xc6: 0x0b53, 0xc7: 0x0f13, 0xc8: 0x0f12, 0xc9: 0x0e53, 0xca: 0x1153, 0xcb: 0x0713, - 0xcc: 0x0712, 0xcd: 0x0012, 0xce: 0x1453, 0xcf: 0x1753, 0xd0: 0x1a53, 0xd1: 0x0313, - 0xd2: 0x0312, 0xd3: 0x1d53, 0xd4: 0x2053, 0xd5: 0x2352, 0xd6: 0x2653, 0xd7: 0x2653, - 0xd8: 0x0113, 0xd9: 0x0112, 0xda: 0x2952, 0xdb: 0x0012, 0xdc: 0x1d53, 0xdd: 0x2c53, - 0xde: 0x2f52, 0xdf: 0x3253, 0xe0: 0x0113, 0xe1: 0x0112, 0xe2: 0x0113, 0xe3: 0x0112, - 0xe4: 0x0113, 0xe5: 0x0112, 0xe6: 0x3553, 0xe7: 0x0f13, 0xe8: 0x0f12, 0xe9: 0x3853, - 0xea: 0x0012, 0xeb: 0x0012, 0xec: 0x0113, 0xed: 0x0112, 0xee: 0x3553, 0xef: 0x1f13, - 0xf0: 0x1f12, 0xf1: 0x3b53, 0xf2: 0x3e53, 0xf3: 0x0713, 0xf4: 0x0712, 0xf5: 0x0313, - 0xf6: 0x0312, 0xf7: 0x4153, 0xf8: 0x0113, 0xf9: 0x0112, 0xfa: 0x0012, 0xfb: 0x0010, - 0xfc: 0x0113, 0xfd: 0x0112, 0xfe: 0x0012, 0xff: 0x4452, - // Block 0x4, offset 0x100 - 0x100: 0x0010, 0x101: 0x0010, 0x102: 0x0010, 0x103: 0x0010, 0x104: 0x04cb, 0x105: 0x05c9, - 0x106: 0x06ca, 0x107: 0x078b, 0x108: 0x0889, 0x109: 0x098a, 0x10a: 0x0a4b, 0x10b: 0x0b49, - 0x10c: 0x0c4a, 0x10d: 0x0313, 0x10e: 0x0312, 0x10f: 0x1f13, 0x110: 0x1f12, 0x111: 0x0313, - 0x112: 0x0312, 0x113: 0x0713, 0x114: 0x0712, 0x115: 0x0313, 0x116: 0x0312, 0x117: 0x0f13, - 0x118: 0x0f12, 0x119: 0x0313, 0x11a: 0x0312, 0x11b: 0x0713, 0x11c: 0x0712, 0x11d: 0x1452, - 0x11e: 
0x0113, 0x11f: 0x0112, 0x120: 0x0113, 0x121: 0x0112, 0x122: 0x0113, 0x123: 0x0112, - 0x124: 0x0113, 0x125: 0x0112, 0x126: 0x0113, 0x127: 0x0112, 0x128: 0x0113, 0x129: 0x0112, - 0x12a: 0x0113, 0x12b: 0x0112, 0x12c: 0x0113, 0x12d: 0x0112, 0x12e: 0x0113, 0x12f: 0x0112, - 0x130: 0x0d0a, 0x131: 0x0e0b, 0x132: 0x0f09, 0x133: 0x100a, 0x134: 0x0113, 0x135: 0x0112, - 0x136: 0x2353, 0x137: 0x4453, 0x138: 0x0113, 0x139: 0x0112, 0x13a: 0x0113, 0x13b: 0x0112, - 0x13c: 0x0113, 0x13d: 0x0112, 0x13e: 0x0113, 0x13f: 0x0112, - // Block 0x5, offset 0x140 - 0x140: 0x136a, 0x141: 0x0313, 0x142: 0x0312, 0x143: 0x0853, 0x144: 0x4753, 0x145: 0x4a53, - 0x146: 0x0113, 0x147: 0x0112, 0x148: 0x0113, 0x149: 0x0112, 0x14a: 0x0113, 0x14b: 0x0112, - 0x14c: 0x0113, 0x14d: 0x0112, 0x14e: 0x0113, 0x14f: 0x0112, 0x150: 0x140a, 0x151: 0x14aa, - 0x152: 0x154a, 0x153: 0x0b52, 0x154: 0x0b52, 0x155: 0x0012, 0x156: 0x0e52, 0x157: 0x1152, - 0x158: 0x0012, 0x159: 0x1752, 0x15a: 0x0012, 0x15b: 0x1a52, 0x15c: 0x15ea, 0x15d: 0x0012, - 0x15e: 0x0012, 0x15f: 0x0012, 0x160: 0x1d52, 0x161: 0x168a, 0x162: 0x0012, 0x163: 0x2052, - 0x164: 0x0012, 0x165: 0x172a, 0x166: 0x17ca, 0x167: 0x0012, 0x168: 0x2652, 0x169: 0x2652, - 0x16a: 0x186a, 0x16b: 0x190a, 0x16c: 0x19aa, 0x16d: 0x0012, 0x16e: 0x0012, 0x16f: 0x1d52, - 0x170: 0x0012, 0x171: 0x1a4a, 0x172: 0x2c52, 0x173: 0x0012, 0x174: 0x0012, 0x175: 0x3252, - 0x176: 0x0012, 0x177: 0x0012, 0x178: 0x0012, 0x179: 0x0012, 0x17a: 0x0012, 0x17b: 0x0012, - 0x17c: 0x0012, 0x17d: 0x1aea, 0x17e: 0x0012, 0x17f: 0x0012, - // Block 0x6, offset 0x180 - 0x180: 0x3552, 0x181: 0x0012, 0x182: 0x0012, 0x183: 0x3852, 0x184: 0x0012, 0x185: 0x0012, - 0x186: 0x0012, 0x187: 0x1b8a, 0x188: 0x3552, 0x189: 0x4752, 0x18a: 0x3b52, 0x18b: 0x3e52, - 0x18c: 0x4a52, 0x18d: 0x0012, 0x18e: 0x0012, 0x18f: 0x0012, 0x190: 0x0012, 0x191: 0x0012, - 0x192: 0x4152, 0x193: 0x0012, 0x194: 0x0010, 0x195: 0x0012, 0x196: 0x0012, 0x197: 0x0012, - 0x198: 0x0012, 0x199: 0x0012, 0x19a: 0x0012, 0x19b: 0x0012, 0x19c: 0x0012, 0x19d: 0x1c2a, - 0x19e: 0x1cca, 0x19f: 0x0012, 0x1a0: 0x0012, 0x1a1: 0x0012, 0x1a2: 0x0012, 0x1a3: 0x0012, - 0x1a4: 0x0012, 0x1a5: 0x0012, 0x1a6: 0x0012, 0x1a7: 0x0012, 0x1a8: 0x0012, 0x1a9: 0x0012, - 0x1aa: 0x0012, 0x1ab: 0x0012, 0x1ac: 0x0012, 0x1ad: 0x0012, 0x1ae: 0x0012, 0x1af: 0x0012, - 0x1b0: 0x0015, 0x1b1: 0x0015, 0x1b2: 0x0015, 0x1b3: 0x0015, 0x1b4: 0x0015, 0x1b5: 0x0015, - 0x1b6: 0x0015, 0x1b7: 0x0015, 0x1b8: 0x0015, 0x1b9: 0x0014, 0x1ba: 0x0014, 0x1bb: 0x0014, - 0x1bc: 0x0014, 0x1bd: 0x0014, 0x1be: 0x0014, 0x1bf: 0x0014, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0024, 0x1c1: 0x0024, 0x1c2: 0x0024, 0x1c3: 0x0024, 0x1c4: 0x0024, 0x1c5: 0x1d6d, - 0x1c6: 0x0024, 0x1c7: 0x0034, 0x1c8: 0x0034, 0x1c9: 0x0034, 0x1ca: 0x0024, 0x1cb: 0x0024, - 0x1cc: 0x0024, 0x1cd: 0x0034, 0x1ce: 0x0034, 0x1cf: 0x0014, 0x1d0: 0x0024, 0x1d1: 0x0024, - 0x1d2: 0x0024, 0x1d3: 0x0034, 0x1d4: 0x0034, 0x1d5: 0x0034, 0x1d6: 0x0034, 0x1d7: 0x0024, - 0x1d8: 0x0034, 0x1d9: 0x0034, 0x1da: 0x0034, 0x1db: 0x0024, 0x1dc: 0x0034, 0x1dd: 0x0034, - 0x1de: 0x0034, 0x1df: 0x0034, 0x1e0: 0x0034, 0x1e1: 0x0034, 0x1e2: 0x0034, 0x1e3: 0x0024, - 0x1e4: 0x0024, 0x1e5: 0x0024, 0x1e6: 0x0024, 0x1e7: 0x0024, 0x1e8: 0x0024, 0x1e9: 0x0024, - 0x1ea: 0x0024, 0x1eb: 0x0024, 0x1ec: 0x0024, 0x1ed: 0x0024, 0x1ee: 0x0024, 0x1ef: 0x0024, - 0x1f0: 0x0113, 0x1f1: 0x0112, 0x1f2: 0x0113, 0x1f3: 0x0112, 0x1f4: 0x0014, 0x1f5: 0x0004, - 0x1f6: 0x0113, 0x1f7: 0x0112, 0x1fa: 0x0015, 0x1fb: 0x4d52, - 0x1fc: 0x5052, 0x1fd: 0x5052, 0x1ff: 0x5353, - // Block 0x8, offset 0x200 - 0x204: 0x0004, 0x205: 
0x0004, - 0x206: 0x2a13, 0x207: 0x0014, 0x208: 0x2513, 0x209: 0x2713, 0x20a: 0x2513, - 0x20c: 0x5653, 0x20e: 0x5953, 0x20f: 0x5c53, 0x210: 0x1e2a, 0x211: 0x2013, - 0x212: 0x2013, 0x213: 0x2013, 0x214: 0x2013, 0x215: 0x2013, 0x216: 0x2013, 0x217: 0x2013, - 0x218: 0x2013, 0x219: 0x2013, 0x21a: 0x2013, 0x21b: 0x2013, 0x21c: 0x2013, 0x21d: 0x2013, - 0x21e: 0x2013, 0x21f: 0x2013, 0x220: 0x5f53, 0x221: 0x5f53, 0x223: 0x5f53, - 0x224: 0x5f53, 0x225: 0x5f53, 0x226: 0x5f53, 0x227: 0x5f53, 0x228: 0x5f53, 0x229: 0x5f53, - 0x22a: 0x5f53, 0x22b: 0x5f53, 0x22c: 0x2a12, 0x22d: 0x2512, 0x22e: 0x2712, 0x22f: 0x2512, - 0x230: 0x1fea, 0x231: 0x2012, 0x232: 0x2012, 0x233: 0x2012, 0x234: 0x2012, 0x235: 0x2012, - 0x236: 0x2012, 0x237: 0x2012, 0x238: 0x2012, 0x239: 0x2012, 0x23a: 0x2012, 0x23b: 0x2012, - 0x23c: 0x2012, 0x23d: 0x2012, 0x23e: 0x2012, 0x23f: 0x2012, - // Block 0x9, offset 0x240 - 0x240: 0x5f52, 0x241: 0x5f52, 0x242: 0x21aa, 0x243: 0x5f52, 0x244: 0x5f52, 0x245: 0x5f52, - 0x246: 0x5f52, 0x247: 0x5f52, 0x248: 0x5f52, 0x249: 0x5f52, 0x24a: 0x5f52, 0x24b: 0x5f52, - 0x24c: 0x5652, 0x24d: 0x5952, 0x24e: 0x5c52, 0x24f: 0x1813, 0x250: 0x226a, 0x251: 0x232a, - 0x252: 0x0013, 0x253: 0x0013, 0x254: 0x0013, 0x255: 0x23ea, 0x256: 0x24aa, 0x257: 0x1812, - 0x258: 0x0113, 0x259: 0x0112, 0x25a: 0x0113, 0x25b: 0x0112, 0x25c: 0x0113, 0x25d: 0x0112, - 0x25e: 0x0113, 0x25f: 0x0112, 0x260: 0x0113, 0x261: 0x0112, 0x262: 0x0113, 0x263: 0x0112, - 0x264: 0x0113, 0x265: 0x0112, 0x266: 0x0113, 0x267: 0x0112, 0x268: 0x0113, 0x269: 0x0112, - 0x26a: 0x0113, 0x26b: 0x0112, 0x26c: 0x0113, 0x26d: 0x0112, 0x26e: 0x0113, 0x26f: 0x0112, - 0x270: 0x256a, 0x271: 0x262a, 0x272: 0x0b12, 0x273: 0x5352, 0x274: 0x6253, 0x275: 0x26ea, - 0x277: 0x0f13, 0x278: 0x0f12, 0x279: 0x0b13, 0x27a: 0x0113, 0x27b: 0x0112, - 0x27c: 0x0012, 0x27d: 0x4d53, 0x27e: 0x5053, 0x27f: 0x5053, - // Block 0xa, offset 0x280 - 0x280: 0x0812, 0x281: 0x0812, 0x282: 0x0812, 0x283: 0x0812, 0x284: 0x0812, 0x285: 0x0812, - 0x288: 0x0813, 0x289: 0x0813, 0x28a: 0x0813, 0x28b: 0x0813, - 0x28c: 0x0813, 0x28d: 0x0813, 0x290: 0x372a, 0x291: 0x0812, - 0x292: 0x386a, 0x293: 0x0812, 0x294: 0x3a2a, 0x295: 0x0812, 0x296: 0x3bea, 0x297: 0x0812, - 0x299: 0x0813, 0x29b: 0x0813, 0x29d: 0x0813, - 0x29f: 0x0813, 0x2a0: 0x0812, 0x2a1: 0x0812, 0x2a2: 0x0812, 0x2a3: 0x0812, - 0x2a4: 0x0812, 0x2a5: 0x0812, 0x2a6: 0x0812, 0x2a7: 0x0812, 0x2a8: 0x0813, 0x2a9: 0x0813, - 0x2aa: 0x0813, 0x2ab: 0x0813, 0x2ac: 0x0813, 0x2ad: 0x0813, 0x2ae: 0x0813, 0x2af: 0x0813, - 0x2b0: 0x8b52, 0x2b1: 0x8b52, 0x2b2: 0x8e52, 0x2b3: 0x8e52, 0x2b4: 0x9152, 0x2b5: 0x9152, - 0x2b6: 0x9452, 0x2b7: 0x9452, 0x2b8: 0x9752, 0x2b9: 0x9752, 0x2ba: 0x9a52, 0x2bb: 0x9a52, - 0x2bc: 0x4d52, 0x2bd: 0x4d52, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x3daa, 0x2c1: 0x3f8a, 0x2c2: 0x416a, 0x2c3: 0x434a, 0x2c4: 0x452a, 0x2c5: 0x470a, - 0x2c6: 0x48ea, 0x2c7: 0x4aca, 0x2c8: 0x4ca9, 0x2c9: 0x4e89, 0x2ca: 0x5069, 0x2cb: 0x5249, - 0x2cc: 0x5429, 0x2cd: 0x5609, 0x2ce: 0x57e9, 0x2cf: 0x59c9, 0x2d0: 0x5baa, 0x2d1: 0x5d8a, - 0x2d2: 0x5f6a, 0x2d3: 0x614a, 0x2d4: 0x632a, 0x2d5: 0x650a, 0x2d6: 0x66ea, 0x2d7: 0x68ca, - 0x2d8: 0x6aa9, 0x2d9: 0x6c89, 0x2da: 0x6e69, 0x2db: 0x7049, 0x2dc: 0x7229, 0x2dd: 0x7409, - 0x2de: 0x75e9, 0x2df: 0x77c9, 0x2e0: 0x79aa, 0x2e1: 0x7b8a, 0x2e2: 0x7d6a, 0x2e3: 0x7f4a, - 0x2e4: 0x812a, 0x2e5: 0x830a, 0x2e6: 0x84ea, 0x2e7: 0x86ca, 0x2e8: 0x88a9, 0x2e9: 0x8a89, - 0x2ea: 0x8c69, 0x2eb: 0x8e49, 0x2ec: 0x9029, 0x2ed: 0x9209, 0x2ee: 0x93e9, 0x2ef: 0x95c9, - 0x2f0: 0x0812, 0x2f1: 0x0812, 0x2f2: 0x97aa, 0x2f3: 0x99ca, 0x2f4: 0x9b6a, - 
0x2f6: 0x9d2a, 0x2f7: 0x9e6a, 0x2f8: 0x0813, 0x2f9: 0x0813, 0x2fa: 0x8b53, 0x2fb: 0x8b53, - 0x2fc: 0xa0e9, 0x2fd: 0x0004, 0x2fe: 0xa28a, 0x2ff: 0x0004, - // Block 0xc, offset 0x300 - 0x300: 0x0004, 0x301: 0x0004, 0x302: 0xa34a, 0x303: 0xa56a, 0x304: 0xa70a, - 0x306: 0xa8ca, 0x307: 0xaa0a, 0x308: 0x8e53, 0x309: 0x8e53, 0x30a: 0x9153, 0x30b: 0x9153, - 0x30c: 0xac89, 0x30d: 0x0004, 0x30e: 0x0004, 0x30f: 0x0004, 0x310: 0x0812, 0x311: 0x0812, - 0x312: 0xae2a, 0x313: 0xafea, 0x316: 0xb1aa, 0x317: 0xb2ea, - 0x318: 0x0813, 0x319: 0x0813, 0x31a: 0x9453, 0x31b: 0x9453, 0x31d: 0x0004, - 0x31e: 0x0004, 0x31f: 0x0004, 0x320: 0x0812, 0x321: 0x0812, 0x322: 0xb4aa, 0x323: 0xb66a, - 0x324: 0xb82a, 0x325: 0x0912, 0x326: 0xb96a, 0x327: 0xbaaa, 0x328: 0x0813, 0x329: 0x0813, - 0x32a: 0x9a53, 0x32b: 0x9a53, 0x32c: 0x0913, 0x32d: 0x0004, 0x32e: 0x0004, 0x32f: 0x0004, - 0x332: 0xbc6a, 0x333: 0xbe8a, 0x334: 0xc02a, - 0x336: 0xc1ea, 0x337: 0xc32a, 0x338: 0x9753, 0x339: 0x9753, 0x33a: 0x4d53, 0x33b: 0x4d53, - 0x33c: 0xc5a9, 0x33d: 0x0004, 0x33e: 0x0004, - // Block 0xd, offset 0x340 - 0x342: 0x0013, - 0x347: 0x0013, 0x34a: 0x0012, 0x34b: 0x0013, - 0x34c: 0x0013, 0x34d: 0x0013, 0x34e: 0x0012, 0x34f: 0x0012, 0x350: 0x0013, 0x351: 0x0013, - 0x352: 0x0013, 0x353: 0x0012, 0x355: 0x0013, - 0x359: 0x0013, 0x35a: 0x0013, 0x35b: 0x0013, 0x35c: 0x0013, 0x35d: 0x0013, - 0x364: 0x0013, 0x366: 0xc74b, 0x368: 0x0013, - 0x36a: 0xc80b, 0x36b: 0xc88b, 0x36c: 0x0013, 0x36d: 0x0013, 0x36f: 0x0012, - 0x370: 0x0013, 0x371: 0x0013, 0x372: 0x9d53, 0x373: 0x0013, 0x374: 0x0012, 0x375: 0x0010, - 0x376: 0x0010, 0x377: 0x0010, 0x378: 0x0010, 0x379: 0x0012, - 0x37c: 0x0012, 0x37d: 0x0012, 0x37e: 0x0013, 0x37f: 0x0013, - // Block 0xe, offset 0x380 - 0x380: 0x1a13, 0x381: 0x1a13, 0x382: 0x1e13, 0x383: 0x1e13, 0x384: 0x1a13, 0x385: 0x1a13, - 0x386: 0x2613, 0x387: 0x2613, 0x388: 0x2a13, 0x389: 0x2a13, 0x38a: 0x2e13, 0x38b: 0x2e13, - 0x38c: 0x2a13, 0x38d: 0x2a13, 0x38e: 0x2613, 0x38f: 0x2613, 0x390: 0xa052, 0x391: 0xa052, - 0x392: 0xa352, 0x393: 0xa352, 0x394: 0xa652, 0x395: 0xa652, 0x396: 0xa352, 0x397: 0xa352, - 0x398: 0xa052, 0x399: 0xa052, 0x39a: 0x1a12, 0x39b: 0x1a12, 0x39c: 0x1e12, 0x39d: 0x1e12, - 0x39e: 0x1a12, 0x39f: 0x1a12, 0x3a0: 0x2612, 0x3a1: 0x2612, 0x3a2: 0x2a12, 0x3a3: 0x2a12, - 0x3a4: 0x2e12, 0x3a5: 0x2e12, 0x3a6: 0x2a12, 0x3a7: 0x2a12, 0x3a8: 0x2612, 0x3a9: 0x2612, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x6552, 0x3c1: 0x6552, 0x3c2: 0x6552, 0x3c3: 0x6552, 0x3c4: 0x6552, 0x3c5: 0x6552, - 0x3c6: 0x6552, 0x3c7: 0x6552, 0x3c8: 0x6552, 0x3c9: 0x6552, 0x3ca: 0x6552, 0x3cb: 0x6552, - 0x3cc: 0x6552, 0x3cd: 0x6552, 0x3ce: 0x6552, 0x3cf: 0x6552, 0x3d0: 0xa952, 0x3d1: 0xa952, - 0x3d2: 0xa952, 0x3d3: 0xa952, 0x3d4: 0xa952, 0x3d5: 0xa952, 0x3d6: 0xa952, 0x3d7: 0xa952, - 0x3d8: 0xa952, 0x3d9: 0xa952, 0x3da: 0xa952, 0x3db: 0xa952, 0x3dc: 0xa952, 0x3dd: 0xa952, - 0x3de: 0xa952, 0x3e0: 0x0113, 0x3e1: 0x0112, 0x3e2: 0xc94b, 0x3e3: 0x8853, - 0x3e4: 0xca0b, 0x3e5: 0xcaca, 0x3e6: 0xcb4a, 0x3e7: 0x0f13, 0x3e8: 0x0f12, 0x3e9: 0x0313, - 0x3ea: 0x0312, 0x3eb: 0x0713, 0x3ec: 0x0712, 0x3ed: 0xcbcb, 0x3ee: 0xcc8b, 0x3ef: 0xcd4b, - 0x3f0: 0xce0b, 0x3f1: 0x0012, 0x3f2: 0x0113, 0x3f3: 0x0112, 0x3f4: 0x0012, 0x3f5: 0x0313, - 0x3f6: 0x0312, 0x3f7: 0x0012, 0x3f8: 0x0012, 0x3f9: 0x0012, 0x3fa: 0x0012, 0x3fb: 0x0012, - 0x3fc: 0x0015, 0x3fd: 0x0015, 0x3fe: 0xcecb, 0x3ff: 0xcf8b, - // Block 0x10, offset 0x400 - 0x400: 0x0113, 0x401: 0x0112, 0x402: 0x0113, 0x403: 0x0112, 0x404: 0x0113, 0x405: 0x0112, - 0x406: 0x0113, 0x407: 0x0112, 0x408: 0x0014, 0x409: 0x0004, 0x40a: 
0x0004, 0x40b: 0x0713, - 0x40c: 0x0712, 0x40d: 0xd04b, 0x40e: 0x0012, 0x40f: 0x0010, 0x410: 0x0113, 0x411: 0x0112, - 0x412: 0x0113, 0x413: 0x0112, 0x414: 0x0012, 0x415: 0x0012, 0x416: 0x0113, 0x417: 0x0112, - 0x418: 0x0113, 0x419: 0x0112, 0x41a: 0x0113, 0x41b: 0x0112, 0x41c: 0x0113, 0x41d: 0x0112, - 0x41e: 0x0113, 0x41f: 0x0112, 0x420: 0x0113, 0x421: 0x0112, 0x422: 0x0113, 0x423: 0x0112, - 0x424: 0x0113, 0x425: 0x0112, 0x426: 0x0113, 0x427: 0x0112, 0x428: 0x0113, 0x429: 0x0112, - 0x42a: 0xd10b, 0x42b: 0xd1cb, 0x42c: 0xd28b, 0x42d: 0xd34b, 0x42e: 0xd40b, - 0x430: 0xd4cb, 0x431: 0xd58b, 0x432: 0xd64b, 0x433: 0xac53, 0x434: 0x0113, 0x435: 0x0112, - 0x436: 0x0113, 0x437: 0x0112, - // Block 0x11, offset 0x440 - 0x440: 0xd70a, 0x441: 0xd80a, 0x442: 0xd90a, 0x443: 0xda0a, 0x444: 0xdb6a, 0x445: 0xdcca, - 0x446: 0xddca, - 0x453: 0xdeca, 0x454: 0xe08a, 0x455: 0xe24a, 0x456: 0xe40a, 0x457: 0xe5ca, - 0x45d: 0x0010, - 0x45e: 0x0034, 0x45f: 0x0010, 0x460: 0x0010, 0x461: 0x0010, 0x462: 0x0010, 0x463: 0x0010, - 0x464: 0x0010, 0x465: 0x0010, 0x466: 0x0010, 0x467: 0x0010, 0x468: 0x0010, - 0x46a: 0x0010, 0x46b: 0x0010, 0x46c: 0x0010, 0x46d: 0x0010, 0x46e: 0x0010, 0x46f: 0x0010, - 0x470: 0x0010, 0x471: 0x0010, 0x472: 0x0010, 0x473: 0x0010, 0x474: 0x0010, 0x475: 0x0010, - 0x476: 0x0010, 0x478: 0x0010, 0x479: 0x0010, 0x47a: 0x0010, 0x47b: 0x0010, - 0x47c: 0x0010, 0x47e: 0x0010, - // Block 0x12, offset 0x480 - 0x480: 0x2213, 0x481: 0x2213, 0x482: 0x2613, 0x483: 0x2613, 0x484: 0x2213, 0x485: 0x2213, - 0x486: 0x2e13, 0x487: 0x2e13, 0x488: 0x2213, 0x489: 0x2213, 0x48a: 0x2613, 0x48b: 0x2613, - 0x48c: 0x2213, 0x48d: 0x2213, 0x48e: 0x3e13, 0x48f: 0x3e13, 0x490: 0x2213, 0x491: 0x2213, - 0x492: 0x2613, 0x493: 0x2613, 0x494: 0x2213, 0x495: 0x2213, 0x496: 0x2e13, 0x497: 0x2e13, - 0x498: 0x2213, 0x499: 0x2213, 0x49a: 0x2613, 0x49b: 0x2613, 0x49c: 0x2213, 0x49d: 0x2213, - 0x49e: 0xb553, 0x49f: 0xb553, 0x4a0: 0xb853, 0x4a1: 0xb853, 0x4a2: 0x2212, 0x4a3: 0x2212, - 0x4a4: 0x2612, 0x4a5: 0x2612, 0x4a6: 0x2212, 0x4a7: 0x2212, 0x4a8: 0x2e12, 0x4a9: 0x2e12, - 0x4aa: 0x2212, 0x4ab: 0x2212, 0x4ac: 0x2612, 0x4ad: 0x2612, 0x4ae: 0x2212, 0x4af: 0x2212, - 0x4b0: 0x3e12, 0x4b1: 0x3e12, 0x4b2: 0x2212, 0x4b3: 0x2212, 0x4b4: 0x2612, 0x4b5: 0x2612, - 0x4b6: 0x2212, 0x4b7: 0x2212, 0x4b8: 0x2e12, 0x4b9: 0x2e12, 0x4ba: 0x2212, 0x4bb: 0x2212, - 0x4bc: 0x2612, 0x4bd: 0x2612, 0x4be: 0x2212, 0x4bf: 0x2212, - // Block 0x13, offset 0x4c0 - 0x4c2: 0x0010, - 0x4c7: 0x0010, 0x4c9: 0x0010, 0x4cb: 0x0010, - 0x4cd: 0x0010, 0x4ce: 0x0010, 0x4cf: 0x0010, 0x4d1: 0x0010, - 0x4d2: 0x0010, 0x4d4: 0x0010, 0x4d7: 0x0010, - 0x4d9: 0x0010, 0x4db: 0x0010, 0x4dd: 0x0010, - 0x4df: 0x0010, 0x4e1: 0x0010, 0x4e2: 0x0010, - 0x4e4: 0x0010, 0x4e7: 0x0010, 0x4e8: 0x0010, 0x4e9: 0x0010, - 0x4ea: 0x0010, 0x4ec: 0x0010, 0x4ed: 0x0010, 0x4ee: 0x0010, 0x4ef: 0x0010, - 0x4f0: 0x0010, 0x4f1: 0x0010, 0x4f2: 0x0010, 0x4f4: 0x0010, 0x4f5: 0x0010, - 0x4f6: 0x0010, 0x4f7: 0x0010, 0x4f9: 0x0010, 0x4fa: 0x0010, 0x4fb: 0x0010, - 0x4fc: 0x0010, 0x4fe: 0x0010, -} - -// caseIndex: 25 blocks, 1600 entries, 3200 bytes -// Block 0 is the zero block. 
-var caseIndex = [1600]uint16{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x12, 0xc3: 0x13, 0xc4: 0x14, 0xc5: 0x15, 0xc6: 0x01, 0xc7: 0x02, - 0xc8: 0x16, 0xc9: 0x03, 0xca: 0x04, 0xcb: 0x17, 0xcc: 0x18, 0xcd: 0x05, 0xce: 0x06, 0xcf: 0x07, - 0xd0: 0x19, 0xd1: 0x1a, 0xd2: 0x1b, 0xd3: 0x1c, 0xd4: 0x1d, 0xd5: 0x1e, 0xd6: 0x1f, 0xd7: 0x20, - 0xd8: 0x21, 0xd9: 0x22, 0xda: 0x23, 0xdb: 0x24, 0xdc: 0x25, 0xdd: 0x26, 0xde: 0x27, 0xdf: 0x28, - 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, - 0xea: 0x06, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x08, 0xef: 0x09, - 0xf0: 0x14, 0xf3: 0x16, - // Block 0x4, offset 0x100 - 0x120: 0x29, 0x121: 0x2a, 0x122: 0x2b, 0x123: 0x2c, 0x124: 0x2d, 0x125: 0x2e, 0x126: 0x2f, 0x127: 0x30, - 0x128: 0x31, 0x129: 0x32, 0x12a: 0x33, 0x12b: 0x34, 0x12c: 0x35, 0x12d: 0x36, 0x12e: 0x37, 0x12f: 0x38, - 0x130: 0x39, 0x131: 0x3a, 0x132: 0x3b, 0x133: 0x3c, 0x134: 0x3d, 0x135: 0x3e, 0x136: 0x3f, 0x137: 0x40, - 0x138: 0x41, 0x139: 0x42, 0x13a: 0x43, 0x13b: 0x44, 0x13c: 0x45, 0x13d: 0x46, 0x13e: 0x47, 0x13f: 0x48, - // Block 0x5, offset 0x140 - 0x140: 0x49, 0x141: 0x4a, 0x142: 0x4b, 0x143: 0x4c, 0x144: 0x23, 0x145: 0x23, 0x146: 0x23, 0x147: 0x23, - 0x148: 0x23, 0x149: 0x4d, 0x14a: 0x4e, 0x14b: 0x4f, 0x14c: 0x50, 0x14d: 0x51, 0x14e: 0x52, 0x14f: 0x53, - 0x150: 0x54, 0x151: 0x23, 0x152: 0x23, 0x153: 0x23, 0x154: 0x23, 0x155: 0x23, 0x156: 0x23, 0x157: 0x23, - 0x158: 0x23, 0x159: 0x55, 0x15a: 0x56, 0x15b: 0x57, 0x15c: 0x58, 0x15d: 0x59, 0x15e: 0x5a, 0x15f: 0x5b, - 0x160: 0x5c, 0x161: 0x5d, 0x162: 0x5e, 0x163: 0x5f, 0x164: 0x60, 0x165: 0x61, 0x167: 0x62, - 0x168: 0x63, 0x169: 0x64, 0x16a: 0x65, 0x16c: 0x66, 0x16d: 0x67, 0x16e: 0x68, 0x16f: 0x69, - 0x170: 0x6a, 0x171: 0x6b, 0x172: 0x6c, 0x173: 0x6d, 0x174: 0x6e, 0x175: 0x6f, 0x176: 0x70, 0x177: 0x71, - 0x178: 0x72, 0x179: 0x72, 0x17a: 0x73, 0x17b: 0x72, 0x17c: 0x74, 0x17d: 0x08, 0x17e: 0x09, 0x17f: 0x0a, - // Block 0x6, offset 0x180 - 0x180: 0x75, 0x181: 0x76, 0x182: 0x77, 0x183: 0x78, 0x184: 0x0b, 0x185: 0x79, 0x186: 0x7a, - 0x192: 0x7b, 0x193: 0x0c, - 0x1b0: 0x7c, 0x1b1: 0x0d, 0x1b2: 0x72, 0x1b3: 0x7d, 0x1b4: 0x7e, 0x1b5: 0x7f, 0x1b6: 0x80, 0x1b7: 0x81, - 0x1b8: 0x82, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x83, 0x1c2: 0x84, 0x1c3: 0x85, 0x1c4: 0x86, 0x1c5: 0x23, 0x1c6: 0x87, - // Block 0x8, offset 0x200 - 0x200: 0x88, 0x201: 0x23, 0x202: 0x23, 0x203: 0x23, 0x204: 0x23, 0x205: 0x23, 0x206: 0x23, 0x207: 0x23, - 0x208: 0x23, 0x209: 0x23, 0x20a: 0x23, 0x20b: 0x23, 0x20c: 0x23, 0x20d: 0x23, 0x20e: 0x23, 0x20f: 0x23, - 0x210: 0x23, 0x211: 0x23, 0x212: 0x89, 0x213: 0x8a, 0x214: 0x23, 0x215: 0x23, 0x216: 0x23, 0x217: 0x23, - 0x218: 0x8b, 0x219: 0x8c, 0x21a: 0x8d, 0x21b: 0x8e, 0x21c: 0x8f, 0x21d: 0x90, 0x21e: 0x0e, 0x21f: 0x91, - 0x220: 0x92, 0x221: 0x93, 0x222: 0x23, 0x223: 0x94, 0x224: 0x95, 0x225: 0x96, 0x226: 0x97, 0x227: 0x98, - 0x228: 0x99, 0x229: 0x9a, 0x22a: 0x9b, 0x22b: 0x9c, 0x22c: 0x9d, 0x22d: 0x9e, 0x22e: 0x9f, 0x22f: 0xa0, - 0x230: 0x23, 0x231: 0x23, 0x232: 0x23, 0x233: 0x23, 0x234: 0x23, 0x235: 0x23, 0x236: 0x23, 0x237: 0x23, - 0x238: 0x23, 0x239: 0x23, 0x23a: 0x23, 0x23b: 0x23, 0x23c: 0x23, 0x23d: 0x23, 0x23e: 0x23, 0x23f: 0x23, - // Block 0x9, offset 0x240 - 0x240: 0x23, 0x241: 0x23, 0x242: 0x23, 0x243: 0x23, 0x244: 0x23, 0x245: 0x23, 0x246: 0x23, 0x247: 0x23, - 0x248: 0x23, 0x249: 0x23, 0x24a: 0x23, 0x24b: 0x23, 0x24c: 0x23, 0x24d: 0x23, 0x24e: 0x23, 0x24f: 0x23, - 0x250: 0x23, 0x251: 0x23, 0x252: 0x23, 0x253: 0x23, 0x254: 0x23, 0x255: 0x23, 0x256: 0x23, 0x257: 
0x23, - 0x258: 0x23, 0x259: 0x23, 0x25a: 0x23, 0x25b: 0x23, 0x25c: 0x23, 0x25d: 0x23, 0x25e: 0x23, 0x25f: 0x23, - 0x260: 0x23, 0x261: 0x23, 0x262: 0x23, 0x263: 0x23, 0x264: 0x23, 0x265: 0x23, 0x266: 0x23, 0x267: 0x23, - 0x268: 0x23, 0x269: 0x23, 0x26a: 0x23, 0x26b: 0x23, 0x26c: 0x23, 0x26d: 0x23, 0x26e: 0x23, 0x26f: 0x23, - 0x270: 0x23, 0x271: 0x23, 0x272: 0x23, 0x273: 0x23, 0x274: 0x23, 0x275: 0x23, 0x276: 0x23, 0x277: 0x23, - 0x278: 0x23, 0x279: 0x23, 0x27a: 0x23, 0x27b: 0x23, 0x27c: 0x23, 0x27d: 0x23, 0x27e: 0x23, 0x27f: 0x23, - // Block 0xa, offset 0x280 - 0x280: 0x23, 0x281: 0x23, 0x282: 0x23, 0x283: 0x23, 0x284: 0x23, 0x285: 0x23, 0x286: 0x23, 0x287: 0x23, - 0x288: 0x23, 0x289: 0x23, 0x28a: 0x23, 0x28b: 0x23, 0x28c: 0x23, 0x28d: 0x23, 0x28e: 0x23, 0x28f: 0x23, - 0x290: 0x23, 0x291: 0x23, 0x292: 0x23, 0x293: 0x23, 0x294: 0x23, 0x295: 0x23, 0x296: 0x23, 0x297: 0x23, - 0x298: 0x23, 0x299: 0x23, 0x29a: 0x23, 0x29b: 0x23, 0x29c: 0x23, 0x29d: 0x23, 0x29e: 0xa1, 0x29f: 0xa2, - // Block 0xb, offset 0x2c0 - 0x2ec: 0x0f, 0x2ed: 0xa3, 0x2ee: 0xa4, 0x2ef: 0xa5, - 0x2f0: 0x23, 0x2f1: 0x23, 0x2f2: 0x23, 0x2f3: 0x23, 0x2f4: 0xa6, 0x2f5: 0xa7, 0x2f6: 0xa8, 0x2f7: 0xa9, - 0x2f8: 0xaa, 0x2f9: 0xab, 0x2fa: 0x23, 0x2fb: 0xac, 0x2fc: 0xad, 0x2fd: 0xae, 0x2fe: 0xaf, 0x2ff: 0xb0, - // Block 0xc, offset 0x300 - 0x300: 0xb1, 0x301: 0xb2, 0x302: 0x23, 0x303: 0xb3, 0x305: 0xb4, 0x307: 0xb5, - 0x30a: 0xb6, 0x30b: 0xb7, 0x30c: 0xb8, 0x30d: 0xb9, 0x30e: 0xba, 0x30f: 0xbb, - 0x310: 0xbc, 0x311: 0xbd, 0x312: 0xbe, 0x313: 0xbf, 0x314: 0xc0, 0x315: 0xc1, - 0x318: 0x23, 0x319: 0x23, 0x31a: 0x23, 0x31b: 0x23, 0x31c: 0xc2, 0x31d: 0xc3, - 0x320: 0xc4, 0x321: 0xc5, 0x322: 0xc6, 0x323: 0xc7, 0x324: 0xc8, 0x326: 0xc9, - 0x328: 0xca, 0x329: 0xcb, 0x32a: 0xcc, 0x32b: 0xcd, 0x32c: 0x5f, 0x32d: 0xce, 0x32e: 0xcf, - 0x330: 0x23, 0x331: 0xd0, 0x332: 0xd1, 0x333: 0xd2, - // Block 0xd, offset 0x340 - 0x340: 0xd3, 0x341: 0xd4, 0x342: 0xd5, 0x343: 0xd6, 0x344: 0xd7, 0x345: 0xd8, 0x346: 0xd9, 0x347: 0xda, - 0x348: 0xdb, 0x34a: 0xdc, 0x34b: 0xdd, 0x34c: 0xde, 0x34d: 0xdf, - 0x350: 0xe0, 0x351: 0xe1, 0x352: 0xe2, 0x353: 0xe3, 0x356: 0xe4, 0x357: 0xe5, - 0x358: 0xe6, 0x359: 0xe7, 0x35a: 0xe8, 0x35b: 0xe9, 0x35c: 0xea, - 0x362: 0xeb, 0x363: 0xec, - 0x36b: 0xed, - 0x370: 0xee, 0x371: 0xef, 0x372: 0xf0, - // Block 0xe, offset 0x380 - 0x380: 0x23, 0x381: 0x23, 0x382: 0x23, 0x383: 0x23, 0x384: 0x23, 0x385: 0x23, 0x386: 0x23, 0x387: 0x23, - 0x388: 0x23, 0x389: 0x23, 0x38a: 0x23, 0x38b: 0x23, 0x38c: 0x23, 0x38d: 0x23, 0x38e: 0xf1, - 0x390: 0x23, 0x391: 0xf2, 0x392: 0x23, 0x393: 0x23, 0x394: 0x23, 0x395: 0xf3, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x23, 0x3c1: 0x23, 0x3c2: 0x23, 0x3c3: 0x23, 0x3c4: 0x23, 0x3c5: 0x23, 0x3c6: 0x23, 0x3c7: 0x23, - 0x3c8: 0x23, 0x3c9: 0x23, 0x3ca: 0x23, 0x3cb: 0x23, 0x3cc: 0x23, 0x3cd: 0x23, 0x3ce: 0x23, 0x3cf: 0x23, - 0x3d0: 0xf2, - // Block 0x10, offset 0x400 - 0x410: 0x23, 0x411: 0x23, 0x412: 0x23, 0x413: 0x23, 0x414: 0x23, 0x415: 0x23, 0x416: 0x23, 0x417: 0x23, - 0x418: 0x23, 0x419: 0xf4, - // Block 0x11, offset 0x440 - 0x460: 0x23, 0x461: 0x23, 0x462: 0x23, 0x463: 0x23, 0x464: 0x23, 0x465: 0x23, 0x466: 0x23, 0x467: 0x23, - 0x468: 0xed, 0x469: 0xf5, 0x46b: 0xf6, 0x46c: 0xf7, 0x46d: 0xf8, 0x46e: 0xf9, - 0x47c: 0x23, 0x47d: 0xfa, 0x47e: 0xfb, 0x47f: 0xfc, - // Block 0x12, offset 0x480 - 0x4b0: 0x23, 0x4b1: 0xfd, 0x4b2: 0xfe, - // Block 0x13, offset 0x4c0 - 0x4c5: 0xff, 0x4c6: 0x100, - 0x4c9: 0x101, - 0x4d0: 0x102, 0x4d1: 0x103, 0x4d2: 0x104, 0x4d3: 0x105, 0x4d4: 0x106, 0x4d5: 0x107, 0x4d6: 0x108, 0x4d7: 0x109, - 
0x4d8: 0x10a, 0x4d9: 0x10b, 0x4da: 0x10c, 0x4db: 0x10d, 0x4dc: 0x10e, 0x4dd: 0x10f, 0x4de: 0x110, 0x4df: 0x111, - 0x4e8: 0x112, 0x4e9: 0x113, 0x4ea: 0x114, - // Block 0x14, offset 0x500 - 0x500: 0x115, - 0x520: 0x23, 0x521: 0x23, 0x522: 0x23, 0x523: 0x116, 0x524: 0x10, 0x525: 0x117, - 0x538: 0x118, 0x539: 0x11, 0x53a: 0x119, - // Block 0x15, offset 0x540 - 0x544: 0x11a, 0x545: 0x11b, 0x546: 0x11c, - 0x54f: 0x11d, - // Block 0x16, offset 0x580 - 0x590: 0x0a, 0x591: 0x0b, 0x592: 0x0c, 0x593: 0x0d, 0x594: 0x0e, 0x596: 0x0f, - 0x59b: 0x10, 0x59d: 0x11, 0x59e: 0x12, 0x59f: 0x13, - // Block 0x17, offset 0x5c0 - 0x5c0: 0x11e, 0x5c1: 0x11f, 0x5c4: 0x11f, 0x5c5: 0x11f, 0x5c6: 0x11f, 0x5c7: 0x120, - // Block 0x18, offset 0x600 - 0x620: 0x15, -} - -// sparseOffsets: 272 entries, 544 bytes -var sparseOffsets = []uint16{0x0, 0x9, 0xf, 0x18, 0x24, 0x2e, 0x3a, 0x3d, 0x41, 0x44, 0x48, 0x52, 0x54, 0x59, 0x69, 0x70, 0x75, 0x83, 0x84, 0x92, 0xa1, 0xab, 0xae, 0xb4, 0xbc, 0xbe, 0xc0, 0xce, 0xd4, 0xe2, 0xed, 0xf8, 0x103, 0x10f, 0x119, 0x124, 0x12f, 0x13b, 0x147, 0x14f, 0x157, 0x161, 0x16c, 0x178, 0x17e, 0x189, 0x18e, 0x196, 0x199, 0x19e, 0x1a2, 0x1a6, 0x1ad, 0x1b6, 0x1be, 0x1bf, 0x1c8, 0x1cf, 0x1d7, 0x1dd, 0x1e3, 0x1e8, 0x1ec, 0x1ef, 0x1f1, 0x1f4, 0x1f9, 0x1fa, 0x1fc, 0x1fe, 0x200, 0x207, 0x20c, 0x210, 0x219, 0x21c, 0x21f, 0x225, 0x226, 0x231, 0x232, 0x233, 0x238, 0x245, 0x24d, 0x255, 0x25e, 0x267, 0x270, 0x275, 0x278, 0x281, 0x28e, 0x290, 0x297, 0x299, 0x2a4, 0x2a5, 0x2b0, 0x2b8, 0x2c2, 0x2c8, 0x2c9, 0x2d7, 0x2dc, 0x2df, 0x2e4, 0x2e8, 0x2ee, 0x2f3, 0x2f6, 0x2fb, 0x300, 0x301, 0x307, 0x309, 0x30a, 0x30c, 0x30e, 0x311, 0x312, 0x314, 0x317, 0x31d, 0x321, 0x323, 0x329, 0x330, 0x334, 0x33d, 0x33e, 0x346, 0x34a, 0x34f, 0x357, 0x35d, 0x363, 0x36d, 0x372, 0x37b, 0x381, 0x388, 0x38c, 0x394, 0x396, 0x398, 0x39b, 0x39d, 0x39f, 0x3a0, 0x3a1, 0x3a3, 0x3a5, 0x3ab, 0x3b0, 0x3b2, 0x3b8, 0x3bb, 0x3bd, 0x3c3, 0x3c8, 0x3ca, 0x3cb, 0x3cc, 0x3cd, 0x3cf, 0x3d1, 0x3d3, 0x3d6, 0x3d8, 0x3db, 0x3e3, 0x3e6, 0x3ea, 0x3f2, 0x3f4, 0x3f5, 0x3f6, 0x3f8, 0x3fe, 0x400, 0x401, 0x403, 0x405, 0x407, 0x414, 0x415, 0x416, 0x41a, 0x41c, 0x41d, 0x41e, 0x41f, 0x420, 0x424, 0x428, 0x42e, 0x430, 0x437, 0x43a, 0x43e, 0x444, 0x44d, 0x453, 0x459, 0x463, 0x46d, 0x46f, 0x476, 0x47c, 0x482, 0x488, 0x48b, 0x491, 0x494, 0x49c, 0x49d, 0x4a4, 0x4a5, 0x4a8, 0x4a9, 0x4af, 0x4b2, 0x4ba, 0x4bb, 0x4bc, 0x4bd, 0x4be, 0x4c0, 0x4c2, 0x4c4, 0x4c8, 0x4c9, 0x4cb, 0x4cc, 0x4cd, 0x4cf, 0x4d4, 0x4d9, 0x4dd, 0x4de, 0x4e1, 0x4e5, 0x4f0, 0x4f4, 0x4fc, 0x501, 0x505, 0x508, 0x50c, 0x50f, 0x512, 0x517, 0x51b, 0x51f, 0x523, 0x527, 0x529, 0x52b, 0x52e, 0x533, 0x535, 0x53a, 0x543, 0x548, 0x549, 0x54c, 0x54d, 0x54e, 0x550, 0x551, 0x552} - -// sparseValues: 1362 entries, 5448 bytes -var sparseValues = [1362]valueRange{ - // Block 0x0, offset 0x0 - {value: 0x0004, lo: 0xa8, hi: 0xa8}, - {value: 0x0012, lo: 0xaa, hi: 0xaa}, - {value: 0x0014, lo: 0xad, hi: 0xad}, - {value: 0x0004, lo: 0xaf, hi: 0xaf}, - {value: 0x0004, lo: 0xb4, hi: 0xb4}, - {value: 0x002a, lo: 0xb5, hi: 0xb5}, - {value: 0x0014, lo: 0xb7, hi: 0xb7}, - {value: 0x0004, lo: 0xb8, hi: 0xb8}, - {value: 0x0012, lo: 0xba, hi: 0xba}, - // Block 0x1, offset 0x9 - {value: 0x2013, lo: 0x80, hi: 0x96}, - {value: 0x2013, lo: 0x98, hi: 0x9e}, - {value: 0x00ea, lo: 0x9f, hi: 0x9f}, - {value: 0x2012, lo: 0xa0, hi: 0xb6}, - {value: 0x2012, lo: 0xb8, hi: 0xbe}, - {value: 0x0252, lo: 0xbf, hi: 0xbf}, - // Block 0x2, offset 0xf - {value: 0x0117, lo: 0x80, hi: 0xaf}, - {value: 0x01eb, lo: 0xb0, hi: 0xb0}, - {value: 0x02ea, lo: 0xb1, hi: 0xb1}, 
- {value: 0x0117, lo: 0xb2, hi: 0xb7}, - {value: 0x0012, lo: 0xb8, hi: 0xb8}, - {value: 0x0316, lo: 0xb9, hi: 0xba}, - {value: 0x0716, lo: 0xbb, hi: 0xbc}, - {value: 0x0316, lo: 0xbd, hi: 0xbe}, - {value: 0x0553, lo: 0xbf, hi: 0xbf}, - // Block 0x3, offset 0x18 - {value: 0x0552, lo: 0x80, hi: 0x80}, - {value: 0x0316, lo: 0x81, hi: 0x82}, - {value: 0x0716, lo: 0x83, hi: 0x84}, - {value: 0x0316, lo: 0x85, hi: 0x86}, - {value: 0x0f16, lo: 0x87, hi: 0x88}, - {value: 0x034a, lo: 0x89, hi: 0x89}, - {value: 0x0117, lo: 0x8a, hi: 0xb7}, - {value: 0x0253, lo: 0xb8, hi: 0xb8}, - {value: 0x0316, lo: 0xb9, hi: 0xba}, - {value: 0x0716, lo: 0xbb, hi: 0xbc}, - {value: 0x0316, lo: 0xbd, hi: 0xbe}, - {value: 0x044a, lo: 0xbf, hi: 0xbf}, - // Block 0x4, offset 0x24 - {value: 0x0117, lo: 0x80, hi: 0x9f}, - {value: 0x2f53, lo: 0xa0, hi: 0xa0}, - {value: 0x0012, lo: 0xa1, hi: 0xa1}, - {value: 0x0117, lo: 0xa2, hi: 0xb3}, - {value: 0x0012, lo: 0xb4, hi: 0xb9}, - {value: 0x10cb, lo: 0xba, hi: 0xba}, - {value: 0x0716, lo: 0xbb, hi: 0xbc}, - {value: 0x2953, lo: 0xbd, hi: 0xbd}, - {value: 0x11cb, lo: 0xbe, hi: 0xbe}, - {value: 0x12ca, lo: 0xbf, hi: 0xbf}, - // Block 0x5, offset 0x2e - {value: 0x0015, lo: 0x80, hi: 0x81}, - {value: 0x0004, lo: 0x82, hi: 0x85}, - {value: 0x0014, lo: 0x86, hi: 0x91}, - {value: 0x0004, lo: 0x92, hi: 0x96}, - {value: 0x0014, lo: 0x97, hi: 0x97}, - {value: 0x0004, lo: 0x98, hi: 0x9f}, - {value: 0x0015, lo: 0xa0, hi: 0xa4}, - {value: 0x0004, lo: 0xa5, hi: 0xab}, - {value: 0x0014, lo: 0xac, hi: 0xac}, - {value: 0x0004, lo: 0xad, hi: 0xad}, - {value: 0x0014, lo: 0xae, hi: 0xae}, - {value: 0x0004, lo: 0xaf, hi: 0xbf}, - // Block 0x6, offset 0x3a - {value: 0x0024, lo: 0x80, hi: 0x94}, - {value: 0x0034, lo: 0x95, hi: 0xbc}, - {value: 0x0024, lo: 0xbd, hi: 0xbf}, - // Block 0x7, offset 0x3d - {value: 0x6553, lo: 0x80, hi: 0x8f}, - {value: 0x2013, lo: 0x90, hi: 0x9f}, - {value: 0x5f53, lo: 0xa0, hi: 0xaf}, - {value: 0x2012, lo: 0xb0, hi: 0xbf}, - // Block 0x8, offset 0x41 - {value: 0x5f52, lo: 0x80, hi: 0x8f}, - {value: 0x6552, lo: 0x90, hi: 0x9f}, - {value: 0x0117, lo: 0xa0, hi: 0xbf}, - // Block 0x9, offset 0x44 - {value: 0x0117, lo: 0x80, hi: 0x81}, - {value: 0x0024, lo: 0x83, hi: 0x87}, - {value: 0x0014, lo: 0x88, hi: 0x89}, - {value: 0x0117, lo: 0x8a, hi: 0xbf}, - // Block 0xa, offset 0x48 - {value: 0x0f13, lo: 0x80, hi: 0x80}, - {value: 0x0316, lo: 0x81, hi: 0x82}, - {value: 0x0716, lo: 0x83, hi: 0x84}, - {value: 0x0316, lo: 0x85, hi: 0x86}, - {value: 0x0f16, lo: 0x87, hi: 0x88}, - {value: 0x0316, lo: 0x89, hi: 0x8a}, - {value: 0x0716, lo: 0x8b, hi: 0x8c}, - {value: 0x0316, lo: 0x8d, hi: 0x8e}, - {value: 0x0f12, lo: 0x8f, hi: 0x8f}, - {value: 0x0117, lo: 0x90, hi: 0xbf}, - // Block 0xb, offset 0x52 - {value: 0x0117, lo: 0x80, hi: 0xaf}, - {value: 0x6553, lo: 0xb1, hi: 0xbf}, - // Block 0xc, offset 0x54 - {value: 0x3013, lo: 0x80, hi: 0x8f}, - {value: 0x6853, lo: 0x90, hi: 0x96}, - {value: 0x0014, lo: 0x99, hi: 0x99}, - {value: 0x6552, lo: 0xa1, hi: 0xaf}, - {value: 0x3012, lo: 0xb0, hi: 0xbf}, - // Block 0xd, offset 0x59 - {value: 0x6852, lo: 0x80, hi: 0x86}, - {value: 0x27aa, lo: 0x87, hi: 0x87}, - {value: 0x0034, lo: 0x91, hi: 0x91}, - {value: 0x0024, lo: 0x92, hi: 0x95}, - {value: 0x0034, lo: 0x96, hi: 0x96}, - {value: 0x0024, lo: 0x97, hi: 0x99}, - {value: 0x0034, lo: 0x9a, hi: 0x9b}, - {value: 0x0024, lo: 0x9c, hi: 0xa1}, - {value: 0x0034, lo: 0xa2, hi: 0xa7}, - {value: 0x0024, lo: 0xa8, hi: 0xa9}, - {value: 0x0034, lo: 0xaa, hi: 0xaa}, - {value: 0x0024, lo: 0xab, hi: 0xac}, - {value: 
0x0034, lo: 0xad, hi: 0xae}, - {value: 0x0024, lo: 0xaf, hi: 0xaf}, - {value: 0x0034, lo: 0xb0, hi: 0xbd}, - {value: 0x0034, lo: 0xbf, hi: 0xbf}, - // Block 0xe, offset 0x69 - {value: 0x0034, lo: 0x81, hi: 0x82}, - {value: 0x0024, lo: 0x84, hi: 0x84}, - {value: 0x0034, lo: 0x85, hi: 0x85}, - {value: 0x0034, lo: 0x87, hi: 0x87}, - {value: 0x0010, lo: 0x90, hi: 0xaa}, - {value: 0x0010, lo: 0xb0, hi: 0xb3}, - {value: 0x0014, lo: 0xb4, hi: 0xb4}, - // Block 0xf, offset 0x70 - {value: 0x0014, lo: 0x80, hi: 0x85}, - {value: 0x0024, lo: 0x90, hi: 0x97}, - {value: 0x0034, lo: 0x98, hi: 0x9a}, - {value: 0x0014, lo: 0x9c, hi: 0x9c}, - {value: 0x0010, lo: 0xa0, hi: 0xbf}, - // Block 0x10, offset 0x75 - {value: 0x0014, lo: 0x80, hi: 0x80}, - {value: 0x0010, lo: 0x81, hi: 0x8a}, - {value: 0x0034, lo: 0x8b, hi: 0x92}, - {value: 0x0024, lo: 0x93, hi: 0x94}, - {value: 0x0034, lo: 0x95, hi: 0x96}, - {value: 0x0024, lo: 0x97, hi: 0x9b}, - {value: 0x0034, lo: 0x9c, hi: 0x9c}, - {value: 0x0024, lo: 0x9d, hi: 0x9e}, - {value: 0x0034, lo: 0x9f, hi: 0x9f}, - {value: 0x0010, lo: 0xa0, hi: 0xa9}, - {value: 0x0010, lo: 0xab, hi: 0xab}, - {value: 0x0010, lo: 0xae, hi: 0xaf}, - {value: 0x0034, lo: 0xb0, hi: 0xb0}, - {value: 0x0010, lo: 0xb1, hi: 0xbf}, - // Block 0x11, offset 0x83 - {value: 0x0010, lo: 0x80, hi: 0xbf}, - // Block 0x12, offset 0x84 - {value: 0x0010, lo: 0x80, hi: 0x93}, - {value: 0x0010, lo: 0x95, hi: 0x95}, - {value: 0x0024, lo: 0x96, hi: 0x9c}, - {value: 0x0014, lo: 0x9d, hi: 0x9d}, - {value: 0x0024, lo: 0x9f, hi: 0xa2}, - {value: 0x0034, lo: 0xa3, hi: 0xa3}, - {value: 0x0024, lo: 0xa4, hi: 0xa4}, - {value: 0x0014, lo: 0xa5, hi: 0xa6}, - {value: 0x0024, lo: 0xa7, hi: 0xa8}, - {value: 0x0034, lo: 0xaa, hi: 0xaa}, - {value: 0x0024, lo: 0xab, hi: 0xac}, - {value: 0x0034, lo: 0xad, hi: 0xad}, - {value: 0x0010, lo: 0xae, hi: 0xbc}, - {value: 0x0010, lo: 0xbf, hi: 0xbf}, - // Block 0x13, offset 0x92 - {value: 0x0014, lo: 0x8f, hi: 0x8f}, - {value: 0x0010, lo: 0x90, hi: 0x90}, - {value: 0x0034, lo: 0x91, hi: 0x91}, - {value: 0x0010, lo: 0x92, hi: 0xaf}, - {value: 0x0024, lo: 0xb0, hi: 0xb0}, - {value: 0x0034, lo: 0xb1, hi: 0xb1}, - {value: 0x0024, lo: 0xb2, hi: 0xb3}, - {value: 0x0034, lo: 0xb4, hi: 0xb4}, - {value: 0x0024, lo: 0xb5, hi: 0xb6}, - {value: 0x0034, lo: 0xb7, hi: 0xb9}, - {value: 0x0024, lo: 0xba, hi: 0xba}, - {value: 0x0034, lo: 0xbb, hi: 0xbc}, - {value: 0x0024, lo: 0xbd, hi: 0xbd}, - {value: 0x0034, lo: 0xbe, hi: 0xbe}, - {value: 0x0024, lo: 0xbf, hi: 0xbf}, - // Block 0x14, offset 0xa1 - {value: 0x0024, lo: 0x80, hi: 0x81}, - {value: 0x0034, lo: 0x82, hi: 0x82}, - {value: 0x0024, lo: 0x83, hi: 0x83}, - {value: 0x0034, lo: 0x84, hi: 0x84}, - {value: 0x0024, lo: 0x85, hi: 0x85}, - {value: 0x0034, lo: 0x86, hi: 0x86}, - {value: 0x0024, lo: 0x87, hi: 0x87}, - {value: 0x0034, lo: 0x88, hi: 0x88}, - {value: 0x0024, lo: 0x89, hi: 0x8a}, - {value: 0x0010, lo: 0x8d, hi: 0xbf}, - // Block 0x15, offset 0xab - {value: 0x0010, lo: 0x80, hi: 0xa5}, - {value: 0x0014, lo: 0xa6, hi: 0xb0}, - {value: 0x0010, lo: 0xb1, hi: 0xb1}, - // Block 0x16, offset 0xae - {value: 0x0010, lo: 0x80, hi: 0xaa}, - {value: 0x0024, lo: 0xab, hi: 0xb1}, - {value: 0x0034, lo: 0xb2, hi: 0xb2}, - {value: 0x0024, lo: 0xb3, hi: 0xb3}, - {value: 0x0014, lo: 0xb4, hi: 0xb5}, - {value: 0x0014, lo: 0xba, hi: 0xba}, - // Block 0x17, offset 0xb4 - {value: 0x0010, lo: 0x80, hi: 0x95}, - {value: 0x0024, lo: 0x96, hi: 0x99}, - {value: 0x0014, lo: 0x9a, hi: 0x9a}, - {value: 0x0024, lo: 0x9b, hi: 0xa3}, - {value: 0x0014, lo: 0xa4, hi: 0xa4}, 
- {value: 0x0024, lo: 0xa5, hi: 0xa7}, - {value: 0x0014, lo: 0xa8, hi: 0xa8}, - {value: 0x0024, lo: 0xa9, hi: 0xad}, - // Block 0x18, offset 0xbc - {value: 0x0010, lo: 0x80, hi: 0x98}, - {value: 0x0034, lo: 0x99, hi: 0x9b}, - // Block 0x19, offset 0xbe - {value: 0x0010, lo: 0xa0, hi: 0xb4}, - {value: 0x0010, lo: 0xb6, hi: 0xbd}, - // Block 0x1a, offset 0xc0 - {value: 0x0024, lo: 0x94, hi: 0xa1}, - {value: 0x0014, lo: 0xa2, hi: 0xa2}, - {value: 0x0034, lo: 0xa3, hi: 0xa3}, - {value: 0x0024, lo: 0xa4, hi: 0xa5}, - {value: 0x0034, lo: 0xa6, hi: 0xa6}, - {value: 0x0024, lo: 0xa7, hi: 0xa8}, - {value: 0x0034, lo: 0xa9, hi: 0xa9}, - {value: 0x0024, lo: 0xaa, hi: 0xac}, - {value: 0x0034, lo: 0xad, hi: 0xb2}, - {value: 0x0024, lo: 0xb3, hi: 0xb5}, - {value: 0x0034, lo: 0xb6, hi: 0xb6}, - {value: 0x0024, lo: 0xb7, hi: 0xb8}, - {value: 0x0034, lo: 0xb9, hi: 0xba}, - {value: 0x0024, lo: 0xbb, hi: 0xbf}, - // Block 0x1b, offset 0xce - {value: 0x0014, lo: 0x80, hi: 0x82}, - {value: 0x0010, lo: 0x83, hi: 0xb9}, - {value: 0x0014, lo: 0xba, hi: 0xba}, - {value: 0x0010, lo: 0xbb, hi: 0xbb}, - {value: 0x0034, lo: 0xbc, hi: 0xbc}, - {value: 0x0010, lo: 0xbd, hi: 0xbf}, - // Block 0x1c, offset 0xd4 - {value: 0x0010, lo: 0x80, hi: 0x80}, - {value: 0x0014, lo: 0x81, hi: 0x88}, - {value: 0x0010, lo: 0x89, hi: 0x8c}, - {value: 0x0034, lo: 0x8d, hi: 0x8d}, - {value: 0x0010, lo: 0x8e, hi: 0x90}, - {value: 0x0024, lo: 0x91, hi: 0x91}, - {value: 0x0034, lo: 0x92, hi: 0x92}, - {value: 0x0024, lo: 0x93, hi: 0x94}, - {value: 0x0014, lo: 0x95, hi: 0x97}, - {value: 0x0010, lo: 0x98, hi: 0xa1}, - {value: 0x0014, lo: 0xa2, hi: 0xa3}, - {value: 0x0010, lo: 0xa6, hi: 0xaf}, - {value: 0x0014, lo: 0xb1, hi: 0xb1}, - {value: 0x0010, lo: 0xb2, hi: 0xbf}, - // Block 0x1d, offset 0xe2 - {value: 0x0010, lo: 0x80, hi: 0x80}, - {value: 0x0014, lo: 0x81, hi: 0x81}, - {value: 0x0010, lo: 0x82, hi: 0x83}, - {value: 0x0010, lo: 0x85, hi: 0x8c}, - {value: 0x0010, lo: 0x8f, hi: 0x90}, - {value: 0x0010, lo: 0x93, hi: 0xa8}, - {value: 0x0010, lo: 0xaa, hi: 0xb0}, - {value: 0x0010, lo: 0xb2, hi: 0xb2}, - {value: 0x0010, lo: 0xb6, hi: 0xb9}, - {value: 0x0034, lo: 0xbc, hi: 0xbc}, - {value: 0x0010, lo: 0xbd, hi: 0xbf}, - // Block 0x1e, offset 0xed - {value: 0x0010, lo: 0x80, hi: 0x80}, - {value: 0x0014, lo: 0x81, hi: 0x84}, - {value: 0x0010, lo: 0x87, hi: 0x88}, - {value: 0x0010, lo: 0x8b, hi: 0x8c}, - {value: 0x0034, lo: 0x8d, hi: 0x8d}, - {value: 0x0010, lo: 0x8e, hi: 0x8e}, - {value: 0x0010, lo: 0x97, hi: 0x97}, - {value: 0x0010, lo: 0x9c, hi: 0x9d}, - {value: 0x0010, lo: 0x9f, hi: 0xa1}, - {value: 0x0014, lo: 0xa2, hi: 0xa3}, - {value: 0x0010, lo: 0xa6, hi: 0xb1}, - // Block 0x1f, offset 0xf8 - {value: 0x0014, lo: 0x81, hi: 0x82}, - {value: 0x0010, lo: 0x83, hi: 0x83}, - {value: 0x0010, lo: 0x85, hi: 0x8a}, - {value: 0x0010, lo: 0x8f, hi: 0x90}, - {value: 0x0010, lo: 0x93, hi: 0xa8}, - {value: 0x0010, lo: 0xaa, hi: 0xb0}, - {value: 0x0010, lo: 0xb2, hi: 0xb3}, - {value: 0x0010, lo: 0xb5, hi: 0xb6}, - {value: 0x0010, lo: 0xb8, hi: 0xb9}, - {value: 0x0034, lo: 0xbc, hi: 0xbc}, - {value: 0x0010, lo: 0xbe, hi: 0xbf}, - // Block 0x20, offset 0x103 - {value: 0x0010, lo: 0x80, hi: 0x80}, - {value: 0x0014, lo: 0x81, hi: 0x82}, - {value: 0x0014, lo: 0x87, hi: 0x88}, - {value: 0x0014, lo: 0x8b, hi: 0x8c}, - {value: 0x0034, lo: 0x8d, hi: 0x8d}, - {value: 0x0014, lo: 0x91, hi: 0x91}, - {value: 0x0010, lo: 0x99, hi: 0x9c}, - {value: 0x0010, lo: 0x9e, hi: 0x9e}, - {value: 0x0010, lo: 0xa6, hi: 0xaf}, - {value: 0x0014, lo: 0xb0, hi: 0xb1}, - {value: 
0x0010, lo: 0xb2, hi: 0xb4}, - {value: 0x0014, lo: 0xb5, hi: 0xb5}, - // Block 0x21, offset 0x10f - {value: 0x0014, lo: 0x81, hi: 0x82}, - {value: 0x0010, lo: 0x83, hi: 0x83}, - {value: 0x0010, lo: 0x85, hi: 0x8d}, - {value: 0x0010, lo: 0x8f, hi: 0x91}, - {value: 0x0010, lo: 0x93, hi: 0xa8}, - {value: 0x0010, lo: 0xaa, hi: 0xb0}, - {value: 0x0010, lo: 0xb2, hi: 0xb3}, - {value: 0x0010, lo: 0xb5, hi: 0xb9}, - {value: 0x0034, lo: 0xbc, hi: 0xbc}, - {value: 0x0010, lo: 0xbd, hi: 0xbf}, - // Block 0x22, offset 0x119 - {value: 0x0010, lo: 0x80, hi: 0x80}, - {value: 0x0014, lo: 0x81, hi: 0x85}, - {value: 0x0014, lo: 0x87, hi: 0x88}, - {value: 0x0010, lo: 0x89, hi: 0x89}, - {value: 0x0010, lo: 0x8b, hi: 0x8c}, - {value: 0x0034, lo: 0x8d, hi: 0x8d}, - {value: 0x0010, lo: 0x90, hi: 0x90}, - {value: 0x0010, lo: 0xa0, hi: 0xa1}, - {value: 0x0014, lo: 0xa2, hi: 0xa3}, - {value: 0x0010, lo: 0xa6, hi: 0xaf}, - {value: 0x0010, lo: 0xb9, hi: 0xb9}, - // Block 0x23, offset 0x124 - {value: 0x0014, lo: 0x81, hi: 0x81}, - {value: 0x0010, lo: 0x82, hi: 0x83}, - {value: 0x0010, lo: 0x85, hi: 0x8c}, - {value: 0x0010, lo: 0x8f, hi: 0x90}, - {value: 0x0010, lo: 0x93, hi: 0xa8}, - {value: 0x0010, lo: 0xaa, hi: 0xb0}, - {value: 0x0010, lo: 0xb2, hi: 0xb3}, - {value: 0x0010, lo: 0xb5, hi: 0xb9}, - {value: 0x0034, lo: 0xbc, hi: 0xbc}, - {value: 0x0010, lo: 0xbd, hi: 0xbe}, - {value: 0x0014, lo: 0xbf, hi: 0xbf}, - // Block 0x24, offset 0x12f - {value: 0x0010, lo: 0x80, hi: 0x80}, - {value: 0x0014, lo: 0x81, hi: 0x84}, - {value: 0x0010, lo: 0x87, hi: 0x88}, - {value: 0x0010, lo: 0x8b, hi: 0x8c}, - {value: 0x0034, lo: 0x8d, hi: 0x8d}, - {value: 0x0014, lo: 0x96, hi: 0x96}, - {value: 0x0010, lo: 0x97, hi: 0x97}, - {value: 0x0010, lo: 0x9c, hi: 0x9d}, - {value: 0x0010, lo: 0x9f, hi: 0xa1}, - {value: 0x0014, lo: 0xa2, hi: 0xa3}, - {value: 0x0010, lo: 0xa6, hi: 0xaf}, - {value: 0x0010, lo: 0xb1, hi: 0xb1}, - // Block 0x25, offset 0x13b - {value: 0x0014, lo: 0x82, hi: 0x82}, - {value: 0x0010, lo: 0x83, hi: 0x83}, - {value: 0x0010, lo: 0x85, hi: 0x8a}, - {value: 0x0010, lo: 0x8e, hi: 0x90}, - {value: 0x0010, lo: 0x92, hi: 0x95}, - {value: 0x0010, lo: 0x99, hi: 0x9a}, - {value: 0x0010, lo: 0x9c, hi: 0x9c}, - {value: 0x0010, lo: 0x9e, hi: 0x9f}, - {value: 0x0010, lo: 0xa3, hi: 0xa4}, - {value: 0x0010, lo: 0xa8, hi: 0xaa}, - {value: 0x0010, lo: 0xae, hi: 0xb9}, - {value: 0x0010, lo: 0xbe, hi: 0xbf}, - // Block 0x26, offset 0x147 - {value: 0x0014, lo: 0x80, hi: 0x80}, - {value: 0x0010, lo: 0x81, hi: 0x82}, - {value: 0x0010, lo: 0x86, hi: 0x88}, - {value: 0x0010, lo: 0x8a, hi: 0x8c}, - {value: 0x0034, lo: 0x8d, hi: 0x8d}, - {value: 0x0010, lo: 0x90, hi: 0x90}, - {value: 0x0010, lo: 0x97, hi: 0x97}, - {value: 0x0010, lo: 0xa6, hi: 0xaf}, - // Block 0x27, offset 0x14f - {value: 0x0014, lo: 0x80, hi: 0x80}, - {value: 0x0010, lo: 0x81, hi: 0x83}, - {value: 0x0010, lo: 0x85, hi: 0x8c}, - {value: 0x0010, lo: 0x8e, hi: 0x90}, - {value: 0x0010, lo: 0x92, hi: 0xa8}, - {value: 0x0010, lo: 0xaa, hi: 0xb9}, - {value: 0x0010, lo: 0xbd, hi: 0xbd}, - {value: 0x0014, lo: 0xbe, hi: 0xbf}, - // Block 0x28, offset 0x157 - {value: 0x0014, lo: 0x80, hi: 0x80}, - {value: 0x0010, lo: 0x81, hi: 0x84}, - {value: 0x0014, lo: 0x86, hi: 0x88}, - {value: 0x0014, lo: 0x8a, hi: 0x8c}, - {value: 0x0034, lo: 0x8d, hi: 0x8d}, - {value: 0x0034, lo: 0x95, hi: 0x96}, - {value: 0x0010, lo: 0x98, hi: 0x9a}, - {value: 0x0010, lo: 0xa0, hi: 0xa1}, - {value: 0x0014, lo: 0xa2, hi: 0xa3}, - {value: 0x0010, lo: 0xa6, hi: 0xaf}, - // Block 0x29, offset 0x161 - {value: 0x0010, 
lo: 0x80, hi: 0x80}, - {value: 0x0014, lo: 0x81, hi: 0x81}, - {value: 0x0010, lo: 0x82, hi: 0x83}, - {value: 0x0010, lo: 0x85, hi: 0x8c}, - {value: 0x0010, lo: 0x8e, hi: 0x90}, - {value: 0x0010, lo: 0x92, hi: 0xa8}, - {value: 0x0010, lo: 0xaa, hi: 0xb3}, - {value: 0x0010, lo: 0xb5, hi: 0xb9}, - {value: 0x0034, lo: 0xbc, hi: 0xbc}, - {value: 0x0010, lo: 0xbd, hi: 0xbe}, - {value: 0x0014, lo: 0xbf, hi: 0xbf}, - // Block 0x2a, offset 0x16c - {value: 0x0010, lo: 0x80, hi: 0x84}, - {value: 0x0014, lo: 0x86, hi: 0x86}, - {value: 0x0010, lo: 0x87, hi: 0x88}, - {value: 0x0010, lo: 0x8a, hi: 0x8b}, - {value: 0x0014, lo: 0x8c, hi: 0x8c}, - {value: 0x0034, lo: 0x8d, hi: 0x8d}, - {value: 0x0010, lo: 0x95, hi: 0x96}, - {value: 0x0010, lo: 0x9e, hi: 0x9e}, - {value: 0x0010, lo: 0xa0, hi: 0xa1}, - {value: 0x0014, lo: 0xa2, hi: 0xa3}, - {value: 0x0010, lo: 0xa6, hi: 0xaf}, - {value: 0x0010, lo: 0xb1, hi: 0xb2}, - // Block 0x2b, offset 0x178 - {value: 0x0014, lo: 0x81, hi: 0x81}, - {value: 0x0010, lo: 0x82, hi: 0x83}, - {value: 0x0010, lo: 0x85, hi: 0x8c}, - {value: 0x0010, lo: 0x8e, hi: 0x90}, - {value: 0x0010, lo: 0x92, hi: 0xba}, - {value: 0x0010, lo: 0xbd, hi: 0xbf}, - // Block 0x2c, offset 0x17e - {value: 0x0010, lo: 0x80, hi: 0x80}, - {value: 0x0014, lo: 0x81, hi: 0x84}, - {value: 0x0010, lo: 0x86, hi: 0x88}, - {value: 0x0010, lo: 0x8a, hi: 0x8c}, - {value: 0x0034, lo: 0x8d, hi: 0x8d}, - {value: 0x0010, lo: 0x8e, hi: 0x8e}, - {value: 0x0010, lo: 0x94, hi: 0x97}, - {value: 0x0010, lo: 0x9f, hi: 0xa1}, - {value: 0x0014, lo: 0xa2, hi: 0xa3}, - {value: 0x0010, lo: 0xa6, hi: 0xaf}, - {value: 0x0010, lo: 0xba, hi: 0xbf}, - // Block 0x2d, offset 0x189 - {value: 0x0010, lo: 0x82, hi: 0x83}, - {value: 0x0010, lo: 0x85, hi: 0x96}, - {value: 0x0010, lo: 0x9a, hi: 0xb1}, - {value: 0x0010, lo: 0xb3, hi: 0xbb}, - {value: 0x0010, lo: 0xbd, hi: 0xbd}, - // Block 0x2e, offset 0x18e - {value: 0x0010, lo: 0x80, hi: 0x86}, - {value: 0x0034, lo: 0x8a, hi: 0x8a}, - {value: 0x0010, lo: 0x8f, hi: 0x91}, - {value: 0x0014, lo: 0x92, hi: 0x94}, - {value: 0x0014, lo: 0x96, hi: 0x96}, - {value: 0x0010, lo: 0x98, hi: 0x9f}, - {value: 0x0010, lo: 0xa6, hi: 0xaf}, - {value: 0x0010, lo: 0xb2, hi: 0xb3}, - // Block 0x2f, offset 0x196 - {value: 0x0014, lo: 0xb1, hi: 0xb1}, - {value: 0x0014, lo: 0xb4, hi: 0xb7}, - {value: 0x0034, lo: 0xb8, hi: 0xba}, - // Block 0x30, offset 0x199 - {value: 0x0004, lo: 0x86, hi: 0x86}, - {value: 0x0014, lo: 0x87, hi: 0x87}, - {value: 0x0034, lo: 0x88, hi: 0x8b}, - {value: 0x0014, lo: 0x8c, hi: 0x8e}, - {value: 0x0010, lo: 0x90, hi: 0x99}, - // Block 0x31, offset 0x19e - {value: 0x0014, lo: 0xb1, hi: 0xb1}, - {value: 0x0014, lo: 0xb4, hi: 0xb7}, - {value: 0x0034, lo: 0xb8, hi: 0xb9}, - {value: 0x0014, lo: 0xbb, hi: 0xbc}, - // Block 0x32, offset 0x1a2 - {value: 0x0004, lo: 0x86, hi: 0x86}, - {value: 0x0034, lo: 0x88, hi: 0x8b}, - {value: 0x0014, lo: 0x8c, hi: 0x8d}, - {value: 0x0010, lo: 0x90, hi: 0x99}, - // Block 0x33, offset 0x1a6 - {value: 0x0010, lo: 0x80, hi: 0x80}, - {value: 0x0034, lo: 0x98, hi: 0x99}, - {value: 0x0010, lo: 0xa0, hi: 0xa9}, - {value: 0x0034, lo: 0xb5, hi: 0xb5}, - {value: 0x0034, lo: 0xb7, hi: 0xb7}, - {value: 0x0034, lo: 0xb9, hi: 0xb9}, - {value: 0x0010, lo: 0xbe, hi: 0xbf}, - // Block 0x34, offset 0x1ad - {value: 0x0010, lo: 0x80, hi: 0x87}, - {value: 0x0010, lo: 0x89, hi: 0xac}, - {value: 0x0034, lo: 0xb1, hi: 0xb2}, - {value: 0x0014, lo: 0xb3, hi: 0xb3}, - {value: 0x0034, lo: 0xb4, hi: 0xb4}, - {value: 0x0014, lo: 0xb5, hi: 0xb9}, - {value: 0x0034, lo: 0xba, hi: 0xbd}, - 
{value: 0x0014, lo: 0xbe, hi: 0xbe}, - {value: 0x0010, lo: 0xbf, hi: 0xbf}, - // Block 0x35, offset 0x1b6 - {value: 0x0034, lo: 0x80, hi: 0x80}, - {value: 0x0014, lo: 0x81, hi: 0x81}, - {value: 0x0024, lo: 0x82, hi: 0x83}, - {value: 0x0034, lo: 0x84, hi: 0x84}, - {value: 0x0024, lo: 0x86, hi: 0x87}, - {value: 0x0010, lo: 0x88, hi: 0x8c}, - {value: 0x0014, lo: 0x8d, hi: 0x97}, - {value: 0x0014, lo: 0x99, hi: 0xbc}, - // Block 0x36, offset 0x1be - {value: 0x0034, lo: 0x86, hi: 0x86}, - // Block 0x37, offset 0x1bf - {value: 0x0010, lo: 0xab, hi: 0xac}, - {value: 0x0014, lo: 0xad, hi: 0xb0}, - {value: 0x0010, lo: 0xb1, hi: 0xb1}, - {value: 0x0014, lo: 0xb2, hi: 0xb6}, - {value: 0x0034, lo: 0xb7, hi: 0xb7}, - {value: 0x0010, lo: 0xb8, hi: 0xb8}, - {value: 0x0034, lo: 0xb9, hi: 0xba}, - {value: 0x0010, lo: 0xbb, hi: 0xbc}, - {value: 0x0014, lo: 0xbd, hi: 0xbe}, - // Block 0x38, offset 0x1c8 - {value: 0x0010, lo: 0x80, hi: 0x89}, - {value: 0x0010, lo: 0x96, hi: 0x97}, - {value: 0x0014, lo: 0x98, hi: 0x99}, - {value: 0x0014, lo: 0x9e, hi: 0xa0}, - {value: 0x0010, lo: 0xa2, hi: 0xa4}, - {value: 0x0010, lo: 0xa7, hi: 0xad}, - {value: 0x0014, lo: 0xb1, hi: 0xb4}, - // Block 0x39, offset 0x1cf - {value: 0x0014, lo: 0x82, hi: 0x82}, - {value: 0x0010, lo: 0x83, hi: 0x84}, - {value: 0x0014, lo: 0x85, hi: 0x86}, - {value: 0x0010, lo: 0x87, hi: 0x8c}, - {value: 0x0034, lo: 0x8d, hi: 0x8d}, - {value: 0x0010, lo: 0x8f, hi: 0x9c}, - {value: 0x0014, lo: 0x9d, hi: 0x9d}, - {value: 0x6c53, lo: 0xa0, hi: 0xbf}, - // Block 0x3a, offset 0x1d7 - {value: 0x7053, lo: 0x80, hi: 0x85}, - {value: 0x7053, lo: 0x87, hi: 0x87}, - {value: 0x7053, lo: 0x8d, hi: 0x8d}, - {value: 0x0010, lo: 0x90, hi: 0xba}, - {value: 0x0014, lo: 0xbc, hi: 0xbc}, - {value: 0x0010, lo: 0xbd, hi: 0xbf}, - // Block 0x3b, offset 0x1dd - {value: 0x0010, lo: 0x80, hi: 0x88}, - {value: 0x0010, lo: 0x8a, hi: 0x8d}, - {value: 0x0010, lo: 0x90, hi: 0x96}, - {value: 0x0010, lo: 0x98, hi: 0x98}, - {value: 0x0010, lo: 0x9a, hi: 0x9d}, - {value: 0x0010, lo: 0xa0, hi: 0xbf}, - // Block 0x3c, offset 0x1e3 - {value: 0x0010, lo: 0x80, hi: 0x88}, - {value: 0x0010, lo: 0x8a, hi: 0x8d}, - {value: 0x0010, lo: 0x90, hi: 0xb0}, - {value: 0x0010, lo: 0xb2, hi: 0xb5}, - {value: 0x0010, lo: 0xb8, hi: 0xbe}, - // Block 0x3d, offset 0x1e8 - {value: 0x0010, lo: 0x80, hi: 0x80}, - {value: 0x0010, lo: 0x82, hi: 0x85}, - {value: 0x0010, lo: 0x88, hi: 0x96}, - {value: 0x0010, lo: 0x98, hi: 0xbf}, - // Block 0x3e, offset 0x1ec - {value: 0x0010, lo: 0x80, hi: 0x90}, - {value: 0x0010, lo: 0x92, hi: 0x95}, - {value: 0x0010, lo: 0x98, hi: 0xbf}, - // Block 0x3f, offset 0x1ef - {value: 0x0010, lo: 0x80, hi: 0x9a}, - {value: 0x0024, lo: 0x9d, hi: 0x9f}, - // Block 0x40, offset 0x1f1 - {value: 0x0010, lo: 0x80, hi: 0x8f}, - {value: 0x7453, lo: 0xa0, hi: 0xaf}, - {value: 0x7853, lo: 0xb0, hi: 0xbf}, - // Block 0x41, offset 0x1f4 - {value: 0x7c53, lo: 0x80, hi: 0x8f}, - {value: 0x8053, lo: 0x90, hi: 0x9f}, - {value: 0x7c53, lo: 0xa0, hi: 0xaf}, - {value: 0x0813, lo: 0xb0, hi: 0xb5}, - {value: 0x0892, lo: 0xb8, hi: 0xbd}, - // Block 0x42, offset 0x1f9 - {value: 0x0010, lo: 0x81, hi: 0xbf}, - // Block 0x43, offset 0x1fa - {value: 0x0010, lo: 0x80, hi: 0xac}, - {value: 0x0010, lo: 0xaf, hi: 0xbf}, - // Block 0x44, offset 0x1fc - {value: 0x0010, lo: 0x81, hi: 0x9a}, - {value: 0x0010, lo: 0xa0, hi: 0xbf}, - // Block 0x45, offset 0x1fe - {value: 0x0010, lo: 0x80, hi: 0xaa}, - {value: 0x0010, lo: 0xae, hi: 0xb8}, - // Block 0x46, offset 0x200 - {value: 0x0010, lo: 0x80, hi: 0x8c}, - {value: 
0x0010, lo: 0x8e, hi: 0x91}, - {value: 0x0014, lo: 0x92, hi: 0x93}, - {value: 0x0034, lo: 0x94, hi: 0x94}, - {value: 0x0010, lo: 0xa0, hi: 0xb1}, - {value: 0x0014, lo: 0xb2, hi: 0xb3}, - {value: 0x0034, lo: 0xb4, hi: 0xb4}, - // Block 0x47, offset 0x207 - {value: 0x0010, lo: 0x80, hi: 0x91}, - {value: 0x0014, lo: 0x92, hi: 0x93}, - {value: 0x0010, lo: 0xa0, hi: 0xac}, - {value: 0x0010, lo: 0xae, hi: 0xb0}, - {value: 0x0014, lo: 0xb2, hi: 0xb3}, - // Block 0x48, offset 0x20c - {value: 0x0014, lo: 0xb4, hi: 0xb5}, - {value: 0x0010, lo: 0xb6, hi: 0xb6}, - {value: 0x0014, lo: 0xb7, hi: 0xbd}, - {value: 0x0010, lo: 0xbe, hi: 0xbf}, - // Block 0x49, offset 0x210 - {value: 0x0010, lo: 0x80, hi: 0x85}, - {value: 0x0014, lo: 0x86, hi: 0x86}, - {value: 0x0010, lo: 0x87, hi: 0x88}, - {value: 0x0014, lo: 0x89, hi: 0x91}, - {value: 0x0034, lo: 0x92, hi: 0x92}, - {value: 0x0014, lo: 0x93, hi: 0x93}, - {value: 0x0004, lo: 0x97, hi: 0x97}, - {value: 0x0024, lo: 0x9d, hi: 0x9d}, - {value: 0x0010, lo: 0xa0, hi: 0xa9}, - // Block 0x4a, offset 0x219 - {value: 0x0014, lo: 0x8b, hi: 0x8e}, - {value: 0x0010, lo: 0x90, hi: 0x99}, - {value: 0x0010, lo: 0xa0, hi: 0xbf}, - // Block 0x4b, offset 0x21c - {value: 0x0010, lo: 0x80, hi: 0x82}, - {value: 0x0014, lo: 0x83, hi: 0x83}, - {value: 0x0010, lo: 0x84, hi: 0xb7}, - // Block 0x4c, offset 0x21f - {value: 0x0010, lo: 0x80, hi: 0x84}, - {value: 0x0014, lo: 0x85, hi: 0x86}, - {value: 0x0010, lo: 0x87, hi: 0xa8}, - {value: 0x0034, lo: 0xa9, hi: 0xa9}, - {value: 0x0010, lo: 0xaa, hi: 0xaa}, - {value: 0x0010, lo: 0xb0, hi: 0xbf}, - // Block 0x4d, offset 0x225 - {value: 0x0010, lo: 0x80, hi: 0xb5}, - // Block 0x4e, offset 0x226 - {value: 0x0010, lo: 0x80, hi: 0x9e}, - {value: 0x0014, lo: 0xa0, hi: 0xa2}, - {value: 0x0010, lo: 0xa3, hi: 0xa6}, - {value: 0x0014, lo: 0xa7, hi: 0xa8}, - {value: 0x0010, lo: 0xa9, hi: 0xab}, - {value: 0x0010, lo: 0xb0, hi: 0xb1}, - {value: 0x0014, lo: 0xb2, hi: 0xb2}, - {value: 0x0010, lo: 0xb3, hi: 0xb8}, - {value: 0x0034, lo: 0xb9, hi: 0xb9}, - {value: 0x0024, lo: 0xba, hi: 0xba}, - {value: 0x0034, lo: 0xbb, hi: 0xbb}, - // Block 0x4f, offset 0x231 - {value: 0x0010, lo: 0x86, hi: 0x8f}, - // Block 0x50, offset 0x232 - {value: 0x0010, lo: 0x90, hi: 0x99}, - // Block 0x51, offset 0x233 - {value: 0x0010, lo: 0x80, hi: 0x96}, - {value: 0x0024, lo: 0x97, hi: 0x97}, - {value: 0x0034, lo: 0x98, hi: 0x98}, - {value: 0x0010, lo: 0x99, hi: 0x9a}, - {value: 0x0014, lo: 0x9b, hi: 0x9b}, - // Block 0x52, offset 0x238 - {value: 0x0010, lo: 0x95, hi: 0x95}, - {value: 0x0014, lo: 0x96, hi: 0x96}, - {value: 0x0010, lo: 0x97, hi: 0x97}, - {value: 0x0014, lo: 0x98, hi: 0x9e}, - {value: 0x0034, lo: 0xa0, hi: 0xa0}, - {value: 0x0010, lo: 0xa1, hi: 0xa1}, - {value: 0x0014, lo: 0xa2, hi: 0xa2}, - {value: 0x0010, lo: 0xa3, hi: 0xa4}, - {value: 0x0014, lo: 0xa5, hi: 0xac}, - {value: 0x0010, lo: 0xad, hi: 0xb2}, - {value: 0x0014, lo: 0xb3, hi: 0xb4}, - {value: 0x0024, lo: 0xb5, hi: 0xbc}, - {value: 0x0034, lo: 0xbf, hi: 0xbf}, - // Block 0x53, offset 0x245 - {value: 0x0010, lo: 0x80, hi: 0x89}, - {value: 0x0010, lo: 0x90, hi: 0x99}, - {value: 0x0004, lo: 0xa7, hi: 0xa7}, - {value: 0x0024, lo: 0xb0, hi: 0xb4}, - {value: 0x0034, lo: 0xb5, hi: 0xba}, - {value: 0x0024, lo: 0xbb, hi: 0xbc}, - {value: 0x0034, lo: 0xbd, hi: 0xbd}, - {value: 0x0014, lo: 0xbe, hi: 0xbe}, - // Block 0x54, offset 0x24d - {value: 0x0014, lo: 0x80, hi: 0x83}, - {value: 0x0010, lo: 0x84, hi: 0xb3}, - {value: 0x0034, lo: 0xb4, hi: 0xb4}, - {value: 0x0010, lo: 0xb5, hi: 0xb5}, - {value: 0x0014, lo: 
0xb6, hi: 0xba}, - {value: 0x0010, lo: 0xbb, hi: 0xbb}, - {value: 0x0014, lo: 0xbc, hi: 0xbc}, - {value: 0x0010, lo: 0xbd, hi: 0xbf}, - // Block 0x55, offset 0x255 - {value: 0x0010, lo: 0x80, hi: 0x81}, - {value: 0x0014, lo: 0x82, hi: 0x82}, - {value: 0x0010, lo: 0x83, hi: 0x83}, - {value: 0x0030, lo: 0x84, hi: 0x84}, - {value: 0x0010, lo: 0x85, hi: 0x8b}, - {value: 0x0010, lo: 0x90, hi: 0x99}, - {value: 0x0024, lo: 0xab, hi: 0xab}, - {value: 0x0034, lo: 0xac, hi: 0xac}, - {value: 0x0024, lo: 0xad, hi: 0xb3}, - // Block 0x56, offset 0x25e - {value: 0x0014, lo: 0x80, hi: 0x81}, - {value: 0x0010, lo: 0x82, hi: 0xa1}, - {value: 0x0014, lo: 0xa2, hi: 0xa5}, - {value: 0x0010, lo: 0xa6, hi: 0xa7}, - {value: 0x0014, lo: 0xa8, hi: 0xa9}, - {value: 0x0030, lo: 0xaa, hi: 0xaa}, - {value: 0x0034, lo: 0xab, hi: 0xab}, - {value: 0x0014, lo: 0xac, hi: 0xad}, - {value: 0x0010, lo: 0xae, hi: 0xbf}, - // Block 0x57, offset 0x267 - {value: 0x0010, lo: 0x80, hi: 0xa5}, - {value: 0x0034, lo: 0xa6, hi: 0xa6}, - {value: 0x0010, lo: 0xa7, hi: 0xa7}, - {value: 0x0014, lo: 0xa8, hi: 0xa9}, - {value: 0x0010, lo: 0xaa, hi: 0xac}, - {value: 0x0014, lo: 0xad, hi: 0xad}, - {value: 0x0010, lo: 0xae, hi: 0xae}, - {value: 0x0014, lo: 0xaf, hi: 0xb1}, - {value: 0x0030, lo: 0xb2, hi: 0xb3}, - // Block 0x58, offset 0x270 - {value: 0x0010, lo: 0x80, hi: 0xab}, - {value: 0x0014, lo: 0xac, hi: 0xb3}, - {value: 0x0010, lo: 0xb4, hi: 0xb5}, - {value: 0x0014, lo: 0xb6, hi: 0xb6}, - {value: 0x0034, lo: 0xb7, hi: 0xb7}, - // Block 0x59, offset 0x275 - {value: 0x0010, lo: 0x80, hi: 0x89}, - {value: 0x0010, lo: 0x8d, hi: 0xb7}, - {value: 0x0014, lo: 0xb8, hi: 0xbd}, - // Block 0x5a, offset 0x278 - {value: 0x296a, lo: 0x80, hi: 0x80}, - {value: 0x2a2a, lo: 0x81, hi: 0x81}, - {value: 0x2aea, lo: 0x82, hi: 0x82}, - {value: 0x2baa, lo: 0x83, hi: 0x83}, - {value: 0x2c6a, lo: 0x84, hi: 0x84}, - {value: 0x2d2a, lo: 0x85, hi: 0x85}, - {value: 0x2dea, lo: 0x86, hi: 0x86}, - {value: 0x2eaa, lo: 0x87, hi: 0x87}, - {value: 0x2f6a, lo: 0x88, hi: 0x88}, - // Block 0x5b, offset 0x281 - {value: 0x0024, lo: 0x90, hi: 0x92}, - {value: 0x0034, lo: 0x94, hi: 0x99}, - {value: 0x0024, lo: 0x9a, hi: 0x9b}, - {value: 0x0034, lo: 0x9c, hi: 0x9f}, - {value: 0x0024, lo: 0xa0, hi: 0xa0}, - {value: 0x0010, lo: 0xa1, hi: 0xa1}, - {value: 0x0034, lo: 0xa2, hi: 0xa8}, - {value: 0x0010, lo: 0xa9, hi: 0xac}, - {value: 0x0034, lo: 0xad, hi: 0xad}, - {value: 0x0010, lo: 0xae, hi: 0xb3}, - {value: 0x0024, lo: 0xb4, hi: 0xb4}, - {value: 0x0010, lo: 0xb5, hi: 0xb6}, - {value: 0x0024, lo: 0xb8, hi: 0xb9}, - // Block 0x5c, offset 0x28e - {value: 0x0012, lo: 0x80, hi: 0xab}, - {value: 0x0015, lo: 0xac, hi: 0xbf}, - // Block 0x5d, offset 0x290 - {value: 0x0015, lo: 0x80, hi: 0xaa}, - {value: 0x0012, lo: 0xab, hi: 0xb7}, - {value: 0x0015, lo: 0xb8, hi: 0xb8}, - {value: 0x8452, lo: 0xb9, hi: 0xb9}, - {value: 0x0012, lo: 0xba, hi: 0xbc}, - {value: 0x8852, lo: 0xbd, hi: 0xbd}, - {value: 0x0012, lo: 0xbe, hi: 0xbf}, - // Block 0x5e, offset 0x297 - {value: 0x0012, lo: 0x80, hi: 0x9a}, - {value: 0x0015, lo: 0x9b, hi: 0xbf}, - // Block 0x5f, offset 0x299 - {value: 0x0024, lo: 0x80, hi: 0x81}, - {value: 0x0034, lo: 0x82, hi: 0x82}, - {value: 0x0024, lo: 0x83, hi: 0x89}, - {value: 0x0034, lo: 0x8a, hi: 0x8a}, - {value: 0x0024, lo: 0x8b, hi: 0x8c}, - {value: 0x0034, lo: 0x8d, hi: 0x90}, - {value: 0x0024, lo: 0x91, hi: 0xb5}, - {value: 0x0024, lo: 0xbb, hi: 0xbb}, - {value: 0x0034, lo: 0xbc, hi: 0xbd}, - {value: 0x0024, lo: 0xbe, hi: 0xbe}, - {value: 0x0034, lo: 0xbf, hi: 0xbf}, - // 
Block 0x60, offset 0x2a4 - {value: 0x0117, lo: 0x80, hi: 0xbf}, - // Block 0x61, offset 0x2a5 - {value: 0x0117, lo: 0x80, hi: 0x95}, - {value: 0x306a, lo: 0x96, hi: 0x96}, - {value: 0x316a, lo: 0x97, hi: 0x97}, - {value: 0x326a, lo: 0x98, hi: 0x98}, - {value: 0x336a, lo: 0x99, hi: 0x99}, - {value: 0x346a, lo: 0x9a, hi: 0x9a}, - {value: 0x356a, lo: 0x9b, hi: 0x9b}, - {value: 0x0012, lo: 0x9c, hi: 0x9d}, - {value: 0x366b, lo: 0x9e, hi: 0x9e}, - {value: 0x0012, lo: 0x9f, hi: 0x9f}, - {value: 0x0117, lo: 0xa0, hi: 0xbf}, - // Block 0x62, offset 0x2b0 - {value: 0x0812, lo: 0x80, hi: 0x87}, - {value: 0x0813, lo: 0x88, hi: 0x8f}, - {value: 0x0812, lo: 0x90, hi: 0x95}, - {value: 0x0813, lo: 0x98, hi: 0x9d}, - {value: 0x0812, lo: 0xa0, hi: 0xa7}, - {value: 0x0813, lo: 0xa8, hi: 0xaf}, - {value: 0x0812, lo: 0xb0, hi: 0xb7}, - {value: 0x0813, lo: 0xb8, hi: 0xbf}, - // Block 0x63, offset 0x2b8 - {value: 0x0004, lo: 0x8b, hi: 0x8b}, - {value: 0x0014, lo: 0x8c, hi: 0x8c}, - {value: 0x0004, lo: 0x8d, hi: 0x8d}, - {value: 0x0014, lo: 0x8e, hi: 0x8f}, - {value: 0x0014, lo: 0x98, hi: 0x99}, - {value: 0x0014, lo: 0xa4, hi: 0xa4}, - {value: 0x0014, lo: 0xa7, hi: 0xa7}, - {value: 0x0014, lo: 0xaa, hi: 0xae}, - {value: 0x0010, lo: 0xaf, hi: 0xaf}, - {value: 0x0010, lo: 0xbf, hi: 0xbf}, - // Block 0x64, offset 0x2c2 - {value: 0x0010, lo: 0x80, hi: 0x80}, - {value: 0x0010, lo: 0x94, hi: 0x94}, - {value: 0x0014, lo: 0xa0, hi: 0xa4}, - {value: 0x0014, lo: 0xa6, hi: 0xaf}, - {value: 0x0015, lo: 0xb1, hi: 0xb1}, - {value: 0x0015, lo: 0xbf, hi: 0xbf}, - // Block 0x65, offset 0x2c8 - {value: 0x0015, lo: 0x90, hi: 0x9c}, - // Block 0x66, offset 0x2c9 - {value: 0x0024, lo: 0x90, hi: 0x91}, - {value: 0x0034, lo: 0x92, hi: 0x93}, - {value: 0x0024, lo: 0x94, hi: 0x97}, - {value: 0x0034, lo: 0x98, hi: 0x9a}, - {value: 0x0024, lo: 0x9b, hi: 0x9c}, - {value: 0x0014, lo: 0x9d, hi: 0xa0}, - {value: 0x0024, lo: 0xa1, hi: 0xa1}, - {value: 0x0014, lo: 0xa2, hi: 0xa4}, - {value: 0x0034, lo: 0xa5, hi: 0xa6}, - {value: 0x0024, lo: 0xa7, hi: 0xa7}, - {value: 0x0034, lo: 0xa8, hi: 0xa8}, - {value: 0x0024, lo: 0xa9, hi: 0xa9}, - {value: 0x0034, lo: 0xaa, hi: 0xaf}, - {value: 0x0024, lo: 0xb0, hi: 0xb0}, - // Block 0x67, offset 0x2d7 - {value: 0x0016, lo: 0x85, hi: 0x86}, - {value: 0x0012, lo: 0x87, hi: 0x89}, - {value: 0x9d52, lo: 0x8e, hi: 0x8e}, - {value: 0x1013, lo: 0xa0, hi: 0xaf}, - {value: 0x1012, lo: 0xb0, hi: 0xbf}, - // Block 0x68, offset 0x2dc - {value: 0x0010, lo: 0x80, hi: 0x82}, - {value: 0x0716, lo: 0x83, hi: 0x84}, - {value: 0x0010, lo: 0x85, hi: 0x88}, - // Block 0x69, offset 0x2df - {value: 0xa053, lo: 0xb6, hi: 0xb7}, - {value: 0xa353, lo: 0xb8, hi: 0xb9}, - {value: 0xa653, lo: 0xba, hi: 0xbb}, - {value: 0xa353, lo: 0xbc, hi: 0xbd}, - {value: 0xa053, lo: 0xbe, hi: 0xbf}, - // Block 0x6a, offset 0x2e4 - {value: 0x3013, lo: 0x80, hi: 0x8f}, - {value: 0x6553, lo: 0x90, hi: 0x9f}, - {value: 0xa953, lo: 0xa0, hi: 0xae}, - {value: 0x3012, lo: 0xb0, hi: 0xbf}, - // Block 0x6b, offset 0x2e8 - {value: 0x0117, lo: 0x80, hi: 0xa3}, - {value: 0x0012, lo: 0xa4, hi: 0xa4}, - {value: 0x0716, lo: 0xab, hi: 0xac}, - {value: 0x0316, lo: 0xad, hi: 0xae}, - {value: 0x0024, lo: 0xaf, hi: 0xb1}, - {value: 0x0117, lo: 0xb2, hi: 0xb3}, - // Block 0x6c, offset 0x2ee - {value: 0x6c52, lo: 0x80, hi: 0x9f}, - {value: 0x7052, lo: 0xa0, hi: 0xa5}, - {value: 0x7052, lo: 0xa7, hi: 0xa7}, - {value: 0x7052, lo: 0xad, hi: 0xad}, - {value: 0x0010, lo: 0xb0, hi: 0xbf}, - // Block 0x6d, offset 0x2f3 - {value: 0x0010, lo: 0x80, hi: 0xa7}, - {value: 0x0014, 
lo: 0xaf, hi: 0xaf}, - {value: 0x0034, lo: 0xbf, hi: 0xbf}, - // Block 0x6e, offset 0x2f6 - {value: 0x0010, lo: 0x80, hi: 0x96}, - {value: 0x0010, lo: 0xa0, hi: 0xa6}, - {value: 0x0010, lo: 0xa8, hi: 0xae}, - {value: 0x0010, lo: 0xb0, hi: 0xb6}, - {value: 0x0010, lo: 0xb8, hi: 0xbe}, - // Block 0x6f, offset 0x2fb - {value: 0x0010, lo: 0x80, hi: 0x86}, - {value: 0x0010, lo: 0x88, hi: 0x8e}, - {value: 0x0010, lo: 0x90, hi: 0x96}, - {value: 0x0010, lo: 0x98, hi: 0x9e}, - {value: 0x0024, lo: 0xa0, hi: 0xbf}, - // Block 0x70, offset 0x300 - {value: 0x0014, lo: 0xaf, hi: 0xaf}, - // Block 0x71, offset 0x301 - {value: 0x0014, lo: 0x85, hi: 0x85}, - {value: 0x0034, lo: 0xaa, hi: 0xad}, - {value: 0x0030, lo: 0xae, hi: 0xaf}, - {value: 0x0004, lo: 0xb1, hi: 0xb5}, - {value: 0x0014, lo: 0xbb, hi: 0xbb}, - {value: 0x0010, lo: 0xbc, hi: 0xbc}, - // Block 0x72, offset 0x307 - {value: 0x0034, lo: 0x99, hi: 0x9a}, - {value: 0x0004, lo: 0x9b, hi: 0x9e}, - // Block 0x73, offset 0x309 - {value: 0x0004, lo: 0xbc, hi: 0xbe}, - // Block 0x74, offset 0x30a - {value: 0x0010, lo: 0x85, hi: 0xad}, - {value: 0x0010, lo: 0xb1, hi: 0xbf}, - // Block 0x75, offset 0x30c - {value: 0x0010, lo: 0x80, hi: 0x8e}, - {value: 0x0010, lo: 0xa0, hi: 0xba}, - // Block 0x76, offset 0x30e - {value: 0x0010, lo: 0x80, hi: 0x94}, - {value: 0x0014, lo: 0x95, hi: 0x95}, - {value: 0x0010, lo: 0x96, hi: 0xbf}, - // Block 0x77, offset 0x311 - {value: 0x0010, lo: 0x80, hi: 0x8c}, - // Block 0x78, offset 0x312 - {value: 0x0010, lo: 0x90, hi: 0xb7}, - {value: 0x0014, lo: 0xb8, hi: 0xbd}, - // Block 0x79, offset 0x314 - {value: 0x0010, lo: 0x80, hi: 0x8b}, - {value: 0x0014, lo: 0x8c, hi: 0x8c}, - {value: 0x0010, lo: 0x90, hi: 0xab}, - // Block 0x7a, offset 0x317 - {value: 0x0117, lo: 0x80, hi: 0xad}, - {value: 0x0010, lo: 0xae, hi: 0xae}, - {value: 0x0024, lo: 0xaf, hi: 0xaf}, - {value: 0x0014, lo: 0xb0, hi: 0xb2}, - {value: 0x0024, lo: 0xb4, hi: 0xbd}, - {value: 0x0014, lo: 0xbf, hi: 0xbf}, - // Block 0x7b, offset 0x31d - {value: 0x0117, lo: 0x80, hi: 0x9b}, - {value: 0x0015, lo: 0x9c, hi: 0x9d}, - {value: 0x0024, lo: 0x9e, hi: 0x9f}, - {value: 0x0010, lo: 0xa0, hi: 0xbf}, - // Block 0x7c, offset 0x321 - {value: 0x0010, lo: 0x80, hi: 0xaf}, - {value: 0x0024, lo: 0xb0, hi: 0xb1}, - // Block 0x7d, offset 0x323 - {value: 0x0004, lo: 0x80, hi: 0x96}, - {value: 0x0014, lo: 0x97, hi: 0x9f}, - {value: 0x0004, lo: 0xa0, hi: 0xa1}, - {value: 0x0117, lo: 0xa2, hi: 0xaf}, - {value: 0x0012, lo: 0xb0, hi: 0xb1}, - {value: 0x0117, lo: 0xb2, hi: 0xbf}, - // Block 0x7e, offset 0x329 - {value: 0x0117, lo: 0x80, hi: 0xaf}, - {value: 0x0015, lo: 0xb0, hi: 0xb0}, - {value: 0x0012, lo: 0xb1, hi: 0xb8}, - {value: 0x0316, lo: 0xb9, hi: 0xba}, - {value: 0x0716, lo: 0xbb, hi: 0xbc}, - {value: 0x8453, lo: 0xbd, hi: 0xbd}, - {value: 0x0117, lo: 0xbe, hi: 0xbf}, - // Block 0x7f, offset 0x330 - {value: 0x0010, lo: 0xb7, hi: 0xb7}, - {value: 0x0015, lo: 0xb8, hi: 0xb9}, - {value: 0x0012, lo: 0xba, hi: 0xba}, - {value: 0x0010, lo: 0xbb, hi: 0xbf}, - // Block 0x80, offset 0x334 - {value: 0x0010, lo: 0x80, hi: 0x81}, - {value: 0x0014, lo: 0x82, hi: 0x82}, - {value: 0x0010, lo: 0x83, hi: 0x85}, - {value: 0x0034, lo: 0x86, hi: 0x86}, - {value: 0x0010, lo: 0x87, hi: 0x8a}, - {value: 0x0014, lo: 0x8b, hi: 0x8b}, - {value: 0x0010, lo: 0x8c, hi: 0xa4}, - {value: 0x0014, lo: 0xa5, hi: 0xa6}, - {value: 0x0010, lo: 0xa7, hi: 0xa7}, - // Block 0x81, offset 0x33d - {value: 0x0010, lo: 0x80, hi: 0xb3}, - // Block 0x82, offset 0x33e - {value: 0x0010, lo: 0x80, hi: 0x83}, - {value: 0x0034, 
lo: 0x84, hi: 0x84}, - {value: 0x0014, lo: 0x85, hi: 0x85}, - {value: 0x0010, lo: 0x90, hi: 0x99}, - {value: 0x0024, lo: 0xa0, hi: 0xb1}, - {value: 0x0010, lo: 0xb2, hi: 0xb7}, - {value: 0x0010, lo: 0xbb, hi: 0xbb}, - {value: 0x0010, lo: 0xbd, hi: 0xbd}, - // Block 0x83, offset 0x346 - {value: 0x0010, lo: 0x80, hi: 0xa5}, - {value: 0x0014, lo: 0xa6, hi: 0xaa}, - {value: 0x0034, lo: 0xab, hi: 0xad}, - {value: 0x0010, lo: 0xb0, hi: 0xbf}, - // Block 0x84, offset 0x34a - {value: 0x0010, lo: 0x80, hi: 0x86}, - {value: 0x0014, lo: 0x87, hi: 0x91}, - {value: 0x0010, lo: 0x92, hi: 0x92}, - {value: 0x0030, lo: 0x93, hi: 0x93}, - {value: 0x0010, lo: 0xa0, hi: 0xbc}, - // Block 0x85, offset 0x34f - {value: 0x0014, lo: 0x80, hi: 0x82}, - {value: 0x0010, lo: 0x83, hi: 0xb2}, - {value: 0x0034, lo: 0xb3, hi: 0xb3}, - {value: 0x0010, lo: 0xb4, hi: 0xb5}, - {value: 0x0014, lo: 0xb6, hi: 0xb9}, - {value: 0x0010, lo: 0xba, hi: 0xbb}, - {value: 0x0014, lo: 0xbc, hi: 0xbc}, - {value: 0x0010, lo: 0xbd, hi: 0xbf}, - // Block 0x86, offset 0x357 - {value: 0x0030, lo: 0x80, hi: 0x80}, - {value: 0x0014, lo: 0x8f, hi: 0x8f}, - {value: 0x0010, lo: 0x90, hi: 0x99}, - {value: 0x0014, lo: 0xa5, hi: 0xa5}, - {value: 0x0004, lo: 0xa6, hi: 0xa6}, - {value: 0x0010, lo: 0xb0, hi: 0xb9}, - // Block 0x87, offset 0x35d - {value: 0x0010, lo: 0x80, hi: 0xa8}, - {value: 0x0014, lo: 0xa9, hi: 0xae}, - {value: 0x0010, lo: 0xaf, hi: 0xb0}, - {value: 0x0014, lo: 0xb1, hi: 0xb2}, - {value: 0x0010, lo: 0xb3, hi: 0xb4}, - {value: 0x0014, lo: 0xb5, hi: 0xb6}, - // Block 0x88, offset 0x363 - {value: 0x0010, lo: 0x80, hi: 0x82}, - {value: 0x0014, lo: 0x83, hi: 0x83}, - {value: 0x0010, lo: 0x84, hi: 0x8b}, - {value: 0x0014, lo: 0x8c, hi: 0x8c}, - {value: 0x0010, lo: 0x8d, hi: 0x8d}, - {value: 0x0010, lo: 0x90, hi: 0x99}, - {value: 0x0004, lo: 0xb0, hi: 0xb0}, - {value: 0x0010, lo: 0xbb, hi: 0xbb}, - {value: 0x0014, lo: 0xbc, hi: 0xbc}, - {value: 0x0010, lo: 0xbd, hi: 0xbd}, - // Block 0x89, offset 0x36d - {value: 0x0024, lo: 0xb0, hi: 0xb0}, - {value: 0x0024, lo: 0xb2, hi: 0xb3}, - {value: 0x0034, lo: 0xb4, hi: 0xb4}, - {value: 0x0024, lo: 0xb7, hi: 0xb8}, - {value: 0x0024, lo: 0xbe, hi: 0xbf}, - // Block 0x8a, offset 0x372 - {value: 0x0024, lo: 0x81, hi: 0x81}, - {value: 0x0004, lo: 0x9d, hi: 0x9d}, - {value: 0x0010, lo: 0xa0, hi: 0xab}, - {value: 0x0014, lo: 0xac, hi: 0xad}, - {value: 0x0010, lo: 0xae, hi: 0xaf}, - {value: 0x0010, lo: 0xb2, hi: 0xb2}, - {value: 0x0014, lo: 0xb3, hi: 0xb4}, - {value: 0x0010, lo: 0xb5, hi: 0xb5}, - {value: 0x0034, lo: 0xb6, hi: 0xb6}, - // Block 0x8b, offset 0x37b - {value: 0x0010, lo: 0x81, hi: 0x86}, - {value: 0x0010, lo: 0x89, hi: 0x8e}, - {value: 0x0010, lo: 0x91, hi: 0x96}, - {value: 0x0010, lo: 0xa0, hi: 0xa6}, - {value: 0x0010, lo: 0xa8, hi: 0xae}, - {value: 0x0012, lo: 0xb0, hi: 0xbf}, - // Block 0x8c, offset 0x381 - {value: 0x0012, lo: 0x80, hi: 0x92}, - {value: 0xac52, lo: 0x93, hi: 0x93}, - {value: 0x0012, lo: 0x94, hi: 0x9a}, - {value: 0x0004, lo: 0x9b, hi: 0x9b}, - {value: 0x0015, lo: 0x9c, hi: 0x9f}, - {value: 0x0012, lo: 0xa0, hi: 0xa5}, - {value: 0x74d2, lo: 0xb0, hi: 0xbf}, - // Block 0x8d, offset 0x388 - {value: 0x78d2, lo: 0x80, hi: 0x8f}, - {value: 0x7cd2, lo: 0x90, hi: 0x9f}, - {value: 0x80d2, lo: 0xa0, hi: 0xaf}, - {value: 0x7cd2, lo: 0xb0, hi: 0xbf}, - // Block 0x8e, offset 0x38c - {value: 0x0010, lo: 0x80, hi: 0xa4}, - {value: 0x0014, lo: 0xa5, hi: 0xa5}, - {value: 0x0010, lo: 0xa6, hi: 0xa7}, - {value: 0x0014, lo: 0xa8, hi: 0xa8}, - {value: 0x0010, lo: 0xa9, hi: 0xaa}, - {value: 
0x0010, lo: 0xac, hi: 0xac}, - {value: 0x0034, lo: 0xad, hi: 0xad}, - {value: 0x0010, lo: 0xb0, hi: 0xb9}, - // Block 0x8f, offset 0x394 - {value: 0x0010, lo: 0x80, hi: 0xa3}, - {value: 0x0010, lo: 0xb0, hi: 0xbf}, - // Block 0x90, offset 0x396 - {value: 0x0010, lo: 0x80, hi: 0x86}, - {value: 0x0010, lo: 0x8b, hi: 0xbb}, - // Block 0x91, offset 0x398 - {value: 0x0010, lo: 0x80, hi: 0x81}, - {value: 0x0010, lo: 0x83, hi: 0x84}, - {value: 0x0010, lo: 0x86, hi: 0xbf}, - // Block 0x92, offset 0x39b - {value: 0x0010, lo: 0x80, hi: 0xb1}, - {value: 0x0004, lo: 0xb2, hi: 0xbf}, - // Block 0x93, offset 0x39d - {value: 0x0004, lo: 0x80, hi: 0x81}, - {value: 0x0010, lo: 0x93, hi: 0xbf}, - // Block 0x94, offset 0x39f - {value: 0x0010, lo: 0x80, hi: 0xbd}, - // Block 0x95, offset 0x3a0 - {value: 0x0010, lo: 0x90, hi: 0xbf}, - // Block 0x96, offset 0x3a1 - {value: 0x0010, lo: 0x80, hi: 0x8f}, - {value: 0x0010, lo: 0x92, hi: 0xbf}, - // Block 0x97, offset 0x3a3 - {value: 0x0010, lo: 0x80, hi: 0x87}, - {value: 0x0010, lo: 0xb0, hi: 0xbb}, - // Block 0x98, offset 0x3a5 - {value: 0x0014, lo: 0x80, hi: 0x8f}, - {value: 0x0014, lo: 0x93, hi: 0x93}, - {value: 0x0024, lo: 0xa0, hi: 0xa6}, - {value: 0x0034, lo: 0xa7, hi: 0xad}, - {value: 0x0024, lo: 0xae, hi: 0xaf}, - {value: 0x0010, lo: 0xb3, hi: 0xb4}, - // Block 0x99, offset 0x3ab - {value: 0x0010, lo: 0x8d, hi: 0x8f}, - {value: 0x0014, lo: 0x92, hi: 0x92}, - {value: 0x0014, lo: 0x95, hi: 0x95}, - {value: 0x0010, lo: 0xb0, hi: 0xb4}, - {value: 0x0010, lo: 0xb6, hi: 0xbf}, - // Block 0x9a, offset 0x3b0 - {value: 0x0010, lo: 0x80, hi: 0xbc}, - {value: 0x0014, lo: 0xbf, hi: 0xbf}, - // Block 0x9b, offset 0x3b2 - {value: 0x0014, lo: 0x87, hi: 0x87}, - {value: 0x0014, lo: 0x8e, hi: 0x8e}, - {value: 0x0014, lo: 0x9a, hi: 0x9a}, - {value: 0x5f53, lo: 0xa1, hi: 0xba}, - {value: 0x0004, lo: 0xbe, hi: 0xbe}, - {value: 0x0010, lo: 0xbf, hi: 0xbf}, - // Block 0x9c, offset 0x3b8 - {value: 0x0004, lo: 0x80, hi: 0x80}, - {value: 0x5f52, lo: 0x81, hi: 0x9a}, - {value: 0x0004, lo: 0xb0, hi: 0xb0}, - // Block 0x9d, offset 0x3bb - {value: 0x0014, lo: 0x9e, hi: 0x9f}, - {value: 0x0010, lo: 0xa0, hi: 0xbe}, - // Block 0x9e, offset 0x3bd - {value: 0x0010, lo: 0x82, hi: 0x87}, - {value: 0x0010, lo: 0x8a, hi: 0x8f}, - {value: 0x0010, lo: 0x92, hi: 0x97}, - {value: 0x0010, lo: 0x9a, hi: 0x9c}, - {value: 0x0004, lo: 0xa3, hi: 0xa3}, - {value: 0x0014, lo: 0xb9, hi: 0xbb}, - // Block 0x9f, offset 0x3c3 - {value: 0x0010, lo: 0x80, hi: 0x8b}, - {value: 0x0010, lo: 0x8d, hi: 0xa6}, - {value: 0x0010, lo: 0xa8, hi: 0xba}, - {value: 0x0010, lo: 0xbc, hi: 0xbd}, - {value: 0x0010, lo: 0xbf, hi: 0xbf}, - // Block 0xa0, offset 0x3c8 - {value: 0x0010, lo: 0x80, hi: 0x8d}, - {value: 0x0010, lo: 0x90, hi: 0x9d}, - // Block 0xa1, offset 0x3ca - {value: 0x0010, lo: 0x80, hi: 0xba}, - // Block 0xa2, offset 0x3cb - {value: 0x0010, lo: 0x80, hi: 0xb4}, - // Block 0xa3, offset 0x3cc - {value: 0x0034, lo: 0xbd, hi: 0xbd}, - // Block 0xa4, offset 0x3cd - {value: 0x0010, lo: 0x80, hi: 0x9c}, - {value: 0x0010, lo: 0xa0, hi: 0xbf}, - // Block 0xa5, offset 0x3cf - {value: 0x0010, lo: 0x80, hi: 0x90}, - {value: 0x0034, lo: 0xa0, hi: 0xa0}, - // Block 0xa6, offset 0x3d1 - {value: 0x0010, lo: 0x80, hi: 0x9f}, - {value: 0x0010, lo: 0xb0, hi: 0xbf}, - // Block 0xa7, offset 0x3d3 - {value: 0x0010, lo: 0x80, hi: 0x8a}, - {value: 0x0010, lo: 0x90, hi: 0xb5}, - {value: 0x0024, lo: 0xb6, hi: 0xba}, - // Block 0xa8, offset 0x3d6 - {value: 0x0010, lo: 0x80, hi: 0x9d}, - {value: 0x0010, lo: 0xa0, hi: 0xbf}, - // Block 
0xa9, offset 0x3d8 - {value: 0x0010, lo: 0x80, hi: 0x83}, - {value: 0x0010, lo: 0x88, hi: 0x8f}, - {value: 0x0010, lo: 0x91, hi: 0x95}, - // Block 0xaa, offset 0x3db - {value: 0x2813, lo: 0x80, hi: 0x87}, - {value: 0x3813, lo: 0x88, hi: 0x8f}, - {value: 0x2813, lo: 0x90, hi: 0x97}, - {value: 0xaf53, lo: 0x98, hi: 0x9f}, - {value: 0xb253, lo: 0xa0, hi: 0xa7}, - {value: 0x2812, lo: 0xa8, hi: 0xaf}, - {value: 0x3812, lo: 0xb0, hi: 0xb7}, - {value: 0x2812, lo: 0xb8, hi: 0xbf}, - // Block 0xab, offset 0x3e3 - {value: 0xaf52, lo: 0x80, hi: 0x87}, - {value: 0xb252, lo: 0x88, hi: 0x8f}, - {value: 0x0010, lo: 0x90, hi: 0xbf}, - // Block 0xac, offset 0x3e6 - {value: 0x0010, lo: 0x80, hi: 0x9d}, - {value: 0x0010, lo: 0xa0, hi: 0xa9}, - {value: 0xb253, lo: 0xb0, hi: 0xb7}, - {value: 0xaf53, lo: 0xb8, hi: 0xbf}, - // Block 0xad, offset 0x3ea - {value: 0x2813, lo: 0x80, hi: 0x87}, - {value: 0x3813, lo: 0x88, hi: 0x8f}, - {value: 0x2813, lo: 0x90, hi: 0x93}, - {value: 0xb252, lo: 0x98, hi: 0x9f}, - {value: 0xaf52, lo: 0xa0, hi: 0xa7}, - {value: 0x2812, lo: 0xa8, hi: 0xaf}, - {value: 0x3812, lo: 0xb0, hi: 0xb7}, - {value: 0x2812, lo: 0xb8, hi: 0xbb}, - // Block 0xae, offset 0x3f2 - {value: 0x0010, lo: 0x80, hi: 0xa7}, - {value: 0x0010, lo: 0xb0, hi: 0xbf}, - // Block 0xaf, offset 0x3f4 - {value: 0x0010, lo: 0x80, hi: 0xa3}, - // Block 0xb0, offset 0x3f5 - {value: 0x0010, lo: 0x80, hi: 0xb6}, - // Block 0xb1, offset 0x3f6 - {value: 0x0010, lo: 0x80, hi: 0x95}, - {value: 0x0010, lo: 0xa0, hi: 0xa7}, - // Block 0xb2, offset 0x3f8 - {value: 0x0010, lo: 0x80, hi: 0x85}, - {value: 0x0010, lo: 0x88, hi: 0x88}, - {value: 0x0010, lo: 0x8a, hi: 0xb5}, - {value: 0x0010, lo: 0xb7, hi: 0xb8}, - {value: 0x0010, lo: 0xbc, hi: 0xbc}, - {value: 0x0010, lo: 0xbf, hi: 0xbf}, - // Block 0xb3, offset 0x3fe - {value: 0x0010, lo: 0x80, hi: 0x95}, - {value: 0x0010, lo: 0xa0, hi: 0xb6}, - // Block 0xb4, offset 0x400 - {value: 0x0010, lo: 0x80, hi: 0x9e}, - // Block 0xb5, offset 0x401 - {value: 0x0010, lo: 0xa0, hi: 0xb2}, - {value: 0x0010, lo: 0xb4, hi: 0xb5}, - // Block 0xb6, offset 0x403 - {value: 0x0010, lo: 0x80, hi: 0x95}, - {value: 0x0010, lo: 0xa0, hi: 0xb9}, - // Block 0xb7, offset 0x405 - {value: 0x0010, lo: 0x80, hi: 0xb7}, - {value: 0x0010, lo: 0xbe, hi: 0xbf}, - // Block 0xb8, offset 0x407 - {value: 0x0010, lo: 0x80, hi: 0x80}, - {value: 0x0014, lo: 0x81, hi: 0x83}, - {value: 0x0014, lo: 0x85, hi: 0x86}, - {value: 0x0014, lo: 0x8c, hi: 0x8c}, - {value: 0x0034, lo: 0x8d, hi: 0x8d}, - {value: 0x0014, lo: 0x8e, hi: 0x8e}, - {value: 0x0024, lo: 0x8f, hi: 0x8f}, - {value: 0x0010, lo: 0x90, hi: 0x93}, - {value: 0x0010, lo: 0x95, hi: 0x97}, - {value: 0x0010, lo: 0x99, hi: 0xb3}, - {value: 0x0024, lo: 0xb8, hi: 0xb8}, - {value: 0x0034, lo: 0xb9, hi: 0xba}, - {value: 0x0034, lo: 0xbf, hi: 0xbf}, - // Block 0xb9, offset 0x414 - {value: 0x0010, lo: 0xa0, hi: 0xbc}, - // Block 0xba, offset 0x415 - {value: 0x0010, lo: 0x80, hi: 0x9c}, - // Block 0xbb, offset 0x416 - {value: 0x0010, lo: 0x80, hi: 0x87}, - {value: 0x0010, lo: 0x89, hi: 0xa4}, - {value: 0x0024, lo: 0xa5, hi: 0xa5}, - {value: 0x0034, lo: 0xa6, hi: 0xa6}, - // Block 0xbc, offset 0x41a - {value: 0x0010, lo: 0x80, hi: 0x95}, - {value: 0x0010, lo: 0xa0, hi: 0xb2}, - // Block 0xbd, offset 0x41c - {value: 0x0010, lo: 0x80, hi: 0x91}, - // Block 0xbe, offset 0x41d - {value: 0x0010, lo: 0x80, hi: 0x88}, - // Block 0xbf, offset 0x41e - {value: 0x5653, lo: 0x80, hi: 0xb2}, - // Block 0xc0, offset 0x41f - {value: 0x5652, lo: 0x80, hi: 0xb2}, - // Block 0xc1, offset 0x420 - 
{value: 0x0010, lo: 0x80, hi: 0x80}, - {value: 0x0014, lo: 0x81, hi: 0x81}, - {value: 0x0010, lo: 0x82, hi: 0xb7}, - {value: 0x0014, lo: 0xb8, hi: 0xbf}, - // Block 0xc2, offset 0x424 - {value: 0x0014, lo: 0x80, hi: 0x85}, - {value: 0x0034, lo: 0x86, hi: 0x86}, - {value: 0x0010, lo: 0xa6, hi: 0xaf}, - {value: 0x0034, lo: 0xbf, hi: 0xbf}, - // Block 0xc3, offset 0x428 - {value: 0x0014, lo: 0x80, hi: 0x81}, - {value: 0x0010, lo: 0x82, hi: 0xb2}, - {value: 0x0014, lo: 0xb3, hi: 0xb6}, - {value: 0x0010, lo: 0xb7, hi: 0xb8}, - {value: 0x0034, lo: 0xb9, hi: 0xba}, - {value: 0x0014, lo: 0xbd, hi: 0xbd}, - // Block 0xc4, offset 0x42e - {value: 0x0010, lo: 0x90, hi: 0xa8}, - {value: 0x0010, lo: 0xb0, hi: 0xb9}, - // Block 0xc5, offset 0x430 - {value: 0x0024, lo: 0x80, hi: 0x82}, - {value: 0x0010, lo: 0x83, hi: 0xa6}, - {value: 0x0014, lo: 0xa7, hi: 0xab}, - {value: 0x0010, lo: 0xac, hi: 0xac}, - {value: 0x0014, lo: 0xad, hi: 0xb2}, - {value: 0x0034, lo: 0xb3, hi: 0xb4}, - {value: 0x0010, lo: 0xb6, hi: 0xbf}, - // Block 0xc6, offset 0x437 - {value: 0x0010, lo: 0x90, hi: 0xb2}, - {value: 0x0034, lo: 0xb3, hi: 0xb3}, - {value: 0x0010, lo: 0xb6, hi: 0xb6}, - // Block 0xc7, offset 0x43a - {value: 0x0014, lo: 0x80, hi: 0x81}, - {value: 0x0010, lo: 0x82, hi: 0xb5}, - {value: 0x0014, lo: 0xb6, hi: 0xbe}, - {value: 0x0010, lo: 0xbf, hi: 0xbf}, - // Block 0xc8, offset 0x43e - {value: 0x0030, lo: 0x80, hi: 0x80}, - {value: 0x0010, lo: 0x81, hi: 0x84}, - {value: 0x0034, lo: 0x8a, hi: 0x8a}, - {value: 0x0014, lo: 0x8b, hi: 0x8c}, - {value: 0x0010, lo: 0x90, hi: 0x9a}, - {value: 0x0010, lo: 0x9c, hi: 0x9c}, - // Block 0xc9, offset 0x444 - {value: 0x0010, lo: 0x80, hi: 0x91}, - {value: 0x0010, lo: 0x93, hi: 0xae}, - {value: 0x0014, lo: 0xaf, hi: 0xb1}, - {value: 0x0010, lo: 0xb2, hi: 0xb3}, - {value: 0x0014, lo: 0xb4, hi: 0xb4}, - {value: 0x0030, lo: 0xb5, hi: 0xb5}, - {value: 0x0034, lo: 0xb6, hi: 0xb6}, - {value: 0x0014, lo: 0xb7, hi: 0xb7}, - {value: 0x0014, lo: 0xbe, hi: 0xbe}, - // Block 0xca, offset 0x44d - {value: 0x0010, lo: 0x80, hi: 0x86}, - {value: 0x0010, lo: 0x88, hi: 0x88}, - {value: 0x0010, lo: 0x8a, hi: 0x8d}, - {value: 0x0010, lo: 0x8f, hi: 0x9d}, - {value: 0x0010, lo: 0x9f, hi: 0xa8}, - {value: 0x0010, lo: 0xb0, hi: 0xbf}, - // Block 0xcb, offset 0x453 - {value: 0x0010, lo: 0x80, hi: 0x9e}, - {value: 0x0014, lo: 0x9f, hi: 0x9f}, - {value: 0x0010, lo: 0xa0, hi: 0xa2}, - {value: 0x0014, lo: 0xa3, hi: 0xa8}, - {value: 0x0034, lo: 0xa9, hi: 0xaa}, - {value: 0x0010, lo: 0xb0, hi: 0xb9}, - // Block 0xcc, offset 0x459 - {value: 0x0014, lo: 0x80, hi: 0x81}, - {value: 0x0010, lo: 0x82, hi: 0x83}, - {value: 0x0010, lo: 0x85, hi: 0x8c}, - {value: 0x0010, lo: 0x8f, hi: 0x90}, - {value: 0x0010, lo: 0x93, hi: 0xa8}, - {value: 0x0010, lo: 0xaa, hi: 0xb0}, - {value: 0x0010, lo: 0xb2, hi: 0xb3}, - {value: 0x0010, lo: 0xb5, hi: 0xb9}, - {value: 0x0034, lo: 0xbc, hi: 0xbc}, - {value: 0x0010, lo: 0xbd, hi: 0xbf}, - // Block 0xcd, offset 0x463 - {value: 0x0014, lo: 0x80, hi: 0x80}, - {value: 0x0010, lo: 0x81, hi: 0x84}, - {value: 0x0010, lo: 0x87, hi: 0x88}, - {value: 0x0010, lo: 0x8b, hi: 0x8c}, - {value: 0x0030, lo: 0x8d, hi: 0x8d}, - {value: 0x0010, lo: 0x90, hi: 0x90}, - {value: 0x0010, lo: 0x97, hi: 0x97}, - {value: 0x0010, lo: 0x9d, hi: 0xa3}, - {value: 0x0024, lo: 0xa6, hi: 0xac}, - {value: 0x0024, lo: 0xb0, hi: 0xb4}, - // Block 0xce, offset 0x46d - {value: 0x0010, lo: 0x80, hi: 0xb7}, - {value: 0x0014, lo: 0xb8, hi: 0xbf}, - // Block 0xcf, offset 0x46f - {value: 0x0010, lo: 0x80, hi: 0x81}, - {value: 
0x0034, lo: 0x82, hi: 0x82}, - {value: 0x0014, lo: 0x83, hi: 0x84}, - {value: 0x0010, lo: 0x85, hi: 0x85}, - {value: 0x0034, lo: 0x86, hi: 0x86}, - {value: 0x0010, lo: 0x87, hi: 0x8a}, - {value: 0x0010, lo: 0x90, hi: 0x99}, - // Block 0xd0, offset 0x476 - {value: 0x0010, lo: 0x80, hi: 0xb2}, - {value: 0x0014, lo: 0xb3, hi: 0xb8}, - {value: 0x0010, lo: 0xb9, hi: 0xb9}, - {value: 0x0014, lo: 0xba, hi: 0xba}, - {value: 0x0010, lo: 0xbb, hi: 0xbe}, - {value: 0x0014, lo: 0xbf, hi: 0xbf}, - // Block 0xd1, offset 0x47c - {value: 0x0014, lo: 0x80, hi: 0x80}, - {value: 0x0010, lo: 0x81, hi: 0x81}, - {value: 0x0034, lo: 0x82, hi: 0x83}, - {value: 0x0010, lo: 0x84, hi: 0x85}, - {value: 0x0010, lo: 0x87, hi: 0x87}, - {value: 0x0010, lo: 0x90, hi: 0x99}, - // Block 0xd2, offset 0x482 - {value: 0x0010, lo: 0x80, hi: 0xb1}, - {value: 0x0014, lo: 0xb2, hi: 0xb5}, - {value: 0x0010, lo: 0xb8, hi: 0xbb}, - {value: 0x0014, lo: 0xbc, hi: 0xbd}, - {value: 0x0010, lo: 0xbe, hi: 0xbe}, - {value: 0x0034, lo: 0xbf, hi: 0xbf}, - // Block 0xd3, offset 0x488 - {value: 0x0034, lo: 0x80, hi: 0x80}, - {value: 0x0010, lo: 0x98, hi: 0x9b}, - {value: 0x0014, lo: 0x9c, hi: 0x9d}, - // Block 0xd4, offset 0x48b - {value: 0x0010, lo: 0x80, hi: 0xb2}, - {value: 0x0014, lo: 0xb3, hi: 0xba}, - {value: 0x0010, lo: 0xbb, hi: 0xbc}, - {value: 0x0014, lo: 0xbd, hi: 0xbd}, - {value: 0x0010, lo: 0xbe, hi: 0xbe}, - {value: 0x0034, lo: 0xbf, hi: 0xbf}, - // Block 0xd5, offset 0x491 - {value: 0x0014, lo: 0x80, hi: 0x80}, - {value: 0x0010, lo: 0x84, hi: 0x84}, - {value: 0x0010, lo: 0x90, hi: 0x99}, - // Block 0xd6, offset 0x494 - {value: 0x0010, lo: 0x80, hi: 0xaa}, - {value: 0x0014, lo: 0xab, hi: 0xab}, - {value: 0x0010, lo: 0xac, hi: 0xac}, - {value: 0x0014, lo: 0xad, hi: 0xad}, - {value: 0x0010, lo: 0xae, hi: 0xaf}, - {value: 0x0014, lo: 0xb0, hi: 0xb5}, - {value: 0x0030, lo: 0xb6, hi: 0xb6}, - {value: 0x0034, lo: 0xb7, hi: 0xb7}, - // Block 0xd7, offset 0x49c - {value: 0x0010, lo: 0x80, hi: 0x89}, - // Block 0xd8, offset 0x49d - {value: 0x0014, lo: 0x9d, hi: 0x9f}, - {value: 0x0010, lo: 0xa0, hi: 0xa1}, - {value: 0x0014, lo: 0xa2, hi: 0xa5}, - {value: 0x0010, lo: 0xa6, hi: 0xa6}, - {value: 0x0014, lo: 0xa7, hi: 0xaa}, - {value: 0x0034, lo: 0xab, hi: 0xab}, - {value: 0x0010, lo: 0xb0, hi: 0xb9}, - // Block 0xd9, offset 0x4a4 - {value: 0x5f53, lo: 0xa0, hi: 0xbf}, - // Block 0xda, offset 0x4a5 - {value: 0x5f52, lo: 0x80, hi: 0x9f}, - {value: 0x0010, lo: 0xa0, hi: 0xa9}, - {value: 0x0010, lo: 0xbf, hi: 0xbf}, - // Block 0xdb, offset 0x4a8 - {value: 0x0010, lo: 0x80, hi: 0xb8}, - // Block 0xdc, offset 0x4a9 - {value: 0x0010, lo: 0x80, hi: 0x88}, - {value: 0x0010, lo: 0x8a, hi: 0xaf}, - {value: 0x0014, lo: 0xb0, hi: 0xb6}, - {value: 0x0014, lo: 0xb8, hi: 0xbd}, - {value: 0x0010, lo: 0xbe, hi: 0xbe}, - {value: 0x0034, lo: 0xbf, hi: 0xbf}, - // Block 0xdd, offset 0x4af - {value: 0x0010, lo: 0x80, hi: 0x80}, - {value: 0x0010, lo: 0x90, hi: 0x99}, - {value: 0x0010, lo: 0xb2, hi: 0xbf}, - // Block 0xde, offset 0x4b2 - {value: 0x0010, lo: 0x80, hi: 0x8f}, - {value: 0x0014, lo: 0x92, hi: 0xa7}, - {value: 0x0010, lo: 0xa9, hi: 0xa9}, - {value: 0x0014, lo: 0xaa, hi: 0xb0}, - {value: 0x0010, lo: 0xb1, hi: 0xb1}, - {value: 0x0014, lo: 0xb2, hi: 0xb3}, - {value: 0x0010, lo: 0xb4, hi: 0xb4}, - {value: 0x0014, lo: 0xb5, hi: 0xb6}, - // Block 0xdf, offset 0x4ba - {value: 0x0010, lo: 0x80, hi: 0x99}, - // Block 0xe0, offset 0x4bb - {value: 0x0010, lo: 0x80, hi: 0xae}, - // Block 0xe1, offset 0x4bc - {value: 0x0010, lo: 0x80, hi: 0x83}, - // Block 0xe2, 
offset 0x4bd - {value: 0x0010, lo: 0x80, hi: 0x86}, - // Block 0xe3, offset 0x4be - {value: 0x0010, lo: 0x80, hi: 0x9e}, - {value: 0x0010, lo: 0xa0, hi: 0xa9}, - // Block 0xe4, offset 0x4c0 - {value: 0x0010, lo: 0x90, hi: 0xad}, - {value: 0x0034, lo: 0xb0, hi: 0xb4}, - // Block 0xe5, offset 0x4c2 - {value: 0x0010, lo: 0x80, hi: 0xaf}, - {value: 0x0024, lo: 0xb0, hi: 0xb6}, - // Block 0xe6, offset 0x4c4 - {value: 0x0014, lo: 0x80, hi: 0x83}, - {value: 0x0010, lo: 0x90, hi: 0x99}, - {value: 0x0010, lo: 0xa3, hi: 0xb7}, - {value: 0x0010, lo: 0xbd, hi: 0xbf}, - // Block 0xe7, offset 0x4c8 - {value: 0x0010, lo: 0x80, hi: 0x8f}, - // Block 0xe8, offset 0x4c9 - {value: 0x0010, lo: 0x80, hi: 0x84}, - {value: 0x0010, lo: 0x90, hi: 0xbe}, - // Block 0xe9, offset 0x4cb - {value: 0x0014, lo: 0x8f, hi: 0x9f}, - // Block 0xea, offset 0x4cc - {value: 0x0014, lo: 0xa0, hi: 0xa0}, - // Block 0xeb, offset 0x4cd - {value: 0x0010, lo: 0x80, hi: 0xaa}, - {value: 0x0010, lo: 0xb0, hi: 0xbc}, - // Block 0xec, offset 0x4cf - {value: 0x0010, lo: 0x80, hi: 0x88}, - {value: 0x0010, lo: 0x90, hi: 0x99}, - {value: 0x0014, lo: 0x9d, hi: 0x9d}, - {value: 0x0034, lo: 0x9e, hi: 0x9e}, - {value: 0x0014, lo: 0xa0, hi: 0xa3}, - // Block 0xed, offset 0x4d4 - {value: 0x0030, lo: 0xa5, hi: 0xa6}, - {value: 0x0034, lo: 0xa7, hi: 0xa9}, - {value: 0x0030, lo: 0xad, hi: 0xb2}, - {value: 0x0014, lo: 0xb3, hi: 0xba}, - {value: 0x0034, lo: 0xbb, hi: 0xbf}, - // Block 0xee, offset 0x4d9 - {value: 0x0034, lo: 0x80, hi: 0x82}, - {value: 0x0024, lo: 0x85, hi: 0x89}, - {value: 0x0034, lo: 0x8a, hi: 0x8b}, - {value: 0x0024, lo: 0xaa, hi: 0xad}, - // Block 0xef, offset 0x4dd - {value: 0x0024, lo: 0x82, hi: 0x84}, - // Block 0xf0, offset 0x4de - {value: 0x0013, lo: 0x80, hi: 0x99}, - {value: 0x0012, lo: 0x9a, hi: 0xb3}, - {value: 0x0013, lo: 0xb4, hi: 0xbf}, - // Block 0xf1, offset 0x4e1 - {value: 0x0013, lo: 0x80, hi: 0x8d}, - {value: 0x0012, lo: 0x8e, hi: 0x94}, - {value: 0x0012, lo: 0x96, hi: 0xa7}, - {value: 0x0013, lo: 0xa8, hi: 0xbf}, - // Block 0xf2, offset 0x4e5 - {value: 0x0013, lo: 0x80, hi: 0x81}, - {value: 0x0012, lo: 0x82, hi: 0x9b}, - {value: 0x0013, lo: 0x9c, hi: 0x9c}, - {value: 0x0013, lo: 0x9e, hi: 0x9f}, - {value: 0x0013, lo: 0xa2, hi: 0xa2}, - {value: 0x0013, lo: 0xa5, hi: 0xa6}, - {value: 0x0013, lo: 0xa9, hi: 0xac}, - {value: 0x0013, lo: 0xae, hi: 0xb5}, - {value: 0x0012, lo: 0xb6, hi: 0xb9}, - {value: 0x0012, lo: 0xbb, hi: 0xbb}, - {value: 0x0012, lo: 0xbd, hi: 0xbf}, - // Block 0xf3, offset 0x4f0 - {value: 0x0012, lo: 0x80, hi: 0x83}, - {value: 0x0012, lo: 0x85, hi: 0x8f}, - {value: 0x0013, lo: 0x90, hi: 0xa9}, - {value: 0x0012, lo: 0xaa, hi: 0xbf}, - // Block 0xf4, offset 0x4f4 - {value: 0x0012, lo: 0x80, hi: 0x83}, - {value: 0x0013, lo: 0x84, hi: 0x85}, - {value: 0x0013, lo: 0x87, hi: 0x8a}, - {value: 0x0013, lo: 0x8d, hi: 0x94}, - {value: 0x0013, lo: 0x96, hi: 0x9c}, - {value: 0x0012, lo: 0x9e, hi: 0xb7}, - {value: 0x0013, lo: 0xb8, hi: 0xb9}, - {value: 0x0013, lo: 0xbb, hi: 0xbe}, - // Block 0xf5, offset 0x4fc - {value: 0x0013, lo: 0x80, hi: 0x84}, - {value: 0x0013, lo: 0x86, hi: 0x86}, - {value: 0x0013, lo: 0x8a, hi: 0x90}, - {value: 0x0012, lo: 0x92, hi: 0xab}, - {value: 0x0013, lo: 0xac, hi: 0xbf}, - // Block 0xf6, offset 0x501 - {value: 0x0013, lo: 0x80, hi: 0x85}, - {value: 0x0012, lo: 0x86, hi: 0x9f}, - {value: 0x0013, lo: 0xa0, hi: 0xb9}, - {value: 0x0012, lo: 0xba, hi: 0xbf}, - // Block 0xf7, offset 0x505 - {value: 0x0012, lo: 0x80, hi: 0x93}, - {value: 0x0013, lo: 0x94, hi: 0xad}, - {value: 0x0012, lo: 
0xae, hi: 0xbf}, - // Block 0xf8, offset 0x508 - {value: 0x0012, lo: 0x80, hi: 0x87}, - {value: 0x0013, lo: 0x88, hi: 0xa1}, - {value: 0x0012, lo: 0xa2, hi: 0xbb}, - {value: 0x0013, lo: 0xbc, hi: 0xbf}, - // Block 0xf9, offset 0x50c - {value: 0x0013, lo: 0x80, hi: 0x95}, - {value: 0x0012, lo: 0x96, hi: 0xaf}, - {value: 0x0013, lo: 0xb0, hi: 0xbf}, - // Block 0xfa, offset 0x50f - {value: 0x0013, lo: 0x80, hi: 0x89}, - {value: 0x0012, lo: 0x8a, hi: 0xa5}, - {value: 0x0013, lo: 0xa8, hi: 0xbf}, - // Block 0xfb, offset 0x512 - {value: 0x0013, lo: 0x80, hi: 0x80}, - {value: 0x0012, lo: 0x82, hi: 0x9a}, - {value: 0x0012, lo: 0x9c, hi: 0xa1}, - {value: 0x0013, lo: 0xa2, hi: 0xba}, - {value: 0x0012, lo: 0xbc, hi: 0xbf}, - // Block 0xfc, offset 0x517 - {value: 0x0012, lo: 0x80, hi: 0x94}, - {value: 0x0012, lo: 0x96, hi: 0x9b}, - {value: 0x0013, lo: 0x9c, hi: 0xb4}, - {value: 0x0012, lo: 0xb6, hi: 0xbf}, - // Block 0xfd, offset 0x51b - {value: 0x0012, lo: 0x80, hi: 0x8e}, - {value: 0x0012, lo: 0x90, hi: 0x95}, - {value: 0x0013, lo: 0x96, hi: 0xae}, - {value: 0x0012, lo: 0xb0, hi: 0xbf}, - // Block 0xfe, offset 0x51f - {value: 0x0012, lo: 0x80, hi: 0x88}, - {value: 0x0012, lo: 0x8a, hi: 0x8f}, - {value: 0x0013, lo: 0x90, hi: 0xa8}, - {value: 0x0012, lo: 0xaa, hi: 0xbf}, - // Block 0xff, offset 0x523 - {value: 0x0012, lo: 0x80, hi: 0x82}, - {value: 0x0012, lo: 0x84, hi: 0x89}, - {value: 0x0017, lo: 0x8a, hi: 0x8b}, - {value: 0x0010, lo: 0x8e, hi: 0xbf}, - // Block 0x100, offset 0x527 - {value: 0x0014, lo: 0x80, hi: 0xb6}, - {value: 0x0014, lo: 0xbb, hi: 0xbf}, - // Block 0x101, offset 0x529 - {value: 0x0014, lo: 0x80, hi: 0xac}, - {value: 0x0014, lo: 0xb5, hi: 0xb5}, - // Block 0x102, offset 0x52b - {value: 0x0014, lo: 0x84, hi: 0x84}, - {value: 0x0014, lo: 0x9b, hi: 0x9f}, - {value: 0x0014, lo: 0xa1, hi: 0xaf}, - // Block 0x103, offset 0x52e - {value: 0x0024, lo: 0x80, hi: 0x86}, - {value: 0x0024, lo: 0x88, hi: 0x98}, - {value: 0x0024, lo: 0x9b, hi: 0xa1}, - {value: 0x0024, lo: 0xa3, hi: 0xa4}, - {value: 0x0024, lo: 0xa6, hi: 0xaa}, - // Block 0x104, offset 0x533 - {value: 0x0010, lo: 0x80, hi: 0x84}, - {value: 0x0034, lo: 0x90, hi: 0x96}, - // Block 0x105, offset 0x535 - {value: 0xb552, lo: 0x80, hi: 0x81}, - {value: 0xb852, lo: 0x82, hi: 0x83}, - {value: 0x0024, lo: 0x84, hi: 0x89}, - {value: 0x0034, lo: 0x8a, hi: 0x8a}, - {value: 0x0010, lo: 0x90, hi: 0x99}, - // Block 0x106, offset 0x53a - {value: 0x0010, lo: 0x80, hi: 0x83}, - {value: 0x0010, lo: 0x85, hi: 0x9f}, - {value: 0x0010, lo: 0xa1, hi: 0xa2}, - {value: 0x0010, lo: 0xa4, hi: 0xa4}, - {value: 0x0010, lo: 0xa7, hi: 0xa7}, - {value: 0x0010, lo: 0xa9, hi: 0xb2}, - {value: 0x0010, lo: 0xb4, hi: 0xb7}, - {value: 0x0010, lo: 0xb9, hi: 0xb9}, - {value: 0x0010, lo: 0xbb, hi: 0xbb}, - // Block 0x107, offset 0x543 - {value: 0x0010, lo: 0x80, hi: 0x89}, - {value: 0x0010, lo: 0x8b, hi: 0x9b}, - {value: 0x0010, lo: 0xa1, hi: 0xa3}, - {value: 0x0010, lo: 0xa5, hi: 0xa9}, - {value: 0x0010, lo: 0xab, hi: 0xbb}, - // Block 0x108, offset 0x548 - {value: 0x0013, lo: 0xb0, hi: 0xbf}, - // Block 0x109, offset 0x549 - {value: 0x0013, lo: 0x80, hi: 0x89}, - {value: 0x0013, lo: 0x90, hi: 0xa9}, - {value: 0x0013, lo: 0xb0, hi: 0xbf}, - // Block 0x10a, offset 0x54c - {value: 0x0013, lo: 0x80, hi: 0x89}, - // Block 0x10b, offset 0x54d - {value: 0x0004, lo: 0xbb, hi: 0xbf}, - // Block 0x10c, offset 0x54e - {value: 0x0014, lo: 0x81, hi: 0x81}, - {value: 0x0014, lo: 0xa0, hi: 0xbf}, - // Block 0x10d, offset 0x550 - {value: 0x0014, lo: 0x80, hi: 0xbf}, - // Block 
0x10e, offset 0x551 - {value: 0x0014, lo: 0x80, hi: 0xaf}, -} - -// Total table size 13819 bytes (13KiB); checksum: 4CC48DA3 diff --git a/vendor/golang.org/x/text/cases/trieval.go b/vendor/golang.org/x/text/cases/trieval.go deleted file mode 100644 index 7f2aa5b01..000000000 --- a/vendor/golang.org/x/text/cases/trieval.go +++ /dev/null @@ -1,213 +0,0 @@ -// This file was generated by go generate; DO NOT EDIT - -package cases - -// This file contains definitions for interpreting the trie value of the case -// trie generated by "go run gen*.go". It is shared by both the generator -// program and the resultant package. Sharing is achieved by the generator -// copying gen_trieval.go to trieval.go and changing what's above this comment. - -// info holds case information for a single rune. It is the value returned -// by a trie lookup. Most mapping information can be stored in a single 16-bit -// value. If not, for example when a rune is mapped to multiple runes, the value -// stores some basic case data and an index into an array with additional data. -// -// The per-rune values have the following format: -// -// if (exception) { -// 15..5 unsigned exception index -// 4 unused -// } else { -// 15..8 XOR pattern or index to XOR pattern for case mapping -// Only 13..8 are used for XOR patterns. -// 7 inverseFold (fold to upper, not to lower) -// 6 index: interpret the XOR pattern as an index -// 5..4 CCC: zero (normal or break), above or other -// } -// 3 exception: interpret this value as an exception index -// (TODO: is this bit necessary? Probably implied from case mode.) -// 2..0 case mode -// -// For the non-exceptional cases, a rune must be either uncased, lowercase or -// uppercase. If the rune is cased, the XOR pattern maps either a lowercase -// rune to uppercase or an uppercase rune to lowercase (applied to the 10 -// least-significant bits of the rune). -// -// See the definitions below for a more detailed description of the various -// bits. -type info uint16 - -const ( - casedMask = 0x0003 - fullCasedMask = 0x0007 - ignorableMask = 0x0006 - ignorableValue = 0x0004 - - inverseFoldBit = 1 << 7 - - exceptionBit = 1 << 3 - exceptionShift = 5 - numExceptionBits = 11 - - xorIndexBit = 1 << 6 - xorShift = 8 - - // There is no mapping if all xor bits and the exception bit are zero. - hasMappingMask = 0xffc0 | exceptionBit -) - -// The case mode bits encodes the case type of a rune. This includes uncased, -// title, upper and lower case and case ignorable. (For a definition of these -// terms see Chapter 3 of The Unicode Standard Core Specification.) In some rare -// cases, a rune can be both cased and case-ignorable. This is encoded by -// cIgnorableCased. A rune of this type is always lower case. Some runes are -// cased while not having a mapping. -// -// A common pattern for scripts in the Unicode standard is for upper and lower -// case runes to alternate for increasing rune values (e.g. the accented Latin -// ranges starting from U+0100 and U+1E00 among others and some Cyrillic -// characters). We use this property by defining a cXORCase mode, where the case -// mode (always upper or lower case) is derived from the rune value. As the XOR -// pattern for case mappings is often identical for successive runes, using -// cXORCase can result in large series of identical trie values. This, in turn, -// allows us to better compress the trie blocks. 
-const ( - cUncased info = iota // 000 - cTitle // 001 - cLower // 010 - cUpper // 011 - cIgnorableUncased // 100 - cIgnorableCased // 101 // lower case if mappings exist - cXORCase // 11x // case is cLower | ((rune&1) ^ x) - - maxCaseMode = cUpper -) - -func (c info) isCased() bool { - return c&casedMask != 0 -} - -func (c info) isCaseIgnorable() bool { - return c&ignorableMask == ignorableValue -} - -func (c info) isCaseIgnorableAndNonBreakStarter() bool { - return c&(fullCasedMask|cccMask) == (ignorableValue | cccZero) -} - -func (c info) isNotCasedAndNotCaseIgnorable() bool { - return c&fullCasedMask == 0 -} - -func (c info) isCaseIgnorableAndNotCased() bool { - return c&fullCasedMask == cIgnorableUncased -} - -// The case mapping implementation will need to know about various Canonical -// Combining Class (CCC) values. We encode two of these in the trie value: -// cccZero (0) and cccAbove (230). If the value is cccOther, it means that -// CCC(r) > 0, but not 230. A value of cccBreak means that CCC(r) == 0 and that -// the rune also has the break category Break (see below). -const ( - cccBreak info = iota << 4 - cccZero - cccAbove - cccOther - - cccMask = cccBreak | cccZero | cccAbove | cccOther -) - -const ( - starter = 0 - above = 230 - iotaSubscript = 240 -) - -// The exceptions slice holds data that does not fit in a normal info entry. -// The entry is pointed to by the exception index in an entry. It has the -// following format: -// -// Header -// byte 0: -// 7..6 unused -// 5..4 CCC type (same bits as entry) -// 3 unused -// 2..0 length of fold -// -// byte 1: -// 7..6 unused -// 5..3 length of 1st mapping of case type -// 2..0 length of 2nd mapping of case type -// -// case 1st 2nd -// lower -> upper, title -// upper -> lower, title -// title -> lower, upper -// -// Lengths with the value 0x7 indicate no value and implies no change. -// A length of 0 indicates a mapping to zero-length string. -// -// Body bytes: -// case folding bytes -// lowercase mapping bytes -// uppercase mapping bytes -// titlecase mapping bytes -// closure mapping bytes (for NFKC_Casefold). (TODO) -// -// Fallbacks: -// missing fold -> lower -// missing title -> upper -// all missing -> original rune -// -// exceptions starts with a dummy byte to enforce that there is no zero index -// value. -const ( - lengthMask = 0x07 - lengthBits = 3 - noChange = 0 -) - -// References to generated trie. - -var trie = newCaseTrie(0) - -var sparse = sparseBlocks{ - values: sparseValues[:], - offsets: sparseOffsets[:], -} - -// Sparse block lookup code. - -// valueRange is an entry in a sparse block. -type valueRange struct { - value uint16 - lo, hi byte -} - -type sparseBlocks struct { - values []valueRange - offsets []uint16 -} - -// lookup returns the value from values block n for byte b using binary search. -func (s *sparseBlocks) lookup(n uint32, b byte) uint16 { - lo := s.offsets[n] - hi := s.offsets[n+1] - for lo < hi { - m := lo + (hi-lo)/2 - r := s.values[m] - if r.lo <= b && b <= r.hi { - return r.value - } - if b < r.lo { - hi = m - } else { - lo = m + 1 - } - } - return 0 -} - -// lastRuneForTesting is the last rune used for testing. Everything after this -// is boring. -const lastRuneForTesting = rune(0x1FFFF) diff --git a/vendor/golang.org/x/text/internal/tag/tag.go b/vendor/golang.org/x/text/internal/tag/tag.go deleted file mode 100644 index 2cf4ecd29..000000000 --- a/vendor/golang.org/x/text/internal/tag/tag.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tag contains functionality handling tags and related data. -package tag - -import "sort" - -// An Index converts tags to a compact numeric value. -// -// All elements are of size 4. Tags may be up to 4 bytes long. Excess bytes can -// be used to store additional information about the tag. -type Index string - -// Elem returns the element data at the given index. -func (s Index) Elem(x int) string { - return string(s[x*4 : x*4+4]) -} - -// Index reports the index of the given key or -1 if it could not be found. -// Only the first len(key) bytes from the start of the 4-byte entries will be -// considered for the search and the first match in Index will be returned. -func (s Index) Index(key []byte) int { - n := len(key) - // search the index of the first entry with an equal or higher value than - // key in s. - index := sort.Search(len(s)/4, func(i int) bool { - return cmp(s[i*4:i*4+n], key) != -1 - }) - i := index * 4 - if cmp(s[i:i+len(key)], key) != 0 { - return -1 - } - return index -} - -// Next finds the next occurrence of key after index x, which must have been -// obtained from a call to Index using the same key. It returns x+1 or -1. -func (s Index) Next(key []byte, x int) int { - if x++; x*4 < len(s) && cmp(s[x*4:x*4+len(key)], key) == 0 { - return x - } - return -1 -} - -// cmp returns an integer comparing a and b lexicographically. -func cmp(a Index, b []byte) int { - n := len(a) - if len(b) < n { - n = len(b) - } - for i, c := range b[:n] { - switch { - case a[i] > c: - return 1 - case a[i] < c: - return -1 - } - } - switch { - case len(a) < len(b): - return -1 - case len(a) > len(b): - return 1 - } - return 0 -} - -// Compare returns an integer comparing a and b lexicographically. -func Compare(a string, b []byte) int { - return cmp(Index(a), b) -} - -// FixCase reformats b to the same pattern of cases as form. -// If returns false if string b is malformed. -func FixCase(form string, b []byte) bool { - if len(form) != len(b) { - return false - } - for i, c := range b { - if form[i] <= 'Z' { - if c >= 'a' { - c -= 'z' - 'Z' - } - if c < 'A' || 'Z' < c { - return false - } - } else { - if c <= 'Z' { - c += 'z' - 'Z' - } - if c < 'a' || 'z' < c { - return false - } - } - b[i] = c - } - return true -} diff --git a/vendor/golang.org/x/text/language/Makefile b/vendor/golang.org/x/text/language/Makefile deleted file mode 100644 index 79f005784..000000000 --- a/vendor/golang.org/x/text/language/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2013 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -CLEANFILES+=maketables - -maketables: maketables.go - go build $^ - -tables: maketables - ./maketables > tables.go - gofmt -w -s tables.go - -# Build (but do not run) maketables during testing, -# just to make sure it still compiles. -testshort: maketables diff --git a/vendor/golang.org/x/text/language/common.go b/vendor/golang.org/x/text/language/common.go deleted file mode 100644 index a255bb0a5..000000000 --- a/vendor/golang.org/x/text/language/common.go +++ /dev/null @@ -1,16 +0,0 @@ -// This file was generated by go generate; DO NOT EDIT - -package language - -// This file contains code common to the maketables.go and the package code. - -// langAliasType is the type of an alias in langAliasMap. 
-type langAliasType int8 - -const ( - langDeprecated langAliasType = iota - langMacro - langLegacy - - langAliasTypeUnknown langAliasType = -1 -) diff --git a/vendor/golang.org/x/text/language/coverage.go b/vendor/golang.org/x/text/language/coverage.go deleted file mode 100644 index 101fd23c1..000000000 --- a/vendor/golang.org/x/text/language/coverage.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package language - -import ( - "fmt" - "sort" -) - -// The Coverage interface is used to define the level of coverage of an -// internationalization service. Note that not all types are supported by all -// services. As lists may be generated on the fly, it is recommended that users -// of a Coverage cache the results. -type Coverage interface { - // Tags returns the list of supported tags. - Tags() []Tag - - // BaseLanguages returns the list of supported base languages. - BaseLanguages() []Base - - // Scripts returns the list of supported scripts. - Scripts() []Script - - // Regions returns the list of supported regions. - Regions() []Region -} - -var ( - // Supported defines a Coverage that lists all supported subtags. Tags - // always returns nil. - Supported Coverage = allSubtags{} -) - -// TODO: -// - Support Variants, numbering systems. -// - CLDR coverage levels. -// - Set of common tags defined in this package. - -type allSubtags struct{} - -// Regions returns the list of supported regions. As all regions are in a -// consecutive range, it simply returns a slice of numbers in increasing order. -// The "undefined" region is not returned. -func (s allSubtags) Regions() []Region { - reg := make([]Region, numRegions) - for i := range reg { - reg[i] = Region{regionID(i + 1)} - } - return reg -} - -// Scripts returns the list of supported scripts. As all scripts are in a -// consecutive range, it simply returns a slice of numbers in increasing order. -// The "undefined" script is not returned. -func (s allSubtags) Scripts() []Script { - scr := make([]Script, numScripts) - for i := range scr { - scr[i] = Script{scriptID(i + 1)} - } - return scr -} - -// BaseLanguages returns the list of all supported base languages. It generates -// the list by traversing the internal structures. -func (s allSubtags) BaseLanguages() []Base { - base := make([]Base, 0, numLanguages) - for i := 0; i < langNoIndexOffset; i++ { - // We included "und" already for the value 0. - if i != nonCanonicalUnd { - base = append(base, Base{langID(i)}) - } - } - i := langNoIndexOffset - for _, v := range langNoIndex { - for k := 0; k < 8; k++ { - if v&1 == 1 { - base = append(base, Base{langID(i)}) - } - v >>= 1 - i++ - } - } - return base -} - -// Tags always returns nil. -func (s allSubtags) Tags() []Tag { - return nil -} - -// coverage is used used by NewCoverage which is used as a convenient way for -// creating Coverage implementations for partially defined data. Very often a -// package will only need to define a subset of slices. coverage provides a -// convenient way to do this. Moreover, packages using NewCoverage, instead of -// their own implementation, will not break if later new slice types are added. 
-type coverage struct { - tags func() []Tag - bases func() []Base - scripts func() []Script - regions func() []Region -} - -func (s *coverage) Tags() []Tag { - if s.tags == nil { - return nil - } - return s.tags() -} - -// bases implements sort.Interface and is used to sort base languages. -type bases []Base - -func (b bases) Len() int { - return len(b) -} - -func (b bases) Swap(i, j int) { - b[i], b[j] = b[j], b[i] -} - -func (b bases) Less(i, j int) bool { - return b[i].langID < b[j].langID -} - -// BaseLanguages returns the result from calling s.bases if it is specified or -// otherwise derives the set of supported base languages from tags. -func (s *coverage) BaseLanguages() []Base { - if s.bases == nil { - tags := s.Tags() - if len(tags) == 0 { - return nil - } - a := make([]Base, len(tags)) - for i, t := range tags { - a[i] = Base{langID(t.lang)} - } - sort.Sort(bases(a)) - k := 0 - for i := 1; i < len(a); i++ { - if a[k] != a[i] { - k++ - a[k] = a[i] - } - } - return a[:k+1] - } - return s.bases() -} - -func (s *coverage) Scripts() []Script { - if s.scripts == nil { - return nil - } - return s.scripts() -} - -func (s *coverage) Regions() []Region { - if s.regions == nil { - return nil - } - return s.regions() -} - -// NewCoverage returns a Coverage for the given lists. It is typically used by -// packages providing internationalization services to define their level of -// coverage. A list may be of type []T or func() []T, where T is either Tag, -// Base, Script or Region. The returned Coverage derives the value for Bases -// from Tags if no func or slice for []Base is specified. For other unspecified -// types the returned Coverage will return nil for the respective methods. -func NewCoverage(list ...interface{}) Coverage { - s := &coverage{} - for _, x := range list { - switch v := x.(type) { - case func() []Base: - s.bases = v - case func() []Script: - s.scripts = v - case func() []Region: - s.regions = v - case func() []Tag: - s.tags = v - case []Base: - s.bases = func() []Base { return v } - case []Script: - s.scripts = func() []Script { return v } - case []Region: - s.regions = func() []Region { return v } - case []Tag: - s.tags = func() []Tag { return v } - default: - panic(fmt.Sprintf("language: unsupported set type %T", v)) - } - } - return s -} diff --git a/vendor/golang.org/x/text/language/gen_common.go b/vendor/golang.org/x/text/language/gen_common.go deleted file mode 100644 index 83ce18013..000000000 --- a/vendor/golang.org/x/text/language/gen_common.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This file contains code common to the maketables.go and the package code. - -// langAliasType is the type of an alias in langAliasMap. -type langAliasType int8 - -const ( - langDeprecated langAliasType = iota - langMacro - langLegacy - - langAliasTypeUnknown langAliasType = -1 -) diff --git a/vendor/golang.org/x/text/language/gen_index.go b/vendor/golang.org/x/text/language/gen_index.go deleted file mode 100644 index eef555cd3..000000000 --- a/vendor/golang.org/x/text/language/gen_index.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -package main - -// This file generates derivative tables based on the language package itself. - -import ( - "bytes" - "flag" - "fmt" - "io/ioutil" - "log" - "reflect" - "sort" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/language" - "golang.org/x/text/unicode/cldr" -) - -var ( - test = flag.Bool("test", false, - "test existing tables; can be used to compare web data with package data.") - - draft = flag.String("draft", - "contributed", - `Minimal draft requirements (approved, contributed, provisional, unconfirmed).`) -) - -func main() { - gen.Init() - - // Read the CLDR zip file. - r := gen.OpenCLDRCoreZip() - defer r.Close() - - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - if err != nil { - log.Fatalf("DecodeZip: %v", err) - } - - w := gen.NewCodeWriter() - defer func() { - buf := &bytes.Buffer{} - - if _, err = w.WriteGo(buf, "language"); err != nil { - log.Fatalf("Error formatting file index.go: %v", err) - } - - // Since we're generating a table for our own package we need to rewrite - // doing the equivalent of go fmt -r 'language.b -> b'. Using - // bytes.Replace will do. - out := bytes.Replace(buf.Bytes(), []byte("language."), nil, -1) - if err := ioutil.WriteFile("index.go", out, 0600); err != nil { - log.Fatalf("Could not create file index.go: %v", err) - } - }() - - m := map[language.Tag]bool{} - for _, lang := range data.Locales() { - // We include all locales unconditionally to be consistent with en_US. - // We want en_US, even though it has no data associated with it. - - // TODO: put any of the languages for which no data exists at the end - // of the index. This allows all components based on ICU to use that - // as the cutoff point. - // if x := data.RawLDML(lang); false || - // x.LocaleDisplayNames != nil || - // x.Characters != nil || - // x.Delimiters != nil || - // x.Measurement != nil || - // x.Dates != nil || - // x.Numbers != nil || - // x.Units != nil || - // x.ListPatterns != nil || - // x.Collations != nil || - // x.Segmentations != nil || - // x.Rbnf != nil || - // x.Annotations != nil || - // x.Metadata != nil { - - // TODO: support POSIX natively, albeit non-standard. - tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1)) - m[tag] = true - // } - } - // Include locales for plural rules, which uses a different structure. - for _, plurals := range data.Supplemental().Plurals { - for _, rules := range plurals.PluralRules { - for _, lang := range strings.Split(rules.Locales, " ") { - m[language.Make(lang)] = true - } - } - } - - var core, special []language.Tag - - for t := range m { - if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" { - log.Fatalf("Unexpected extension %v in %v", x, t) - } - if len(t.Variants()) == 0 && len(t.Extensions()) == 0 { - core = append(core, t) - } else { - special = append(special, t) - } - } - - w.WriteComment(` - NumCompactTags is the number of common tags. The maximum tag is - NumCompactTags-1.`) - w.WriteConst("NumCompactTags", len(core)+len(special)) - - sort.Sort(byAlpha(special)) - w.WriteVar("specialTags", special) - - // TODO: order by frequency? - sort.Sort(byAlpha(core)) - - // Size computations are just an estimate. - w.Size += int(reflect.TypeOf(map[uint32]uint16{}).Size()) - w.Size += len(core) * 6 // size of uint32 and uint16 - - fmt.Fprintln(w) - fmt.Fprintln(w, "var coreTags = map[uint32]uint16{") - fmt.Fprintln(w, "0x0: 0, // und") - i := len(special) + 1 // Und and special tags already written. 
- for _, t := range core { - if t == language.Und { - continue - } - fmt.Fprint(w.Hash, t, i) - b, s, r := t.Raw() - fmt.Fprintf(w, "0x%s%s%s: %d, // %s\n", - getIndex(b, 3), // 3 is enough as it is guaranteed to be a compact number - getIndex(s, 2), - getIndex(r, 3), - i, t) - i++ - } - fmt.Fprintln(w, "}") -} - -// getIndex prints the subtag type and extracts its index of size nibble. -// If the index is less than n nibbles, the result is prefixed with 0s. -func getIndex(x interface{}, n int) string { - s := fmt.Sprintf("%#v", x) // s is of form Type{typeID: 0x00} - s = s[strings.Index(s, "0x")+2 : len(s)-1] - return strings.Repeat("0", n-len(s)) + s -} - -type byAlpha []language.Tag - -func (a byAlpha) Len() int { return len(a) } -func (a byAlpha) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byAlpha) Less(i, j int) bool { return a[i].String() < a[j].String() } diff --git a/vendor/golang.org/x/text/language/go1_1.go b/vendor/golang.org/x/text/language/go1_1.go deleted file mode 100644 index 380f4c09f..000000000 --- a/vendor/golang.org/x/text/language/go1_1.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.2 - -package language - -import "sort" - -func sortStable(s sort.Interface) { - ss := stableSort{ - s: s, - pos: make([]int, s.Len()), - } - for i := range ss.pos { - ss.pos[i] = i - } - sort.Sort(&ss) -} - -type stableSort struct { - s sort.Interface - pos []int -} - -func (s *stableSort) Len() int { - return len(s.pos) -} - -func (s *stableSort) Less(i, j int) bool { - return s.s.Less(i, j) || !s.s.Less(j, i) && s.pos[i] < s.pos[j] -} - -func (s *stableSort) Swap(i, j int) { - s.s.Swap(i, j) - s.pos[i], s.pos[j] = s.pos[j], s.pos[i] -} diff --git a/vendor/golang.org/x/text/language/go1_2.go b/vendor/golang.org/x/text/language/go1_2.go deleted file mode 100644 index 38268c57a..000000000 --- a/vendor/golang.org/x/text/language/go1_2.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.2 - -package language - -import "sort" - -var sortStable = sort.Stable diff --git a/vendor/golang.org/x/text/language/index.go b/vendor/golang.org/x/text/language/index.go deleted file mode 100644 index 7fa9cc82d..000000000 --- a/vendor/golang.org/x/text/language/index.go +++ /dev/null @@ -1,762 +0,0 @@ -// This file was generated by go generate; DO NOT EDIT - -package language - -// NumCompactTags is the number of common tags. The maximum tag is -// NumCompactTags-1. 
-const NumCompactTags = 747 - -var specialTags = []Tag{ // 2 elements - 0: {lang: 0x61, region: 0x6d, script: 0x0, pVariant: 0x5, pExt: 0xe, str: "ca-ES-valencia"}, - 1: {lang: 0x9b, region: 0x132, script: 0x0, pVariant: 0x5, pExt: 0x5, str: "en-US-u-va-posix"}, -} // Size: 72 bytes - -var coreTags = map[uint32]uint16{ - 0x0: 0, // und - 0x00a00000: 3, // af - 0x00a000d0: 4, // af-NA - 0x00a0015e: 5, // af-ZA - 0x00b00000: 6, // agq - 0x00b00051: 7, // agq-CM - 0x00d00000: 8, // ak - 0x00d0007e: 9, // ak-GH - 0x01100000: 10, // am - 0x0110006e: 11, // am-ET - 0x01500000: 12, // ar - 0x01500001: 13, // ar-001 - 0x01500022: 14, // ar-AE - 0x01500038: 15, // ar-BH - 0x01500061: 16, // ar-DJ - 0x01500066: 17, // ar-DZ - 0x0150006a: 18, // ar-EG - 0x0150006b: 19, // ar-EH - 0x0150006c: 20, // ar-ER - 0x01500095: 21, // ar-IL - 0x01500099: 22, // ar-IQ - 0x0150009f: 23, // ar-JO - 0x015000a6: 24, // ar-KM - 0x015000aa: 25, // ar-KW - 0x015000ae: 26, // ar-LB - 0x015000b7: 27, // ar-LY - 0x015000b8: 28, // ar-MA - 0x015000c7: 29, // ar-MR - 0x015000df: 30, // ar-OM - 0x015000eb: 31, // ar-PS - 0x015000f1: 32, // ar-QA - 0x01500106: 33, // ar-SA - 0x01500109: 34, // ar-SD - 0x01500113: 35, // ar-SO - 0x01500115: 36, // ar-SS - 0x0150011a: 37, // ar-SY - 0x0150011e: 38, // ar-TD - 0x01500126: 39, // ar-TN - 0x0150015b: 40, // ar-YE - 0x01c00000: 41, // as - 0x01c00097: 42, // as-IN - 0x01d00000: 43, // asa - 0x01d0012d: 44, // asa-TZ - 0x01f00000: 45, // ast - 0x01f0006d: 46, // ast-ES - 0x02400000: 47, // az - 0x0241e000: 48, // az-Cyrl - 0x0241e031: 49, // az-Cyrl-AZ - 0x02452000: 50, // az-Latn - 0x02452031: 51, // az-Latn-AZ - 0x02a00000: 52, // bas - 0x02a00051: 53, // bas-CM - 0x02f00000: 54, // be - 0x02f00046: 55, // be-BY - 0x03100000: 56, // bem - 0x0310015f: 57, // bem-ZM - 0x03300000: 58, // bez - 0x0330012d: 59, // bez-TZ - 0x03800000: 60, // bg - 0x03800037: 61, // bg-BG - 0x03c00000: 62, // bh - 0x04900000: 63, // bm - 0x049000c1: 64, // bm-ML - 0x04b00000: 65, // bn - 0x04b00034: 66, // bn-BD - 0x04b00097: 67, // bn-IN - 0x04c00000: 68, // bo - 0x04c00052: 69, // bo-CN - 0x04c00097: 70, // bo-IN - 0x05000000: 71, // br - 0x05000076: 72, // br-FR - 0x05300000: 73, // brx - 0x05300097: 74, // brx-IN - 0x05400000: 75, // bs - 0x0541e000: 76, // bs-Cyrl - 0x0541e032: 77, // bs-Cyrl-BA - 0x05452000: 78, // bs-Latn - 0x05452032: 79, // bs-Latn-BA - 0x06100000: 80, // ca - 0x06100021: 81, // ca-AD - 0x0610006d: 82, // ca-ES - 0x06100076: 83, // ca-FR - 0x0610009c: 84, // ca-IT - 0x06400000: 85, // ce - 0x06400104: 86, // ce-RU - 0x06600000: 87, // cgg - 0x0660012f: 88, // cgg-UG - 0x06c00000: 89, // chr - 0x06c00132: 90, // chr-US - 0x06f00000: 91, // ckb - 0x06f00099: 92, // ckb-IQ - 0x06f0009a: 93, // ckb-IR - 0x07900000: 94, // cs - 0x0790005d: 95, // cs-CZ - 0x07d00000: 96, // cu - 0x07d00104: 97, // cu-RU - 0x07f00000: 98, // cy - 0x07f00079: 99, // cy-GB - 0x08000000: 100, // da - 0x08000062: 101, // da-DK - 0x08000080: 102, // da-GL - 0x08300000: 103, // dav - 0x083000a2: 104, // dav-KE - 0x08500000: 105, // de - 0x0850002d: 106, // de-AT - 0x08500035: 107, // de-BE - 0x0850004d: 108, // de-CH - 0x0850005f: 109, // de-DE - 0x085000b0: 110, // de-LI - 0x085000b5: 111, // de-LU - 0x08800000: 112, // dje - 0x088000d2: 113, // dje-NE - 0x08b00000: 114, // dsb - 0x08b0005f: 115, // dsb-DE - 0x08f00000: 116, // dua - 0x08f00051: 117, // dua-CM - 0x09000000: 118, // dv - 0x09100000: 119, // dyo - 0x09100112: 120, // dyo-SN - 0x09300000: 121, // dz - 0x09300042: 122, // dz-BT - 0x09400000: 
123, // ebu - 0x094000a2: 124, // ebu-KE - 0x09500000: 125, // ee - 0x0950007e: 126, // ee-GH - 0x09500120: 127, // ee-TG - 0x09a00000: 128, // el - 0x09a0005c: 129, // el-CY - 0x09a00085: 130, // el-GR - 0x09b00000: 131, // en - 0x09b00001: 132, // en-001 - 0x09b0001a: 133, // en-150 - 0x09b00024: 134, // en-AG - 0x09b00025: 135, // en-AI - 0x09b0002c: 136, // en-AS - 0x09b0002d: 137, // en-AT - 0x09b0002e: 138, // en-AU - 0x09b00033: 139, // en-BB - 0x09b00035: 140, // en-BE - 0x09b00039: 141, // en-BI - 0x09b0003c: 142, // en-BM - 0x09b00041: 143, // en-BS - 0x09b00045: 144, // en-BW - 0x09b00047: 145, // en-BZ - 0x09b00048: 146, // en-CA - 0x09b00049: 147, // en-CC - 0x09b0004d: 148, // en-CH - 0x09b0004f: 149, // en-CK - 0x09b00051: 150, // en-CM - 0x09b0005b: 151, // en-CX - 0x09b0005c: 152, // en-CY - 0x09b0005f: 153, // en-DE - 0x09b00060: 154, // en-DG - 0x09b00062: 155, // en-DK - 0x09b00063: 156, // en-DM - 0x09b0006c: 157, // en-ER - 0x09b00070: 158, // en-FI - 0x09b00071: 159, // en-FJ - 0x09b00072: 160, // en-FK - 0x09b00073: 161, // en-FM - 0x09b00079: 162, // en-GB - 0x09b0007a: 163, // en-GD - 0x09b0007d: 164, // en-GG - 0x09b0007e: 165, // en-GH - 0x09b0007f: 166, // en-GI - 0x09b00081: 167, // en-GM - 0x09b00088: 168, // en-GU - 0x09b0008a: 169, // en-GY - 0x09b0008b: 170, // en-HK - 0x09b00094: 171, // en-IE - 0x09b00095: 172, // en-IL - 0x09b00096: 173, // en-IM - 0x09b00097: 174, // en-IN - 0x09b00098: 175, // en-IO - 0x09b0009d: 176, // en-JE - 0x09b0009e: 177, // en-JM - 0x09b000a2: 178, // en-KE - 0x09b000a5: 179, // en-KI - 0x09b000a7: 180, // en-KN - 0x09b000ab: 181, // en-KY - 0x09b000af: 182, // en-LC - 0x09b000b2: 183, // en-LR - 0x09b000b3: 184, // en-LS - 0x09b000bd: 185, // en-MG - 0x09b000be: 186, // en-MH - 0x09b000c4: 187, // en-MO - 0x09b000c5: 188, // en-MP - 0x09b000c8: 189, // en-MS - 0x09b000c9: 190, // en-MT - 0x09b000ca: 191, // en-MU - 0x09b000cc: 192, // en-MW - 0x09b000ce: 193, // en-MY - 0x09b000d0: 194, // en-NA - 0x09b000d3: 195, // en-NF - 0x09b000d4: 196, // en-NG - 0x09b000d7: 197, // en-NL - 0x09b000db: 198, // en-NR - 0x09b000dd: 199, // en-NU - 0x09b000de: 200, // en-NZ - 0x09b000e4: 201, // en-PG - 0x09b000e5: 202, // en-PH - 0x09b000e6: 203, // en-PK - 0x09b000e9: 204, // en-PN - 0x09b000ea: 205, // en-PR - 0x09b000ee: 206, // en-PW - 0x09b00105: 207, // en-RW - 0x09b00107: 208, // en-SB - 0x09b00108: 209, // en-SC - 0x09b00109: 210, // en-SD - 0x09b0010a: 211, // en-SE - 0x09b0010b: 212, // en-SG - 0x09b0010c: 213, // en-SH - 0x09b0010d: 214, // en-SI - 0x09b00110: 215, // en-SL - 0x09b00115: 216, // en-SS - 0x09b00119: 217, // en-SX - 0x09b0011b: 218, // en-SZ - 0x09b0011d: 219, // en-TC - 0x09b00123: 220, // en-TK - 0x09b00127: 221, // en-TO - 0x09b0012a: 222, // en-TT - 0x09b0012b: 223, // en-TV - 0x09b0012d: 224, // en-TZ - 0x09b0012f: 225, // en-UG - 0x09b00131: 226, // en-UM - 0x09b00132: 227, // en-US - 0x09b00136: 228, // en-VC - 0x09b00139: 229, // en-VG - 0x09b0013a: 230, // en-VI - 0x09b0013c: 231, // en-VU - 0x09b0013f: 232, // en-WS - 0x09b0015e: 233, // en-ZA - 0x09b0015f: 234, // en-ZM - 0x09b00161: 235, // en-ZW - 0x09c00000: 236, // eo - 0x09c00001: 237, // eo-001 - 0x09d00000: 238, // es - 0x09d0001e: 239, // es-419 - 0x09d0002b: 240, // es-AR - 0x09d0003e: 241, // es-BO - 0x09d00040: 242, // es-BR - 0x09d00050: 243, // es-CL - 0x09d00053: 244, // es-CO - 0x09d00055: 245, // es-CR - 0x09d00058: 246, // es-CU - 0x09d00064: 247, // es-DO - 0x09d00067: 248, // es-EA - 0x09d00068: 249, // es-EC - 0x09d0006d: 250, // 
es-ES - 0x09d00084: 251, // es-GQ - 0x09d00087: 252, // es-GT - 0x09d0008d: 253, // es-HN - 0x09d00092: 254, // es-IC - 0x09d000cd: 255, // es-MX - 0x09d000d6: 256, // es-NI - 0x09d000e0: 257, // es-PA - 0x09d000e2: 258, // es-PE - 0x09d000e5: 259, // es-PH - 0x09d000ea: 260, // es-PR - 0x09d000ef: 261, // es-PY - 0x09d00118: 262, // es-SV - 0x09d00132: 263, // es-US - 0x09d00133: 264, // es-UY - 0x09d00138: 265, // es-VE - 0x09f00000: 266, // et - 0x09f00069: 267, // et-EE - 0x0a100000: 268, // eu - 0x0a10006d: 269, // eu-ES - 0x0a200000: 270, // ewo - 0x0a200051: 271, // ewo-CM - 0x0a400000: 272, // fa - 0x0a400023: 273, // fa-AF - 0x0a40009a: 274, // fa-IR - 0x0a600000: 275, // ff - 0x0a600051: 276, // ff-CM - 0x0a600082: 277, // ff-GN - 0x0a6000c7: 278, // ff-MR - 0x0a600112: 279, // ff-SN - 0x0a800000: 280, // fi - 0x0a800070: 281, // fi-FI - 0x0aa00000: 282, // fil - 0x0aa000e5: 283, // fil-PH - 0x0ad00000: 284, // fo - 0x0ad00062: 285, // fo-DK - 0x0ad00074: 286, // fo-FO - 0x0af00000: 287, // fr - 0x0af00035: 288, // fr-BE - 0x0af00036: 289, // fr-BF - 0x0af00039: 290, // fr-BI - 0x0af0003a: 291, // fr-BJ - 0x0af0003b: 292, // fr-BL - 0x0af00048: 293, // fr-CA - 0x0af0004a: 294, // fr-CD - 0x0af0004b: 295, // fr-CF - 0x0af0004c: 296, // fr-CG - 0x0af0004d: 297, // fr-CH - 0x0af0004e: 298, // fr-CI - 0x0af00051: 299, // fr-CM - 0x0af00061: 300, // fr-DJ - 0x0af00066: 301, // fr-DZ - 0x0af00076: 302, // fr-FR - 0x0af00078: 303, // fr-GA - 0x0af0007c: 304, // fr-GF - 0x0af00082: 305, // fr-GN - 0x0af00083: 306, // fr-GP - 0x0af00084: 307, // fr-GQ - 0x0af0008f: 308, // fr-HT - 0x0af000a6: 309, // fr-KM - 0x0af000b5: 310, // fr-LU - 0x0af000b8: 311, // fr-MA - 0x0af000b9: 312, // fr-MC - 0x0af000bc: 313, // fr-MF - 0x0af000bd: 314, // fr-MG - 0x0af000c1: 315, // fr-ML - 0x0af000c6: 316, // fr-MQ - 0x0af000c7: 317, // fr-MR - 0x0af000ca: 318, // fr-MU - 0x0af000d1: 319, // fr-NC - 0x0af000d2: 320, // fr-NE - 0x0af000e3: 321, // fr-PF - 0x0af000e8: 322, // fr-PM - 0x0af00100: 323, // fr-RE - 0x0af00105: 324, // fr-RW - 0x0af00108: 325, // fr-SC - 0x0af00112: 326, // fr-SN - 0x0af0011a: 327, // fr-SY - 0x0af0011e: 328, // fr-TD - 0x0af00120: 329, // fr-TG - 0x0af00126: 330, // fr-TN - 0x0af0013c: 331, // fr-VU - 0x0af0013d: 332, // fr-WF - 0x0af0015c: 333, // fr-YT - 0x0b600000: 334, // fur - 0x0b60009c: 335, // fur-IT - 0x0b900000: 336, // fy - 0x0b9000d7: 337, // fy-NL - 0x0ba00000: 338, // ga - 0x0ba00094: 339, // ga-IE - 0x0c200000: 340, // gd - 0x0c200079: 341, // gd-GB - 0x0c800000: 342, // gl - 0x0c80006d: 343, // gl-ES - 0x0d200000: 344, // gsw - 0x0d20004d: 345, // gsw-CH - 0x0d200076: 346, // gsw-FR - 0x0d2000b0: 347, // gsw-LI - 0x0d300000: 348, // gu - 0x0d300097: 349, // gu-IN - 0x0d700000: 350, // guw - 0x0d800000: 351, // guz - 0x0d8000a2: 352, // guz-KE - 0x0d900000: 353, // gv - 0x0d900096: 354, // gv-IM - 0x0dc00000: 355, // ha - 0x0dc0007e: 356, // ha-GH - 0x0dc000d2: 357, // ha-NE - 0x0dc000d4: 358, // ha-NG - 0x0de00000: 359, // haw - 0x0de00132: 360, // haw-US - 0x0e000000: 361, // he - 0x0e000095: 362, // he-IL - 0x0e100000: 363, // hi - 0x0e100097: 364, // hi-IN - 0x0ee00000: 365, // hr - 0x0ee00032: 366, // hr-BA - 0x0ee0008e: 367, // hr-HR - 0x0ef00000: 368, // hsb - 0x0ef0005f: 369, // hsb-DE - 0x0f200000: 370, // hu - 0x0f200090: 371, // hu-HU - 0x0f300000: 372, // hy - 0x0f300027: 373, // hy-AM - 0x0f800000: 374, // id - 0x0f800093: 375, // id-ID - 0x0fa00000: 376, // ig - 0x0fa000d4: 377, // ig-NG - 0x0fb00000: 378, // ii - 0x0fb00052: 379, // ii-CN - 
0x10200000: 380, // is - 0x1020009b: 381, // is-IS - 0x10300000: 382, // it - 0x1030004d: 383, // it-CH - 0x1030009c: 384, // it-IT - 0x10300111: 385, // it-SM - 0x10400000: 386, // iu - 0x10700000: 387, // ja - 0x107000a0: 388, // ja-JP - 0x10900000: 389, // jbo - 0x10a00000: 390, // jgo - 0x10a00051: 391, // jgo-CM - 0x10c00000: 392, // jmc - 0x10c0012d: 393, // jmc-TZ - 0x10f00000: 394, // jv - 0x11100000: 395, // ka - 0x1110007b: 396, // ka-GE - 0x11300000: 397, // kab - 0x11300066: 398, // kab-DZ - 0x11500000: 399, // kaj - 0x11600000: 400, // kam - 0x116000a2: 401, // kam-KE - 0x11900000: 402, // kcg - 0x11b00000: 403, // kde - 0x11b0012d: 404, // kde-TZ - 0x11d00000: 405, // kea - 0x11d00059: 406, // kea-CV - 0x12800000: 407, // khq - 0x128000c1: 408, // khq-ML - 0x12b00000: 409, // ki - 0x12b000a2: 410, // ki-KE - 0x12f00000: 411, // kk - 0x12f000ac: 412, // kk-KZ - 0x13000000: 413, // kkj - 0x13000051: 414, // kkj-CM - 0x13100000: 415, // kl - 0x13100080: 416, // kl-GL - 0x13200000: 417, // kln - 0x132000a2: 418, // kln-KE - 0x13300000: 419, // km - 0x133000a4: 420, // km-KH - 0x13500000: 421, // kn - 0x13500097: 422, // kn-IN - 0x13600000: 423, // ko - 0x136000a8: 424, // ko-KP - 0x136000a9: 425, // ko-KR - 0x13800000: 426, // kok - 0x13800097: 427, // kok-IN - 0x14100000: 428, // ks - 0x14100097: 429, // ks-IN - 0x14200000: 430, // ksb - 0x1420012d: 431, // ksb-TZ - 0x14300000: 432, // ksf - 0x14300051: 433, // ksf-CM - 0x14400000: 434, // ksh - 0x1440005f: 435, // ksh-DE - 0x14500000: 436, // ku - 0x14a00000: 437, // kw - 0x14a00079: 438, // kw-GB - 0x14d00000: 439, // ky - 0x14d000a3: 440, // ky-KG - 0x15100000: 441, // lag - 0x1510012d: 442, // lag-TZ - 0x15400000: 443, // lb - 0x154000b5: 444, // lb-LU - 0x15a00000: 445, // lg - 0x15a0012f: 446, // lg-UG - 0x16100000: 447, // lkt - 0x16100132: 448, // lkt-US - 0x16400000: 449, // ln - 0x16400029: 450, // ln-AO - 0x1640004a: 451, // ln-CD - 0x1640004b: 452, // ln-CF - 0x1640004c: 453, // ln-CG - 0x16500000: 454, // lo - 0x165000ad: 455, // lo-LA - 0x16800000: 456, // lrc - 0x16800099: 457, // lrc-IQ - 0x1680009a: 458, // lrc-IR - 0x16900000: 459, // lt - 0x169000b4: 460, // lt-LT - 0x16b00000: 461, // lu - 0x16b0004a: 462, // lu-CD - 0x16d00000: 463, // luo - 0x16d000a2: 464, // luo-KE - 0x16e00000: 465, // luy - 0x16e000a2: 466, // luy-KE - 0x17000000: 467, // lv - 0x170000b6: 468, // lv-LV - 0x17a00000: 469, // mas - 0x17a000a2: 470, // mas-KE - 0x17a0012d: 471, // mas-TZ - 0x18000000: 472, // mer - 0x180000a2: 473, // mer-KE - 0x18200000: 474, // mfe - 0x182000ca: 475, // mfe-MU - 0x18300000: 476, // mg - 0x183000bd: 477, // mg-MG - 0x18400000: 478, // mgh - 0x184000cf: 479, // mgh-MZ - 0x18500000: 480, // mgo - 0x18500051: 481, // mgo-CM - 0x18c00000: 482, // mk - 0x18c000c0: 483, // mk-MK - 0x18d00000: 484, // ml - 0x18d00097: 485, // ml-IN - 0x18f00000: 486, // mn - 0x18f000c3: 487, // mn-MN - 0x19600000: 488, // mr - 0x19600097: 489, // mr-IN - 0x19a00000: 490, // ms - 0x19a0003d: 491, // ms-BN - 0x19a000ce: 492, // ms-MY - 0x19a0010b: 493, // ms-SG - 0x19b00000: 494, // mt - 0x19b000c9: 495, // mt-MT - 0x19d00000: 496, // mua - 0x19d00051: 497, // mua-CM - 0x1a500000: 498, // my - 0x1a5000c2: 499, // my-MM - 0x1a900000: 500, // mzn - 0x1a90009a: 501, // mzn-IR - 0x1ab00000: 502, // nah - 0x1ae00000: 503, // naq - 0x1ae000d0: 504, // naq-NA - 0x1af00000: 505, // nb - 0x1af000d8: 506, // nb-NO - 0x1af0010e: 507, // nb-SJ - 0x1b100000: 508, // nd - 0x1b100161: 509, // nd-ZW - 0x1b400000: 510, // ne - 0x1b400097: 511, // 
ne-IN - 0x1b4000d9: 512, // ne-NP - 0x1bd00000: 513, // nl - 0x1bd0002f: 514, // nl-AW - 0x1bd00035: 515, // nl-BE - 0x1bd0003f: 516, // nl-BQ - 0x1bd0005a: 517, // nl-CW - 0x1bd000d7: 518, // nl-NL - 0x1bd00114: 519, // nl-SR - 0x1bd00119: 520, // nl-SX - 0x1be00000: 521, // nmg - 0x1be00051: 522, // nmg-CM - 0x1bf00000: 523, // nn - 0x1bf000d8: 524, // nn-NO - 0x1c000000: 525, // nnh - 0x1c000051: 526, // nnh-CM - 0x1c100000: 527, // no - 0x1c500000: 528, // nqo - 0x1c600000: 529, // nr - 0x1c800000: 530, // nso - 0x1c900000: 531, // nus - 0x1c900115: 532, // nus-SS - 0x1cc00000: 533, // ny - 0x1ce00000: 534, // nyn - 0x1ce0012f: 535, // nyn-UG - 0x1d200000: 536, // om - 0x1d20006e: 537, // om-ET - 0x1d2000a2: 538, // om-KE - 0x1d300000: 539, // or - 0x1d300097: 540, // or-IN - 0x1d400000: 541, // os - 0x1d40007b: 542, // os-GE - 0x1d400104: 543, // os-RU - 0x1d700000: 544, // pa - 0x1d705000: 545, // pa-Arab - 0x1d7050e6: 546, // pa-Arab-PK - 0x1d72f000: 547, // pa-Guru - 0x1d72f097: 548, // pa-Guru-IN - 0x1db00000: 549, // pap - 0x1e700000: 550, // pl - 0x1e7000e7: 551, // pl-PL - 0x1ed00000: 552, // prg - 0x1ed00001: 553, // prg-001 - 0x1ee00000: 554, // ps - 0x1ee00023: 555, // ps-AF - 0x1ef00000: 556, // pt - 0x1ef00029: 557, // pt-AO - 0x1ef00040: 558, // pt-BR - 0x1ef0004d: 559, // pt-CH - 0x1ef00059: 560, // pt-CV - 0x1ef00084: 561, // pt-GQ - 0x1ef00089: 562, // pt-GW - 0x1ef000b5: 563, // pt-LU - 0x1ef000c4: 564, // pt-MO - 0x1ef000cf: 565, // pt-MZ - 0x1ef000ec: 566, // pt-PT - 0x1ef00116: 567, // pt-ST - 0x1ef00124: 568, // pt-TL - 0x1f100000: 569, // qu - 0x1f10003e: 570, // qu-BO - 0x1f100068: 571, // qu-EC - 0x1f1000e2: 572, // qu-PE - 0x1fc00000: 573, // rm - 0x1fc0004d: 574, // rm-CH - 0x20100000: 575, // rn - 0x20100039: 576, // rn-BI - 0x20300000: 577, // ro - 0x203000ba: 578, // ro-MD - 0x20300102: 579, // ro-RO - 0x20500000: 580, // rof - 0x2050012d: 581, // rof-TZ - 0x20700000: 582, // ru - 0x20700046: 583, // ru-BY - 0x207000a3: 584, // ru-KG - 0x207000ac: 585, // ru-KZ - 0x207000ba: 586, // ru-MD - 0x20700104: 587, // ru-RU - 0x2070012e: 588, // ru-UA - 0x20a00000: 589, // rw - 0x20a00105: 590, // rw-RW - 0x20b00000: 591, // rwk - 0x20b0012d: 592, // rwk-TZ - 0x20f00000: 593, // sah - 0x20f00104: 594, // sah-RU - 0x21000000: 595, // saq - 0x210000a2: 596, // saq-KE - 0x21400000: 597, // sbp - 0x2140012d: 598, // sbp-TZ - 0x21c00000: 599, // sdh - 0x21d00000: 600, // se - 0x21d00070: 601, // se-FI - 0x21d000d8: 602, // se-NO - 0x21d0010a: 603, // se-SE - 0x21f00000: 604, // seh - 0x21f000cf: 605, // seh-MZ - 0x22100000: 606, // ses - 0x221000c1: 607, // ses-ML - 0x22200000: 608, // sg - 0x2220004b: 609, // sg-CF - 0x22600000: 610, // shi - 0x22652000: 611, // shi-Latn - 0x226520b8: 612, // shi-Latn-MA - 0x226d2000: 613, // shi-Tfng - 0x226d20b8: 614, // shi-Tfng-MA - 0x22800000: 615, // si - 0x228000b1: 616, // si-LK - 0x22a00000: 617, // sk - 0x22a0010f: 618, // sk-SK - 0x22c00000: 619, // sl - 0x22c0010d: 620, // sl-SI - 0x23000000: 621, // sma - 0x23100000: 622, // smi - 0x23200000: 623, // smj - 0x23300000: 624, // smn - 0x23300070: 625, // smn-FI - 0x23500000: 626, // sms - 0x23600000: 627, // sn - 0x23600161: 628, // sn-ZW - 0x23800000: 629, // so - 0x23800061: 630, // so-DJ - 0x2380006e: 631, // so-ET - 0x238000a2: 632, // so-KE - 0x23800113: 633, // so-SO - 0x23a00000: 634, // sq - 0x23a00026: 635, // sq-AL - 0x23a000c0: 636, // sq-MK - 0x23a0014a: 637, // sq-XK - 0x23b00000: 638, // sr - 0x23b1e000: 639, // sr-Cyrl - 0x23b1e032: 640, // sr-Cyrl-BA - 
0x23b1e0bb: 641, // sr-Cyrl-ME - 0x23b1e103: 642, // sr-Cyrl-RS - 0x23b1e14a: 643, // sr-Cyrl-XK - 0x23b52000: 644, // sr-Latn - 0x23b52032: 645, // sr-Latn-BA - 0x23b520bb: 646, // sr-Latn-ME - 0x23b52103: 647, // sr-Latn-RS - 0x23b5214a: 648, // sr-Latn-XK - 0x24000000: 649, // ss - 0x24100000: 650, // ssy - 0x24200000: 651, // st - 0x24700000: 652, // sv - 0x24700030: 653, // sv-AX - 0x24700070: 654, // sv-FI - 0x2470010a: 655, // sv-SE - 0x24800000: 656, // sw - 0x2480004a: 657, // sw-CD - 0x248000a2: 658, // sw-KE - 0x2480012d: 659, // sw-TZ - 0x2480012f: 660, // sw-UG - 0x24f00000: 661, // syr - 0x25100000: 662, // ta - 0x25100097: 663, // ta-IN - 0x251000b1: 664, // ta-LK - 0x251000ce: 665, // ta-MY - 0x2510010b: 666, // ta-SG - 0x25800000: 667, // te - 0x25800097: 668, // te-IN - 0x25a00000: 669, // teo - 0x25a000a2: 670, // teo-KE - 0x25a0012f: 671, // teo-UG - 0x25d00000: 672, // th - 0x25d00121: 673, // th-TH - 0x26100000: 674, // ti - 0x2610006c: 675, // ti-ER - 0x2610006e: 676, // ti-ET - 0x26200000: 677, // tig - 0x26400000: 678, // tk - 0x26400125: 679, // tk-TM - 0x26b00000: 680, // tn - 0x26c00000: 681, // to - 0x26c00127: 682, // to-TO - 0x26f00000: 683, // tr - 0x26f0005c: 684, // tr-CY - 0x26f00129: 685, // tr-TR - 0x27200000: 686, // ts - 0x27e00000: 687, // twq - 0x27e000d2: 688, // twq-NE - 0x28200000: 689, // tzm - 0x282000b8: 690, // tzm-MA - 0x28400000: 691, // ug - 0x28400052: 692, // ug-CN - 0x28600000: 693, // uk - 0x2860012e: 694, // uk-UA - 0x28c00000: 695, // ur - 0x28c00097: 696, // ur-IN - 0x28c000e6: 697, // ur-PK - 0x28d00000: 698, // uz - 0x28d05000: 699, // uz-Arab - 0x28d05023: 700, // uz-Arab-AF - 0x28d1e000: 701, // uz-Cyrl - 0x28d1e134: 702, // uz-Cyrl-UZ - 0x28d52000: 703, // uz-Latn - 0x28d52134: 704, // uz-Latn-UZ - 0x28e00000: 705, // vai - 0x28e52000: 706, // vai-Latn - 0x28e520b2: 707, // vai-Latn-LR - 0x28ed9000: 708, // vai-Vaii - 0x28ed90b2: 709, // vai-Vaii-LR - 0x28f00000: 710, // ve - 0x29200000: 711, // vi - 0x2920013b: 712, // vi-VN - 0x29700000: 713, // vo - 0x29700001: 714, // vo-001 - 0x29a00000: 715, // vun - 0x29a0012d: 716, // vun-TZ - 0x29b00000: 717, // wa - 0x29c00000: 718, // wae - 0x29c0004d: 719, // wae-CH - 0x2a400000: 720, // wo - 0x2a900000: 721, // xh - 0x2b100000: 722, // xog - 0x2b10012f: 723, // xog-UG - 0x2b700000: 724, // yav - 0x2b700051: 725, // yav-CM - 0x2b900000: 726, // yi - 0x2b900001: 727, // yi-001 - 0x2ba00000: 728, // yo - 0x2ba0003a: 729, // yo-BJ - 0x2ba000d4: 730, // yo-NG - 0x2bd00000: 731, // yue - 0x2bd0008b: 732, // yue-HK - 0x2c300000: 733, // zgh - 0x2c3000b8: 734, // zgh-MA - 0x2c400000: 735, // zh - 0x2c434000: 736, // zh-Hans - 0x2c434052: 737, // zh-Hans-CN - 0x2c43408b: 738, // zh-Hans-HK - 0x2c4340c4: 739, // zh-Hans-MO - 0x2c43410b: 740, // zh-Hans-SG - 0x2c435000: 741, // zh-Hant - 0x2c43508b: 742, // zh-Hant-HK - 0x2c4350c4: 743, // zh-Hant-MO - 0x2c43512c: 744, // zh-Hant-TW - 0x2c600000: 745, // zu - 0x2c60015e: 746, // zu-ZA -} - -// Total table size 4550 bytes (4KiB); checksum: B6D49547 diff --git a/vendor/golang.org/x/text/language/language.go b/vendor/golang.org/x/text/language/language.go deleted file mode 100644 index 5c6dcbd94..000000000 --- a/vendor/golang.org/x/text/language/language.go +++ /dev/null @@ -1,975 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:generate go run maketables.go gen_common.go -output tables.go -//go:generate go run gen_index.go - -// Package language implements BCP 47 language tags and related functionality. -// -// The Tag type, which is used to represent languages, is agnostic to the -// meaning of its subtags. Tags are not fully canonicalized to preserve -// information that may be valuable in certain contexts. As a consequence, two -// different tags may represent identical languages. -// -// Initializing language- or locale-specific components usually consists of -// two steps. The first step is to select a display language based on the -// preferred languages of the user and the languages supported by an application. -// The second step is to create the language-specific services based on -// this selection. Each is discussed in more details below. -// -// Matching preferred against supported languages -// -// An application may support various languages. This list is typically limited -// by the languages for which there exists translations of the user interface. -// Similarly, a user may provide a list of preferred languages which is limited -// by the languages understood by this user. -// An application should use a Matcher to find the best supported language based -// on the user's preferred list. -// Matchers are aware of the intricacies of equivalence between languages. -// The default Matcher implementation takes into account things such as -// deprecated subtags, legacy tags, and mutual intelligibility between scripts -// and languages. -// -// A Matcher for English, Australian English, Danish, and standard Mandarin can -// be defined as follows: -// -// var matcher = language.NewMatcher([]language.Tag{ -// language.English, // The first language is used as fallback. -// language.MustParse("en-AU"), -// language.Danish, -// language.Chinese, -// }) -// -// The following code selects the best match for someone speaking Spanish and -// Norwegian: -// -// preferred := []language.Tag{ language.Spanish, language.Norwegian } -// tag, _, _ := matcher.Match(preferred...) -// -// In this case, the best match is Danish, as Danish is sufficiently a match to -// Norwegian to not have to fall back to the default. -// See ParseAcceptLanguage on how to handle the Accept-Language HTTP header. -// -// Selecting language-specific services -// -// One should always use the Tag returned by the Matcher to create an instance -// of any of the language-specific services provided by the text repository. -// This prevents the mixing of languages, such as having a different language for -// messages and display names, as well as improper casing or sorting order for -// the selected language. -// Using the returned Tag also allows user-defined settings, such as collation -// order or numbering system to be transparently passed as options. -// -// If you have language-specific data in your application, however, it will in -// most cases suffice to use the index returned by the matcher to identify -// the user language. -// The following loop provides an alternative in case this is not sufficient: -// -// supported := map[language.Tag]data{ -// language.English: enData, -// language.MustParse("en-AU"): enAUData, -// language.Danish: daData, -// language.Chinese: zhData, -// } -// tag, _, _ := matcher.Match(preferred...) 
-// for ; tag != language.Und; tag = tag.Parent() { -// if v, ok := supported[tag]; ok { -// return v -// } -// } -// return enData // should not reach here -// -// Repeatedly taking the Parent of the tag returned by Match will eventually -// match one of the tags used to initialize the Matcher. -// -// Canonicalization -// -// By default, only legacy and deprecated tags are converted into their -// canonical equivalent. All other information is preserved. This approach makes -// the confidence scores more accurate and allows matchers to distinguish -// between variants that are otherwise lost. -// -// As a consequence, two tags that should be treated as identical according to -// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The -// Matchers will handle such distinctions, though, and are aware of the -// equivalence relations. The CanonType type can be used to alter the -// canonicalization form. -// -// References -// -// BCP 47 - Tags for Identifying Languages -// http://tools.ietf.org/html/bcp47 -package language - -// TODO: Remove above NOTE after: -// - verifying that tables are dropped correctly (most notably matcher tables). - -import ( - "errors" - "fmt" - "strings" -) - -const ( - // maxCoreSize is the maximum size of a BCP 47 tag without variants and - // extensions. Equals max lang (3) + script (4) + max reg (3) + 2 dashes. - maxCoreSize = 12 - - // max99thPercentileSize is a somewhat arbitrary buffer size that presumably - // is large enough to hold at least 99% of the BCP 47 tags. - max99thPercentileSize = 32 - - // maxSimpleUExtensionSize is the maximum size of a -u extension with one - // key-type pair. Equals len("-u-") + key (2) + dash + max value (8). - maxSimpleUExtensionSize = 14 -) - -// Tag represents a BCP 47 language tag. It is used to specify an instance of a -// specific language or locale. All language tag values are guaranteed to be -// well-formed. -type Tag struct { - lang langID - region regionID - script scriptID - pVariant byte // offset in str, includes preceding '-' - pExt uint16 // offset of first extension, includes preceding '-' - - // str is the string representation of the Tag. It will only be used if the - // tag has variants or extensions. - str string -} - -// Make is a convenience wrapper for Parse that omits the error. -// In case of an error, a sensible default is returned. -func Make(s string) Tag { - return Default.Make(s) -} - -// Make is a convenience wrapper for c.Parse that omits the error. -// In case of an error, a sensible default is returned. -func (c CanonType) Make(s string) Tag { - t, _ := c.Parse(s) - return t -} - -// Raw returns the raw base language, script and region, without making an -// attempt to infer their values. -func (t Tag) Raw() (b Base, s Script, r Region) { - return Base{t.lang}, Script{t.script}, Region{t.region} -} - -// equalTags compares language, script and region subtags only. -func (t Tag) equalTags(a Tag) bool { - return t.lang == a.lang && t.script == a.script && t.region == a.region -} - -// IsRoot returns true if t is equal to language "und". -func (t Tag) IsRoot() bool { - if int(t.pVariant) < len(t.str) { - return false - } - return t.equalTags(und) -} - -// private reports whether the Tag consists solely of a private use tag. -func (t Tag) private() bool { - return t.str != "" && t.pVariant == 0 -} - -// CanonType can be used to enable or disable various types of canonicalization. 
-type CanonType int - -const ( - // Replace deprecated base languages with their preferred replacements. - DeprecatedBase CanonType = 1 << iota - // Replace deprecated scripts with their preferred replacements. - DeprecatedScript - // Replace deprecated regions with their preferred replacements. - DeprecatedRegion - // Remove redundant scripts. - SuppressScript - // Normalize legacy encodings. This includes legacy languages defined in - // CLDR as well as bibliographic codes defined in ISO-639. - Legacy - // Map the dominant language of a macro language group to the macro language - // subtag. For example cmn -> zh. - Macro - // The CLDR flag should be used if full compatibility with CLDR is required. - // There are a few cases where language.Tag may differ from CLDR. To follow all - // of CLDR's suggestions, use All|CLDR. - CLDR - - // Raw can be used to Compose or Parse without Canonicalization. - Raw CanonType = 0 - - // Replace all deprecated tags with their preferred replacements. - Deprecated = DeprecatedBase | DeprecatedScript | DeprecatedRegion - - // All canonicalizations recommended by BCP 47. - BCP47 = Deprecated | SuppressScript - - // All canonicalizations. - All = BCP47 | Legacy | Macro - - // Default is the canonicalization used by Parse, Make and Compose. To - // preserve as much information as possible, canonicalizations that remove - // potentially valuable information are not included. The Matcher is - // designed to recognize similar tags that would be the same if - // they were canonicalized using All. - Default = Deprecated | Legacy - - canonLang = DeprecatedBase | Legacy | Macro - - // TODO: LikelyScript, LikelyRegion: suppress similar to ICU. -) - -// canonicalize returns the canonicalized equivalent of the tag and -// whether there was any change. -func (t Tag) canonicalize(c CanonType) (Tag, bool) { - if c == Raw { - return t, false - } - changed := false - if c&SuppressScript != 0 { - if t.lang < langNoIndexOffset && uint8(t.script) == suppressScript[t.lang] { - t.script = 0 - changed = true - } - } - if c&canonLang != 0 { - for { - if l, aliasType := normLang(t.lang); l != t.lang { - switch aliasType { - case langLegacy: - if c&Legacy != 0 { - if t.lang == _sh && t.script == 0 { - t.script = _Latn - } - t.lang = l - changed = true - } - case langMacro: - if c&Macro != 0 { - // We deviate here from CLDR. The mapping "nb" -> "no" - // qualifies as a typical Macro language mapping. However, - // for legacy reasons, CLDR maps "no", the macro language - // code for Norwegian, to the dominant variant "nb". This - // change is currently under consideration for CLDR as well. - // See http://unicode.org/cldr/trac/ticket/2698 and also - // http://unicode.org/cldr/trac/ticket/1790 for some of the - // practical implications. TODO: this check could be removed - // if CLDR adopts this change. - if c&CLDR == 0 || t.lang != _nb { - changed = true - t.lang = l - } - } - case langDeprecated: - if c&DeprecatedBase != 0 { - if t.lang == _mo && t.region == 0 { - t.region = _MD - } - t.lang = l - changed = true - // Other canonicalization types may still apply. 
- continue - } - } - } else if c&Legacy != 0 && t.lang == _no && c&CLDR != 0 { - t.lang = _nb - changed = true - } - break - } - } - if c&DeprecatedScript != 0 { - if t.script == _Qaai { - changed = true - t.script = _Zinh - } - } - if c&DeprecatedRegion != 0 { - if r := normRegion(t.region); r != 0 { - changed = true - t.region = r - } - } - return t, changed -} - -// Canonicalize returns the canonicalized equivalent of the tag. -func (c CanonType) Canonicalize(t Tag) (Tag, error) { - t, changed := t.canonicalize(c) - if changed { - t.remakeString() - } - return t, nil -} - -// Confidence indicates the level of certainty for a given return value. -// For example, Serbian may be written in Cyrillic or Latin script. -// The confidence level indicates whether a value was explicitly specified, -// whether it is typically the only possible value, or whether there is -// an ambiguity. -type Confidence int - -const ( - No Confidence = iota // full confidence that there was no match - Low // most likely value picked out of a set of alternatives - High // value is generally assumed to be the correct match - Exact // exact match or explicitly specified value -) - -var confName = []string{"No", "Low", "High", "Exact"} - -func (c Confidence) String() string { - return confName[c] -} - -// remakeString is used to update t.str in case lang, script or region changed. -// It is assumed that pExt and pVariant still point to the start of the -// respective parts. -func (t *Tag) remakeString() { - if t.str == "" { - return - } - extra := t.str[t.pVariant:] - if t.pVariant > 0 { - extra = extra[1:] - } - if t.equalTags(und) && strings.HasPrefix(extra, "x-") { - t.str = extra - t.pVariant = 0 - t.pExt = 0 - return - } - var buf [max99thPercentileSize]byte // avoid extra memory allocation in most cases. - b := buf[:t.genCoreBytes(buf[:])] - if extra != "" { - diff := len(b) - int(t.pVariant) - b = append(b, '-') - b = append(b, extra...) - t.pVariant = uint8(int(t.pVariant) + diff) - t.pExt = uint16(int(t.pExt) + diff) - } else { - t.pVariant = uint8(len(b)) - t.pExt = uint16(len(b)) - } - t.str = string(b) -} - -// genCoreBytes writes a string for the base languages, script and region tags -// to the given buffer and returns the number of bytes written. It will never -// write more than maxCoreSize bytes. -func (t *Tag) genCoreBytes(buf []byte) int { - n := t.lang.stringToBuf(buf[:]) - if t.script != 0 { - n += copy(buf[n:], "-") - n += copy(buf[n:], t.script.String()) - } - if t.region != 0 { - n += copy(buf[n:], "-") - n += copy(buf[n:], t.region.String()) - } - return n -} - -// String returns the canonical string representation of the language tag. -func (t Tag) String() string { - if t.str != "" { - return t.str - } - if t.script == 0 && t.region == 0 { - return t.lang.String() - } - buf := [maxCoreSize]byte{} - return string(buf[:t.genCoreBytes(buf[:])]) -} - -// Base returns the base language of the language tag. If the base language is -// unspecified, an attempt will be made to infer it from the context. -// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. -func (t Tag) Base() (Base, Confidence) { - if t.lang != 0 { - return Base{t.lang}, Exact - } - c := High - if t.script == 0 && !(Region{t.region}).IsCountry() { - c = Low - } - if tag, err := addTags(t); err == nil && tag.lang != 0 { - return Base{tag.lang}, c - } - return Base{0}, No -} - -// Script infers the script for the language tag. 
If it was not explicitly given, it will infer -// a most likely candidate. -// If more than one script is commonly used for a language, the most likely one -// is returned with a low confidence indication. For example, it returns (Cyrl, Low) -// for Serbian. -// If a script cannot be inferred (Zzzz, No) is returned. We do not use Zyyy (undetermined) -// as one would suspect from the IANA registry for BCP 47. In a Unicode context Zyyy marks -// common characters (like 1, 2, 3, '.', etc.) and is therefore more like multiple scripts. -// See http://www.unicode.org/reports/tr24/#Values for more details. Zzzz is also used for -// unknown value in CLDR. (Zzzz, Exact) is returned if Zzzz was explicitly specified. -// Note that an inferred script is never guaranteed to be the correct one. Latin is -// almost exclusively used for Afrikaans, but Arabic has been used for some texts -// in the past. Also, the script that is commonly used may change over time. -// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. -func (t Tag) Script() (Script, Confidence) { - if t.script != 0 { - return Script{t.script}, Exact - } - sc, c := scriptID(_Zzzz), No - if t.lang < langNoIndexOffset { - if scr := scriptID(suppressScript[t.lang]); scr != 0 { - // Note: it is not always the case that a language with a suppress - // script value is only written in one script (e.g. kk, ms, pa). - if t.region == 0 { - return Script{scriptID(scr)}, High - } - sc, c = scr, High - } - } - if tag, err := addTags(t); err == nil { - if tag.script != sc { - sc, c = tag.script, Low - } - } else { - t, _ = (Deprecated | Macro).Canonicalize(t) - if tag, err := addTags(t); err == nil && tag.script != sc { - sc, c = tag.script, Low - } - } - return Script{sc}, c -} - -// Region returns the region for the language tag. If it was not explicitly given, it will -// infer a most likely candidate from the context. -// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. -func (t Tag) Region() (Region, Confidence) { - if t.region != 0 { - return Region{t.region}, Exact - } - if t, err := addTags(t); err == nil { - return Region{t.region}, Low // TODO: differentiate between high and low. - } - t, _ = (Deprecated | Macro).Canonicalize(t) - if tag, err := addTags(t); err == nil { - return Region{tag.region}, Low - } - return Region{_ZZ}, No // TODO: return world instead of undetermined? -} - -// Variant returns the variants specified explicitly for this language tag. -// or nil if no variant was specified. -func (t Tag) Variants() []Variant { - v := []Variant{} - if int(t.pVariant) < int(t.pExt) { - for x, str := "", t.str[t.pVariant:t.pExt]; str != ""; { - x, str = nextToken(str) - v = append(v, Variant{x}) - } - } - return v -} - -// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a -// specific language are substituted with fields from the parent language. -// The parent for a language may change for newer versions of CLDR. -func (t Tag) Parent() Tag { - if t.str != "" { - // Strip the variants and extensions. 
- t, _ = Raw.Compose(t.Raw()) - if t.region == 0 && t.script != 0 && t.lang != 0 { - base, _ := addTags(Tag{lang: t.lang}) - if base.script == t.script { - return Tag{lang: t.lang} - } - } - return t - } - if t.lang != 0 { - if t.region != 0 { - maxScript := t.script - if maxScript == 0 { - max, _ := addTags(t) - maxScript = max.script - } - - for i := range parents { - if langID(parents[i].lang) == t.lang && scriptID(parents[i].maxScript) == maxScript { - for _, r := range parents[i].fromRegion { - if regionID(r) == t.region { - return Tag{ - lang: t.lang, - script: scriptID(parents[i].script), - region: regionID(parents[i].toRegion), - } - } - } - } - } - - // Strip the script if it is the default one. - base, _ := addTags(Tag{lang: t.lang}) - if base.script != maxScript { - return Tag{lang: t.lang, script: maxScript} - } - return Tag{lang: t.lang} - } else if t.script != 0 { - // The parent for an base-script pair with a non-default script is - // "und" instead of the base language. - base, _ := addTags(Tag{lang: t.lang}) - if base.script != t.script { - return und - } - return Tag{lang: t.lang} - } - } - return und -} - -// returns token t and the rest of the string. -func nextToken(s string) (t, tail string) { - p := strings.Index(s[1:], "-") - if p == -1 { - return s[1:], "" - } - p++ - return s[1:p], s[p:] -} - -// Extension is a single BCP 47 extension. -type Extension struct { - s string -} - -// String returns the string representation of the extension, including the -// type tag. -func (e Extension) String() string { - return e.s -} - -// ParseExtension parses s as an extension and returns it on success. -func ParseExtension(s string) (e Extension, err error) { - scan := makeScannerString(s) - var end int - if n := len(scan.token); n != 1 { - return Extension{}, errSyntax - } - scan.toLower(0, len(scan.b)) - end = parseExtension(&scan) - if end != len(s) { - return Extension{}, errSyntax - } - return Extension{string(scan.b)}, nil -} - -// Type returns the one-byte extension type of e. It returns 0 for the zero -// exception. -func (e Extension) Type() byte { - if e.s == "" { - return 0 - } - return e.s[0] -} - -// Tokens returns the list of tokens of e. -func (e Extension) Tokens() []string { - return strings.Split(e.s, "-") -} - -// Extension returns the extension of type x for tag t. It will return -// false for ok if t does not have the requested extension. The returned -// extension will be invalid in this case. -func (t Tag) Extension(x byte) (ext Extension, ok bool) { - for i := int(t.pExt); i < len(t.str)-1; { - var ext string - i, ext = getExtension(t.str, i) - if ext[0] == x { - return Extension{ext}, true - } - } - return Extension{string(x)}, false -} - -// Extensions returns all extensions of t. -func (t Tag) Extensions() []Extension { - e := []Extension{} - for i := int(t.pExt); i < len(t.str)-1; { - var ext string - i, ext = getExtension(t.str, i) - e = append(e, Extension{ext}) - } - return e -} - -// TypeForKey returns the type associated with the given key, where key and type -// are of the allowed values defined for the Unicode locale extension ('u') in -// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. -// TypeForKey will traverse the inheritance chain to get the correct value. 
-func (t Tag) TypeForKey(key string) string { - if start, end, _ := t.findTypeForKey(key); end != start { - return t.str[start:end] - } - return "" -} - -var ( - errPrivateUse = errors.New("cannot set a key on a private use tag") - errInvalidArguments = errors.New("invalid key or type") -) - -// SetTypeForKey returns a new Tag with the key set to type, where key and type -// are of the allowed values defined for the Unicode locale extension ('u') in -// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. -// An empty value removes an existing pair with the same key. -func (t Tag) SetTypeForKey(key, value string) (Tag, error) { - if t.private() { - return t, errPrivateUse - } - if len(key) != 2 { - return t, errInvalidArguments - } - - // Remove the setting if value is "". - if value == "" { - start, end, _ := t.findTypeForKey(key) - if start != end { - // Remove key tag and leading '-'. - start -= 4 - - // Remove a possible empty extension. - if (end == len(t.str) || t.str[end+2] == '-') && t.str[start-2] == '-' { - start -= 2 - } - if start == int(t.pVariant) && end == len(t.str) { - t.str = "" - t.pVariant, t.pExt = 0, 0 - } else { - t.str = fmt.Sprintf("%s%s", t.str[:start], t.str[end:]) - } - } - return t, nil - } - - if len(value) < 3 || len(value) > 8 { - return t, errInvalidArguments - } - - var ( - buf [maxCoreSize + maxSimpleUExtensionSize]byte - uStart int // start of the -u extension. - ) - - // Generate the tag string if needed. - if t.str == "" { - uStart = t.genCoreBytes(buf[:]) - buf[uStart] = '-' - uStart++ - } - - // Create new key-type pair and parse it to verify. - b := buf[uStart:] - copy(b, "u-") - copy(b[2:], key) - b[4] = '-' - b = b[:5+copy(b[5:], value)] - scan := makeScanner(b) - if parseExtensions(&scan); scan.err != nil { - return t, scan.err - } - - // Assemble the replacement string. - if t.str == "" { - t.pVariant, t.pExt = byte(uStart-1), uint16(uStart-1) - t.str = string(buf[:uStart+len(b)]) - } else { - s := t.str - start, end, hasExt := t.findTypeForKey(key) - if start == end { - if hasExt { - b = b[2:] - } - t.str = fmt.Sprintf("%s-%s%s", s[:start], b, s[end:]) - } else { - t.str = fmt.Sprintf("%s%s%s", s[:start], value, s[end:]) - } - } - return t, nil -} - -// findKeyAndType returns the start and end position for the type corresponding -// to key or the point at which to insert the key-value pair if the type -// wasn't found. The hasExt return value reports whether an -u extension was present. -// Note: the extensions are typically very small and are likely to contain -// only one key-type pair. -func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) { - p := int(t.pExt) - if len(key) != 2 || p == len(t.str) || p == 0 { - return p, p, false - } - s := t.str - - // Find the correct extension. - for p++; s[p] != 'u'; p++ { - if s[p] > 'u' { - p-- - return p, p, false - } - if p = nextExtension(s, p); p == len(s) { - return len(s), len(s), false - } - } - // Proceed to the hyphen following the extension name. - p++ - - // curKey is the key currently being processed. - curKey := "" - - // Iterate over keys until we get the end of a section. - for { - // p points to the hyphen preceding the current token. - if p3 := p + 3; s[p3] == '-' { - // Found a key. - // Check whether we just processed the key that was requested. - if curKey == key { - return start, p, true - } - // Set to the next key and continue scanning type tokens. 
- curKey = s[p+1 : p3] - if curKey > key { - return p, p, true - } - // Start of the type token sequence. - start = p + 4 - // A type is at least 3 characters long. - p += 7 // 4 + 3 - } else { - // Attribute or type, which is at least 3 characters long. - p += 4 - } - // p points past the third character of a type or attribute. - max := p + 5 // maximum length of token plus hyphen. - if len(s) < max { - max = len(s) - } - for ; p < max && s[p] != '-'; p++ { - } - // Bail if we have exhausted all tokens or if the next token starts - // a new extension. - if p == len(s) || s[p+2] == '-' { - if curKey == key { - return start, p, true - } - return p, p, true - } - } -} - -// CompactIndex returns an index, where 0 <= index < NumCompactTags, for tags -// for which data exists in the text repository. The index will change over time -// and should not be stored in persistent storage. Extensions, except for the -// 'va' type of the 'u' extension, are ignored. It will return 0, false if no -// compact tag exists, where 0 is the index for the root language (Und). -func CompactIndex(t Tag) (index int, ok bool) { - // TODO: perhaps give more frequent tags a lower index. - // TODO: we could make the indexes stable. This will excluded some - // possibilities for optimization, so don't do this quite yet. - b, s, r := t.Raw() - if len(t.str) > 0 { - if strings.HasPrefix(t.str, "x-") { - // We have no entries for user-defined tags. - return 0, false - } - if uint16(t.pVariant) != t.pExt { - // There are no tags with variants and an u-va type. - if t.TypeForKey("va") != "" { - return 0, false - } - t, _ = Raw.Compose(b, s, r, t.Variants()) - } else if _, ok := t.Extension('u'); ok { - // Strip all but the 'va' entry. - variant := t.TypeForKey("va") - t, _ = Raw.Compose(b, s, r) - t, _ = t.SetTypeForKey("va", variant) - } - if len(t.str) > 0 { - // We have some variants. - for i, s := range specialTags { - if s == t { - return i + 1, true - } - } - return 0, false - } - } - // No variants specified: just compare core components. - // The key has the form lllssrrr, where l, s, and r are nibbles for - // respectively the langID, scriptID, and regionID. - key := uint32(b.langID) << (8 + 12) - key |= uint32(s.scriptID) << 12 - key |= uint32(r.regionID) - x, ok := coreTags[key] - return int(x), ok -} - -// Base is an ISO 639 language code, used for encoding the base language -// of a language tag. -type Base struct { - langID -} - -// ParseBase parses a 2- or 3-letter ISO 639 code. -// It returns a ValueError if s is a well-formed but unknown language identifier -// or another error if another error occurred. -func ParseBase(s string) (Base, error) { - if n := len(s); n < 2 || 3 < n { - return Base{}, errSyntax - } - var buf [3]byte - l, err := getLangID(buf[:copy(buf[:], s)]) - return Base{l}, err -} - -// Script is a 4-letter ISO 15924 code for representing scripts. -// It is idiomatically represented in title case. -type Script struct { - scriptID -} - -// ParseScript parses a 4-letter ISO 15924 code. -// It returns a ValueError if s is a well-formed but unknown script identifier -// or another error if another error occurred. -func ParseScript(s string) (Script, error) { - if len(s) != 4 { - return Script{}, errSyntax - } - var buf [4]byte - sc, err := getScriptID(script, buf[:copy(buf[:], s)]) - return Script{sc}, err -} - -// Region is an ISO 3166-1 or UN M.49 code for representing countries and regions. -type Region struct { - regionID -} - -// EncodeM49 returns the Region for the given UN M.49 code. 
-// It returns an error if r is not a valid code. -func EncodeM49(r int) (Region, error) { - rid, err := getRegionM49(r) - return Region{rid}, err -} - -// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code. -// It returns a ValueError if s is a well-formed but unknown region identifier -// or another error if another error occurred. -func ParseRegion(s string) (Region, error) { - if n := len(s); n < 2 || 3 < n { - return Region{}, errSyntax - } - var buf [3]byte - r, err := getRegionID(buf[:copy(buf[:], s)]) - return Region{r}, err -} - -// IsCountry returns whether this region is a country or autonomous area. This -// includes non-standard definitions from CLDR. -func (r Region) IsCountry() bool { - if r.regionID == 0 || r.IsGroup() || r.IsPrivateUse() && r.regionID != _XK { - return false - } - return true -} - -// IsGroup returns whether this region defines a collection of regions. This -// includes non-standard definitions from CLDR. -func (r Region) IsGroup() bool { - if r.regionID == 0 { - return false - } - return int(regionInclusion[r.regionID]) < len(regionContainment) -} - -// Contains returns whether Region c is contained by Region r. It returns true -// if c == r. -func (r Region) Contains(c Region) bool { - return r.regionID.contains(c.regionID) -} - -func (r regionID) contains(c regionID) bool { - if r == c { - return true - } - g := regionInclusion[r] - if g >= nRegionGroups { - return false - } - m := regionContainment[g] - - d := regionInclusion[c] - b := regionInclusionBits[d] - - // A contained country may belong to multiple disjoint groups. Matching any - // of these indicates containment. If the contained region is a group, it - // must strictly be a subset. - if d >= nRegionGroups { - return b&m != 0 - } - return b&^m == 0 -} - -var errNoTLD = errors.New("language: region is not a valid ccTLD") - -// TLD returns the country code top-level domain (ccTLD). UK is returned for GB. -// In all other cases it returns either the region itself or an error. -// -// This method may return an error for a region for which there exists a -// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The -// region will already be canonicalized it was obtained from a Tag that was -// obtained using any of the default methods. -func (r Region) TLD() (Region, error) { - // See http://en.wikipedia.org/wiki/Country_code_top-level_domain for the - // difference between ISO 3166-1 and IANA ccTLD. - if r.regionID == _GB { - r = Region{_UK} - } - if (r.typ() & ccTLD) == 0 { - return Region{}, errNoTLD - } - return r, nil -} - -// Canonicalize returns the region or a possible replacement if the region is -// deprecated. It will not return a replacement for deprecated regions that -// are split into multiple regions. -func (r Region) Canonicalize() Region { - if cr := normRegion(r.regionID); cr != 0 { - return Region{cr} - } - return r -} - -// Variant represents a registered variant of a language as defined by BCP 47. -type Variant struct { - variant string -} - -// ParseVariant parses and returns a Variant. An error is returned if s is not -// a valid variant. -func ParseVariant(s string) (Variant, error) { - s = strings.ToLower(s) - if _, ok := variantIndex[s]; ok { - return Variant{s}, nil - } - return Variant{}, mkErrInvalid([]byte(s)) -} - -// String returns the string representation of the variant. 
-func (v Variant) String() string { - return v.variant -} diff --git a/vendor/golang.org/x/text/language/lookup.go b/vendor/golang.org/x/text/language/lookup.go deleted file mode 100644 index 1d80ac370..000000000 --- a/vendor/golang.org/x/text/language/lookup.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package language - -import ( - "bytes" - "fmt" - "sort" - "strconv" - - "golang.org/x/text/internal/tag" -) - -// findIndex tries to find the given tag in idx and returns a standardized error -// if it could not be found. -func findIndex(idx tag.Index, key []byte, form string) (index int, err error) { - if !tag.FixCase(form, key) { - return 0, errSyntax - } - i := idx.Index(key) - if i == -1 { - return 0, mkErrInvalid(key) - } - return i, nil -} - -func searchUint(imap []uint16, key uint16) int { - return sort.Search(len(imap), func(i int) bool { - return imap[i] >= key - }) -} - -type langID uint16 - -// getLangID returns the langID of s if s is a canonical subtag -// or langUnknown if s is not a canonical subtag. -func getLangID(s []byte) (langID, error) { - if len(s) == 2 { - return getLangISO2(s) - } - return getLangISO3(s) -} - -// mapLang returns the mapped langID of id according to mapping m. -func normLang(id langID) (langID, langAliasType) { - k := sort.Search(len(langAliasMap), func(i int) bool { - return langAliasMap[i].from >= uint16(id) - }) - if k < len(langAliasMap) && langAliasMap[k].from == uint16(id) { - return langID(langAliasMap[k].to), langAliasTypes[k] - } - return id, langAliasTypeUnknown -} - -// getLangISO2 returns the langID for the given 2-letter ISO language code -// or unknownLang if this does not exist. -func getLangISO2(s []byte) (langID, error) { - if !tag.FixCase("zz", s) { - return 0, errSyntax - } - if i := lang.Index(s); i != -1 && lang.Elem(i)[3] != 0 { - return langID(i), nil - } - return 0, mkErrInvalid(s) -} - -const base = 'z' - 'a' + 1 - -func strToInt(s []byte) uint { - v := uint(0) - for i := 0; i < len(s); i++ { - v *= base - v += uint(s[i] - 'a') - } - return v -} - -// converts the given integer to the original ASCII string passed to strToInt. -// len(s) must match the number of characters obtained. -func intToStr(v uint, s []byte) { - for i := len(s) - 1; i >= 0; i-- { - s[i] = byte(v%base) + 'a' - v /= base - } -} - -// getLangISO3 returns the langID for the given 3-letter ISO language code -// or unknownLang if this does not exist. -func getLangISO3(s []byte) (langID, error) { - if tag.FixCase("und", s) { - // first try to match canonical 3-letter entries - for i := lang.Index(s[:2]); i != -1; i = lang.Next(s[:2], i) { - if e := lang.Elem(i); e[3] == 0 && e[2] == s[2] { - // We treat "und" as special and always translate it to "unspecified". - // Note that ZZ and Zzzz are private use and are not treated as - // unspecified by default. - id := langID(i) - if id == nonCanonicalUnd { - return 0, nil - } - return id, nil - } - } - if i := altLangISO3.Index(s); i != -1 { - return langID(altLangIndex[altLangISO3.Elem(i)[3]]), nil - } - n := strToInt(s) - if langNoIndex[n/8]&(1<<(n%8)) != 0 { - return langID(n) + langNoIndexOffset, nil - } - // Check for non-canonical uses of ISO3. 
- for i := lang.Index(s[:1]); i != -1; i = lang.Next(s[:1], i) { - if e := lang.Elem(i); e[2] == s[1] && e[3] == s[2] { - return langID(i), nil - } - } - return 0, mkErrInvalid(s) - } - return 0, errSyntax -} - -// stringToBuf writes the string to b and returns the number of bytes -// written. cap(b) must be >= 3. -func (id langID) stringToBuf(b []byte) int { - if id >= langNoIndexOffset { - intToStr(uint(id)-langNoIndexOffset, b[:3]) - return 3 - } else if id == 0 { - return copy(b, "und") - } - l := lang[id<<2:] - if l[3] == 0 { - return copy(b, l[:3]) - } - return copy(b, l[:2]) -} - -// String returns the BCP 47 representation of the langID. -// Use b as variable name, instead of id, to ensure the variable -// used is consistent with that of Base in which this type is embedded. -func (b langID) String() string { - if b == 0 { - return "und" - } else if b >= langNoIndexOffset { - b -= langNoIndexOffset - buf := [3]byte{} - intToStr(uint(b), buf[:]) - return string(buf[:]) - } - l := lang.Elem(int(b)) - if l[3] == 0 { - return l[:3] - } - return l[:2] -} - -// ISO3 returns the ISO 639-3 language code. -func (b langID) ISO3() string { - if b == 0 || b >= langNoIndexOffset { - return b.String() - } - l := lang.Elem(int(b)) - if l[3] == 0 { - return l[:3] - } else if l[2] == 0 { - return altLangISO3.Elem(int(l[3]))[:3] - } - // This allocation will only happen for 3-letter ISO codes - // that are non-canonical BCP 47 language identifiers. - return l[0:1] + l[2:4] -} - -// IsPrivateUse reports whether this language code is reserved for private use. -func (b langID) IsPrivateUse() bool { - return langPrivateStart <= b && b <= langPrivateEnd -} - -type regionID uint16 - -// getRegionID returns the region id for s if s is a valid 2-letter region code -// or unknownRegion. -func getRegionID(s []byte) (regionID, error) { - if len(s) == 3 { - if isAlpha(s[0]) { - return getRegionISO3(s) - } - if i, err := strconv.ParseUint(string(s), 10, 10); err == nil { - return getRegionM49(int(i)) - } - } - return getRegionISO2(s) -} - -// getRegionISO2 returns the regionID for the given 2-letter ISO country code -// or unknownRegion if this does not exist. -func getRegionISO2(s []byte) (regionID, error) { - i, err := findIndex(regionISO, s, "ZZ") - if err != nil { - return 0, err - } - return regionID(i) + isoRegionOffset, nil -} - -// getRegionISO3 returns the regionID for the given 3-letter ISO country code -// or unknownRegion if this does not exist. 
-func getRegionISO3(s []byte) (regionID, error) { - if tag.FixCase("ZZZ", s) { - for i := regionISO.Index(s[:1]); i != -1; i = regionISO.Next(s[:1], i) { - if e := regionISO.Elem(i); e[2] == s[1] && e[3] == s[2] { - return regionID(i) + isoRegionOffset, nil - } - } - for i := 0; i < len(altRegionISO3); i += 3 { - if tag.Compare(altRegionISO3[i:i+3], s) == 0 { - return regionID(altRegionIDs[i/3]), nil - } - } - return 0, mkErrInvalid(s) - } - return 0, errSyntax -} - -func getRegionM49(n int) (regionID, error) { - if 0 < n && n <= 999 { - const ( - searchBits = 7 - regionBits = 9 - regionMask = 1<<regionBits - 1 - ) - idx := n >> searchBits - buf := fromM49[m49Index[idx]:m49Index[idx+1]] - val := uint16(n) << regionBits // we rely on bits shifting out - i := sort.Search(len(buf), func(i int) bool { - return buf[i] >= val - }) - if r := fromM49[int(m49Index[idx])+i]; r&^regionMask == val { - return regionID(r & regionMask), nil - } - } - var e ValueError - fmt.Fprint(bytes.NewBuffer([]byte(e.v[:])), n) - return 0, e -} - -// normRegion returns a region if r is deprecated or 0 otherwise. -// TODO: consider supporting BYS (-> BLR), CSK (-> 200 or CZ), PHI (-> PHL) and AFI (-> DJ). -// TODO: consider mapping split up regions to new most populous one (like CLDR). -func normRegion(r regionID) regionID { - m := regionOldMap - k := sort.Search(len(m), func(i int) bool { - return m[i].from >= uint16(r) - }) - if k < len(m) && m[k].from == uint16(r) { - return regionID(m[k].to) - } - return 0 -} - -const ( - iso3166UserAssigned = 1 << iota - ccTLD - bcp47Region -) - -func (r regionID) typ() byte { - return regionTypes[r] -} - -// String returns the BCP 47 representation for the region. -// It returns "ZZ" for an unspecified region. -func (r regionID) String() string { - if r < isoRegionOffset { - if r == 0 { - return "ZZ" - } - return fmt.Sprintf("%03d", r.M49()) - } - r -= isoRegionOffset - return regionISO.Elem(int(r))[:2] -} - -// ISO3 returns the 3-letter ISO code of r. -// Note that not all regions have a 3-letter ISO code. -// In such cases this method returns "ZZZ". -func (r regionID) ISO3() string { - if r < isoRegionOffset { - return "ZZZ" - } - r -= isoRegionOffset - reg := regionISO.Elem(int(r)) - switch reg[2] { - case 0: - return altRegionISO3[reg[3]:][:3] - case ' ': - return "ZZZ" - } - return reg[0:1] + reg[2:4] -} - -// M49 returns the UN M.49 encoding of r, or 0 if this encoding -// is not defined for r. -func (r regionID) M49() int { - return int(m49[r]) -} - -// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This -// may include private-use tags that are assigned by CLDR and used in this -// implementation. So IsPrivateUse and IsCountry can be simultaneously true. -func (r regionID) IsPrivateUse() bool { - return r.typ()&iso3166UserAssigned != 0 -} - -type scriptID uint8 - -// getScriptID returns the script id for string s. It assumes that s -// is of the format [A-Z][a-z]{3}. -func getScriptID(idx tag.Index, s []byte) (scriptID, error) { - i, err := findIndex(idx, s, "Zzzz") - return scriptID(i), err -} - -// String returns the script code in title case. -// It returns "Zzzz" for an unspecified script. -func (s scriptID) String() string { - if s == 0 { - return "Zzzz" - } - return script.Elem(int(s)) -} - -// IsPrivateUse reports whether this script code is reserved for private use. 
-func (s scriptID) IsPrivateUse() bool { - return _Qaaa <= s && s <= _Qabx -} - -const ( - maxAltTaglen = len("en-US-POSIX") - maxLen = maxAltTaglen -) - -var ( - // grandfatheredMap holds a mapping from legacy and grandfathered tags to - // their base language or index to more elaborate tag. - grandfatheredMap = map[[maxLen]byte]int16{ - [maxLen]byte{'a', 'r', 't', '-', 'l', 'o', 'j', 'b', 'a', 'n'}: _jbo, // art-lojban - [maxLen]byte{'i', '-', 'a', 'm', 'i'}: _ami, // i-ami - [maxLen]byte{'i', '-', 'b', 'n', 'n'}: _bnn, // i-bnn - [maxLen]byte{'i', '-', 'h', 'a', 'k'}: _hak, // i-hak - [maxLen]byte{'i', '-', 'k', 'l', 'i', 'n', 'g', 'o', 'n'}: _tlh, // i-klingon - [maxLen]byte{'i', '-', 'l', 'u', 'x'}: _lb, // i-lux - [maxLen]byte{'i', '-', 'n', 'a', 'v', 'a', 'j', 'o'}: _nv, // i-navajo - [maxLen]byte{'i', '-', 'p', 'w', 'n'}: _pwn, // i-pwn - [maxLen]byte{'i', '-', 't', 'a', 'o'}: _tao, // i-tao - [maxLen]byte{'i', '-', 't', 'a', 'y'}: _tay, // i-tay - [maxLen]byte{'i', '-', 't', 's', 'u'}: _tsu, // i-tsu - [maxLen]byte{'n', 'o', '-', 'b', 'o', 'k'}: _nb, // no-bok - [maxLen]byte{'n', 'o', '-', 'n', 'y', 'n'}: _nn, // no-nyn - [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'f', 'r'}: _sfb, // sgn-BE-FR - [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'n', 'l'}: _vgt, // sgn-BE-NL - [maxLen]byte{'s', 'g', 'n', '-', 'c', 'h', '-', 'd', 'e'}: _sgg, // sgn-CH-DE - [maxLen]byte{'z', 'h', '-', 'g', 'u', 'o', 'y', 'u'}: _cmn, // zh-guoyu - [maxLen]byte{'z', 'h', '-', 'h', 'a', 'k', 'k', 'a'}: _hak, // zh-hakka - [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n', '-', 'n', 'a', 'n'}: _nan, // zh-min-nan - [maxLen]byte{'z', 'h', '-', 'x', 'i', 'a', 'n', 'g'}: _hsn, // zh-xiang - - // Grandfathered tags with no modern replacement will be converted as - // follows: - [maxLen]byte{'c', 'e', 'l', '-', 'g', 'a', 'u', 'l', 'i', 's', 'h'}: -1, // cel-gaulish - [maxLen]byte{'e', 'n', '-', 'g', 'b', '-', 'o', 'e', 'd'}: -2, // en-GB-oed - [maxLen]byte{'i', '-', 'd', 'e', 'f', 'a', 'u', 'l', 't'}: -3, // i-default - [maxLen]byte{'i', '-', 'e', 'n', 'o', 'c', 'h', 'i', 'a', 'n'}: -4, // i-enochian - [maxLen]byte{'i', '-', 'm', 'i', 'n', 'g', 'o'}: -5, // i-mingo - [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n'}: -6, // zh-min - - // CLDR-specific tag. - [maxLen]byte{'r', 'o', 'o', 't'}: 0, // root - [maxLen]byte{'e', 'n', '-', 'u', 's', '-', 'p', 'o', 's', 'i', 'x'}: -7, // en_US_POSIX" - } - - altTagIndex = [...]uint8{0, 17, 31, 45, 61, 74, 86, 102} - - altTags = "xtg-x-cel-gaulishen-GB-oxendicten-x-i-defaultund-x-i-enochiansee-x-i-mingonan-x-zh-minen-US-u-va-posix" -) - -func grandfathered(s [maxAltTaglen]byte) (t Tag, ok bool) { - if v, ok := grandfatheredMap[s]; ok { - if v < 0 { - return Make(altTags[altTagIndex[-v-1]:altTagIndex[-v]]), true - } - t.lang = langID(v) - return t, true - } - return t, false -} diff --git a/vendor/golang.org/x/text/language/maketables.go b/vendor/golang.org/x/text/language/maketables.go deleted file mode 100644 index 2cc995b37..000000000 --- a/vendor/golang.org/x/text/language/maketables.go +++ /dev/null @@ -1,1635 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Language tag table generator. -// Data read from the web. 
- -package main - -import ( - "bufio" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/tag" - "golang.org/x/text/unicode/cldr" -) - -var ( - test = flag.Bool("test", - false, - "test existing tables; can be used to compare web data with package data.") - outputFile = flag.String("output", - "tables.go", - "output file for generated tables") -) - -var comment = []string{ - ` -lang holds an alphabetically sorted list of ISO-639 language identifiers. -All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. -For 2-byte language identifiers, the two successive bytes have the following meaning: - - if the first letter of the 2- and 3-letter ISO codes are the same: - the second and third letter of the 3-letter ISO code. - - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. -For 3-byte language identifiers the 4th byte is 0.`, - ` -langNoIndex is a bit vector of all 3-letter language codes that are not used as an index -in lookup tables. The language ids for these language codes are derived directly -from the letters and are not consecutive.`, - ` -altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives -to 2-letter language codes that cannot be derived using the method described above. -Each 3-letter code is followed by its 1-byte langID.`, - ` -altLangIndex is used to convert indexes in altLangISO3 to langIDs.`, - ` -langAliasMap maps langIDs to their suggested replacements.`, - ` -script is an alphabetically sorted list of ISO 15924 codes. The index -of the script in the string, divided by 4, is the internal scriptID.`, - ` -isoRegionOffset needs to be added to the index of regionISO to obtain the regionID -for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for -the UN.M49 codes used for groups.)`, - ` -regionISO holds a list of alphabetically sorted 2-letter ISO region codes. -Each 2-letter codes is followed by two bytes with the following meaning: - - [A-Z}{2}: the first letter of the 2-letter code plus these two - letters form the 3-letter ISO code. - - 0, n: index into altRegionISO3.`, - ` -regionTypes defines the status of a region for various standards.`, - ` -m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are -codes indicating collections of regions.`, - ` -m49Index gives indexes into fromM49 based on the three most significant bits -of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in - fromM49[m49Index[msb39(code)]:m49Index[msb3(code)+1]] -for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. -The region code is stored in the 9 lsb of the indexed value.`, - ` -fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.`, - ` -altRegionISO3 holds a list of 3-letter region codes that cannot be -mapped to 2-letter codes using the default algorithm. This is a short list.`, - ` -altRegionIDs holds a list of regionIDs the positions of which match those -of the 3-letter ISO codes in altRegionISO3.`, - ` -variantNumSpecialized is the number of specialized variants in variants.`, - ` -suppressScript is an index from langID to the dominant script for that language, -if it exists. If a script is given, it should be suppressed from the language tag.`, - ` -likelyLang is a lookup table, indexed by langID, for the most likely -scripts and regions given incomplete information. 
If more entries exist for a -given language, region and script are the index and size respectively -of the list in likelyLangList.`, - ` -likelyLangList holds lists info associated with likelyLang.`, - ` -likelyRegion is a lookup table, indexed by regionID, for the most likely -languages and scripts given incomplete information. If more entries exist -for a given regionID, lang and script are the index and size respectively -of the list in likelyRegionList. -TODO: exclude containers and user-definable regions from the list.`, - ` -likelyRegionList holds lists info associated with likelyRegion.`, - ` -likelyScript is a lookup table, indexed by scriptID, for the most likely -languages and regions given a script.`, - ` -matchLang holds pairs of langIDs of base languages that are typically -mutually intelligible. Each pair is associated with a confidence and -whether the intelligibility goes one or both ways.`, - ` -matchScript holds pairs of scriptIDs where readers of one script -can typically also read the other. Each is associated with a confidence.`, - ` -nRegionGroups is the number of region groups.`, - ` -regionInclusion maps region identifiers to sets of regions in regionInclusionBits, -where each set holds all groupings that are directly connected in a region -containment graph.`, - ` -regionInclusionBits is an array of bit vectors where every vector represents -a set of region groupings. These sets are used to compute the distance -between two regions for the purpose of language matching.`, - ` -regionInclusionNext marks, for each entry in regionInclusionBits, the set of -all groups that are reachable from the groups set in the respective entry.`, -} - -// TODO: consider changing some of these structures to tries. This can reduce -// memory, but may increase the need for memory allocations. This could be -// mitigated if we can piggyback on language tags for common cases. - -func failOnError(e error) { - if e != nil { - log.Panic(e) - } -} - -type setType int - -const ( - Indexed setType = 1 + iota // all elements must be of same size - Linear -) - -type stringSet struct { - s []string - sorted, frozen bool - - // We often need to update values after the creation of an index is completed. - // We include a convenience map for keeping track of this. - update map[string]string - typ setType // used for checking. -} - -func (ss *stringSet) clone() stringSet { - c := *ss - c.s = append([]string(nil), c.s...) - return c -} - -func (ss *stringSet) setType(t setType) { - if ss.typ != t && ss.typ != 0 { - log.Panicf("type %d cannot be assigned as it was already %d", t, ss.typ) - } -} - -// parse parses a whitespace-separated string and initializes ss with its -// components. 
-func (ss *stringSet) parse(s string) { - scan := bufio.NewScanner(strings.NewReader(s)) - scan.Split(bufio.ScanWords) - for scan.Scan() { - ss.add(scan.Text()) - } -} - -func (ss *stringSet) assertChangeable() { - if ss.frozen { - log.Panic("attempt to modify a frozen stringSet") - } -} - -func (ss *stringSet) add(s string) { - ss.assertChangeable() - ss.s = append(ss.s, s) - ss.sorted = ss.frozen -} - -func (ss *stringSet) freeze() { - ss.compact() - ss.frozen = true -} - -func (ss *stringSet) compact() { - if ss.sorted { - return - } - a := ss.s - sort.Strings(a) - k := 0 - for i := 1; i < len(a); i++ { - if a[k] != a[i] { - a[k+1] = a[i] - k++ - } - } - ss.s = a[:k+1] - ss.sorted = ss.frozen -} - -type funcSorter struct { - fn func(a, b string) bool - sort.StringSlice -} - -func (s funcSorter) Less(i, j int) bool { - return s.fn(s.StringSlice[i], s.StringSlice[j]) -} - -func (ss *stringSet) sortFunc(f func(a, b string) bool) { - ss.compact() - sort.Sort(funcSorter{f, sort.StringSlice(ss.s)}) -} - -func (ss *stringSet) remove(s string) { - ss.assertChangeable() - if i, ok := ss.find(s); ok { - copy(ss.s[i:], ss.s[i+1:]) - ss.s = ss.s[:len(ss.s)-1] - } -} - -func (ss *stringSet) replace(ol, nu string) { - ss.s[ss.index(ol)] = nu - ss.sorted = ss.frozen -} - -func (ss *stringSet) index(s string) int { - ss.setType(Indexed) - i, ok := ss.find(s) - if !ok { - if i < len(ss.s) { - log.Panicf("find: item %q is not in list. Closest match is %q.", s, ss.s[i]) - } - log.Panicf("find: item %q is not in list", s) - - } - return i -} - -func (ss *stringSet) find(s string) (int, bool) { - ss.compact() - i := sort.SearchStrings(ss.s, s) - return i, i != len(ss.s) && ss.s[i] == s -} - -func (ss *stringSet) slice() []string { - ss.compact() - return ss.s -} - -func (ss *stringSet) updateLater(v, key string) { - if ss.update == nil { - ss.update = map[string]string{} - } - ss.update[v] = key -} - -// join joins the string and ensures that all entries are of the same length. -func (ss *stringSet) join() string { - ss.setType(Indexed) - n := len(ss.s[0]) - for _, s := range ss.s { - if len(s) != n { - log.Panicf("join: not all entries are of the same length: %q", s) - } - } - ss.s = append(ss.s, strings.Repeat("\xff", n)) - return strings.Join(ss.s, "") -} - -// ianaEntry holds information for an entry in the IANA Language Subtag Repository. -// All types use the same entry. -// See http://tools.ietf.org/html/bcp47#section-5.1 for a description of the various -// fields. -type ianaEntry struct { - typ string - description []string - scope string - added string - preferred string - deprecated string - suppressScript string - macro string - prefix []string -} - -type builder struct { - w *gen.CodeWriter - hw io.Writer // MultiWriter for w and w.Hash - data *cldr.CLDR - supp *cldr.SupplementalData - - // indices - locale stringSet // common locales - lang stringSet // canonical language ids (2 or 3 letter ISO codes) with data - langNoIndex stringSet // 3-letter ISO codes with no associated data - script stringSet // 4-letter ISO codes - region stringSet // 2-letter ISO or 3-digit UN M49 codes - variant stringSet // 4-8-alphanumeric variant code. - - // Region codes that are groups with their corresponding group IDs. 
- groups map[int]index - - // langInfo - registry map[string]*ianaEntry -} - -type index uint - -func newBuilder(w *gen.CodeWriter) *builder { - r := gen.OpenCLDRCoreZip() - defer r.Close() - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - failOnError(err) - b := builder{ - w: w, - hw: io.MultiWriter(w, w.Hash), - data: data, - supp: data.Supplemental(), - } - b.parseRegistry() - return &b -} - -func (b *builder) parseRegistry() { - r := gen.OpenIANAFile("assignments/language-subtag-registry") - defer r.Close() - b.registry = make(map[string]*ianaEntry) - - scan := bufio.NewScanner(r) - scan.Split(bufio.ScanWords) - var record *ianaEntry - for more := scan.Scan(); more; { - key := scan.Text() - more = scan.Scan() - value := scan.Text() - switch key { - case "Type:": - record = &ianaEntry{typ: value} - case "Subtag:", "Tag:": - if s := strings.SplitN(value, "..", 2); len(s) > 1 { - for a := s[0]; a <= s[1]; a = inc(a) { - b.addToRegistry(a, record) - } - } else { - b.addToRegistry(value, record) - } - case "Suppress-Script:": - record.suppressScript = value - case "Added:": - record.added = value - case "Deprecated:": - record.deprecated = value - case "Macrolanguage:": - record.macro = value - case "Preferred-Value:": - record.preferred = value - case "Prefix:": - record.prefix = append(record.prefix, value) - case "Scope:": - record.scope = value - case "Description:": - buf := []byte(value) - for more = scan.Scan(); more; more = scan.Scan() { - b := scan.Bytes() - if b[0] == '%' || b[len(b)-1] == ':' { - break - } - buf = append(buf, ' ') - buf = append(buf, b...) - } - record.description = append(record.description, string(buf)) - continue - default: - continue - } - more = scan.Scan() - } - if scan.Err() != nil { - log.Panic(scan.Err()) - } -} - -func (b *builder) addToRegistry(key string, entry *ianaEntry) { - if info, ok := b.registry[key]; ok { - if info.typ != "language" || entry.typ != "extlang" { - log.Fatalf("parseRegistry: tag %q already exists", key) - } - } else { - b.registry[key] = entry - } -} - -var commentIndex = make(map[string]string) - -func init() { - for _, s := range comment { - key := strings.TrimSpace(strings.SplitN(s, " ", 2)[0]) - commentIndex[key] = s - } -} - -func (b *builder) comment(name string) { - if s := commentIndex[name]; len(s) > 0 { - b.w.WriteComment(s) - } else { - fmt.Fprintln(b.w) - } -} - -func (b *builder) pf(f string, x ...interface{}) { - fmt.Fprintf(b.hw, f, x...) - fmt.Fprint(b.hw, "\n") -} - -func (b *builder) p(x ...interface{}) { - fmt.Fprintln(b.hw, x...) -} - -func (b *builder) addSize(s int) { - b.w.Size += s - b.pf("// Size: %d bytes", s) -} - -func (b *builder) writeConst(name string, x interface{}) { - b.comment(name) - b.w.WriteConst(name, x) -} - -// writeConsts computes f(v) for all v in values and writes the results -// as constants named _v to a single constant block. -func (b *builder) writeConsts(f func(string) int, values ...string) { - b.pf("const (") - for _, v := range values { - b.pf("\t_%s = %v", v, f(v)) - } - b.pf(")") -} - -// writeType writes the type of the given value, which must be a struct. 
-func (b *builder) writeType(value interface{}) { - b.comment(reflect.TypeOf(value).Name()) - b.w.WriteType(value) -} - -func (b *builder) writeSlice(name string, ss interface{}) { - b.writeSliceAddSize(name, 0, ss) -} - -func (b *builder) writeSliceAddSize(name string, extraSize int, ss interface{}) { - b.comment(name) - b.w.Size += extraSize - v := reflect.ValueOf(ss) - t := v.Type().Elem() - b.pf("// Size: %d bytes, %d elements", v.Len()*int(t.Size())+extraSize, v.Len()) - - fmt.Fprintf(b.w, "var %s = ", name) - b.w.WriteArray(ss) - b.p() -} - -type fromTo struct { - from, to uint16 -} - -func (b *builder) writeSortedMap(name string, ss *stringSet, index func(s string) uint16) { - ss.sortFunc(func(a, b string) bool { - return index(a) < index(b) - }) - m := []fromTo{} - for _, s := range ss.s { - m = append(m, fromTo{index(s), index(ss.update[s])}) - } - b.writeSlice(name, m) -} - -const base = 'z' - 'a' + 1 - -func strToInt(s string) uint { - v := uint(0) - for i := 0; i < len(s); i++ { - v *= base - v += uint(s[i] - 'a') - } - return v -} - -// converts the given integer to the original ASCII string passed to strToInt. -// len(s) must match the number of characters obtained. -func intToStr(v uint, s []byte) { - for i := len(s) - 1; i >= 0; i-- { - s[i] = byte(v%base) + 'a' - v /= base - } -} - -func (b *builder) writeBitVector(name string, ss []string) { - vec := make([]uint8, int(math.Ceil(math.Pow(base, float64(len(ss[0])))/8))) - for _, s := range ss { - v := strToInt(s) - vec[v/8] |= 1 << (v % 8) - } - b.writeSlice(name, vec) -} - -// TODO: convert this type into a list or two-stage trie. -func (b *builder) writeMapFunc(name string, m map[string]string, f func(string) uint16) { - b.comment(name) - v := reflect.ValueOf(m) - sz := v.Len() * (2 + int(v.Type().Key().Size())) - for _, k := range m { - sz += len(k) - } - b.addSize(sz) - keys := []string{} - b.pf(`var %s = map[string]uint16{`, name) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - b.pf("\t%q: %v,", k, f(m[k])) - } - b.p("}") -} - -func (b *builder) writeMap(name string, m interface{}) { - b.comment(name) - v := reflect.ValueOf(m) - sz := v.Len() * (2 + int(v.Type().Key().Size()) + int(v.Type().Elem().Size())) - b.addSize(sz) - f := strings.FieldsFunc(fmt.Sprintf("%#v", m), func(r rune) bool { - return strings.IndexRune("{}, ", r) != -1 - }) - sort.Strings(f[1:]) - b.pf(`var %s = %s{`, name, f[0]) - for _, kv := range f[1:] { - b.pf("\t%s,", kv) - } - b.p("}") -} - -func (b *builder) langIndex(s string) uint16 { - if s == "und" { - return 0 - } - if i, ok := b.lang.find(s); ok { - return uint16(i) - } - return uint16(strToInt(s)) + uint16(len(b.lang.s)) -} - -// inc advances the string to its lexicographical successor. -func inc(s string) string { - const maxTagLength = 4 - var buf [maxTagLength]byte - intToStr(strToInt(strings.ToLower(s))+1, buf[:len(s)]) - for i := 0; i < len(s); i++ { - if s[i] <= 'Z' { - buf[i] -= 'a' - 'A' - } - } - return string(buf[:len(s)]) -} - -func (b *builder) parseIndices() { - meta := b.supp.Metadata - - for k, v := range b.registry { - var ss *stringSet - switch v.typ { - case "language": - if len(k) == 2 || v.suppressScript != "" || v.scope == "special" { - b.lang.add(k) - continue - } else { - ss = &b.langNoIndex - } - case "region": - ss = &b.region - case "script": - ss = &b.script - case "variant": - ss = &b.variant - default: - continue - } - ss.add(k) - } - // Include any language for which there is data. 
- for _, lang := range b.data.Locales() { - if x := b.data.RawLDML(lang); false || - x.LocaleDisplayNames != nil || - x.Characters != nil || - x.Delimiters != nil || - x.Measurement != nil || - x.Dates != nil || - x.Numbers != nil || - x.Units != nil || - x.ListPatterns != nil || - x.Collations != nil || - x.Segmentations != nil || - x.Rbnf != nil || - x.Annotations != nil || - x.Metadata != nil { - - from := strings.Split(lang, "_") - if lang := from[0]; lang != "root" { - b.lang.add(lang) - } - } - } - // Include locales for plural rules, which uses a different structure. - for _, plurals := range b.data.Supplemental().Plurals { - for _, rules := range plurals.PluralRules { - for _, lang := range strings.Split(rules.Locales, " ") { - if lang = strings.Split(lang, "_")[0]; lang != "root" { - b.lang.add(lang) - } - } - } - } - // Include languages in likely subtags. - for _, m := range b.supp.LikelySubtags.LikelySubtag { - from := strings.Split(m.From, "_") - b.lang.add(from[0]) - } - // Include ISO-639 alpha-3 bibliographic entries. - for _, a := range meta.Alias.LanguageAlias { - if a.Reason == "bibliographic" { - b.langNoIndex.add(a.Type) - } - } - // Include regions in territoryAlias (not all are in the IANA registry!) - for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { - if len(reg.Type) == 2 { - b.region.add(reg.Type) - } - } - - for _, s := range b.lang.s { - if len(s) == 3 { - b.langNoIndex.remove(s) - } - } - b.writeConst("numLanguages", len(b.lang.slice())+len(b.langNoIndex.slice())) - b.writeConst("numScripts", len(b.script.slice())) - b.writeConst("numRegions", len(b.region.slice())) - - // Add dummy codes at the start of each list to represent "unspecified". - b.lang.add("---") - b.script.add("----") - b.region.add("---") - - // common locales - b.locale.parse(meta.DefaultContent.Locales) -} - -func (b *builder) computeRegionGroups() { - b.groups = make(map[int]index) - - // Create group indices. - for i := 1; b.region.s[i][0] < 'A'; i++ { // Base M49 indices on regionID. - b.groups[i] = index(len(b.groups)) - } - for _, g := range b.supp.TerritoryContainment.Group { - group := b.region.index(g.Type) - if _, ok := b.groups[group]; !ok { - b.groups[group] = index(len(b.groups)) - } - } - if len(b.groups) > 32 { - log.Fatalf("only 32 groups supported, found %d", len(b.groups)) - } - b.writeConst("nRegionGroups", len(b.groups)) -} - -var langConsts = []string{ - "af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el", "en", "es", - "et", "fa", "fi", "fil", "fr", "gu", "he", "hi", "hr", "hu", "hy", "id", "is", - "it", "ja", "ka", "kk", "km", "kn", "ko", "ky", "lo", "lt", "lv", "mk", "ml", - "mn", "mo", "mr", "ms", "mul", "my", "nb", "ne", "nl", "no", "pa", "pl", "pt", - "ro", "ru", "sh", "si", "sk", "sl", "sq", "sr", "sv", "sw", "ta", "te", "th", - "tl", "tn", "tr", "uk", "ur", "uz", "vi", "zh", "zu", - - // constants for grandfathered tags (if not already defined) - "jbo", "ami", "bnn", "hak", "tlh", "lb", "nv", "pwn", "tao", "tay", "tsu", - "nn", "sfb", "vgt", "sgg", "cmn", "nan", "hsn", -} - -// writeLanguage generates all tables needed for language canonicalization. -func (b *builder) writeLanguage() { - meta := b.supp.Metadata - - b.writeConst("nonCanonicalUnd", b.lang.index("und")) - b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...) 
- b.writeConst("langPrivateStart", b.langIndex("qaa")) - b.writeConst("langPrivateEnd", b.langIndex("qtz")) - - // Get language codes that need to be mapped (overlong 3-letter codes, - // deprecated 2-letter codes, legacy and grandfathered tags.) - langAliasMap := stringSet{} - aliasTypeMap := map[string]langAliasType{} - - // altLangISO3 get the alternative ISO3 names that need to be mapped. - altLangISO3 := stringSet{} - // Add dummy start to avoid the use of index 0. - altLangISO3.add("---") - altLangISO3.updateLater("---", "aa") - - lang := b.lang.clone() - for _, a := range meta.Alias.LanguageAlias { - if a.Replacement == "" { - a.Replacement = "und" - } - // TODO: support mapping to tags - repl := strings.SplitN(a.Replacement, "_", 2)[0] - if a.Reason == "overlong" { - if len(a.Replacement) == 2 && len(a.Type) == 3 { - lang.updateLater(a.Replacement, a.Type) - } - } else if len(a.Type) <= 3 { - switch a.Reason { - case "macrolanguage": - aliasTypeMap[a.Type] = langMacro - case "deprecated": - // handled elsewhere - continue - case "bibliographic", "legacy": - if a.Type == "no" { - continue - } - aliasTypeMap[a.Type] = langLegacy - default: - log.Fatalf("new %s alias: %s", a.Reason, a.Type) - } - langAliasMap.add(a.Type) - langAliasMap.updateLater(a.Type, repl) - } - } - // Manually add the mapping of "nb" (Norwegian) to its macro language. - // This can be removed if CLDR adopts this change. - langAliasMap.add("nb") - langAliasMap.updateLater("nb", "no") - aliasTypeMap["nb"] = langMacro - - for k, v := range b.registry { - // Also add deprecated values for 3-letter ISO codes, which CLDR omits. - if v.typ == "language" && v.deprecated != "" && v.preferred != "" { - langAliasMap.add(k) - langAliasMap.updateLater(k, v.preferred) - aliasTypeMap[k] = langDeprecated - } - } - // Fix CLDR mappings. - lang.updateLater("tl", "tgl") - lang.updateLater("sh", "hbs") - lang.updateLater("mo", "mol") - lang.updateLater("no", "nor") - lang.updateLater("tw", "twi") - lang.updateLater("nb", "nob") - lang.updateLater("ak", "aka") - - // Ensure that each 2-letter code is matched with a 3-letter code. - for _, v := range lang.s[1:] { - s, ok := lang.update[v] - if !ok { - if s, ok = lang.update[langAliasMap.update[v]]; !ok { - continue - } - lang.update[v] = s - } - if v[0] != s[0] { - altLangISO3.add(s) - altLangISO3.updateLater(s, v) - } - } - - // Complete canonialized language tags. - lang.freeze() - for i, v := range lang.s { - // We can avoid these manual entries by using the IANI registry directly. - // Seems easier to update the list manually, as changes are rare. - // The panic in this loop will trigger if we miss an entry. - add := "" - if s, ok := lang.update[v]; ok { - if s[0] == v[0] { - add = s[1:] - } else { - add = string([]byte{0, byte(altLangISO3.index(s))}) - } - } else if len(v) == 3 { - add = "\x00" - } else { - log.Panicf("no data for long form of %q", v) - } - lang.s[i] += add - } - b.writeConst("lang", tag.Index(lang.join())) - - b.writeConst("langNoIndexOffset", len(b.lang.s)) - - // space of all valid 3-letter language identifiers. 
- b.writeBitVector("langNoIndex", b.langNoIndex.slice()) - - altLangIndex := []uint16{} - for i, s := range altLangISO3.slice() { - altLangISO3.s[i] += string([]byte{byte(len(altLangIndex))}) - if i > 0 { - idx := b.lang.index(altLangISO3.update[s]) - altLangIndex = append(altLangIndex, uint16(idx)) - } - } - b.writeConst("altLangISO3", tag.Index(altLangISO3.join())) - b.writeSlice("altLangIndex", altLangIndex) - - b.writeSortedMap("langAliasMap", &langAliasMap, b.langIndex) - types := make([]langAliasType, len(langAliasMap.s)) - for i, s := range langAliasMap.s { - types[i] = aliasTypeMap[s] - } - b.writeSlice("langAliasTypes", types) -} - -var scriptConsts = []string{ - "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy", - "Zzzz", -} - -func (b *builder) writeScript() { - b.writeConsts(b.script.index, scriptConsts...) - b.writeConst("script", tag.Index(b.script.join())) - - supp := make([]uint8, len(b.lang.slice())) - for i, v := range b.lang.slice()[1:] { - if sc := b.registry[v].suppressScript; sc != "" { - supp[i+1] = uint8(b.script.index(sc)) - } - } - b.writeSlice("suppressScript", supp) - - // There is only one deprecated script in CLDR. This value is hard-coded. - // We check here if the code must be updated. - for _, a := range b.supp.Metadata.Alias.ScriptAlias { - if a.Type != "Qaai" { - log.Panicf("unexpected deprecated stript %q", a.Type) - } - } -} - -func parseM49(s string) int16 { - if len(s) == 0 { - return 0 - } - v, err := strconv.ParseUint(s, 10, 10) - failOnError(err) - return int16(v) -} - -var regionConsts = []string{ - "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US", - "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo. -} - -func (b *builder) writeRegion() { - b.writeConsts(b.region.index, regionConsts...) - - isoOffset := b.region.index("AA") - m49map := make([]int16, len(b.region.slice())) - fromM49map := make(map[int16]int) - altRegionISO3 := "" - altRegionIDs := []uint16{} - - b.writeConst("isoRegionOffset", isoOffset) - - // 2-letter region lookup and mapping to numeric codes. - regionISO := b.region.clone() - regionISO.s = regionISO.s[isoOffset:] - regionISO.sorted = false - - regionTypes := make([]byte, len(b.region.s)) - - // Is the region valid BCP 47? - for s, e := range b.registry { - if len(s) == 2 && s == strings.ToUpper(s) { - i := b.region.index(s) - for _, d := range e.description { - if strings.Contains(d, "Private use") { - regionTypes[i] = iso3166UserAssgined - } - } - regionTypes[i] |= bcp47Region - } - } - - // Is the region a valid ccTLD? 
- r := gen.OpenIANAFile("domains/root/db") - defer r.Close() - - buf, err := ioutil.ReadAll(r) - failOnError(err) - re := regexp.MustCompile(`"/domains/root/db/([a-z]{2}).html"`) - for _, m := range re.FindAllSubmatch(buf, -1) { - i := b.region.index(strings.ToUpper(string(m[1]))) - regionTypes[i] |= ccTLD - } - - b.writeSlice("regionTypes", regionTypes) - - iso3Set := make(map[string]int) - update := func(iso2, iso3 string) { - i := regionISO.index(iso2) - if j, ok := iso3Set[iso3]; !ok && iso3[0] == iso2[0] { - regionISO.s[i] += iso3[1:] - iso3Set[iso3] = -1 - } else { - if ok && j >= 0 { - regionISO.s[i] += string([]byte{0, byte(j)}) - } else { - iso3Set[iso3] = len(altRegionISO3) - regionISO.s[i] += string([]byte{0, byte(len(altRegionISO3))}) - altRegionISO3 += iso3 - altRegionIDs = append(altRegionIDs, uint16(isoOffset+i)) - } - } - } - for _, tc := range b.supp.CodeMappings.TerritoryCodes { - i := regionISO.index(tc.Type) + isoOffset - if d := m49map[i]; d != 0 { - log.Panicf("%s found as a duplicate UN.M49 code of %03d", tc.Numeric, d) - } - m49 := parseM49(tc.Numeric) - m49map[i] = m49 - if r := fromM49map[m49]; r == 0 { - fromM49map[m49] = i - } else if r != i { - dep := b.registry[regionISO.s[r-isoOffset]].deprecated - if t := b.registry[tc.Type]; t != nil && dep != "" && (t.deprecated == "" || t.deprecated > dep) { - fromM49map[m49] = i - } - } - } - for _, ta := range b.supp.Metadata.Alias.TerritoryAlias { - if len(ta.Type) == 3 && ta.Type[0] <= '9' && len(ta.Replacement) == 2 { - from := parseM49(ta.Type) - if r := fromM49map[from]; r == 0 { - fromM49map[from] = regionISO.index(ta.Replacement) + isoOffset - } - } - } - for _, tc := range b.supp.CodeMappings.TerritoryCodes { - if len(tc.Alpha3) == 3 { - update(tc.Type, tc.Alpha3) - } - } - // This entries are not included in territoryCodes. Mostly 3-letter variants - // of deleted codes and an entry for QU. - for _, m := range []struct{ iso2, iso3 string }{ - {"CT", "CTE"}, - {"DY", "DHY"}, - {"HV", "HVO"}, - {"JT", "JTN"}, - {"MI", "MID"}, - {"NH", "NHB"}, - {"NQ", "ATN"}, - {"PC", "PCI"}, - {"PU", "PUS"}, - {"PZ", "PCZ"}, - {"RH", "RHO"}, - {"VD", "VDR"}, - {"WK", "WAK"}, - // These three-letter codes are used for others as well. - {"FQ", "ATF"}, - } { - update(m.iso2, m.iso3) - } - for i, s := range regionISO.s { - if len(s) != 4 { - regionISO.s[i] = s + " " - } - } - b.writeConst("regionISO", tag.Index(regionISO.join())) - b.writeConst("altRegionISO3", altRegionISO3) - b.writeSlice("altRegionIDs", altRegionIDs) - - // Create list of deprecated regions. - // TODO: consider inserting SF -> FI. Not included by CLDR, but is the only - // Transitionally-reserved mapping not included. - regionOldMap := stringSet{} - // Include regions in territoryAlias (not all are in the IANA registry!) - for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { - if len(reg.Type) == 2 && reg.Reason == "deprecated" && len(reg.Replacement) == 2 { - regionOldMap.add(reg.Type) - regionOldMap.updateLater(reg.Type, reg.Replacement) - i, _ := regionISO.find(reg.Type) - j, _ := regionISO.find(reg.Replacement) - if k := m49map[i+isoOffset]; k == 0 { - m49map[i+isoOffset] = m49map[j+isoOffset] - } - } - } - b.writeSortedMap("regionOldMap", ®ionOldMap, func(s string) uint16 { - return uint16(b.region.index(s)) - }) - // 3-digit region lookup, groupings. 
- for i := 1; i < isoOffset; i++ { - m := parseM49(b.region.s[i]) - m49map[i] = m - fromM49map[m] = i - } - b.writeSlice("m49", m49map) - - const ( - searchBits = 7 - regionBits = 9 - ) - if len(m49map) >= 1<<regionBits { - log.Fatalf("Maximum number of regions exceeded: %d > %d", len(m49map), 1<<regionBits) - } - m49Index := [9]int16{} - fromM49 := []uint16{} - m49 := []int{} - for k, _ := range fromM49map { - m49 = append(m49, int(k)) - } - sort.Ints(m49) - for _, k := range m49[1:] { - val := (k & (1<<searchBits - 1)) << regionBits - fromM49 = append(fromM49, uint16(val|fromM49map[int16(k)])) - m49Index[1:][k>>searchBits] = int16(len(fromM49)) - } - b.writeSlice("m49Index", m49Index) - b.writeSlice("fromM49", fromM49) -} - -const ( - // TODO: put these lists in regionTypes as user data? Could be used for - // various optimizations and refinements and could be exposed in the API. - iso3166Except = "AC CP DG EA EU FX IC SU TA UK" - iso3166Trans = "AN BU CS NT TP YU ZR" // SF is not in our set of Regions. - // DY and RH are actually not deleted, but indeterminately reserved. - iso3166DelCLDR = "CT DD DY FQ HV JT MI NH NQ PC PU PZ RH VD WK YD" -) - -const ( - iso3166UserAssgined = 1 << iota - ccTLD - bcp47Region -) - -func find(list []string, s string) int { - for i, t := range list { - if t == s { - return i - } - } - return -1 -} - -// writeVariants generates per-variant information and creates a map from variant -// name to index value. We assign index values such that sorting multiple -// variants by index value will result in the correct order. -// There are two types of variants: specialized and general. Specialized variants -// are only applicable to certain language or language-script pairs. Generalized -// variants apply to any language. Generalized variants always sort after -// specialized variants. We will therefore always assign a higher index value -// to a generalized variant than any other variant. Generalized variants are -// sorted alphabetically among themselves. -// Specialized variants may also sort after other specialized variants. Such -// variants will be ordered after any of the variants they may follow. -// We assume that if a variant x is followed by a variant y, then for any prefix -// p of x, p-x is a prefix of y. This allows us to order tags based on the -// maximum of the length of any of its prefixes. -// TODO: it is possible to define a set of Prefix values on variants such that -// a total order cannot be defined to the point that this algorithm breaks. -// In other words, we cannot guarantee the same order of variants for the -// future using the same algorithm or for non-compliant combinations of -// variants. For this reason, consider using simple alphabetic sorting -// of variants and ignore Prefix restrictions altogether. -func (b *builder) writeVariant() { - generalized := stringSet{} - specialized := stringSet{} - specializedExtend := stringSet{} - // Collate the variants by type and check assumptions. - for _, v := range b.variant.slice() { - e := b.registry[v] - if len(e.prefix) == 0 { - generalized.add(v) - continue - } - c := strings.Split(e.prefix[0], "-") - hasScriptOrRegion := false - if len(c) > 1 { - _, hasScriptOrRegion = b.script.find(c[1]) - if !hasScriptOrRegion { - _, hasScriptOrRegion = b.region.find(c[1]) - - } - } - if len(c) == 1 || len(c) == 2 && hasScriptOrRegion { - // Variant is preceded by a language. - specialized.add(v) - continue - } - // Variant is preceded by another variant. 
- specializedExtend.add(v) - prefix := c[0] + "-" - if hasScriptOrRegion { - prefix += c[1] - } - for _, p := range e.prefix { - // Verify that the prefix minus the last element is a prefix of the - // predecessor element. - i := strings.LastIndex(p, "-") - pred := b.registry[p[i+1:]] - if find(pred.prefix, p[:i]) < 0 { - log.Fatalf("prefix %q for variant %q not consistent with predecessor spec", p, v) - } - // The sorting used below does not work in the general case. It works - // if we assume that variants that may be followed by others only have - // prefixes of the same length. Verify this. - count := strings.Count(p[:i], "-") - for _, q := range pred.prefix { - if c := strings.Count(q, "-"); c != count { - log.Fatalf("variant %q preceding %q has a prefix %q of size %d; want %d", p[i+1:], v, q, c, count) - } - } - if !strings.HasPrefix(p, prefix) { - log.Fatalf("prefix %q of variant %q should start with %q", p, v, prefix) - } - } - } - - // Sort extended variants. - a := specializedExtend.s - less := func(v, w string) bool { - // Sort by the maximum number of elements. - maxCount := func(s string) (max int) { - for _, p := range b.registry[s].prefix { - if c := strings.Count(p, "-"); c > max { - max = c - } - } - return - } - if cv, cw := maxCount(v), maxCount(w); cv != cw { - return cv < cw - } - // Sort by name as tie breaker. - return v < w - } - sort.Sort(funcSorter{less, sort.StringSlice(a)}) - specializedExtend.frozen = true - - // Create index from variant name to index. - variantIndex := make(map[string]uint8) - add := func(s []string) { - for _, v := range s { - variantIndex[v] = uint8(len(variantIndex)) - } - } - add(specialized.slice()) - add(specializedExtend.s) - numSpecialized := len(variantIndex) - add(generalized.slice()) - if n := len(variantIndex); n > 255 { - log.Fatalf("maximum number of variants exceeded: was %d; want <= 255", n) - } - b.writeMap("variantIndex", variantIndex) - b.writeConst("variantNumSpecialized", numSpecialized) -} - -func (b *builder) writeLanguageInfo() { -} - -// writeLikelyData writes tables that are used both for finding parent relations and for -// language matching. Each entry contains additional bits to indicate the status of the -// data to know when it cannot be used for parent relations. -func (b *builder) writeLikelyData() { - const ( - isList = 1 << iota - scriptInFrom - regionInFrom - ) - type ( // generated types - likelyScriptRegion struct { - region uint16 - script uint8 - flags uint8 - } - likelyLangScript struct { - lang uint16 - script uint8 - flags uint8 - } - likelyLangRegion struct { - lang uint16 - region uint16 - } - // likelyTag is used for getting likely tags for group regions, where - // the likely region might be a region contained in the group. 
- likelyTag struct { - lang uint16 - region uint16 - script uint8 - } - ) - var ( // generated variables - likelyRegionGroup = make([]likelyTag, len(b.groups)) - likelyLang = make([]likelyScriptRegion, len(b.lang.s)) - likelyRegion = make([]likelyLangScript, len(b.region.s)) - likelyScript = make([]likelyLangRegion, len(b.script.s)) - likelyLangList = []likelyScriptRegion{} - likelyRegionList = []likelyLangScript{} - ) - type fromTo struct { - from, to []string - } - langToOther := map[int][]fromTo{} - regionToOther := map[int][]fromTo{} - for _, m := range b.supp.LikelySubtags.LikelySubtag { - from := strings.Split(m.From, "_") - to := strings.Split(m.To, "_") - if len(to) != 3 { - log.Fatalf("invalid number of subtags in %q: found %d, want 3", m.To, len(to)) - } - if len(from) > 3 { - log.Fatalf("invalid number of subtags: found %d, want 1-3", len(from)) - } - if from[0] != to[0] && from[0] != "und" { - log.Fatalf("unexpected language change in expansion: %s -> %s", from, to) - } - if len(from) == 3 { - if from[2] != to[2] { - log.Fatalf("unexpected region change in expansion: %s -> %s", from, to) - } - if from[0] != "und" { - log.Fatalf("unexpected fully specified from tag: %s -> %s", from, to) - } - } - if len(from) == 1 || from[0] != "und" { - id := 0 - if from[0] != "und" { - id = b.lang.index(from[0]) - } - langToOther[id] = append(langToOther[id], fromTo{from, to}) - } else if len(from) == 2 && len(from[1]) == 4 { - sid := b.script.index(from[1]) - likelyScript[sid].lang = uint16(b.langIndex(to[0])) - likelyScript[sid].region = uint16(b.region.index(to[2])) - } else { - r := b.region.index(from[len(from)-1]) - if id, ok := b.groups[r]; ok { - if from[0] != "und" { - log.Fatalf("region changed unexpectedly: %s -> %s", from, to) - } - likelyRegionGroup[id].lang = uint16(b.langIndex(to[0])) - likelyRegionGroup[id].script = uint8(b.script.index(to[1])) - likelyRegionGroup[id].region = uint16(b.region.index(to[2])) - } else { - regionToOther[r] = append(regionToOther[r], fromTo{from, to}) - } - } - } - b.writeType(likelyLangRegion{}) - b.writeSlice("likelyScript", likelyScript) - - for id := range b.lang.s { - list := langToOther[id] - if len(list) == 1 { - likelyLang[id].region = uint16(b.region.index(list[0].to[2])) - likelyLang[id].script = uint8(b.script.index(list[0].to[1])) - } else if len(list) > 1 { - likelyLang[id].flags = isList - likelyLang[id].region = uint16(len(likelyLangList)) - likelyLang[id].script = uint8(len(list)) - for _, x := range list { - flags := uint8(0) - if len(x.from) > 1 { - if x.from[1] == x.to[2] { - flags = regionInFrom - } else { - flags = scriptInFrom - } - } - likelyLangList = append(likelyLangList, likelyScriptRegion{ - region: uint16(b.region.index(x.to[2])), - script: uint8(b.script.index(x.to[1])), - flags: flags, - }) - } - } - } - // TODO: merge suppressScript data with this table. 
- b.writeType(likelyScriptRegion{}) - b.writeSlice("likelyLang", likelyLang) - b.writeSlice("likelyLangList", likelyLangList) - - for id := range b.region.s { - list := regionToOther[id] - if len(list) == 1 { - likelyRegion[id].lang = uint16(b.langIndex(list[0].to[0])) - likelyRegion[id].script = uint8(b.script.index(list[0].to[1])) - if len(list[0].from) > 2 { - likelyRegion[id].flags = scriptInFrom - } - } else if len(list) > 1 { - likelyRegion[id].flags = isList - likelyRegion[id].lang = uint16(len(likelyRegionList)) - likelyRegion[id].script = uint8(len(list)) - for i, x := range list { - if len(x.from) == 2 && i != 0 || i > 0 && len(x.from) != 3 { - log.Fatalf("unspecified script must be first in list: %v at %d", x.from, i) - } - x := likelyLangScript{ - lang: uint16(b.langIndex(x.to[0])), - script: uint8(b.script.index(x.to[1])), - } - if len(list[0].from) > 2 { - x.flags = scriptInFrom - } - likelyRegionList = append(likelyRegionList, x) - } - } - } - b.writeType(likelyLangScript{}) - b.writeSlice("likelyRegion", likelyRegion) - b.writeSlice("likelyRegionList", likelyRegionList) - - b.writeType(likelyTag{}) - b.writeSlice("likelyRegionGroup", likelyRegionGroup) -} - -type mutualIntelligibility struct { - want, have uint16 - conf uint8 - oneway bool -} - -type scriptIntelligibility struct { - lang uint16 // langID or 0 if * - want, have uint8 - conf uint8 -} - -type sortByConf []mutualIntelligibility - -func (l sortByConf) Less(a, b int) bool { - return l[a].conf > l[b].conf -} - -func (l sortByConf) Swap(a, b int) { - l[a], l[b] = l[b], l[a] -} - -func (l sortByConf) Len() int { - return len(l) -} - -// toConf converts a percentage value [0, 100] to a confidence class. -func toConf(pct uint8) uint8 { - switch { - case pct == 100: - return 3 // Exact - case pct >= 90: - return 2 // High - case pct > 50: - return 1 // Low - default: - return 0 // No - } -} - -// writeMatchData writes tables with languages and scripts for which there is -// mutual intelligibility. The data is based on CLDR's languageMatching data. -// Note that we use a different algorithm than the one defined by CLDR and that -// we slightly modify the data. For example, we convert scores to confidence levels. -// We also drop all region-related data as we use a different algorithm to -// determine region equivalence. -func (b *builder) writeMatchData() { - b.writeType(mutualIntelligibility{}) - b.writeType(scriptIntelligibility{}) - lm := b.supp.LanguageMatching.LanguageMatches - cldr.MakeSlice(&lm).SelectAnyOf("type", "written") - - matchLang := []mutualIntelligibility{} - matchScript := []scriptIntelligibility{} - // Convert the languageMatch entries in lists keyed by desired language. - for _, m := range lm[0].LanguageMatch { - // Different versions of CLDR use different separators. - desired := strings.Replace(m.Desired, "-", "_", -1) - supported := strings.Replace(m.Supported, "-", "_", -1) - d := strings.Split(desired, "_") - s := strings.Split(supported, "_") - if len(d) != len(s) || len(d) > 2 { - // Skip all entries with regions and work around CLDR bug. - continue - } - pct, _ := strconv.ParseInt(m.Percent, 10, 8) - if len(d) == 2 && d[0] == s[0] && len(d[1]) == 4 { - // language-script pair. 
- lang := uint16(0) - if d[0] != "*" { - lang = uint16(b.langIndex(d[0])) - } - matchScript = append(matchScript, scriptIntelligibility{ - lang: lang, - want: uint8(b.script.index(d[1])), - have: uint8(b.script.index(s[1])), - conf: toConf(uint8(pct)), - }) - if m.Oneway != "true" { - matchScript = append(matchScript, scriptIntelligibility{ - lang: lang, - want: uint8(b.script.index(s[1])), - have: uint8(b.script.index(d[1])), - conf: toConf(uint8(pct)), - }) - } - } else if len(d) == 1 && d[0] != "*" { - if pct == 100 { - // nb == no is already handled by macro mapping. Check there - // really is only this case. - if d[0] != "no" || s[0] != "nb" { - log.Fatalf("unhandled equivalence %s == %s", s[0], d[0]) - } - continue - } - matchLang = append(matchLang, mutualIntelligibility{ - want: uint16(b.langIndex(d[0])), - have: uint16(b.langIndex(s[0])), - conf: uint8(pct), - oneway: m.Oneway == "true", - }) - } else { - // TODO: Handle other mappings. - a := []string{"*;*", "*_*;*_*", "es_MX;es_419"} - s := strings.Join([]string{desired, supported}, ";") - if i := sort.SearchStrings(a, s); i == len(a) || a[i] != s { - log.Printf("%q not handled", s) - } - } - } - sort.Stable(sortByConf(matchLang)) - // collapse percentage into confidence classes - for i, m := range matchLang { - matchLang[i].conf = toConf(m.conf) - } - b.writeSlice("matchLang", matchLang) - b.writeSlice("matchScript", matchScript) -} - -func (b *builder) writeRegionInclusionData() { - var ( - // mm holds for each group the set of groups with a distance of 1. - mm = make(map[int][]index) - - // containment holds for each group the transitive closure of - // containment of other groups. - containment = make(map[index][]index) - ) - for _, g := range b.supp.TerritoryContainment.Group { - group := b.region.index(g.Type) - groupIdx := b.groups[group] - for _, mem := range strings.Split(g.Contains, " ") { - r := b.region.index(mem) - mm[r] = append(mm[r], groupIdx) - if g, ok := b.groups[r]; ok { - mm[group] = append(mm[group], g) - containment[groupIdx] = append(containment[groupIdx], g) - } - } - } - - regionContainment := make([]uint32, len(b.groups)) - for _, g := range b.groups { - l := containment[g] - - // Compute the transitive closure of containment. - for i := 0; i < len(l); i++ { - l = append(l, containment[l[i]]...) - } - - // Compute the bitmask. - regionContainment[g] = 1 << g - for _, v := range l { - regionContainment[g] |= 1 << v - } - // log.Printf("%d: %X", g, regionContainment[g]) - } - b.writeSlice("regionContainment", regionContainment) - - regionInclusion := make([]uint8, len(b.region.s)) - bvs := make(map[uint32]index) - // Make the first bitvector positions correspond with the groups. - for r, i := range b.groups { - bv := uint32(1 << i) - for _, g := range mm[r] { - bv |= 1 << g - } - bvs[bv] = i - regionInclusion[r] = uint8(bvs[bv]) - } - for r := 1; r < len(b.region.s); r++ { - if _, ok := b.groups[r]; !ok { - bv := uint32(0) - for _, g := range mm[r] { - bv |= 1 << g - } - if bv == 0 { - // Pick the world for unspecified regions. - bv = 1 << b.groups[b.region.index("001")] - } - if _, ok := bvs[bv]; !ok { - bvs[bv] = index(len(bvs)) - } - regionInclusion[r] = uint8(bvs[bv]) - } - } - b.writeSlice("regionInclusion", regionInclusion) - regionInclusionBits := make([]uint32, len(bvs)) - for k, v := range bvs { - regionInclusionBits[v] = uint32(k) - } - // Add bit vectors for increasingly large distances until a fixed point is reached. 
- regionInclusionNext := []uint8{} - for i := 0; i < len(regionInclusionBits); i++ { - bits := regionInclusionBits[i] - next := bits - for i := uint(0); i < uint(len(b.groups)); i++ { - if bits&(1<<i) != 0 { - next |= regionInclusionBits[i] - } - } - if _, ok := bvs[next]; !ok { - bvs[next] = index(len(bvs)) - regionInclusionBits = append(regionInclusionBits, next) - } - regionInclusionNext = append(regionInclusionNext, uint8(bvs[next])) - } - b.writeSlice("regionInclusionBits", regionInclusionBits) - b.writeSlice("regionInclusionNext", regionInclusionNext) -} - -type parentRel struct { - lang uint16 - script uint8 - maxScript uint8 - toRegion uint16 - fromRegion []uint16 -} - -func (b *builder) writeParents() { - b.writeType(parentRel{}) - - parents := []parentRel{} - - // Construct parent overrides. - n := 0 - for _, p := range b.data.Supplemental().ParentLocales.ParentLocale { - // Skipping non-standard scripts to root is implemented using addTags. - if p.Parent == "root" { - continue - } - - sub := strings.Split(p.Parent, "_") - parent := parentRel{lang: b.langIndex(sub[0])} - if len(sub) == 2 { - // TODO: check that all undefined scripts are indeed Latn in these - // cases. - parent.maxScript = uint8(b.script.index("Latn")) - parent.toRegion = uint16(b.region.index(sub[1])) - } else { - parent.script = uint8(b.script.index(sub[1])) - parent.maxScript = parent.script - parent.toRegion = uint16(b.region.index(sub[2])) - } - for _, c := range strings.Split(p.Locales, " ") { - region := b.region.index(c[strings.LastIndex(c, "_")+1:]) - parent.fromRegion = append(parent.fromRegion, uint16(region)) - } - parents = append(parents, parent) - n += len(parent.fromRegion) - } - b.writeSliceAddSize("parents", n*2, parents) -} - -func main() { - gen.Init() - - gen.Repackage("gen_common.go", "common.go", "language") - - w := gen.NewCodeWriter() - defer w.WriteGoFile("tables.go", "language") - - fmt.Fprintln(w, `import "golang.org/x/text/internal/tag"`) - - b := newBuilder(w) - gen.WriteCLDRVersion(w) - - b.parseIndices() - b.writeType(fromTo{}) - b.writeLanguage() - b.writeScript() - b.writeRegion() - b.writeVariant() - // TODO: b.writeLocale() - b.computeRegionGroups() - b.writeLikelyData() - b.writeMatchData() - b.writeRegionInclusionData() - b.writeParents() -} diff --git a/vendor/golang.org/x/text/language/match.go b/vendor/golang.org/x/text/language/match.go deleted file mode 100644 index eec72bcc1..000000000 --- a/vendor/golang.org/x/text/language/match.go +++ /dev/null @@ -1,840 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package language - -import "errors" - -// Matcher is the interface that wraps the Match method. -// -// Match returns the best match for any of the given tags, along with -// a unique index associated with the returned tag and a confidence -// score. -type Matcher interface { - Match(t ...Tag) (tag Tag, index int, c Confidence) -} - -// Comprehends reports the confidence score for a speaker of a given language -// to being able to comprehend the written form of an alternative language. -func Comprehends(speaker, alternative Tag) Confidence { - _, _, c := NewMatcher([]Tag{alternative}).Match(speaker) - return c -} - -// NewMatcher returns a Matcher that matches an ordered list of preferred tags -// against a list of supported tags based on written intelligibility, closeness -// of dialect, equivalence of subtags and various other rules. 
It is initialized -// with the list of supported tags. The first element is used as the default -// value in case no match is found. -// -// Its Match method matches the first of the given Tags to reach a certain -// confidence threshold. The tags passed to Match should therefore be specified -// in order of preference. Extensions are ignored for matching. -// -// The index returned by the Match method corresponds to the index of the -// matched tag in t, but is augmented with the Unicode extension ('u')of the -// corresponding preferred tag. This allows user locale options to be passed -// transparently. -func NewMatcher(t []Tag) Matcher { - return newMatcher(t) -} - -func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) { - match, w, c := m.getBest(want...) - if match == nil { - t = m.default_.tag - } else { - t, index = match.tag, match.index - } - // Copy options from the user-provided tag into the result tag. This is hard - // to do after the fact, so we do it here. - // TODO: consider also adding in variants that are compatible with the - // matched language. - // TODO: Add back region if it is non-ambiguous? Or create another tag to - // preserve the region? - if u, ok := w.Extension('u'); ok { - t, _ = Raw.Compose(t, u) - } - return t, index, c -} - -type scriptRegionFlags uint8 - -const ( - isList = 1 << iota - scriptInFrom - regionInFrom -) - -func (t *Tag) setUndefinedLang(id langID) { - if t.lang == 0 { - t.lang = id - } -} - -func (t *Tag) setUndefinedScript(id scriptID) { - if t.script == 0 { - t.script = id - } -} - -func (t *Tag) setUndefinedRegion(id regionID) { - if t.region == 0 || t.region.contains(id) { - t.region = id - } -} - -// ErrMissingLikelyTagsData indicates no information was available -// to compute likely values of missing tags. -var ErrMissingLikelyTagsData = errors.New("missing likely tags data") - -// addLikelySubtags sets subtags to their most likely value, given the locale. -// In most cases this means setting fields for unknown values, but in some -// cases it may alter a value. It returns a ErrMissingLikelyTagsData error -// if the given locale cannot be expanded. -func (t Tag) addLikelySubtags() (Tag, error) { - id, err := addTags(t) - if err != nil { - return t, err - } else if id.equalTags(t) { - return t, nil - } - id.remakeString() - return id, nil -} - -// specializeRegion attempts to specialize a group region. -func specializeRegion(t *Tag) bool { - if i := regionInclusion[t.region]; i < nRegionGroups { - x := likelyRegionGroup[i] - if langID(x.lang) == t.lang && scriptID(x.script) == t.script { - t.region = regionID(x.region) - } - return true - } - return false -} - -func addTags(t Tag) (Tag, error) { - // We leave private use identifiers alone. - if t.private() { - return t, nil - } - if t.script != 0 && t.region != 0 { - if t.lang != 0 { - // already fully specified - specializeRegion(&t) - return t, nil - } - // Search matches for und-script-region. Note that for these cases - // region will never be a group so there is no need to check for this. - list := likelyRegion[t.region : t.region+1] - if x := list[0]; x.flags&isList != 0 { - list = likelyRegionList[x.lang : x.lang+uint16(x.script)] - } - for _, x := range list { - // Deviating from the spec. See match_test.go for details. - if scriptID(x.script) == t.script { - t.setUndefinedLang(langID(x.lang)) - return t, nil - } - } - } - if t.lang != 0 { - // Search matches for lang-script and lang-region, where lang != und. 
- if t.lang < langNoIndexOffset { - x := likelyLang[t.lang] - if x.flags&isList != 0 { - list := likelyLangList[x.region : x.region+uint16(x.script)] - if t.script != 0 { - for _, x := range list { - if scriptID(x.script) == t.script && x.flags&scriptInFrom != 0 { - t.setUndefinedRegion(regionID(x.region)) - return t, nil - } - } - } else if t.region != 0 { - count := 0 - goodScript := true - tt := t - for _, x := range list { - // We visit all entries for which the script was not - // defined, including the ones where the region was not - // defined. This allows for proper disambiguation within - // regions. - if x.flags&scriptInFrom == 0 && t.region.contains(regionID(x.region)) { - tt.region = regionID(x.region) - tt.setUndefinedScript(scriptID(x.script)) - goodScript = goodScript && tt.script == scriptID(x.script) - count++ - } - } - if count == 1 { - return tt, nil - } - // Even if we fail to find a unique Region, we might have - // an unambiguous script. - if goodScript { - t.script = tt.script - } - } - } - } - } else { - // Search matches for und-script. - if t.script != 0 { - x := likelyScript[t.script] - if x.region != 0 { - t.setUndefinedRegion(regionID(x.region)) - t.setUndefinedLang(langID(x.lang)) - return t, nil - } - } - // Search matches for und-region. If und-script-region exists, it would - // have been found earlier. - if t.region != 0 { - if i := regionInclusion[t.region]; i < nRegionGroups { - x := likelyRegionGroup[i] - if x.region != 0 { - t.setUndefinedLang(langID(x.lang)) - t.setUndefinedScript(scriptID(x.script)) - t.region = regionID(x.region) - } - } else { - x := likelyRegion[t.region] - if x.flags&isList != 0 { - x = likelyRegionList[x.lang] - } - if x.script != 0 && x.flags != scriptInFrom { - t.setUndefinedLang(langID(x.lang)) - t.setUndefinedScript(scriptID(x.script)) - return t, nil - } - } - } - } - - // Search matches for lang. - if t.lang < langNoIndexOffset { - x := likelyLang[t.lang] - if x.flags&isList != 0 { - x = likelyLangList[x.region] - } - if x.region != 0 { - t.setUndefinedScript(scriptID(x.script)) - t.setUndefinedRegion(regionID(x.region)) - } - specializeRegion(&t) - if t.lang == 0 { - t.lang = _en // default language - } - return t, nil - } - return t, ErrMissingLikelyTagsData -} - -func (t *Tag) setTagsFrom(id Tag) { - t.lang = id.lang - t.script = id.script - t.region = id.region -} - -// minimize removes the region or script subtags from t such that -// t.addLikelySubtags() == t.minimize().addLikelySubtags(). -func (t Tag) minimize() (Tag, error) { - t, err := minimizeTags(t) - if err != nil { - return t, err - } - t.remakeString() - return t, nil -} - -// minimizeTags mimics the behavior of the ICU 51 C implementation. -func minimizeTags(t Tag) (Tag, error) { - if t.equalTags(und) { - return t, nil - } - max, err := addTags(t) - if err != nil { - return t, err - } - for _, id := range [...]Tag{ - {lang: t.lang}, - {lang: t.lang, region: t.region}, - {lang: t.lang, script: t.script}, - } { - if x, err := addTags(id); err == nil && max.equalTags(x) { - t.setTagsFrom(id) - break - } - } - return t, nil -} - -// Tag Matching -// CLDR defines an algorithm for finding the best match between two sets of language -// tags. The basic algorithm defines how to score a possible match and then find -// the match with the best score -// (see http://www.unicode.org/reports/tr35/#LanguageMatching). -// Using scoring has several disadvantages. 
The scoring obfuscates the importance of -// the various factors considered, making the algorithm harder to understand. Using -// scoring also requires the full score to be computed for each pair of tags. -// -// We will use a different algorithm which aims to have the following properties: -// - clarity on the precedence of the various selection factors, and -// - improved performance by allowing early termination of a comparison. -// -// Matching algorithm (overview) -// Input: -// - supported: a set of supported tags -// - default: the default tag to return in case there is no match -// - desired: list of desired tags, ordered by preference, starting with -// the most-preferred. -// -// Algorithm: -// 1) Set the best match to the lowest confidence level -// 2) For each tag in "desired": -// a) For each tag in "supported": -// 1) compute the match between the two tags. -// 2) if the match is better than the previous best match, replace it -// with the new match. (see next section) -// b) if the current best match is above a certain threshold, return this -// match without proceeding to the next tag in "desired". [See Note 1] -// 3) If the best match so far is below a certain threshold, return "default". -// -// Ranking: -// We use two phases to determine whether one pair of tags are a better match -// than another pair of tags. First, we determine a rough confidence level. If the -// levels are different, the one with the highest confidence wins. -// Second, if the rough confidence levels are identical, we use a set of tie-breaker -// rules. -// -// The confidence level of matching a pair of tags is determined by finding the -// lowest confidence level of any matches of the corresponding subtags (the -// result is deemed as good as its weakest link). -// We define the following levels: -// Exact - An exact match of a subtag, before adding likely subtags. -// MaxExact - An exact match of a subtag, after adding likely subtags. -// [See Note 2]. -// High - High level of mutual intelligibility between different subtag -// variants. -// Low - Low level of mutual intelligibility between different subtag -// variants. -// No - No mutual intelligibility. -// -// The following levels can occur for each type of subtag: -// Base: Exact, MaxExact, High, Low, No -// Script: Exact, MaxExact [see Note 3], Low, No -// Region: Exact, MaxExact, High -// Variant: Exact, High -// Private: Exact, No -// -// Any result with a confidence level of Low or higher is deemed a possible match. -// Once a desired tag matches any of the supported tags with a level of MaxExact -// or higher, the next desired tag is not considered (see Step 2.b). -// Note that CLDR provides languageMatching data that defines close equivalence -// classes for base languages, scripts and regions. -// -// Tie-breaking -// If we get the same confidence level for two matches, we apply a sequence of -// tie-breaking rules. The first that succeeds defines the result. The rules are -// applied in the following order. -// 1) Original language was defined and was identical. -// 2) Original region was defined and was identical. -// 3) Distance between two maximized regions was the smallest. -// 4) Original script was defined and was identical. -// 5) Distance from want tag to have tag using the parent relation [see Note 5.] -// If there is still no winner after these rules are applied, the first match -// found wins. 
-// -// Notes: -// [1] Note that even if we may not have a perfect match, if a match is above a -// certain threshold, it is considered a better match than any other match -// to a tag later in the list of preferred language tags. -// [2] In practice, as matching of Exact is done in a separate phase from -// matching the other levels, we reuse the Exact level to mean MaxExact in -// the second phase. As a consequence, we only need the levels defined by -// the Confidence type. The MaxExact confidence level is mapped to High in -// the public API. -// [3] We do not differentiate between maximized script values that were derived -// from suppressScript versus most likely tag data. We determined that in -// ranking the two, one ranks just after the other. Moreover, the two cannot -// occur concurrently. As a consequence, they are identical for practical -// purposes. -// [4] In case of deprecated, macro-equivalents and legacy mappings, we assign -// the MaxExact level to allow iw vs he to still be a closer match than -// en-AU vs en-US, for example. -// [5] In CLDR a locale inherits fields that are unspecified for this locale -// from its parent. Therefore, if a locale is a parent of another locale, -// it is a strong measure for closeness, especially when no other tie -// breaker rule applies. One could also argue it is inconsistent, for -// example, when pt-AO matches pt (which CLDR equates with pt-BR), even -// though its parent is pt-PT according to the inheritance rules. -// -// Implementation Details: -// There are several performance considerations worth pointing out. Most notably, -// we preprocess as much as possible (within reason) at the time of creation of a -// matcher. This includes: -// - creating a per-language map, which includes data for the raw base language -// and its canonicalized variant (if applicable), -// - expanding entries for the equivalence classes defined in CLDR's -// languageMatch data. -// The per-language map ensures that typically only a very small number of tags -// need to be considered. The pre-expansion of canonicalized subtags and -// equivalence classes reduces the amount of map lookups that need to be done at -// runtime. - -// matcher keeps a set of supported language tags, indexed by language. -type matcher struct { - default_ *haveTag - index map[langID]*matchHeader - passSettings bool -} - -// matchHeader has the lists of tags for exact matches and matches based on -// maximized and canonicalized tags for a given language. -type matchHeader struct { - exact []haveTag - max []haveTag -} - -// haveTag holds a supported Tag and its maximized script and region. The maximized -// or canonicalized language is not stored as it is not needed during matching. -type haveTag struct { - tag Tag - - // index of this tag in the original list of supported tags. - index int - - // conf is the maximum confidence that can result from matching this haveTag. - // When conf < Exact this means it was inserted after applying a CLDR equivalence rule. - conf Confidence - - // Maximized region and script. - maxRegion regionID - maxScript scriptID - - // altScript may be checked as an alternative match to maxScript. If altScript - // matches, the confidence level for this match is Low. Theoretically there - // could be multiple alternative scripts. This does not occur in practice. - altScript scriptID - - // nextMax is the index of the next haveTag with the same maximized tags. 
- nextMax uint16 -} - -func makeHaveTag(tag Tag, index int) (haveTag, langID) { - max := tag - if tag.lang != 0 { - max, _ = max.canonicalize(All) - max, _ = addTags(max) - max.remakeString() - } - return haveTag{tag, index, Exact, max.region, max.script, altScript(max.lang, max.script), 0}, max.lang -} - -// altScript returns an alternative script that may match the given script with -// a low confidence. At the moment, the langMatch data allows for at most one -// script to map to another and we rely on this to keep the code simple. -func altScript(l langID, s scriptID) scriptID { - for _, alt := range matchScript { - if (alt.lang == 0 || langID(alt.lang) == l) && scriptID(alt.have) == s { - return scriptID(alt.want) - } - } - return 0 -} - -// addIfNew adds a haveTag to the list of tags only if it is a unique tag. -// Tags that have the same maximized values are linked by index. -func (h *matchHeader) addIfNew(n haveTag, exact bool) { - // Don't add new exact matches. - for _, v := range h.exact { - if v.tag.equalsRest(n.tag) { - return - } - } - if exact { - h.exact = append(h.exact, n) - } - // Allow duplicate maximized tags, but create a linked list to allow quickly - // comparing the equivalents and bail out. - for i, v := range h.max { - if v.maxScript == n.maxScript && - v.maxRegion == n.maxRegion && - v.tag.variantOrPrivateTagStr() == n.tag.variantOrPrivateTagStr() { - for h.max[i].nextMax != 0 { - i = int(h.max[i].nextMax) - } - h.max[i].nextMax = uint16(len(h.max)) - break - } - } - h.max = append(h.max, n) -} - -// header returns the matchHeader for the given language. It creates one if -// it doesn't already exist. -func (m *matcher) header(l langID) *matchHeader { - if h := m.index[l]; h != nil { - return h - } - h := &matchHeader{} - m.index[l] = h - return h -} - -// newMatcher builds an index for the given supported tags and returns it as -// a matcher. It also expands the index by considering various equivalence classes -// for a given tag. -func newMatcher(supported []Tag) *matcher { - m := &matcher{ - index: make(map[langID]*matchHeader), - } - if len(supported) == 0 { - m.default_ = &haveTag{} - return m - } - // Add supported languages to the index. Add exact matches first to give - // them precedence. - for i, tag := range supported { - pair, _ := makeHaveTag(tag, i) - m.header(tag.lang).addIfNew(pair, true) - } - m.default_ = &m.header(supported[0].lang).exact[0] - for i, tag := range supported { - pair, max := makeHaveTag(tag, i) - if max != tag.lang { - m.header(max).addIfNew(pair, false) - } - } - - // update is used to add indexes in the map for equivalent languages. - // If force is true, the update will also apply to derived entries. To - // avoid applying a "transitive closure", use false. - update := func(want, have uint16, conf Confidence, force bool) { - if hh := m.index[langID(have)]; hh != nil { - if !force && len(hh.exact) == 0 { - return - } - hw := m.header(langID(want)) - for _, v := range hh.max { - if conf < v.conf { - v.conf = conf - } - v.nextMax = 0 // this value needs to be recomputed - if v.altScript != 0 { - v.altScript = altScript(langID(want), v.maxScript) - } - hw.addIfNew(v, conf == Exact && len(hh.exact) > 0) - } - } - } - - // Add entries for languages with mutual intelligibility as defined by CLDR's - // languageMatch data. 
- for _, ml := range matchLang { - update(ml.want, ml.have, Confidence(ml.conf), false) - if !ml.oneway { - update(ml.have, ml.want, Confidence(ml.conf), false) - } - } - - // Add entries for possible canonicalizations. This is an optimization to - // ensure that only one map lookup needs to be done at runtime per desired tag. - // First we match deprecated equivalents. If they are perfect equivalents - // (their canonicalization simply substitutes a different language code, but - // nothing else), the match confidence is Exact, otherwise it is High. - for i, lm := range langAliasMap { - if lm.from == _sh { - continue - } - - // If deprecated codes match and there is no fiddling with the script or - // or region, we consider it an exact match. - conf := Exact - if langAliasTypes[i] != langMacro { - if !isExactEquivalent(langID(lm.from)) { - conf = High - } - update(lm.to, lm.from, conf, true) - } - update(lm.from, lm.to, conf, true) - } - return m -} - -// getBest gets the best matching tag in m for any of the given tags, taking into -// account the order of preference of the given tags. -func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) { - best := bestMatch{} - for _, w := range want { - var max Tag - // Check for exact match first. - h := m.index[w.lang] - if w.lang != 0 { - // Base language is defined. - if h == nil { - continue - } - for i := range h.exact { - have := &h.exact[i] - if have.tag.equalsRest(w) { - return have, w, Exact - } - } - max, _ = w.canonicalize(Legacy | Deprecated) - max, _ = addTags(max) - } else { - // Base language is not defined. - if h != nil { - for i := range h.exact { - have := &h.exact[i] - if have.tag.equalsRest(w) { - return have, w, Exact - } - } - } - if w.script == 0 && w.region == 0 { - // We skip all tags matching und for approximate matching, including - // private tags. - continue - } - max, _ = addTags(w) - if h = m.index[max.lang]; h == nil { - continue - } - } - // Check for match based on maximized tag. - for i := range h.max { - have := &h.max[i] - best.update(have, w, max.script, max.region) - if best.conf == Exact { - for have.nextMax != 0 { - have = &h.max[have.nextMax] - best.update(have, w, max.script, max.region) - } - return best.have, best.want, High - } - } - } - if best.conf <= No { - if len(want) != 0 { - return nil, want[0], No - } - return nil, Tag{}, No - } - return best.have, best.want, best.conf -} - -// bestMatch accumulates the best match so far. -type bestMatch struct { - have *haveTag - want Tag - conf Confidence - // Cached results from applying tie-breaking rules. - origLang bool - origReg bool - regDist uint8 - origScript bool - parentDist uint8 // 255 if have is not an ancestor of want tag. -} - -// update updates the existing best match if the new pair is considered to be a -// better match. -// To determine if the given pair is a better match, it first computes the rough -// confidence level. If this surpasses the current match, it will replace it and -// update the tie-breaker rule cache. If there is a tie, it proceeds with applying -// a series of tie-breaker rules. If there is no conclusive winner after applying -// the tie-breaker rules, it leaves the current match as the preferred match. -func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion regionID) { - // Bail if the maximum attainable confidence is below that of the current best match. 
- c := have.conf - if c < m.conf { - return - } - if have.maxScript != maxScript { - // There is usually very little comprehension between different scripts. - // In a few cases there may still be Low comprehension. This possibility is - // pre-computed and stored in have.altScript. - if Low < m.conf || have.altScript != maxScript { - return - } - c = Low - } else if have.maxRegion != maxRegion { - // There is usually a small difference between languages across regions. - // We use the region distance (below) to disambiguate between equal matches. - if High < c { - c = High - } - } - - // We store the results of the computations of the tie-breaker rules along - // with the best match. There is no need to do the checks once we determine - // we have a winner, but we do still need to do the tie-breaker computations. - // We use "beaten" to keep track if we still need to do the checks. - beaten := false // true if the new pair defeats the current one. - if c != m.conf { - if c < m.conf { - return - } - beaten = true - } - - // Tie-breaker rules: - // We prefer if the pre-maximized language was specified and identical. - origLang := have.tag.lang == tag.lang && tag.lang != 0 - if !beaten && m.origLang != origLang { - if m.origLang { - return - } - beaten = true - } - - // We prefer if the pre-maximized region was specified and identical. - origReg := have.tag.region == tag.region && tag.region != 0 - if !beaten && m.origReg != origReg { - if m.origReg { - return - } - beaten = true - } - - // Next we prefer smaller distances between regions, as defined by regionDist. - regDist := regionDist(have.maxRegion, maxRegion, tag.lang) - if !beaten && m.regDist != regDist { - if regDist > m.regDist { - return - } - beaten = true - } - - // Next we prefer if the pre-maximized script was specified and identical. - origScript := have.tag.script == tag.script && tag.script != 0 - if !beaten && m.origScript != origScript { - if m.origScript { - return - } - beaten = true - } - - // Finally we prefer tags which have a closer parent relationship. - parentDist := parentDistance(have.tag.region, tag) - if !beaten && m.parentDist != parentDist { - if parentDist > m.parentDist { - return - } - beaten = true - } - - // Update m to the newly found best match. - if beaten { - m.have = have - m.want = tag - m.conf = c - m.origLang = origLang - m.origReg = origReg - m.origScript = origScript - m.regDist = regDist - m.parentDist = parentDist - } -} - -// parentDistance returns the number of times Parent must be called before the -// regions match. It is assumed that it has already been checked that lang and -// script are identical. If haveRegion does not occur in the ancestor chain of -// tag, it returns 255. -func parentDistance(haveRegion regionID, tag Tag) uint8 { - p := tag.Parent() - d := uint8(1) - for haveRegion != p.region { - if p.region == 0 { - return 255 - } - p = p.Parent() - d++ - } - return d -} - -// regionDist wraps regionDistance with some exceptions to the algorithmic distance. -func regionDist(a, b regionID, lang langID) uint8 { - if lang == _en { - // Two variants of non-US English are close to each other, regardless of distance. - if a != _US && b != _US { - return 2 - } - } - return uint8(regionDistance(a, b)) -} - -// regionDistance computes the distance between two regions based on the -// distance in the graph of region containments as defined in CLDR. 
It iterates -// over increasingly inclusive sets of groups, represented as bit vectors, until -// the source bit vector has bits in common with the destination vector. -func regionDistance(a, b regionID) int { - if a == b { - return 0 - } - p, q := regionInclusion[a], regionInclusion[b] - if p < nRegionGroups { - p, q = q, p - } - set := regionInclusionBits - if q < nRegionGroups && set[p]&(1<<q) != 0 { - return 1 - } - d := 2 - for goal := set[q]; set[p]&goal == 0; p = regionInclusionNext[p] { - d++ - } - return d -} - -func (t Tag) variants() string { - if t.pVariant == 0 { - return "" - } - return t.str[t.pVariant:t.pExt] -} - -// variantOrPrivateTagStr returns variants or private use tags. -func (t Tag) variantOrPrivateTagStr() string { - if t.pExt > 0 { - return t.str[t.pVariant:t.pExt] - } - return t.str[t.pVariant:] -} - -// equalsRest compares everything except the language. -func (a Tag) equalsRest(b Tag) bool { - // TODO: don't include extensions in this comparison. To do this efficiently, - // though, we should handle private tags separately. - return a.script == b.script && a.region == b.region && a.variantOrPrivateTagStr() == b.variantOrPrivateTagStr() -} - -// isExactEquivalent returns true if canonicalizing the language will not alter -// the script or region of a tag. -func isExactEquivalent(l langID) bool { - for _, o := range notEquivalent { - if o == l { - return false - } - } - return true -} - -var notEquivalent []langID - -func init() { - // Create a list of all languages for which canonicalization may alter the - // script or region. - for _, lm := range langAliasMap { - tag := Tag{lang: langID(lm.from)} - if tag, _ = tag.canonicalize(All); tag.script != 0 || tag.region != 0 { - notEquivalent = append(notEquivalent, langID(lm.from)) - } - } -} diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go deleted file mode 100644 index cfa28f56e..000000000 --- a/vendor/golang.org/x/text/language/parse.go +++ /dev/null @@ -1,859 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package language - -import ( - "bytes" - "errors" - "fmt" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/tag" -) - -// isAlpha returns true if the byte is not a digit. -// b must be an ASCII letter or digit. -func isAlpha(b byte) bool { - return b > '9' -} - -// isAlphaNum returns true if the string contains only ASCII letters or digits. -func isAlphaNum(s []byte) bool { - for _, c := range s { - if !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9') { - return false - } - } - return true -} - -// errSyntax is returned by any of the parsing functions when the -// input is not well-formed, according to BCP 47. -// TODO: return the position at which the syntax error occurred? -var errSyntax = errors.New("language: tag is not well-formed") - -// ValueError is returned by any of the parsing functions when the -// input is well-formed but the respective subtag is not recognized -// as a valid value. -type ValueError struct { - v [8]byte -} - -func mkErrInvalid(s []byte) error { - var e ValueError - copy(e.v[:], s) - return e -} - -func (e ValueError) tag() []byte { - n := bytes.IndexByte(e.v[:], 0) - if n == -1 { - n = 8 - } - return e.v[:n] -} - -// Error implements the error interface. 
-func (e ValueError) Error() string { - return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag()) -} - -// Subtag returns the subtag for which the error occurred. -func (e ValueError) Subtag() string { - return string(e.tag()) -} - -// scanner is used to scan BCP 47 tokens, which are separated by _ or -. -type scanner struct { - b []byte - bytes [max99thPercentileSize]byte - token []byte - start int // start position of the current token - end int // end position of the current token - next int // next point for scan - err error - done bool -} - -func makeScannerString(s string) scanner { - scan := scanner{} - if len(s) <= len(scan.bytes) { - scan.b = scan.bytes[:copy(scan.bytes[:], s)] - } else { - scan.b = []byte(s) - } - scan.init() - return scan -} - -// makeScanner returns a scanner using b as the input buffer. -// b is not copied and may be modified by the scanner routines. -func makeScanner(b []byte) scanner { - scan := scanner{b: b} - scan.init() - return scan -} - -func (s *scanner) init() { - for i, c := range s.b { - if c == '_' { - s.b[i] = '-' - } - } - s.scan() -} - -// restToLower converts the string between start and end to lower case. -func (s *scanner) toLower(start, end int) { - for i := start; i < end; i++ { - c := s.b[i] - if 'A' <= c && c <= 'Z' { - s.b[i] += 'a' - 'A' - } - } -} - -func (s *scanner) setError(e error) { - if s.err == nil || (e == errSyntax && s.err != errSyntax) { - s.err = e - } -} - -// resizeRange shrinks or grows the array at position oldStart such that -// a new string of size newSize can fit between oldStart and oldEnd. -// Sets the scan point to after the resized range. -func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) { - s.start = oldStart - if end := oldStart + newSize; end != oldEnd { - diff := end - oldEnd - if end < cap(s.b) { - b := make([]byte, len(s.b)+diff) - copy(b, s.b[:oldStart]) - copy(b[end:], s.b[oldEnd:]) - s.b = b - } else { - s.b = append(s.b[end:], s.b[oldEnd:]...) - } - s.next = end + (s.next - s.end) - s.end = end - } -} - -// replace replaces the current token with repl. -func (s *scanner) replace(repl string) { - s.resizeRange(s.start, s.end, len(repl)) - copy(s.b[s.start:], repl) -} - -// gobble removes the current token from the input. -// Caller must call scan after calling gobble. -func (s *scanner) gobble(e error) { - s.setError(e) - if s.start == 0 { - s.b = s.b[:+copy(s.b, s.b[s.next:])] - s.end = 0 - } else { - s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])] - s.end = s.start - 1 - } - s.next = s.start -} - -// deleteRange removes the given range from s.b before the current token. -func (s *scanner) deleteRange(start, end int) { - s.setError(errSyntax) - s.b = s.b[:start+copy(s.b[start:], s.b[end:])] - diff := end - start - s.next -= diff - s.start -= diff - s.end -= diff -} - -// scan parses the next token of a BCP 47 string. Tokens that are larger -// than 8 characters or include non-alphanumeric characters result in an error -// and are gobbled and removed from the output. -// It returns the end position of the last token consumed. 
-func (s *scanner) scan() (end int) { - end = s.end - s.token = nil - for s.start = s.next; s.next < len(s.b); { - i := bytes.IndexByte(s.b[s.next:], '-') - if i == -1 { - s.end = len(s.b) - s.next = len(s.b) - i = s.end - s.start - } else { - s.end = s.next + i - s.next = s.end + 1 - } - token := s.b[s.start:s.end] - if i < 1 || i > 8 || !isAlphaNum(token) { - s.gobble(errSyntax) - continue - } - s.token = token - return end - } - if n := len(s.b); n > 0 && s.b[n-1] == '-' { - s.setError(errSyntax) - s.b = s.b[:len(s.b)-1] - } - s.done = true - return end -} - -// acceptMinSize parses multiple tokens of the given size or greater. -// It returns the end position of the last token consumed. -func (s *scanner) acceptMinSize(min int) (end int) { - end = s.end - s.scan() - for ; len(s.token) >= min; s.scan() { - end = s.end - } - return end -} - -// Parse parses the given BCP 47 string and returns a valid Tag. If parsing -// failed it returns an error and any part of the tag that could be parsed. -// If parsing succeeded but an unknown value was found, it returns -// ValueError. The Tag returned in this case is just stripped of the unknown -// value. All other values are preserved. It accepts tags in the BCP 47 format -// and extensions to this standard defined in -// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. -// The resulting tag is canonicalized using the default canonicalization type. -func Parse(s string) (t Tag, err error) { - return Default.Parse(s) -} - -// Parse parses the given BCP 47 string and returns a valid Tag. If parsing -// failed it returns an error and any part of the tag that could be parsed. -// If parsing succeeded but an unknown value was found, it returns -// ValueError. The Tag returned in this case is just stripped of the unknown -// value. All other values are preserved. It accepts tags in the BCP 47 format -// and extensions to this standard defined in -// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. -// The resulting tag is canonicalized using the the canonicalization type c. -func (c CanonType) Parse(s string) (t Tag, err error) { - // TODO: consider supporting old-style locale key-value pairs. - if s == "" { - return und, errSyntax - } - if len(s) <= maxAltTaglen { - b := [maxAltTaglen]byte{} - for i, c := range s { - // Generating invalid UTF-8 is okay as it won't match. - if 'A' <= c && c <= 'Z' { - c += 'a' - 'A' - } else if c == '_' { - c = '-' - } - b[i] = byte(c) - } - if t, ok := grandfathered(b); ok { - return t, nil - } - } - scan := makeScannerString(s) - t, err = parse(&scan, s) - t, changed := t.canonicalize(c) - if changed { - t.remakeString() - } - return t, err -} - -func parse(scan *scanner, s string) (t Tag, err error) { - t = und - var end int - if n := len(scan.token); n <= 1 { - scan.toLower(0, len(scan.b)) - if n == 0 || scan.token[0] != 'x' { - return t, errSyntax - } - end = parseExtensions(scan) - } else if n >= 4 { - return und, errSyntax - } else { // the usual case - t, end = parseTag(scan) - if n := len(scan.token); n == 1 { - t.pExt = uint16(end) - end = parseExtensions(scan) - } else if end < len(scan.b) { - scan.setError(errSyntax) - scan.b = scan.b[:end] - } - } - if int(t.pVariant) < len(scan.b) { - if end < len(s) { - s = s[:end] - } - if len(s) > 0 && tag.Compare(s, scan.b) == 0 { - t.str = s - } else { - t.str = string(scan.b) - } - } else { - t.pVariant, t.pExt = 0, 0 - } - return t, scan.err -} - -// parseTag parses language, script, region and variants. 
-// It returns a Tag and the end position in the input that was parsed. -func parseTag(scan *scanner) (t Tag, end int) { - var e error - // TODO: set an error if an unknown lang, script or region is encountered. - t.lang, e = getLangID(scan.token) - scan.setError(e) - scan.replace(t.lang.String()) - langStart := scan.start - end = scan.scan() - for len(scan.token) == 3 && isAlpha(scan.token[0]) { - // From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent - // to a tag of the form <extlang>. - lang, e := getLangID(scan.token) - if lang != 0 { - t.lang = lang - copy(scan.b[langStart:], lang.String()) - scan.b[langStart+3] = '-' - scan.start = langStart + 4 - } - scan.gobble(e) - end = scan.scan() - } - if len(scan.token) == 4 && isAlpha(scan.token[0]) { - t.script, e = getScriptID(script, scan.token) - if t.script == 0 { - scan.gobble(e) - } - end = scan.scan() - } - if n := len(scan.token); n >= 2 && n <= 3 { - t.region, e = getRegionID(scan.token) - if t.region == 0 { - scan.gobble(e) - } else { - scan.replace(t.region.String()) - } - end = scan.scan() - } - scan.toLower(scan.start, len(scan.b)) - t.pVariant = byte(end) - end = parseVariants(scan, end, t) - t.pExt = uint16(end) - return t, end -} - -var separator = []byte{'-'} - -// parseVariants scans tokens as long as each token is a valid variant string. -// Duplicate variants are removed. -func parseVariants(scan *scanner, end int, t Tag) int { - start := scan.start - varIDBuf := [4]uint8{} - variantBuf := [4][]byte{} - varID := varIDBuf[:0] - variant := variantBuf[:0] - last := -1 - needSort := false - for ; len(scan.token) >= 4; scan.scan() { - // TODO: measure the impact of needing this conversion and redesign - // the data structure if there is an issue. - v, ok := variantIndex[string(scan.token)] - if !ok { - // unknown variant - // TODO: allow user-defined variants? - scan.gobble(mkErrInvalid(scan.token)) - continue - } - varID = append(varID, v) - variant = append(variant, scan.token) - if !needSort { - if last < int(v) { - last = int(v) - } else { - needSort = true - // There is no legal combinations of more than 7 variants - // (and this is by no means a useful sequence). - const maxVariants = 8 - if len(varID) > maxVariants { - break - } - } - } - end = scan.end - } - if needSort { - sort.Sort(variantsSort{varID, variant}) - k, l := 0, -1 - for i, v := range varID { - w := int(v) - if l == w { - // Remove duplicates. - continue - } - varID[k] = varID[i] - variant[k] = variant[i] - k++ - l = w - } - if str := bytes.Join(variant[:k], separator); len(str) == 0 { - end = start - 1 - } else { - scan.resizeRange(start, end, len(str)) - copy(scan.b[scan.start:], str) - end = scan.end - } - } - return end -} - -type variantsSort struct { - i []uint8 - v [][]byte -} - -func (s variantsSort) Len() int { - return len(s.i) -} - -func (s variantsSort) Swap(i, j int) { - s.i[i], s.i[j] = s.i[j], s.i[i] - s.v[i], s.v[j] = s.v[j], s.v[i] -} - -func (s variantsSort) Less(i, j int) bool { - return s.i[i] < s.i[j] -} - -type bytesSort [][]byte - -func (b bytesSort) Len() int { - return len(b) -} - -func (b bytesSort) Swap(i, j int) { - b[i], b[j] = b[j], b[i] -} - -func (b bytesSort) Less(i, j int) bool { - return bytes.Compare(b[i], b[j]) == -1 -} - -// parseExtensions parses and normalizes the extensions in the buffer. -// It returns the last position of scan.b that is part of any extension. -// It also trims scan.b to remove excess parts accordingly. 
-func parseExtensions(scan *scanner) int { - start := scan.start - exts := [][]byte{} - private := []byte{} - end := scan.end - for len(scan.token) == 1 { - extStart := scan.start - ext := scan.token[0] - end = parseExtension(scan) - extension := scan.b[extStart:end] - if len(extension) < 3 || (ext != 'x' && len(extension) < 4) { - scan.setError(errSyntax) - end = extStart - continue - } else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) { - scan.b = scan.b[:end] - return end - } else if ext == 'x' { - private = extension - break - } - exts = append(exts, extension) - } - sort.Sort(bytesSort(exts)) - if len(private) > 0 { - exts = append(exts, private) - } - scan.b = scan.b[:start] - if len(exts) > 0 { - scan.b = append(scan.b, bytes.Join(exts, separator)...) - } else if start > 0 { - // Strip trailing '-'. - scan.b = scan.b[:start-1] - } - return end -} - -// parseExtension parses a single extension and returns the position of -// the extension end. -func parseExtension(scan *scanner) int { - start, end := scan.start, scan.end - switch scan.token[0] { - case 'u': - attrStart := end - scan.scan() - for last := []byte{}; len(scan.token) > 2; scan.scan() { - if bytes.Compare(scan.token, last) != -1 { - // Attributes are unsorted. Start over from scratch. - p := attrStart + 1 - scan.next = p - attrs := [][]byte{} - for scan.scan(); len(scan.token) > 2; scan.scan() { - attrs = append(attrs, scan.token) - end = scan.end - } - sort.Sort(bytesSort(attrs)) - copy(scan.b[p:], bytes.Join(attrs, separator)) - break - } - last = scan.token - end = scan.end - } - var last, key []byte - for attrEnd := end; len(scan.token) == 2; last = key { - key = scan.token - keyEnd := scan.end - end = scan.acceptMinSize(3) - // TODO: check key value validity - if keyEnd == end || bytes.Compare(key, last) != 1 { - // We have an invalid key or the keys are not sorted. - // Start scanning keys from scratch and reorder. - p := attrEnd + 1 - scan.next = p - keys := [][]byte{} - for scan.scan(); len(scan.token) == 2; { - keyStart, keyEnd := scan.start, scan.end - end = scan.acceptMinSize(3) - if keyEnd != end { - keys = append(keys, scan.b[keyStart:end]) - } else { - scan.setError(errSyntax) - end = keyStart - } - } - sort.Sort(bytesSort(keys)) - reordered := bytes.Join(keys, separator) - if e := p + len(reordered); e < end { - scan.deleteRange(e, end) - end = e - } - copy(scan.b[p:], bytes.Join(keys, separator)) - break - } - } - case 't': - scan.scan() - if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) { - _, end = parseTag(scan) - scan.toLower(start, end) - } - for len(scan.token) == 2 && !isAlpha(scan.token[1]) { - end = scan.acceptMinSize(3) - } - case 'x': - end = scan.acceptMinSize(1) - default: - end = scan.acceptMinSize(2) - } - return end -} - -// Compose creates a Tag from individual parts, which may be of type Tag, Base, -// Script, Region, Variant, []Variant, Extension, []Extension or error. If a -// Base, Script or Region or slice of type Variant or Extension is passed more -// than once, the latter will overwrite the former. Variants and Extensions are -// accumulated, but if two extensions of the same type are passed, the latter -// will replace the former. A Tag overwrites all former values and typically -// only makes sense as the first argument. The resulting tag is returned after -// canonicalizing using the Default CanonType. If one or more errors are -// encountered, one of the errors is returned. 
-func Compose(part ...interface{}) (t Tag, err error) { - return Default.Compose(part...) -} - -// Compose creates a Tag from individual parts, which may be of type Tag, Base, -// Script, Region, Variant, []Variant, Extension, []Extension or error. If a -// Base, Script or Region or slice of type Variant or Extension is passed more -// than once, the latter will overwrite the former. Variants and Extensions are -// accumulated, but if two extensions of the same type are passed, the latter -// will replace the former. A Tag overwrites all former values and typically -// only makes sense as the first argument. The resulting tag is returned after -// canonicalizing using CanonType c. If one or more errors are encountered, -// one of the errors is returned. -func (c CanonType) Compose(part ...interface{}) (t Tag, err error) { - var b builder - if err = b.update(part...); err != nil { - return und, err - } - t, _ = b.tag.canonicalize(c) - - if len(b.ext) > 0 || len(b.variant) > 0 { - sort.Sort(sortVariant(b.variant)) - sort.Strings(b.ext) - if b.private != "" { - b.ext = append(b.ext, b.private) - } - n := maxCoreSize + tokenLen(b.variant...) + tokenLen(b.ext...) - buf := make([]byte, n) - p := t.genCoreBytes(buf) - t.pVariant = byte(p) - p += appendTokens(buf[p:], b.variant...) - t.pExt = uint16(p) - p += appendTokens(buf[p:], b.ext...) - t.str = string(buf[:p]) - } else if b.private != "" { - t.str = b.private - t.remakeString() - } - return -} - -type builder struct { - tag Tag - - private string // the x extension - ext []string - variant []string - - err error -} - -func (b *builder) addExt(e string) { - if e == "" { - } else if e[0] == 'x' { - b.private = e - } else { - b.ext = append(b.ext, e) - } -} - -var errInvalidArgument = errors.New("invalid Extension or Variant") - -func (b *builder) update(part ...interface{}) (err error) { - replace := func(l *[]string, s string, eq func(a, b string) bool) bool { - if s == "" { - b.err = errInvalidArgument - return true - } - for i, v := range *l { - if eq(v, s) { - (*l)[i] = s - return true - } - } - return false - } - for _, x := range part { - switch v := x.(type) { - case Tag: - b.tag.lang = v.lang - b.tag.region = v.region - b.tag.script = v.script - if v.str != "" { - b.variant = nil - for x, s := "", v.str[v.pVariant:v.pExt]; s != ""; { - x, s = nextToken(s) - b.variant = append(b.variant, x) - } - b.ext, b.private = nil, "" - for i, e := int(v.pExt), ""; i < len(v.str); { - i, e = getExtension(v.str, i) - b.addExt(e) - } - } - case Base: - b.tag.lang = v.langID - case Script: - b.tag.script = v.scriptID - case Region: - b.tag.region = v.regionID - case Variant: - if !replace(&b.variant, v.variant, func(a, b string) bool { return a == b }) { - b.variant = append(b.variant, v.variant) - } - case Extension: - if !replace(&b.ext, v.s, func(a, b string) bool { return a[0] == b[0] }) { - b.addExt(v.s) - } - case []Variant: - b.variant = nil - for _, x := range v { - b.update(x) - } - case []Extension: - b.ext, b.private = nil, "" - for _, e := range v { - b.update(e) - } - // TODO: support parsing of raw strings based on morphology or just extensions? 
- case error: - err = v - } - } - return -} - -func tokenLen(token ...string) (n int) { - for _, t := range token { - n += len(t) + 1 - } - return -} - -func appendTokens(b []byte, token ...string) int { - p := 0 - for _, t := range token { - b[p] = '-' - copy(b[p+1:], t) - p += 1 + len(t) - } - return p -} - -type sortVariant []string - -func (s sortVariant) Len() int { - return len(s) -} - -func (s sortVariant) Swap(i, j int) { - s[j], s[i] = s[i], s[j] -} - -func (s sortVariant) Less(i, j int) bool { - return variantIndex[s[i]] < variantIndex[s[j]] -} - -func findExt(list []string, x byte) int { - for i, e := range list { - if e[0] == x { - return i - } - } - return -1 -} - -// getExtension returns the name, body and end position of the extension. -func getExtension(s string, p int) (end int, ext string) { - if s[p] == '-' { - p++ - } - if s[p] == 'x' { - return len(s), s[p:] - } - end = nextExtension(s, p) - return end, s[p:end] -} - -// nextExtension finds the next extension within the string, searching -// for the -<char>- pattern from position p. -// In the fast majority of cases, language tags will have at most -// one extension and extensions tend to be small. -func nextExtension(s string, p int) int { - for n := len(s) - 3; p < n; { - if s[p] == '-' { - if s[p+2] == '-' { - return p - } - p += 3 - } else { - p++ - } - } - return len(s) -} - -var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight") - -// ParseAcceptLanguage parses the contents of a Accept-Language header as -// defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and -// a list of corresponding quality weights. It is more permissive than RFC 2616 -// and may return non-nil slices even if the input is not valid. -// The Tags will be sorted by highest weight first and then by first occurrence. -// Tags with a weight of zero will be dropped. An error will be returned if the -// input could not be parsed. -func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) { - var entry string - for s != "" { - if entry, s = split(s, ','); entry == "" { - continue - } - - entry, weight := split(entry, ';') - - // Scan the language. - t, err := Parse(entry) - if err != nil { - id, ok := acceptFallback[entry] - if !ok { - return nil, nil, err - } - t = Tag{lang: id} - } - - // Scan the optional weight. - w := 1.0 - if weight != "" { - weight = consume(weight, 'q') - weight = consume(weight, '=') - // consume returns the empty string when a token could not be - // consumed, resulting in an error for ParseFloat. - if w, err = strconv.ParseFloat(weight, 32); err != nil { - return nil, nil, errInvalidWeight - } - // Drop tags with a quality weight of 0. - if w <= 0 { - continue - } - } - - tag = append(tag, t) - q = append(q, float32(w)) - } - sortStable(&tagSort{tag, q}) - return tag, q, nil -} - -// consume removes a leading token c from s and returns the result or the empty -// string if there is no such token. -func consume(s string, c byte) string { - if s == "" || s[0] != c { - return "" - } - return strings.TrimSpace(s[1:]) -} - -func split(s string, c byte) (head, tail string) { - if i := strings.IndexByte(s, c); i >= 0 { - return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+1:]) - } - return strings.TrimSpace(s), "" -} - -// Add hack mapping to deal with a small number of cases that that occur -// in Accept-Language (with reasonable frequency). 
-var acceptFallback = map[string]langID{ - "english": _en, - "deutsch": _de, - "italian": _it, - "french": _fr, - "*": _mul, // defined in the spec to match all languages. -} - -type tagSort struct { - tag []Tag - q []float32 -} - -func (s *tagSort) Len() int { - return len(s.q) -} - -func (s *tagSort) Less(i, j int) bool { - return s.q[i] > s.q[j] -} - -func (s *tagSort) Swap(i, j int) { - s.tag[i], s.tag[j] = s.tag[j], s.tag[i] - s.q[i], s.q[j] = s.q[j], s.q[i] -} diff --git a/vendor/golang.org/x/text/language/tables.go b/vendor/golang.org/x/text/language/tables.go deleted file mode 100644 index 5de0f856a..000000000 --- a/vendor/golang.org/x/text/language/tables.go +++ /dev/null @@ -1,2791 +0,0 @@ -// This file was generated by go generate; DO NOT EDIT - -package language - -import "golang.org/x/text/internal/tag" - -// CLDRVersion is the CLDR version from which the tables in this package are derived. -const CLDRVersion = "29" - -const numLanguages = 8654 - -const numScripts = 230 - -const numRegions = 354 - -type fromTo struct { - from uint16 - to uint16 -} - -const nonCanonicalUnd = 649 -const ( - _af = 10 - _am = 17 - _ar = 21 - _az = 36 - _bg = 56 - _bn = 75 - _ca = 97 - _cs = 121 - _da = 128 - _de = 133 - _el = 154 - _en = 155 - _es = 157 - _et = 159 - _fa = 164 - _fi = 168 - _fil = 170 - _fr = 175 - _gu = 211 - _he = 224 - _hi = 225 - _hr = 238 - _hu = 242 - _hy = 243 - _id = 248 - _is = 258 - _it = 259 - _ja = 263 - _ka = 273 - _kk = 303 - _km = 307 - _kn = 309 - _ko = 310 - _ky = 333 - _lo = 357 - _lt = 361 - _lv = 368 - _mk = 396 - _ml = 397 - _mn = 399 - _mo = 402 - _mr = 406 - _ms = 410 - _mul = 414 - _my = 421 - _nb = 431 - _ne = 436 - _nl = 445 - _no = 449 - _pa = 471 - _pl = 487 - _pt = 495 - _ro = 515 - _ru = 519 - _sh = 549 - _si = 552 - _sk = 554 - _sl = 556 - _sq = 570 - _sr = 571 - _sv = 583 - _sw = 584 - _ta = 593 - _te = 600 - _th = 605 - _tl = 616 - _tn = 619 - _tr = 623 - _uk = 646 - _ur = 652 - _uz = 653 - _vi = 658 - _zh = 708 - _zu = 710 - _jbo = 265 - _ami = 1033 - _bnn = 1740 - _hak = 221 - _tlh = 13850 - _lb = 340 - _nv = 458 - _pwn = 11438 - _tao = 13571 - _tay = 13581 - _tsu = 14045 - _nn = 447 - _sfb = 13012 - _vgt = 15084 - _sgg = 13043 - _cmn = 2390 - _nan = 428 - _hsn = 240 -) - -const langPrivateStart = 0x2d09 - -const langPrivateEnd = 0x2f10 - -// lang holds an alphabetically sorted list of ISO-639 language identifiers. -// All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. -// For 2-byte language identifiers, the two successive bytes have the following meaning: -// - if the first letter of the 2- and 3-letter ISO codes are the same: -// the second and third letter of the 3-letter ISO code. -// - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. -// For 3-byte language identifiers the 4th byte is 0. 
-var lang tag.Index = "" + // Size: 2856 bytes - "---\x00aaarabbkabr\x00ace\x00ach\x00ada\x00ady\x00aeveaeb\x00affragq\x00" + - "aho\x00akkaakk\x00aln\x00alt\x00ammhamo\x00anrgaoz\x00arraarc\x00arn\x00" + - "aro\x00arq\x00ary\x00arz\x00assmasa\x00ase\x00ast\x00atj\x00avvaawa\x00a" + - "yymazzebaakbal\x00ban\x00bap\x00bar\x00bas\x00bax\x00bbc\x00bbj\x00bci" + - "\x00beelbej\x00bem\x00bew\x00bez\x00bfd\x00bfq\x00bft\x00bfy\x00bgulbgc" + - "\x00bgn\x00bgx\x00bhihbhb\x00bhi\x00bhk\x00bho\x00biisbik\x00bin\x00bjj" + - "\x00bjn\x00bkm\x00bku\x00blt\x00bmambmq\x00bnenboodbpy\x00bqi\x00bqv\x00" + - "brrebra\x00brh\x00brx\x00bsosbsq\x00bss\x00bto\x00btv\x00bua\x00buc\x00b" + - "ug\x00bum\x00bvb\x00byn\x00byv\x00bze\x00caatcch\x00ccp\x00ceheceb\x00cg" + - "g\x00chhachk\x00chm\x00cho\x00chp\x00chr\x00cja\x00cjm\x00ckb\x00cooscop" + - "\x00cps\x00crrecrj\x00crk\x00crl\x00crm\x00crs\x00csescsb\x00csw\x00ctd" + - "\x00cuhucvhvcyymdaandak\x00dar\x00dav\x00dcc\x00deeuden\x00dgr\x00dje" + - "\x00dnj\x00doi\x00dsb\x00dtm\x00dtp\x00dty\x00dua\x00dvivdyo\x00dyu\x00d" + - "zzoebu\x00eeweefi\x00egl\x00egy\x00eky\x00elllenngeopoes\x00\x05esu\x00e" + - "tstett\x00euusewo\x00ext\x00faasfan\x00ffulffm\x00fiinfia\x00fil\x00fit" + - "\x00fjijfoaofon\x00frrafrc\x00frp\x00frr\x00frs\x00fud\x00fuq\x00fur\x00" + - "fuv\x00fvr\x00fyrygalegaa\x00gag\x00gan\x00gay\x00gbm\x00gbz\x00gcr\x00g" + - "dlagez\x00ggn\x00gil\x00gjk\x00gju\x00gllgglk\x00gnrngom\x00gon\x00gor" + - "\x00gos\x00got\x00grc\x00grt\x00gsw\x00guujgub\x00guc\x00gur\x00guw\x00g" + - "uz\x00gvlvgvr\x00gwi\x00haauhak\x00haw\x00haz\x00heebhiinhif\x00hil\x00h" + - "lu\x00hmd\x00hnd\x00hne\x00hnj\x00hnn\x00hno\x00homohoc\x00hoj\x00hrrvhs" + - "b\x00hsn\x00htathuunhyyehzerianaiba\x00ibb\x00idndieleigboiiiiikpkikt" + - "\x00ilo\x00inndinh\x00iodoisslittaiukuiw\x00\x03izh\x00japnjam\x00jbo" + - "\x00jgo\x00ji\x00\x06jmc\x00jml\x00jut\x00jvavjwavkaatkaa\x00kab\x00kac" + - "\x00kaj\x00kam\x00kao\x00kbd\x00kcg\x00kck\x00kde\x00kdt\x00kea\x00ken" + - "\x00kfo\x00kfr\x00kfy\x00kgonkge\x00kgp\x00kha\x00khb\x00khn\x00khq\x00k" + - "ht\x00khw\x00kiikkiu\x00kjuakjg\x00kkazkkj\x00klalkln\x00kmhmkmb\x00knan" + - "koorkoi\x00kok\x00kos\x00kpe\x00kraukrc\x00kri\x00krj\x00krl\x00kru\x00k" + - "sasksb\x00ksf\x00ksh\x00kuurkum\x00kvomkvr\x00kvx\x00kw\x00\x01kxm\x00kx" + - "p\x00kyirlaatlab\x00lad\x00lag\x00lah\x00laj\x00lbtzlbe\x00lbw\x00lcp" + - "\x00lep\x00lez\x00lgugliimlif\x00lij\x00lis\x00ljp\x00lki\x00lkt\x00lmn" + - "\x00lmo\x00lninloaolol\x00loz\x00lrc\x00ltitltg\x00luublua\x00luo\x00luy" + - "\x00luz\x00lvavlwl\x00lzh\x00lzz\x00mad\x00maf\x00mag\x00mai\x00mak\x00m" + - "an\x00mas\x00maz\x00mdf\x00mdh\x00mdr\x00men\x00mer\x00mfa\x00mfe\x00mgl" + - "gmgh\x00mgo\x00mgp\x00mgy\x00mhahmirimin\x00mis\x00mkkdmlalmls\x00mnonmn" + - "i\x00mnw\x00moolmoe\x00moh\x00mos\x00mrarmrd\x00mrj\x00mro\x00mssamtltmt" + - "r\x00mua\x00mul\x00mus\x00mvy\x00mwk\x00mwr\x00mwv\x00mxc\x00myyamyv\x00" + - "myx\x00myz\x00mzn\x00naaunah\x00nan\x00nap\x00naq\x00nbobnch\x00nddendc" + - "\x00nds\x00neepnew\x00ngdongl\x00nhe\x00nhw\x00nij\x00niu\x00njo\x00nlld" + - "nmg\x00nnnonnh\x00noornod\x00noe\x00non\x00nqo\x00nrblnsk\x00nso\x00nus" + - "\x00nvavnxq\x00nyyanym\x00nyn\x00nzi\x00occiojjiomrmorriosssosa\x00otk" + - "\x00paanpag\x00pal\x00pam\x00pap\x00pau\x00pcd\x00pcm\x00pdc\x00pdt\x00p" + - "eo\x00pfl\x00phn\x00pilipka\x00pko\x00plolpms\x00pnt\x00pon\x00pra\x00pr" + - "d\x00prg\x00psusptorpuu\x00quuequc\x00qug\x00raj\x00rcf\x00rej\x00rgn" + - 
"\x00ria\x00rif\x00rjs\x00rkt\x00rmohrmf\x00rmo\x00rmt\x00rmu\x00rnunrng" + - "\x00roonrob\x00rof\x00rtm\x00ruusrue\x00rug\x00rw\x00\x04rwk\x00ryu\x00s" + - "aansaf\x00sah\x00saq\x00sas\x00sat\x00saz\x00sbp\x00scrdsck\x00scn\x00sc" + - "o\x00scs\x00sdndsdc\x00sdh\x00semesef\x00seh\x00sei\x00ses\x00sgagsga" + - "\x00sgs\x00sh\x00\x02shi\x00shn\x00siinsid\x00sklkskr\x00sllvsli\x00sly" + - "\x00smmosma\x00smi\x00smj\x00smn\x00smp\x00sms\x00snnasnk\x00soomsou\x00" + - "sqqisrrpsrb\x00srn\x00srr\x00srx\x00ssswssy\x00stotstq\x00suunsuk\x00sus" + - "\x00svweswwaswb\x00swc\x00swg\x00swv\x00sxn\x00syl\x00syr\x00szl\x00taam" + - "taj\x00tbw\x00tcy\x00tdd\x00tdg\x00tdh\x00teeltem\x00teo\x00tet\x00tggkt" + - "hhathl\x00thq\x00thr\x00tiirtig\x00tiv\x00tkuktkl\x00tkr\x00tkt\x00tlglt" + - "ly\x00tmh\x00tnsntoontog\x00tpi\x00trurtru\x00trv\x00tssotsd\x00tsf\x00t" + - "sg\x00tsj\x00ttatttj\x00tts\x00ttt\x00tum\x00tvl\x00twwitwq\x00txg\x00ty" + - "ahtyv\x00tzm\x00udm\x00ugiguga\x00ukkruli\x00umb\x00und\x00unr\x00unx" + - "\x00urrduzzbvai\x00veenvec\x00vep\x00viievic\x00vls\x00vmf\x00vmw\x00voo" + - "lvot\x00vro\x00vun\x00walnwae\x00wal\x00war\x00wbp\x00wbq\x00wbr\x00wls" + - "\x00wni\x00woolwtm\x00wuu\x00xav\x00xcr\x00xhhoxlc\x00xld\x00xmf\x00xmn" + - "\x00xmr\x00xna\x00xnr\x00xog\x00xpr\x00xsa\x00xsr\x00yao\x00yap\x00yav" + - "\x00ybb\x00yiidyooryrl\x00yua\x00yue\x00zahazag\x00zbl\x00zdj\x00zea\x00" + - "zgh\x00zhhozmi\x00zuulzxx\x00zza\x00\xff\xff\xff\xff" - -const langNoIndexOffset = 713 - -// langNoIndex is a bit vector of all 3-letter language codes that are not used as an index -// in lookup tables. The language ids for these language codes are derived directly -// from the letters and are not consecutive. -// Size: 2197 bytes, 2197 elements -var langNoIndex = [2197]uint8{ - // Entry 0 - 3F - 0xff, 0xfd, 0xfd, 0xfe, 0xef, 0xf7, 0xbf, 0xd2, - 0xfb, 0xbf, 0xfe, 0xfa, 0xb7, 0x1d, 0x3c, 0x57, - 0x6f, 0x97, 0x73, 0xf8, 0xff, 0xef, 0xff, 0x70, - 0xaf, 0x03, 0xff, 0xff, 0xcf, 0x05, 0x85, 0x62, - 0xe9, 0xbf, 0xfd, 0xff, 0xff, 0xf7, 0xfd, 0x77, - 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, - 0xc9, 0xff, 0xff, 0xff, 0x4d, 0xb8, 0x0a, 0x6a, - 0x7e, 0xfa, 0xe3, 0xfe, 0x7e, 0xff, 0x77, 0xff, - // Entry 40 - 7F - 0xff, 0xff, 0xff, 0xdf, 0x2b, 0xf4, 0xf1, 0xe0, - 0x5d, 0xe7, 0x9f, 0x14, 0x07, 0x20, 0xdf, 0xed, - 0x9f, 0x3f, 0xc9, 0x21, 0xf8, 0x3f, 0x94, 0xf7, - 0x7e, 0xff, 0xff, 0xff, 0xfe, 0x7f, 0xff, 0xff, - 0xff, 0xff, 0x5f, 0xfc, 0xdb, 0xfd, 0xbf, 0xb5, - 0x7b, 0xdf, 0x7f, 0xf7, 0xeb, 0xfe, 0xff, 0xa7, - 0xbd, 0xff, 0x7f, 0xf7, 0xff, 0xef, 0xef, 0xef, - 0xff, 0xff, 0x9f, 0xff, 0xff, 0xef, 0xff, 0xdf, - // Entry 80 - BF - 0xff, 0xff, 0xf3, 0xff, 0xfb, 0x2f, 0xff, 0xff, - 0xfb, 0xee, 0xff, 0xbd, 0xdb, 0xff, 0xdf, 0xf7, - 0xff, 0xfa, 0xfd, 0xff, 0x7e, 0xaf, 0x7b, 0xfe, - 0x7f, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xdf, 0xff, - 0xff, 0xdf, 0xfb, 0xff, 0xfd, 0xfc, 0xfb, 0xff, - 0xff, 0xff, 0xff, 0xf7, 0x7f, 0xbf, 0xfd, 0xd5, - 0xa5, 0x77, 0x40, 0xff, 0x9c, 0xc1, 0x41, 0x2c, - 0x08, 0x24, 0x41, 0x00, 0x50, 0x40, 0x00, 0x80, - // Entry C0 - FF - 0xfb, 0x4a, 0xf2, 0x9f, 0xb4, 0x42, 0x41, 0x96, - 0x9b, 0x14, 0x88, 0xf6, 0x7b, 0xe7, 0x17, 0x56, - 0x55, 0x7d, 0x0e, 0x1c, 0x37, 0x71, 0xf3, 0xef, - 0x97, 0xff, 0x5d, 0x38, 0x64, 0x08, 0x00, 0x10, - 0xbc, 0x87, 0xaf, 0xdf, 0xff, 0xf7, 0x73, 0x35, - 0x3e, 0x87, 0xc7, 0xdf, 0xff, 0x00, 0x81, 0x00, - 0xb0, 0x05, 0x80, 0x00, 0x00, 0x00, 0x00, 0x03, - 0x40, 0x00, 0x40, 0x92, 0x21, 0xd0, 0xbf, 0x5d, - // Entry 100 - 13F - 0xfd, 0xde, 0xfe, 0x5e, 0x00, 0x00, 0x02, 0x64, - 0x8d, 0x19, 
0xc1, 0xdf, 0x79, 0x22, 0x00, 0x00, - 0x00, 0xdf, 0x6d, 0xdc, 0x26, 0xe5, 0xd9, 0xf3, - 0xfe, 0xff, 0xfd, 0xcb, 0x9f, 0x14, 0x01, 0x0c, - 0x86, 0x00, 0xd1, 0x00, 0xf0, 0xc5, 0x67, 0x5f, - 0x56, 0x89, 0x5e, 0xb7, 0xec, 0xef, 0x03, 0x00, - 0x02, 0x00, 0x00, 0x00, 0xc0, 0x77, 0xda, 0x57, - 0x90, 0x69, 0x01, 0x2c, 0x96, 0x79, 0xe0, 0xff, - // Entry 140 - 17F - 0xff, 0x7f, 0x00, 0x00, 0x00, 0x01, 0x08, 0x56, - 0x01, 0x00, 0x00, 0xb0, 0x14, 0x03, 0x50, 0x16, - 0x0a, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x09, - 0x00, 0x00, 0x60, 0x10, 0x00, 0x00, 0x00, 0x10, - 0x00, 0x00, 0x44, 0x00, 0x00, 0x10, 0x00, 0x04, - 0x08, 0x00, 0x00, 0x04, 0x00, 0x80, 0x28, 0x04, - 0x00, 0x00, 0x50, 0xd5, 0x2d, 0x00, 0x64, 0x35, - 0x24, 0x53, 0xf5, 0xd4, 0xbd, 0xe2, 0xcd, 0x03, - // Entry 180 - 1BF - 0x00, 0x80, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x17, 0x39, 0x01, 0xdd, 0x57, 0x98, - 0x21, 0x98, 0xa5, 0x00, 0x00, 0x01, 0x40, 0x82, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x40, 0x00, 0x44, 0x00, 0x00, 0xb0, 0xfe, - 0xa9, 0x39, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x40, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - // Entry 1C0 - 1FF - 0x00, 0x01, 0x28, 0x05, 0x00, 0x00, 0x00, 0x00, - 0x04, 0x20, 0x04, 0xa6, 0x08, 0x04, 0x00, 0x08, - 0x81, 0x50, 0x00, 0x00, 0x08, 0x11, 0x86, 0x40, - 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x06, 0x55, - 0x02, 0x10, 0x08, 0x04, 0x00, 0x00, 0x00, 0x60, - 0x3b, 0x83, 0x11, 0x00, 0x00, 0x00, 0x11, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0xbe, 0xdf, 0xff, 0xfe, 0xbf, - // Entry 200 - 23F - 0xdf, 0xc7, 0x83, 0x82, 0xc0, 0xff, 0xdf, 0x27, - 0xcf, 0x5f, 0xe7, 0x01, 0x10, 0x20, 0xb2, 0xc5, - 0xa4, 0x45, 0x25, 0x9b, 0x03, 0xcf, 0xf0, 0xdf, - 0x03, 0xc4, 0x08, 0x10, 0x01, 0x0e, 0x01, 0xe3, - 0x92, 0x54, 0xdb, 0x38, 0xf1, 0x7f, 0xf7, 0x6d, - 0xf9, 0xff, 0x1c, 0x7d, 0x04, 0x08, 0x00, 0x01, - 0x21, 0x12, 0x6c, 0x5f, 0xdd, 0x0f, 0x85, 0x4f, - 0x40, 0x40, 0x00, 0x04, 0xf9, 0xfd, 0xbd, 0xd4, - // Entry 240 - 27F - 0xe8, 0x13, 0xf4, 0x27, 0xa3, 0x0d, 0x00, 0x00, - 0x20, 0x7b, 0x39, 0x02, 0x05, 0x84, 0x00, 0xf0, - 0xbf, 0x7f, 0xda, 0x00, 0x18, 0x04, 0x81, 0x00, - 0x00, 0x00, 0x80, 0x10, 0x94, 0x1c, 0x01, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x10, 0x40, 0x00, 0x04, - 0x08, 0xb4, 0x7c, 0xa5, 0x0c, 0x40, 0x00, 0x00, - 0x11, 0x04, 0x04, 0x6c, 0x00, 0x20, 0x70, 0xff, - 0xfb, 0x7f, 0x60, 0x00, 0x05, 0x9b, 0xdd, 0x6e, - // Entry 280 - 2BF - 0x03, 0x00, 0x11, 0x00, 0x00, 0x00, 0x40, 0x05, - 0xb5, 0xb6, 0x80, 0x08, 0x04, 0x00, 0x04, 0x51, - 0xe2, 0xff, 0xfd, 0x3f, 0x05, 0x09, 0x08, 0x05, - 0x40, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x00, 0xa1, 0x02, 0x60, - 0xe5, 0x48, 0x14, 0x89, 0x20, 0xc0, 0x47, 0x80, - 0x07, 0x00, 0x00, 0x00, 0xcc, 0x50, 0x40, 0x24, - 0x85, 0x47, 0x84, 0x40, 0x20, 0x10, 0x00, 0x20, - // Entry 2C0 - 2FF - 0x02, 0x50, 0x88, 0x11, 0x00, 0xd1, 0x6c, 0xee, - 0x50, 0x27, 0x1d, 0x11, 0x69, 0x06, 0x59, 0xe9, - 0x33, 0x08, 0x00, 0x20, 0x05, 0x40, 0x10, 0x00, - 0x00, 0x00, 0x50, 0x44, 0x96, 0x49, 0xd6, 0x5d, - 0xa7, 0x81, 0x47, 0x97, 0xfb, 0x00, 0x10, 0x00, - 0x08, 0x00, 0x80, 0x00, 0x40, 0x45, 0x00, 0x01, - 0x02, 0x00, 0x01, 0x40, 0x80, 0x00, 0x04, 0x08, - 0xf8, 0xeb, 0xf6, 0x39, 0xc4, 0x89, 0x16, 0x00, - // Entry 300 - 33F - 0x00, 0x0c, 0x04, 0x01, 0x20, 0x20, 0xdd, 0xa2, - 0x01, 0x00, 0x00, 0x00, 0x12, 0x04, 0x00, 0x00, - 0x04, 0x10, 0xf0, 0x9d, 0x95, 0x13, 0x04, 0x80, - 0x00, 0x01, 0xd0, 0x12, 0x40, 0x00, 0x10, 0xb0, - 0x10, 0x62, 0x4c, 0xd2, 0x02, 0x01, 0x4a, 0x00, 
- 0x46, 0x04, 0x00, 0x08, 0x02, 0x00, 0x20, 0xc0, - 0x00, 0x80, 0x06, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x00, 0xf0, 0xd8, 0x6f, 0x15, 0x02, 0x08, 0x00, - // Entry 340 - 37F - 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01, - 0x00, 0x10, 0x00, 0x00, 0x00, 0xf8, 0x85, 0xe3, - 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0x7f, 0xfb, - 0xff, 0xfc, 0xfe, 0xdf, 0xff, 0xff, 0xff, 0xf6, - 0xfb, 0xfe, 0xf7, 0x1f, 0xff, 0xb3, 0xed, 0xff, - 0xdb, 0xed, 0xff, 0xfe, 0xff, 0xfe, 0xdf, 0xff, - 0xff, 0xff, 0xf7, 0xff, 0xfd, 0xff, 0xff, 0xff, - 0xfd, 0xff, 0xdf, 0xaf, 0x9c, 0xff, 0xfb, 0xff, - // Entry 380 - 3BF - 0xff, 0xff, 0xff, 0xff, 0xef, 0xd2, 0xbb, 0xdf, - 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xef, - 0xfd, 0xff, 0xff, 0xf7, 0xfd, 0xff, 0xff, 0xff, - 0xef, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0x5f, 0xd3, 0x7b, 0xfd, 0xd9, 0xdf, 0xef, - 0xbc, 0x18, 0x05, 0x2c, 0xff, 0x07, 0xf0, 0xff, - 0xf7, 0x5f, 0x00, 0x08, 0x00, 0xc3, 0x3d, 0x1b, - 0x06, 0xe6, 0x72, 0xf0, 0xdd, 0x3c, 0x7f, 0x44, - // Entry 3C0 - 3FF - 0x02, 0x30, 0x9f, 0x7a, 0x16, 0xfd, 0xff, 0x57, - 0xf2, 0xff, 0x39, 0xff, 0xf2, 0x1e, 0x95, 0xf7, - 0xf7, 0xff, 0x45, 0x80, 0x01, 0x02, 0x00, 0x00, - 0x40, 0x54, 0x9f, 0x8a, 0xd9, 0xd9, 0x0e, 0x11, - 0x84, 0x51, 0xc0, 0xf3, 0xfb, 0x47, 0x00, 0x01, - 0x05, 0xd1, 0x50, 0x58, 0x00, 0x00, 0x00, 0x10, - 0x04, 0x02, 0x00, 0x00, 0x0a, 0x00, 0x17, 0xd2, - 0xf9, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - // Entry 400 - 43F - 0xd7, 0x6f, 0xff, 0xff, 0xdf, 0x7d, 0xbb, 0xff, - 0xff, 0xff, 0xf7, 0xf3, 0xef, 0xff, 0xff, 0xf7, - 0xff, 0xdf, 0xdb, 0x7f, 0xff, 0xff, 0x7f, 0xff, - 0xff, 0xff, 0xef, 0xff, 0xbc, 0xff, 0xff, 0xfb, - 0xff, 0xfb, 0xff, 0xde, 0x76, 0xbd, 0xff, 0xf7, - 0xff, 0xff, 0xf7, 0xff, 0xff, 0xdf, 0xf3, 0xfe, - 0xef, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x7f, 0xde, - 0xf7, 0xbb, 0xef, 0xf7, 0xff, 0xfb, 0xbf, 0xdf, - // Entry 440 - 47F - 0xfd, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0x5f, 0x7d, - 0x7f, 0xff, 0xff, 0xf7, 0xe5, 0xfc, 0xff, 0xfd, - 0x7f, 0x7f, 0xff, 0x9e, 0xae, 0xff, 0xee, 0xff, - 0x7f, 0xf7, 0x7b, 0x02, 0x82, 0x04, 0xff, 0xf7, - 0xff, 0xbf, 0xd7, 0xef, 0xfe, 0xdf, 0xf7, 0xfe, - 0xe2, 0x8e, 0xe7, 0xff, 0xf7, 0xff, 0x56, 0xbd, - 0xcd, 0xff, 0xfb, 0xff, 0xff, 0xdf, 0xef, 0xff, - 0xe5, 0xdf, 0x7d, 0x0f, 0xa7, 0x51, 0x04, 0x44, - // Entry 480 - 4BF - 0x13, 0xd0, 0x5d, 0xaf, 0xa6, 0xfd, 0xb9, 0xff, - 0x63, 0x5d, 0x5b, 0xff, 0xff, 0xbf, 0x3f, 0x20, - 0x14, 0x00, 0x57, 0x51, 0x82, 0x65, 0xf5, 0x49, - 0xe2, 0xff, 0xfc, 0xdf, 0x00, 0x05, 0xc5, 0x05, - 0x00, 0x22, 0x00, 0x74, 0x69, 0x10, 0x08, 0x04, - 0x41, 0x00, 0x01, 0x06, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x51, 0x60, 0x05, 0x04, 0x01, 0x00, 0x00, - 0x06, 0x01, 0x20, 0x00, 0x18, 0x01, 0x92, 0xb1, - // Entry 4C0 - 4FF - 0xfd, 0x67, 0x4b, 0x06, 0x95, 0x06, 0x57, 0xed, - 0xfb, 0x4c, 0x9d, 0x7b, 0x83, 0x04, 0x62, 0x40, - 0x00, 0x15, 0x42, 0x00, 0x00, 0x00, 0x54, 0x83, - 0xf9, 0x4f, 0x10, 0x8c, 0xc9, 0x46, 0xde, 0xf7, - 0x13, 0x31, 0x00, 0x20, 0x00, 0x00, 0x00, 0x90, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x10, 0x00, - 0x01, 0x40, 0x00, 0xf0, 0x5b, 0xf4, 0xbe, 0x7d, - 0xba, 0xcf, 0xf7, 0xaf, 0x42, 0x04, 0x84, 0x41, - // Entry 500 - 53F - 0xb0, 0xff, 0x79, 0x7a, 0x04, 0x00, 0x00, 0x49, - 0x2d, 0x14, 0x27, 0x77, 0xed, 0xf1, 0xbf, 0xef, - 0x3f, 0x00, 0x00, 0x02, 0xc6, 0xa0, 0x1e, 0xfc, - 0xbb, 0xff, 0xfd, 0xfb, 0xb7, 0xfd, 0xf5, 0xff, - 0xfd, 0xfc, 0xd5, 0xed, 0x47, 0xf4, 0x7f, 0x10, - 0x01, 0x01, 0x84, 0x6d, 0xff, 0xf7, 0xdd, 0xf9, - 0x5f, 0x05, 0x86, 0xef, 0xf5, 0x77, 0xbd, 0x3c, - 0x00, 0x00, 0x00, 0x43, 0x71, 0x42, 0x00, 0x40, - // Entry 540 - 57F - 0x00, 0x00, 
0x01, 0x43, 0x19, 0x00, 0x08, 0x00, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - // Entry 580 - 5BF - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xab, 0xbd, 0xe7, 0x57, 0xee, 0x13, 0x5d, - 0x09, 0xc1, 0x40, 0x21, 0xfa, 0x17, 0x01, 0x80, - 0x00, 0x00, 0x00, 0x00, 0xf0, 0xde, 0xff, 0xbf, - 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, - 0x00, 0x30, 0x95, 0xe3, 0x10, 0x00, 0x00, 0x00, - 0x11, 0x04, 0x16, 0x00, 0x01, 0x02, 0x00, 0x81, - 0xa3, 0x01, 0x50, 0x00, 0x00, 0x83, 0x11, 0x40, - // Entry 5C0 - 5FF - 0x00, 0x00, 0x00, 0xf0, 0xdd, 0x7b, 0x7e, 0x02, - 0xaa, 0x10, 0x5d, 0xd8, 0x52, 0x00, 0x80, 0x20, - 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x02, 0x02, - 0x19, 0x00, 0x10, 0x02, 0x10, 0x61, 0x5a, 0x9d, - 0x31, 0x00, 0x00, 0x00, 0x01, 0x50, 0x02, 0x20, - 0x00, 0x00, 0x01, 0x00, 0x42, 0x00, 0x20, 0x00, - 0x00, 0x1f, 0xdf, 0xf2, 0xfd, 0xff, 0xfd, 0x3f, - 0x9f, 0x18, 0xcf, 0x9c, 0xbf, 0xaf, 0x5f, 0xfe, - // Entry 600 - 63F - 0x7b, 0x4b, 0x40, 0x10, 0xe1, 0xfd, 0xaf, 0xfd, - 0xb7, 0xf7, 0xff, 0xf3, 0xdf, 0xff, 0x6f, 0xf1, - 0x7b, 0xf1, 0x7f, 0xdf, 0x7f, 0xbf, 0xfe, 0xb7, - 0xee, 0x1c, 0xfb, 0xdb, 0xef, 0xdf, 0xff, 0xfd, - 0x7e, 0xbe, 0x57, 0xff, 0x6f, 0x81, 0x76, 0x1f, - 0xd4, 0x77, 0xf5, 0xfd, 0xff, 0xff, 0xeb, 0xfe, - 0xbf, 0x5f, 0x57, 0x1b, 0xeb, 0x5f, 0x50, 0x18, - 0x02, 0xfa, 0xff, 0x9d, 0x15, 0x97, 0x15, 0x0f, - // Entry 640 - 67F - 0x75, 0xc4, 0x7d, 0x81, 0x82, 0xf1, 0xd7, 0x7e, - 0xff, 0xff, 0xff, 0xef, 0xff, 0xfd, 0xdd, 0xde, - 0xfc, 0xfd, 0xf6, 0x5f, 0x7a, 0x1f, 0x40, 0x98, - 0x02, 0xff, 0xe3, 0xff, 0xf3, 0xd6, 0xf2, 0xff, - 0xfb, 0xdf, 0x7d, 0x50, 0x1e, 0x15, 0x7b, 0xb4, - 0xf5, 0xbe, 0xff, 0xff, 0xf3, 0xf7, 0xff, 0xf7, - 0x7f, 0xff, 0xff, 0xbe, 0xdb, 0xf7, 0xd7, 0xf9, - 0xef, 0x2f, 0x80, 0xbf, 0xc5, 0xff, 0xff, 0xf3, - // Entry 680 - 6BF - 0x97, 0x9d, 0xff, 0xff, 0xf7, 0xcf, 0xfd, 0xbf, - 0xde, 0x7f, 0x06, 0x1d, 0x57, 0xff, 0xf8, 0xda, - 0x5d, 0xce, 0x7d, 0x16, 0xb9, 0xea, 0x69, 0xa0, - 0x1a, 0x20, 0x00, 0x30, 0x02, 0x04, 0x24, 0x48, - 0x04, 0x00, 0x00, 0x40, 0xd4, 0x02, 0x04, 0x00, - 0x00, 0x04, 0x00, 0x04, 0x00, 0x20, 0x01, 0x06, - 0x50, 0x00, 0x08, 0x00, 0x00, 0x00, 0x24, 0x00, - 0x04, 0x00, 0x10, 0x8c, 0x58, 0xd5, 0x0d, 0x0f, - // Entry 6C0 - 6FF - 0x14, 0x4d, 0xf1, 0x16, 0x44, 0xd1, 0x42, 0x08, - 0x40, 0x00, 0x00, 0x40, 0x00, 0x08, 0x00, 0x00, - 0x00, 0xdc, 0xff, 0xeb, 0x1f, 0x58, 0x08, 0x41, - 0x04, 0xa0, 0x04, 0x00, 0x30, 0x12, 0x40, 0x22, - 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0x10, 0x10, 0xaf, - 0x6f, 0x93, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x80, 0x80, 0x25, 0x00, 0x00, - // Entry 700 - 73F - 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, - 0x80, 0x86, 0xc2, 0x02, 0x00, 0x00, 0x00, 0x01, - 0xdf, 0x18, 0x00, 0x00, 0x02, 0xf0, 0xfd, 0x79, - 0x3b, 0x00, 0x25, 0x00, 0x00, 0x00, 0x02, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, - 0x03, 0x00, 0x09, 0x20, 0x00, 0x00, 0x01, 0x00, - 0x00, 0x81, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // Entry 740 - 77F - 0x00, 0x00, 0x00, 0xef, 0xf7, 0xfd, 0xcf, 0x7e, - 0xa0, 0x11, 0x10, 0x00, 0x00, 0x92, 0x01, 0x44, - 0xcd, 0xf9, 0x5e, 0x00, 0x01, 0x00, 0x30, 0x14, - 0x04, 0x55, 0x10, 0x01, 0x04, 0xf6, 0x3f, 0x7a, 
- 0x05, 0x04, 0x00, 0xb0, 0x80, 0x00, 0x55, 0x55, - 0x97, 0x7c, 0x9f, 0x71, 0xcc, 0x78, 0xd1, 0x43, - 0xf5, 0x57, 0x67, 0x14, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x2c, 0xf7, 0xdb, 0x1f, 0x54, 0x60, - // Entry 780 - 7BF - 0x03, 0x68, 0x01, 0x10, 0x8b, 0x38, 0xaa, 0x01, - 0x00, 0x00, 0x30, 0x00, 0x24, 0x44, 0x00, 0x00, - 0x10, 0x03, 0x11, 0x02, 0x01, 0x00, 0x00, 0xf0, - 0xf5, 0xff, 0xd5, 0xd7, 0xbc, 0x70, 0xd6, 0x78, - 0x78, 0x15, 0x50, 0x00, 0xa4, 0x84, 0xe9, 0x41, - 0x00, 0x00, 0x00, 0x6b, 0x39, 0x52, 0x74, 0x00, - 0xe8, 0x30, 0x90, 0x6a, 0x92, 0x00, 0x00, 0x02, - 0xff, 0xef, 0xff, 0x4f, 0x85, 0x53, 0xf4, 0xed, - // Entry 7C0 - 7FF - 0xdd, 0xbf, 0x72, 0x19, 0xc7, 0x0c, 0xf5, 0x42, - 0x54, 0xdd, 0x77, 0x14, 0x00, 0x80, 0xc0, 0x56, - 0xcc, 0x16, 0x9e, 0xfb, 0x35, 0x7d, 0xef, 0xff, - 0xbd, 0xa4, 0xaf, 0x01, 0x44, 0x18, 0x01, 0x5d, - 0x4e, 0x4a, 0x08, 0x50, 0x28, 0x30, 0xe0, 0x80, - 0x10, 0x20, 0x24, 0x00, 0xff, 0x3f, 0xdf, 0x67, - 0xfe, 0x01, 0x06, 0x88, 0x0a, 0x40, 0x16, 0x01, - 0x01, 0x15, 0x2b, 0x3e, 0x01, 0x00, 0x00, 0x10, - // Entry 800 - 83F - 0x90, 0x69, 0x45, 0x02, 0x02, 0x01, 0xe1, 0xbf, - 0xbf, 0x03, 0x00, 0x00, 0x10, 0xd4, 0xa7, 0xd1, - 0x54, 0x9e, 0x44, 0xdf, 0xfd, 0x8f, 0x66, 0xb3, - 0x55, 0x20, 0xd4, 0xc3, 0xd8, 0x30, 0x3d, 0x80, - 0x00, 0x00, 0x00, 0x4c, 0xd4, 0x11, 0xc5, 0x84, - 0x6e, 0x50, 0x00, 0x22, 0x50, 0x6e, 0xbf, 0xdb, - 0x07, 0x00, 0x20, 0x10, 0x84, 0xb2, 0x45, 0x10, - 0x06, 0x44, 0x00, 0x00, 0x12, 0x02, 0x11, 0x00, - // Entry 840 - 87F - 0xf0, 0xfb, 0xfd, 0x3f, 0x05, 0x00, 0x12, 0x81, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x02, - 0x00, 0x00, 0x00, 0x00, 0x03, 0x30, 0x02, 0x28, - 0x84, 0x00, 0x33, 0xc0, 0x23, 0x24, 0x00, 0x00, - 0x00, 0xcb, 0xe4, 0x3a, 0x42, 0xc8, 0x14, 0xf1, - 0xef, 0xff, 0x7f, 0x16, 0x01, 0x01, 0x84, 0x50, - 0x07, 0xfc, 0xff, 0xff, 0x0f, 0x01, 0x00, 0x40, - 0x10, 0x38, 0x01, 0x01, 0x1c, 0x12, 0x40, 0xe1, - // Entry 880 - 8BF - 0x76, 0x16, 0x08, 0x03, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x24, - 0x0a, 0x00, 0x80, 0x00, 0x00, -} - -// altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives -// to 2-letter language codes that cannot be derived using the method described above. -// Each 3-letter code is followed by its 1-byte langID. -var altLangISO3 tag.Index = "---\x00cor\x00hbs\x01heb\x02kin\x03spa\x04yid\x05\xff\xff\xff\xff" - -// altLangIndex is used to convert indexes in altLangISO3 to langIDs. -// Size: 12 bytes, 6 elements -var altLangIndex = [6]uint16{ - 0x014a, 0x0225, 0x0105, 0x020a, 0x009d, 0x010b, -} - -// langAliasMap maps langIDs to their suggested replacements. 
-// Size: 640 bytes, 160 elements -var langAliasMap = [160]fromTo{ - 0: {from: 0xc4, to: 0xda}, - 1: {from: 0xff, to: 0xf8}, - 2: {from: 0x105, to: 0xe0}, - 3: {from: 0x10b, to: 0x2b9}, - 4: {from: 0x110, to: 0x10f}, - 5: {from: 0x192, to: 0x203}, - 6: {from: 0x1af, to: 0x1c1}, - 7: {from: 0x225, to: 0x23b}, - 8: {from: 0x268, to: 0xaa}, - 9: {from: 0x274, to: 0x252}, - 10: {from: 0x27d, to: 0xd}, - 11: {from: 0x2d5, to: 0x2db}, - 12: {from: 0x326, to: 0x93}, - 13: {from: 0x3c7, to: 0x1c48}, - 14: {from: 0x3e8, to: 0x23a}, - 15: {from: 0x3f9, to: 0x23a}, - 16: {from: 0x484, to: 0x15}, - 17: {from: 0x48f, to: 0xf3}, - 18: {from: 0x4d5, to: 0x1f38}, - 19: {from: 0x54a, to: 0x23}, - 20: {from: 0x550, to: 0x2732}, - 21: {from: 0x55c, to: 0x24}, - 22: {from: 0x57d, to: 0xa1}, - 23: {from: 0x5a3, to: 0x26}, - 24: {from: 0x5ac, to: 0x42}, - 25: {from: 0x615, to: 0x5a7}, - 26: {from: 0x65a, to: 0xc7a}, - 27: {from: 0x786, to: 0x1a5}, - 28: {from: 0x7cd, to: 0x16e}, - 29: {from: 0x7d4, to: 0x59}, - 30: {from: 0x855, to: 0x30b9}, - 31: {from: 0x8cf, to: 0x2c4}, - 32: {from: 0x90c, to: 0x23f1}, - 33: {from: 0x915, to: 0x95a}, - 34: {from: 0x932, to: 0x24f}, - 35: {from: 0x953, to: 0x3fc0}, - 36: {from: 0x956, to: 0x2c4}, - 37: {from: 0x995, to: 0x2b3e}, - 38: {from: 0x9c5, to: 0x2f18}, - 39: {from: 0xa50, to: 0x73}, - 40: {from: 0xa9f, to: 0x79}, - 41: {from: 0xb5f, to: 0x8a}, - 42: {from: 0xb6e, to: 0x1a2}, - 43: {from: 0xb8f, to: 0xb92}, - 44: {from: 0xb95, to: 0x2c8}, - 45: {from: 0xc76, to: 0x1df1}, - 46: {from: 0xc85, to: 0x2c31}, - 47: {from: 0xcd0, to: 0x1bd}, - 48: {from: 0xe67, to: 0x9f}, - 49: {from: 0xe9b, to: 0x179}, - 50: {from: 0xf37, to: 0xfc}, - 51: {from: 0x1010, to: 0xd}, - 52: {from: 0x11bb, to: 0xaf}, - 53: {from: 0x1207, to: 0xa6}, - 54: {from: 0x12b6, to: 0xb32}, - 55: {from: 0x12ba, to: 0x1d2}, - 56: {from: 0x12c9, to: 0x145c}, - 57: {from: 0x1317, to: 0x111}, - 58: {from: 0x131a, to: 0x85}, - 59: {from: 0x133a, to: 0x3a46}, - 60: {from: 0x1401, to: 0xcc}, - 61: {from: 0x145f, to: 0x9a}, - 62: {from: 0x1497, to: 0x278f}, - 63: {from: 0x14af, to: 0xca}, - 64: {from: 0x14be, to: 0xcd6}, - 65: {from: 0x1511, to: 0x12bb}, - 66: {from: 0x15a0, to: 0x154d}, - 67: {from: 0x15ad, to: 0x168a}, - 68: {from: 0x1621, to: 0x23f}, - 69: {from: 0x1710, to: 0x1a98}, - 70: {from: 0x180b, to: 0x2947}, - 71: {from: 0x1821, to: 0x102}, - 72: {from: 0x18f1, to: 0x104}, - 73: {from: 0x191d, to: 0x12ac}, - 74: {from: 0x1dcf, to: 0x3548}, - 75: {from: 0x1dd4, to: 0x1e74}, - 76: {from: 0x1df1, to: 0x18f}, - 77: {from: 0x1e7a, to: 0x145}, - 78: {from: 0x1e85, to: 0x13b}, - 79: {from: 0x1e89, to: 0x122}, - 80: {from: 0x1e90, to: 0x138}, - 81: {from: 0x1ea6, to: 0x1f82}, - 82: {from: 0x1ecc, to: 0x147}, - 83: {from: 0x1f30, to: 0x8d}, - 84: {from: 0x1f65, to: 0x12f8}, - 85: {from: 0x1f7d, to: 0x4235}, - 86: {from: 0x1f8b, to: 0x371a}, - 87: {from: 0x1fc4, to: 0x8d}, - 88: {from: 0x1fce, to: 0x8d}, - 89: {from: 0x1ff9, to: 0x6c1}, - 90: {from: 0x20ad, to: 0x2fbd}, - 91: {from: 0x2119, to: 0x30fc}, - 92: {from: 0x2209, to: 0x170}, - 93: {from: 0x227b, to: 0x18c}, - 94: {from: 0x2287, to: 0x189}, - 95: {from: 0x2291, to: 0x19a}, - 96: {from: 0x22e7, to: 0x8f2}, - 97: {from: 0x2340, to: 0x69}, - 98: {from: 0x23d5, to: 0x179}, - 99: {from: 0x2460, to: 0x244b}, - 100: {from: 0x2490, to: 0x1f4}, - 101: {from: 0x24be, to: 0x3a46}, - 102: {from: 0x24fc, to: 0x244b}, - 103: {from: 0x2520, to: 0x40ef}, - 104: {from: 0x2686, to: 0x25ce}, - 105: {from: 0x26ab, to: 0x1b4}, - 106: {from: 0x271d, to: 0x2b3e}, - 107: 
{from: 0x28b1, to: 0x1d1}, - 108: {from: 0x2993, to: 0x1d3}, - 109: {from: 0x29d6, to: 0x3a46}, - 110: {from: 0x2a93, to: 0x1ee}, - 111: {from: 0x2aaa, to: 0x32e}, - 112: {from: 0x2ade, to: 0xa4}, - 113: {from: 0x2adf, to: 0xa4}, - 114: {from: 0x2b96, to: 0x183}, - 115: {from: 0x2b9f, to: 0x1763}, - 116: {from: 0x2bb1, to: 0x2b2c}, - 117: {from: 0x2bb8, to: 0x152}, - 118: {from: 0x2beb, to: 0x37}, - 119: {from: 0x2bfc, to: 0x2019}, - 120: {from: 0x2c37, to: 0x2c32}, - 121: {from: 0x2c86, to: 0x2c6e}, - 122: {from: 0x2f2a, to: 0x1f1}, - 123: {from: 0x30fd, to: 0x3125}, - 124: {from: 0x31c1, to: 0x203}, - 125: {from: 0x3285, to: 0x1667}, - 126: {from: 0x337d, to: 0x22a}, - 127: {from: 0x33ef, to: 0x132}, - 128: {from: 0x340d, to: 0x215}, - 129: {from: 0x3494, to: 0x248}, - 130: {from: 0x3557, to: 0x8d}, - 131: {from: 0x35ad, to: 0x3689}, - 132: {from: 0x35c2, to: 0x2a32}, - 133: {from: 0x35c6, to: 0x4c}, - 134: {from: 0x35c9, to: 0x2fbf}, - 135: {from: 0x3603, to: 0x373d}, - 136: {from: 0x3629, to: 0x3d57}, - 137: {from: 0x363c, to: 0x376e}, - 138: {from: 0x364b, to: 0x1d3b}, - 139: {from: 0x364c, to: 0x2c31}, - 140: {from: 0x36f3, to: 0x26a}, - 141: {from: 0x38e5, to: 0xb28}, - 142: {from: 0x390f, to: 0xe91}, - 143: {from: 0x3a30, to: 0x28d}, - 144: {from: 0x3d54, to: 0x7f}, - 145: {from: 0x3f9f, to: 0x828}, - 146: {from: 0x4055, to: 0x30a}, - 147: {from: 0x4090, to: 0x3cf7}, - 148: {from: 0x410f, to: 0x13a}, - 149: {from: 0x4162, to: 0x3462}, - 150: {from: 0x4164, to: 0x86}, - 151: {from: 0x4246, to: 0x30b9}, - 152: {from: 0x427a, to: 0x2b9}, - 153: {from: 0x4361, to: 0x21a0}, - 154: {from: 0x4374, to: 0x2473}, - 155: {from: 0x43a7, to: 0x4645}, - 156: {from: 0x4445, to: 0x4437}, - 157: {from: 0x44d5, to: 0x44dc}, - 158: {from: 0x46ad, to: 0x19a}, - 159: {from: 0x473e, to: 0x2be}, -} - -// Size: 160 bytes, 160 elements -var langAliasTypes = [160]langAliasType{ - // Entry 0 - 3F - 0, 0, 0, 0, 0, 0, 1, 2, 2, 0, 1, 0, 0, 1, 2, 1, - 1, 2, 0, 1, 0, 1, 2, 1, 1, 0, 0, 2, 1, 1, 0, 2, - 0, 0, 1, 0, 1, 0, 0, 1, 2, 1, 1, 1, 1, 0, 0, 2, - 1, 1, 1, 1, 2, 1, 0, 1, 1, 2, 2, 0, 1, 2, 0, 1, - // Entry 40 - 7F - 0, 1, 1, 1, 1, 0, 0, 2, 1, 0, 0, 0, 1, 1, 1, 1, - 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 2, 2, 2, - 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, - 2, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 2, 0, 2, 1, - // Entry 80 - BF - 1, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, - 2, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, -} - -const ( - _Latn = 82 - _Hani = 50 - _Hans = 52 - _Hant = 53 - _Qaaa = 131 - _Qaai = 139 - _Qabx = 180 - _Zinh = 224 - _Zyyy = 229 - _Zzzz = 230 -) - -// script is an alphabetically sorted list of ISO 15924 codes. The index -// of the script in the string, divided by 4, is the internal scriptID. 
-var script tag.Index = "" + // Size: 928 bytes - "----AdlmAfakAghbAhomArabAranArmiArmnAvstBaliBamuBassBatkBengBhksBlisBopo" + - "BrahBraiBugiBuhdCakmCansCariChamCherCirtCoptCprtCyrlCyrsDevaDsrtDuplEgyd" + - "EgyhEgypElbaEthiGeokGeorGlagGothGranGrekGujrGuruHanbHangHaniHanoHansHant" + - "HatrHebrHiraHluwHmngHrktHungIndsItalJamoJavaJpanJurcKaliKanaKharKhmrKhoj" + - "KitlKitsKndaKoreKpelKthiLanaLaooLatfLatgLatnLekeLepcLimbLinaLinbLisuLoma" + - "LyciLydiMahjMandManiMarcMayaMendMercMeroMlymModiMongMoonMrooMteiMultMymr" + - "NarbNbatNewaNkgbNkooNshuOgamOlckOrkhOryaOsgeOsmaPalmPaucPermPhagPhliPhlp" + - "PhlvPhnxPiqdPlrdPrtiQaaaQaabQaacQaadQaaeQaafQaagQaahQaaiQaajQaakQaalQaam" + - "QaanQaaoQaapQaaqQaarQaasQaatQaauQaavQaawQaaxQaayQaazQabaQabbQabcQabdQabe" + - "QabfQabgQabhQabiQabjQabkQablQabmQabnQaboQabpQabqQabrQabsQabtQabuQabvQabw" + - "QabxRjngRoroRunrSamrSaraSarbSaurSgnwShawShrdSiddSindSinhSoraSundSyloSyrc" + - "SyreSyrjSyrnTagbTakrTaleTaluTamlTangTavtTeluTengTfngTglgThaaThaiTibtTirh" + - "UgarVaiiVispWaraWoleXpeoXsuxYiiiZinhZmthZsyeZsymZxxxZyyyZzzz\xff\xff\xff" + - "\xff" - -// suppressScript is an index from langID to the dominant script for that language, -// if it exists. If a script is given, it should be suppressed from the language tag. -// Size: 713 bytes, 713 elements -var suppressScript = [713]uint8{ - // Entry 0 - 3F - 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x27, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // Entry 40 - 7F - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, - // Entry 80 - BF - 0x52, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, - 0xd4, 0x00, 0x00, 0xd6, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x2d, 0x52, 0x52, 0x52, 0x00, 0x52, - 0x00, 0x52, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x52, 0x00, 0x00, 0x00, 0x52, 0x52, 0x00, 0x52, - 0x00, 0x00, 0x52, 0x52, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x52, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, - // Entry C0 - FF - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x52, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x52, 0x2e, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x37, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x52, - 0x00, 0x52, 0x52, 0x08, 0x00, 0x00, 0x00, 0x00, - 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, - // Entry 100 - 13F - 0x00, 0x00, 0x52, 0x52, 0x00, 0x37, 0x00, 0x41, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, - 0x00, 0x52, 0x00, 0x46, 0x00, 0x4a, 0x4b, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // Entry 140 - 17F - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, - 0x00, 
0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x52, 0x4f, 0x00, 0x00, - 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, - // Entry 180 - 1BF - 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, - 0x52, 0x00, 0x00, 0x00, 0x1e, 0x64, 0x00, 0x00, - 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x20, 0x00, - 0x00, 0x00, 0x52, 0x52, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x00, 0x00, - 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x52, - 0x00, 0x52, 0x00, 0x52, 0x20, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x52, 0x00, 0x52, 0x00, 0x52, - // Entry 1C0 - 1FF - 0x00, 0x52, 0x00, 0x00, 0x00, 0x70, 0x52, 0x00, - 0x52, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x52, 0x75, 0x00, 0x00, 0x00, 0x2f, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x52, - 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, - // Entry 200 - 23F - 0x00, 0x52, 0x00, 0x52, 0x00, 0x00, 0x00, 0x1e, - 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xc1, 0x00, 0x52, 0x00, 0x52, 0x00, 0x00, 0x52, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x52, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, - // Entry 240 - 27F - 0x52, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x52, - 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0xcd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xd0, 0x52, 0x00, 0x00, 0x00, 0xd5, 0x00, 0x00, - 0x00, 0x27, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, - 0x52, 0x00, 0x52, 0x52, 0x52, 0x00, 0x52, 0x52, - 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, - // Entry 280 - 2BF - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x52, - 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // Entry 2C0 - 2FF - 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, - 0x00, -} - -const ( - _001 = 1 - _419 = 30 - _BR = 64 - _CA = 72 - _ES = 109 - _GB = 121 - _MD = 186 - _PT = 236 - _UK = 304 - _US = 306 - _ZZ = 354 - _XA = 320 - _XC = 322 - _XK = 330 -) - -// isoRegionOffset needs to be added to the index of regionISO to obtain the regionID -// for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for -// the UN.M49 codes used for groups.) -const isoRegionOffset = 31 - -// regionTypes defines the status of a region for various standards. 
-// Size: 355 bytes, 355 elements -var regionTypes = [355]uint8{ - // Entry 0 - 3F - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - // Entry 40 - 7F - 0x06, 0x06, 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, 0x04, 0x00, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, - 0x04, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x04, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x04, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - // Entry 80 - BF - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x00, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, - // Entry C0 - FF - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, - 0x06, 0x06, 0x00, 0x06, 0x04, 0x06, 0x06, 0x06, - 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, - 0x00, 0x06, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - // Entry 100 - 13F - 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x02, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, - // Entry 140 - 17F - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x04, 0x06, 0x06, 0x04, 0x06, 0x06, - 0x04, 0x06, 0x05, -} - -// regionISO holds a list of alphabetically sorted 2-letter ISO region codes. -// Each 2-letter codes is followed by two bytes with the following meaning: -// - [A-Z}{2}: the first letter of the 2-letter code plus these two -// letters form the 3-letter ISO code. -// - 0, n: index into altRegionISO3. 
-var regionISO tag.Index = "" + // Size: 1300 bytes - "AAAAACSCADNDAEREAFFGAGTGAIIAALLBAMRMANNTAOGOAQTAARRGASSMATUTAUUSAWBWAXLA" + - "AZZEBAIHBBRBBDGDBEELBFFABGGRBHHRBIDIBJENBLLMBMMUBNRNBOOLBQESBRRABSHSBTTN" + - "BUURBVVTBWWABYLRBZLZCAANCCCKCDODCFAFCGOGCHHECIIVCKOKCLHLCMMRCNHNCOOLCPPT" + - "CRRICS\x00\x00CTTECUUBCVPVCWUWCXXRCYYPCZZEDDDRDEEUDGGADJJIDKNKDMMADOOMDY" + - "HYDZZAEA ECCUEESTEGGYEHSHERRIESSPETTHEU\x00\x03FIINFJJIFKLKFMSMFOROFQ" + - "\x00\x18FRRAFXXXGAABGBBRGDRDGEEOGFUFGGGYGHHAGIIBGLRLGMMBGNINGPLPGQNQGRRC" + - "GS\x00\x06GTTMGUUMGWNBGYUYHKKGHMMDHNNDHRRVHTTIHUUNHVVOIC IDDNIERLILSRIM" + - "MNINNDIOOTIQRQIRRNISSLITTAJEEYJMAMJOORJPPNJTTNKEENKGGZKHHMKIIRKM\x00\x09" + - "KNNAKP\x00\x0cKRORKWWTKY\x00\x0fKZAZLAAOLBBNLCCALIIELKKALRBRLSSOLTTULUUX" + - "LVVALYBYMAARMCCOMDDAMENEMFAFMGDGMHHLMIIDMKKDMLLIMMMRMNNGMOACMPNPMQTQMRRT" + - "MSSRMTLTMUUSMVDVMWWIMXEXMYYSMZOZNAAMNCCLNEERNFFKNGGANHHBNIICNLLDNOORNPPL" + - "NQ\x00\x1eNRRUNTTZNUIUNZZLOMMNPAANPCCIPEERPFYFPGNGPHHLPKAKPLOLPM\x00\x12" + - "PNCNPRRIPSSEPTRTPUUSPWLWPYRYPZCZQAATQMMMQNNNQOOOQPPPQQQQQRRRQSSSQTTTQU" + - "\x00\x03QVVVQWWWQXXXQYYYQZZZREEURHHOROOURS\x00\x15RUUSRWWASAAUSBLBSCYCSD" + - "DNSEWESGGPSHHNSIVNSJJMSKVKSLLESMMRSNENSOOMSRURSSSDSTTPSUUNSVLVSXXMSYYRSZ" + - "WZTAAATCCATDCDTF\x00\x18TGGOTHHATJJKTKKLTLLSTMKMTNUNTOONTPMPTRURTTTOTVUV" + - "TWWNTZZAUAKRUGGAUK UMMIUSSAUYRYUZZBVAATVCCTVDDRVEENVGGBVIIRVNNMVUUTWFLF" + - "WKAKWSSMXAAAXBBBXCCCXDDDXEEEXFFFXGGGXHHHXIIIXJJJXKKKXLLLXMMMXNNNXOOOXPPP" + - "XQQQXRRRXSSSXTTTXUUUXVVVXWWWXXXXXYYYXZZZYDMDYEEMYT\x00\x1bYUUGZAAFZMMBZR" + - "ARZWWEZZZZ\xff\xff\xff\xff" - -// altRegionISO3 holds a list of 3-letter region codes that cannot be -// mapped to 2-letter codes using the default algorithm. This is a short list. -var altRegionISO3 string = "SCGQUUSGSCOMPRKCYMSPMSRBATFMYTATN" - -// altRegionIDs holds a list of regionIDs the positions of which match those -// of the 3-letter ISO codes in altRegionISO3. -// Size: 22 bytes, 11 elements -var altRegionIDs = [11]uint16{ - 0x0056, 0x006f, 0x0086, 0x00a6, 0x00a8, 0x00ab, 0x00e8, 0x0103, - 0x011f, 0x015c, 0x00da, -} - -// Size: 80 bytes, 20 elements -var regionOldMap = [20]fromTo{ - 0: {from: 0x43, to: 0xc2}, - 1: {from: 0x57, to: 0xa5}, - 2: {from: 0x5e, to: 0x5f}, - 3: {from: 0x65, to: 0x3a}, - 4: {from: 0x77, to: 0x76}, - 5: {from: 0x91, to: 0x36}, - 6: {from: 0xa1, to: 0x131}, - 7: {from: 0xbf, to: 0x131}, - 8: {from: 0xd5, to: 0x13c}, - 9: {from: 0xda, to: 0x2a}, - 10: {from: 0xed, to: 0x131}, - 11: {from: 0xf0, to: 0xe0}, - 12: {from: 0xfa, to: 0x6f}, - 13: {from: 0x101, to: 0x161}, - 14: {from: 0x128, to: 0x124}, - 15: {from: 0x130, to: 0x79}, - 16: {from: 0x137, to: 0x13b}, - 17: {from: 0x13e, to: 0x131}, - 18: {from: 0x15a, to: 0x15b}, - 19: {from: 0x160, to: 0x4a}, -} - -// m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are -// codes indicating collections of regions. 
-// Size: 710 bytes, 355 elements -var m49 = [355]int16{ - // Entry 0 - 3F - 0, 1, 2, 3, 5, 9, 11, 13, - 14, 15, 17, 18, 19, 21, 29, 30, - 34, 35, 39, 53, 54, 57, 61, 142, - 143, 145, 150, 151, 154, 155, 419, 958, - 0, 20, 784, 4, 28, 660, 8, 51, - 530, 24, 10, 32, 16, 40, 36, 533, - 248, 31, 70, 52, 50, 56, 854, 100, - 48, 108, 204, 652, 60, 96, 68, 535, - // Entry 40 - 7F - 76, 44, 64, 104, 74, 72, 112, 84, - 124, 166, 180, 140, 178, 756, 384, 184, - 152, 120, 156, 170, 0, 188, 891, 296, - 192, 132, 531, 162, 196, 203, 278, 276, - 0, 262, 208, 212, 214, 204, 12, 0, - 218, 233, 818, 732, 232, 724, 231, 967, - 246, 242, 238, 583, 234, 0, 250, 249, - 266, 826, 308, 268, 254, 831, 288, 292, - // Entry 80 - BF - 304, 270, 324, 312, 226, 300, 239, 320, - 316, 624, 328, 344, 334, 340, 191, 332, - 348, 854, 0, 360, 372, 376, 833, 356, - 86, 368, 364, 352, 380, 832, 388, 400, - 392, 581, 404, 417, 116, 296, 174, 659, - 408, 410, 414, 136, 398, 418, 422, 662, - 438, 144, 430, 426, 440, 442, 428, 434, - 504, 492, 498, 499, 663, 450, 584, 581, - // Entry C0 - FF - 807, 466, 104, 496, 446, 580, 474, 478, - 500, 470, 480, 462, 454, 484, 458, 508, - 516, 540, 562, 574, 566, 548, 558, 528, - 578, 524, 10, 520, 536, 570, 554, 512, - 591, 0, 604, 258, 598, 608, 586, 616, - 666, 612, 630, 275, 620, 581, 585, 600, - 591, 634, 959, 960, 961, 962, 963, 964, - 965, 966, 967, 968, 969, 970, 971, 972, - // Entry 100 - 13F - 638, 716, 642, 688, 643, 646, 682, 90, - 690, 729, 752, 702, 654, 705, 744, 703, - 694, 674, 686, 706, 740, 728, 678, 810, - 222, 534, 760, 748, 0, 796, 148, 260, - 768, 764, 762, 772, 626, 795, 788, 776, - 626, 792, 780, 798, 158, 834, 804, 800, - 826, 581, 840, 858, 860, 336, 670, 704, - 862, 92, 850, 704, 548, 876, 581, 882, - // Entry 140 - 17F - 973, 974, 975, 976, 977, 978, 979, 980, - 981, 982, 983, 984, 985, 986, 987, 988, - 989, 990, 991, 992, 993, 994, 995, 996, - 997, 998, 720, 887, 175, 891, 710, 894, - 180, 716, 999, -} - -// m49Index gives indexes into fromM49 based on the three most significant bits -// of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in -// fromM49[m49Index[msb39(code)]:m49Index[msb3(code)+1]] -// for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. -// The region code is stored in the 9 lsb of the indexed value. -// Size: 18 bytes, 9 elements -var m49Index = [9]int16{ - 0, 59, 107, 142, 180, 219, 258, 290, - 332, -} - -// fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details. 
-// Size: 664 bytes, 332 elements -var fromM49 = [332]uint16{ - // Entry 0 - 3F - 0x0201, 0x0402, 0x0603, 0x0823, 0x0a04, 0x1026, 0x1205, 0x142a, - 0x1606, 0x1866, 0x1a07, 0x1c08, 0x1e09, 0x202c, 0x220a, 0x240b, - 0x260c, 0x2821, 0x2a0d, 0x3029, 0x3824, 0x3a0e, 0x3c0f, 0x3e31, - 0x402b, 0x4410, 0x4611, 0x482e, 0x4e12, 0x502d, 0x5841, 0x6038, - 0x6434, 0x6627, 0x6833, 0x6a13, 0x6c14, 0x7035, 0x7215, 0x783c, - 0x7a16, 0x8042, 0x883e, 0x8c32, 0x9045, 0x9444, 0x9840, 0xa847, - 0xac98, 0xb507, 0xb939, 0xc03d, 0xc837, 0xd0c2, 0xd839, 0xe046, - 0xe8a4, 0xf051, 0xf848, 0x0859, 0x10ab, 0x184b, 0x1c17, 0x1e18, - // Entry 40 - 7F - 0x20b1, 0x2219, 0x291e, 0x2c1a, 0x2e1b, 0x3050, 0x341c, 0x361d, - 0x3852, 0x3d2c, 0x445b, 0x4c49, 0x5453, 0x5ca6, 0x5f5c, 0x644c, - 0x684a, 0x704f, 0x7855, 0x7e8e, 0x8058, 0x885c, 0x965d, 0x983a, - 0xa062, 0xa863, 0xac64, 0xb468, 0xbd18, 0xc484, 0xcc6e, 0xce6e, - 0xd06c, 0xd269, 0xd474, 0xdc72, 0xde86, 0xe471, 0xec70, 0xf030, - 0xf277, 0xf476, 0xfc7c, 0x04e3, 0x091f, 0x0c61, 0x1478, 0x187b, - 0x1c81, 0x26eb, 0x285f, 0x2c5e, 0x305f, 0x407e, 0x487f, 0x50a5, - 0x5885, 0x6080, 0x687a, 0x7083, 0x7888, 0x8087, 0x8882, 0x908a, - // Entry 80 - BF - 0x988f, 0x9c8c, 0xa135, 0xa88d, 0xb08b, 0xb890, 0xc09b, 0xc897, - 0xd093, 0xd89a, 0xe099, 0xe894, 0xf095, 0xf89c, 0x004e, 0x089e, - 0x10a0, 0x1cac, 0x209f, 0x28a2, 0x30a8, 0x34a9, 0x3caa, 0x42a3, - 0x44ad, 0x461e, 0x4cae, 0x54b3, 0x58b6, 0x5cb2, 0x64b7, 0x6cb0, - 0x70b4, 0x74b5, 0x7cc4, 0x84bd, 0x8ccc, 0x94ce, 0x9ccb, 0xa4c1, - 0xacc9, 0xb4c6, 0xbcc7, 0xc0ca, 0xc8cd, 0xd8b9, 0xe0c3, 0xe4ba, - 0xe6bb, 0xe8c8, 0xf0b8, 0xf8cf, 0x00df, 0x08d0, 0x10db, 0x18d9, - 0x20d7, 0x2428, 0x265a, 0x2a2f, 0x2d19, 0x2e3f, 0x30dc, 0x38d1, - // Entry C0 - FF - 0x493c, 0x54de, 0x5cd6, 0x64d2, 0x6cd4, 0x74dd, 0x7cd3, 0x84d8, - 0x88c5, 0x8b31, 0x8e73, 0x90be, 0x92ee, 0x94e6, 0x9ee0, 0xace4, - 0xb0ef, 0xb8e2, 0xc0e5, 0xc8e9, 0xd0e7, 0xd8ec, 0xe089, 0xe524, - 0xecea, 0xf4f1, 0xfd00, 0x0502, 0x0704, 0x0d05, 0x183b, 0x1d0c, - 0x26a7, 0x2825, 0x2caf, 0x2ebc, 0x34e8, 0x3d36, 0x4511, 0x4d16, - 0x5506, 0x5d12, 0x6103, 0x6508, 0x6d10, 0x7d0b, 0x7f0f, 0x813b, - 0x830d, 0x8513, 0x8d5e, 0x9961, 0xa15a, 0xa86d, 0xb115, 0xb309, - 0xb86b, 0xc109, 0xc914, 0xd10e, 0xd91b, 0xe10a, 0xe84d, 0xf11a, - // Entry 100 - 13F - 0xf522, 0xf921, 0x0120, 0x0923, 0x1127, 0x192a, 0x2022, 0x2926, - 0x3129, 0x3725, 0x391d, 0x3d2b, 0x412f, 0x492e, 0x4ec0, 0x5517, - 0x646a, 0x7479, 0x7e7d, 0x809d, 0x8296, 0x852d, 0x9132, 0xa53a, - 0xac36, 0xb533, 0xb934, 0xbd38, 0xd93d, 0xe53f, 0xed5b, 0xef5b, - 0xf656, 0xfd5f, 0x7c1f, 0x7ef2, 0x80f3, 0x82f4, 0x84f5, 0x86f6, - 0x88f7, 0x8af8, 0x8cf9, 0x8e6f, 0x90fb, 0x92fc, 0x94fd, 0x96fe, - 0x98ff, 0x9b40, 0x9d41, 0x9f42, 0xa143, 0xa344, 0xa545, 0xa746, - 0xa947, 0xab48, 0xad49, 0xaf4a, 0xb14b, 0xb34c, 0xb54d, 0xb74e, - // Entry 140 - 17F - 0xb94f, 0xbb50, 0xbd51, 0xbf52, 0xc153, 0xc354, 0xc555, 0xc756, - 0xc957, 0xcb58, 0xcd59, 0xcf62, -} - -// Size: 1444 bytes -var variantIndex = map[string]uint8{ - "1606nict": 0x0, - "1694acad": 0x1, - "1901": 0x2, - "1959acad": 0x3, - "1994": 0x45, - "1996": 0x4, - "abl1943": 0x5, - "alalc97": 0x47, - "aluku": 0x6, - "ao1990": 0x7, - "arevela": 0x8, - "arevmda": 0x9, - "baku1926": 0xa, - "balanka": 0xb, - "barla": 0xc, - "basiceng": 0xd, - "bauddha": 0xe, - "biscayan": 0xf, - "biske": 0x40, - "bohoric": 0x10, - "boont": 0x11, - "colb1945": 0x12, - "cornu": 0x13, - "dajnko": 0x14, - "ekavsk": 0x15, - "emodeng": 0x16, - "fonipa": 0x48, - "fonupa": 0x49, - "fonxsamp": 0x4a, - "hepburn": 0x17, - "heploc": 0x46, - "hognorsk": 
0x18, - "ijekavsk": 0x19, - "itihasa": 0x1a, - "jauer": 0x1b, - "jyutping": 0x1c, - "kkcor": 0x1d, - "kociewie": 0x1e, - "kscor": 0x1f, - "laukika": 0x20, - "lipaw": 0x41, - "luna1918": 0x21, - "metelko": 0x22, - "monoton": 0x23, - "ndyuka": 0x24, - "nedis": 0x25, - "newfound": 0x26, - "njiva": 0x42, - "nulik": 0x27, - "osojs": 0x43, - "oxendict": 0x28, - "pamaka": 0x29, - "petr1708": 0x2a, - "pinyin": 0x2b, - "polyton": 0x2c, - "puter": 0x2d, - "rigik": 0x2e, - "rozaj": 0x2f, - "rumgr": 0x30, - "scotland": 0x31, - "scouse": 0x32, - "simple": 0x4b, - "solba": 0x44, - "sotav": 0x33, - "surmiran": 0x34, - "sursilv": 0x35, - "sutsilv": 0x36, - "tarask": 0x37, - "uccor": 0x38, - "ucrcor": 0x39, - "ulster": 0x3a, - "unifon": 0x3b, - "vaidika": 0x3c, - "valencia": 0x3d, - "vallader": 0x3e, - "wadegile": 0x3f, -} - -// variantNumSpecialized is the number of specialized variants in variants. -const variantNumSpecialized = 71 - -// nRegionGroups is the number of region groups. -const nRegionGroups = 32 - -type likelyLangRegion struct { - lang uint16 - region uint16 -} - -// likelyScript is a lookup table, indexed by scriptID, for the most likely -// languages and regions given a script. -// Size: 928 bytes, 232 elements -var likelyScript = [232]likelyLangRegion{ - 1: {lang: 0xa6, region: 0x82}, - 3: {lang: 0x159, region: 0x104}, - 4: {lang: 0xc, region: 0x97}, - 5: {lang: 0x15, region: 0x6a}, - 7: {lang: 0x16, region: 0x9a}, - 8: {lang: 0xf3, region: 0x27}, - 9: {lang: 0x8, region: 0x9a}, - 10: {lang: 0x27, region: 0x93}, - 11: {lang: 0x2b, region: 0x51}, - 12: {lang: 0x55, region: 0xb2}, - 13: {lang: 0x2c, region: 0x93}, - 14: {lang: 0x4b, region: 0x34}, - 15: {lang: 0x20d, region: 0x97}, - 17: {lang: 0x2c4, region: 0x12c}, - 18: {lang: 0x1e5, region: 0x97}, - 19: {lang: 0xaf, region: 0x76}, - 20: {lang: 0x5b, region: 0x93}, - 21: {lang: 0x47, region: 0xe5}, - 22: {lang: 0x63, region: 0x34}, - 23: {lang: 0x73, region: 0x48}, - 24: {lang: 0x2a8, region: 0x129}, - 25: {lang: 0x6e, region: 0x13b}, - 26: {lang: 0x6c, region: 0x132}, - 28: {lang: 0x71, region: 0x6a}, - 29: {lang: 0xd0, region: 0x5c}, - 30: {lang: 0x207, region: 0x104}, - 32: {lang: 0xe1, region: 0x97}, - 34: {lang: 0xaf, region: 0x76}, - 37: {lang: 0x98, region: 0x6a}, - 38: {lang: 0x23a, region: 0x26}, - 39: {lang: 0x11, region: 0x6e}, - 41: {lang: 0x111, region: 0x7b}, - 42: {lang: 0x7d, region: 0x37}, - 43: {lang: 0xcf, region: 0x12e}, - 44: {lang: 0x20d, region: 0x97}, - 45: {lang: 0x9a, region: 0x85}, - 46: {lang: 0xd3, region: 0x97}, - 47: {lang: 0x1d7, region: 0x97}, - 48: {lang: 0x2c4, region: 0x12c}, - 49: {lang: 0x136, region: 0xa9}, - 50: {lang: 0x2c4, region: 0x52}, - 51: {lang: 0xe9, region: 0xe5}, - 52: {lang: 0x2c4, region: 0x52}, - 53: {lang: 0x2c4, region: 0x12c}, - 54: {lang: 0x18b, region: 0x99}, - 55: {lang: 0xe0, region: 0x95}, - 56: {lang: 0x107, region: 0xa0}, - 57: {lang: 0xe4, region: 0x129}, - 58: {lang: 0xe8, region: 0xad}, - 60: {lang: 0xf2, region: 0x90}, - 62: {lang: 0xa0, region: 0x9c}, - 63: {lang: 0x136, region: 0xa9}, - 64: {lang: 0x10f, region: 0x93}, - 65: {lang: 0x107, region: 0xa0}, - 67: {lang: 0x99, region: 0xc2}, - 68: {lang: 0x107, region: 0xa0}, - 69: {lang: 0x1eb, region: 0xe6}, - 70: {lang: 0x133, region: 0xa4}, - 71: {lang: 0x21a, region: 0x97}, - 74: {lang: 0x135, region: 0x97}, - 75: {lang: 0x136, region: 0xa9}, - 77: {lang: 0x40, region: 0x97}, - 78: {lang: 0x1c2, region: 0x121}, - 79: {lang: 0x165, region: 0xad}, - 84: {lang: 0x158, region: 0x97}, - 85: {lang: 0x15c, region: 0x97}, - 
86: {lang: 0x14f, region: 0x85}, - 87: {lang: 0xd0, region: 0x85}, - 88: {lang: 0x15e, region: 0x52}, - 90: {lang: 0x2aa, region: 0x129}, - 91: {lang: 0x2ab, region: 0x129}, - 92: {lang: 0xe1, region: 0x97}, - 93: {lang: 0x1a8, region: 0x9a}, - 94: {lang: 0x2ad, region: 0x52}, - 95: {lang: 0x4c, region: 0x52}, - 97: {lang: 0x17f, region: 0x110}, - 98: {lang: 0x2ae, region: 0x109}, - 99: {lang: 0x2ae, region: 0x109}, - 100: {lang: 0x18d, region: 0x97}, - 101: {lang: 0x196, region: 0x97}, - 102: {lang: 0x18f, region: 0x52}, - 104: {lang: 0x199, region: 0x34}, - 105: {lang: 0x190, region: 0x97}, - 106: {lang: 0x22b, region: 0xe6}, - 107: {lang: 0x1a5, region: 0xc2}, - 108: {lang: 0x2af, region: 0x106}, - 109: {lang: 0x16, region: 0x9f}, - 110: {lang: 0x1b5, region: 0xd9}, - 112: {lang: 0x179, region: 0x82}, - 114: {lang: 0x223, region: 0x94}, - 115: {lang: 0x212, region: 0x97}, - 116: {lang: 0x1d6, region: 0xc3}, - 117: {lang: 0x1d3, region: 0x97}, - 118: {lang: 0x1d5, region: 0x132}, - 119: {lang: 0x238, region: 0x113}, - 120: {lang: 0x16, region: 0x11a}, - 121: {lang: 0x7c, region: 0xc2}, - 122: {lang: 0x147, region: 0x104}, - 123: {lang: 0x172, region: 0x52}, - 124: {lang: 0x1d9, region: 0x9a}, - 125: {lang: 0x1d9, region: 0x52}, - 127: {lang: 0x1e3, region: 0xae}, - 129: {lang: 0xe5, region: 0x52}, - 130: {lang: 0x2b2, region: 0x9a}, - 181: {lang: 0x1f6, region: 0x93}, - 183: {lang: 0x1c4, region: 0x10a}, - 184: {lang: 0x234, region: 0x95}, - 186: {lang: 0x2b3, region: 0x15b}, - 187: {lang: 0x213, region: 0x97}, - 188: {lang: 0x1e, region: 0x132}, - 189: {lang: 0x9b, region: 0x79}, - 190: {lang: 0x20d, region: 0x97}, - 191: {lang: 0x20d, region: 0x97}, - 192: {lang: 0x21a, region: 0x97}, - 193: {lang: 0x228, region: 0xb1}, - 194: {lang: 0x23c, region: 0x97}, - 195: {lang: 0x244, region: 0x93}, - 196: {lang: 0x24e, region: 0x34}, - 197: {lang: 0x24f, region: 0x99}, - 201: {lang: 0x253, region: 0xe5}, - 202: {lang: 0x8a, region: 0x97}, - 203: {lang: 0x255, region: 0x52}, - 204: {lang: 0x126, region: 0x52}, - 205: {lang: 0x251, region: 0x97}, - 206: {lang: 0x27f, region: 0x52}, - 207: {lang: 0x48, region: 0x13b}, - 208: {lang: 0x258, region: 0x97}, - 210: {lang: 0x2c3, region: 0xb8}, - 211: {lang: 0xaa, region: 0xe5}, - 212: {lang: 0x90, region: 0xcb}, - 213: {lang: 0x25d, region: 0x121}, - 214: {lang: 0x4c, region: 0x52}, - 215: {lang: 0x177, region: 0x97}, - 216: {lang: 0x285, region: 0x11a}, - 217: {lang: 0x28e, region: 0xb2}, - 219: {lang: 0xec, region: 0x97}, - 221: {lang: 0x1e1, region: 0x9a}, - 222: {lang: 0xe, region: 0x99}, - 223: {lang: 0xfb, region: 0x52}, -} - -type likelyScriptRegion struct { - region uint16 - script uint8 - flags uint8 -} - -// likelyLang is a lookup table, indexed by langID, for the most likely -// scripts and regions given incomplete information. If more entries exist for a -// given language, region and script are the index and size respectively -// of the list in likelyLangList. 
-// Size: 2852 bytes, 713 elements -var likelyLang = [713]likelyScriptRegion{ - 0: {region: 0x132, script: 0x52, flags: 0x0}, - 1: {region: 0x6e, script: 0x52, flags: 0x0}, - 2: {region: 0x7b, script: 0x1e, flags: 0x0}, - 3: {region: 0x7e, script: 0x52, flags: 0x0}, - 4: {region: 0x93, script: 0x52, flags: 0x0}, - 5: {region: 0x12f, script: 0x52, flags: 0x0}, - 6: {region: 0x7e, script: 0x52, flags: 0x0}, - 7: {region: 0x104, script: 0x1e, flags: 0x0}, - 8: {region: 0x9a, script: 0x9, flags: 0x0}, - 9: {region: 0x126, script: 0x5, flags: 0x0}, - 10: {region: 0x15e, script: 0x52, flags: 0x0}, - 11: {region: 0x51, script: 0x52, flags: 0x0}, - 12: {region: 0x97, script: 0x4, flags: 0x0}, - 13: {region: 0x7e, script: 0x52, flags: 0x0}, - 14: {region: 0x99, script: 0xde, flags: 0x0}, - 15: {region: 0x14a, script: 0x52, flags: 0x0}, - 16: {region: 0x104, script: 0x1e, flags: 0x0}, - 17: {region: 0x6e, script: 0x27, flags: 0x0}, - 18: {region: 0xd4, script: 0x52, flags: 0x0}, - 20: {region: 0x93, script: 0x52, flags: 0x0}, - 21: {region: 0x6a, script: 0x5, flags: 0x0}, - 22: {region: 0x0, script: 0x3, flags: 0x1}, - 23: {region: 0x50, script: 0x52, flags: 0x0}, - 24: {region: 0x3e, script: 0x52, flags: 0x0}, - 25: {region: 0x66, script: 0x5, flags: 0x0}, - 26: {region: 0xb8, script: 0x5, flags: 0x0}, - 27: {region: 0x6a, script: 0x5, flags: 0x0}, - 28: {region: 0x97, script: 0xe, flags: 0x0}, - 29: {region: 0x12d, script: 0x52, flags: 0x0}, - 30: {region: 0x132, script: 0xbc, flags: 0x0}, - 31: {region: 0x6d, script: 0x52, flags: 0x0}, - 32: {region: 0x48, script: 0x52, flags: 0x0}, - 33: {region: 0x104, script: 0x1e, flags: 0x0}, - 34: {region: 0x97, script: 0x20, flags: 0x0}, - 35: {region: 0x3e, script: 0x52, flags: 0x0}, - 36: {region: 0x3, script: 0x5, flags: 0x1}, - 37: {region: 0x104, script: 0x1e, flags: 0x0}, - 38: {region: 0xe6, script: 0x5, flags: 0x0}, - 39: {region: 0x93, script: 0x52, flags: 0x0}, - 40: {region: 0xd9, script: 0x20, flags: 0x0}, - 41: {region: 0x2d, script: 0x52, flags: 0x0}, - 42: {region: 0x51, script: 0x52, flags: 0x0}, - 43: {region: 0x51, script: 0xb, flags: 0x0}, - 44: {region: 0x93, script: 0x52, flags: 0x0}, - 45: {region: 0x51, script: 0x52, flags: 0x0}, - 46: {region: 0x4e, script: 0x52, flags: 0x0}, - 47: {region: 0x46, script: 0x1e, flags: 0x0}, - 48: {region: 0x109, script: 0x5, flags: 0x0}, - 49: {region: 0x15f, script: 0x52, flags: 0x0}, - 50: {region: 0x93, script: 0x52, flags: 0x0}, - 51: {region: 0x12d, script: 0x52, flags: 0x0}, - 52: {region: 0x51, script: 0x52, flags: 0x0}, - 53: {region: 0x97, script: 0xcd, flags: 0x0}, - 54: {region: 0xe6, script: 0x5, flags: 0x0}, - 55: {region: 0x97, script: 0x20, flags: 0x0}, - 56: {region: 0x37, script: 0x1e, flags: 0x0}, - 57: {region: 0x97, script: 0x20, flags: 0x0}, - 58: {region: 0xe6, script: 0x5, flags: 0x0}, - 59: {region: 0x129, script: 0x2d, flags: 0x0}, - 61: {region: 0x97, script: 0x20, flags: 0x0}, - 62: {region: 0x97, script: 0x20, flags: 0x0}, - 63: {region: 0xe5, script: 0x52, flags: 0x0}, - 64: {region: 0x97, script: 0x20, flags: 0x0}, - 65: {region: 0x13c, script: 0x52, flags: 0x0}, - 66: {region: 0xe5, script: 0x52, flags: 0x0}, - 67: {region: 0xd4, script: 0x52, flags: 0x0}, - 68: {region: 0x97, script: 0x20, flags: 0x0}, - 69: {region: 0x93, script: 0x52, flags: 0x0}, - 70: {region: 0x51, script: 0x52, flags: 0x0}, - 71: {region: 0xe5, script: 0x52, flags: 0x0}, - 72: {region: 0x13b, script: 0xcf, flags: 0x0}, - 73: {region: 0xc1, script: 0x52, flags: 0x0}, - 74: {region: 0xc1, script: 
0x52, flags: 0x0}, - 75: {region: 0x34, script: 0xe, flags: 0x0}, - 76: {region: 0x52, script: 0xd6, flags: 0x0}, - 77: {region: 0x97, script: 0xe, flags: 0x0}, - 78: {region: 0x9a, script: 0x5, flags: 0x0}, - 79: {region: 0x4e, script: 0x52, flags: 0x0}, - 80: {region: 0x76, script: 0x52, flags: 0x0}, - 81: {region: 0x97, script: 0x20, flags: 0x0}, - 82: {region: 0xe6, script: 0x5, flags: 0x0}, - 83: {region: 0x97, script: 0x20, flags: 0x0}, - 84: {region: 0x32, script: 0x52, flags: 0x0}, - 85: {region: 0xb2, script: 0xc, flags: 0x0}, - 86: {region: 0x51, script: 0x52, flags: 0x0}, - 87: {region: 0xe5, script: 0x52, flags: 0x0}, - 88: {region: 0xe6, script: 0x20, flags: 0x0}, - 89: {region: 0x104, script: 0x1e, flags: 0x0}, - 90: {region: 0x15c, script: 0x52, flags: 0x0}, - 91: {region: 0x93, script: 0x52, flags: 0x0}, - 92: {region: 0x51, script: 0x52, flags: 0x0}, - 93: {region: 0x84, script: 0x52, flags: 0x0}, - 94: {region: 0x6c, script: 0x27, flags: 0x0}, - 95: {region: 0x51, script: 0x52, flags: 0x0}, - 96: {region: 0xc1, script: 0x52, flags: 0x0}, - 97: {region: 0x6d, script: 0x52, flags: 0x0}, - 98: {region: 0xd4, script: 0x52, flags: 0x0}, - 99: {region: 0x8, script: 0x2, flags: 0x1}, - 100: {region: 0x104, script: 0x1e, flags: 0x0}, - 101: {region: 0xe5, script: 0x52, flags: 0x0}, - 102: {region: 0x12f, script: 0x52, flags: 0x0}, - 103: {region: 0x88, script: 0x52, flags: 0x0}, - 104: {region: 0x73, script: 0x52, flags: 0x0}, - 105: {region: 0x104, script: 0x1e, flags: 0x0}, - 106: {region: 0x132, script: 0x52, flags: 0x0}, - 107: {region: 0x48, script: 0x52, flags: 0x0}, - 108: {region: 0x132, script: 0x1a, flags: 0x0}, - 109: {region: 0xa4, script: 0x5, flags: 0x0}, - 110: {region: 0x13b, script: 0x19, flags: 0x0}, - 111: {region: 0x99, script: 0x5, flags: 0x0}, - 112: {region: 0x76, script: 0x52, flags: 0x0}, - 113: {region: 0x6a, script: 0x1c, flags: 0x0}, - 114: {region: 0xe5, script: 0x52, flags: 0x0}, - 115: {region: 0x48, script: 0x17, flags: 0x0}, - 116: {region: 0x48, script: 0x17, flags: 0x0}, - 117: {region: 0x48, script: 0x17, flags: 0x0}, - 118: {region: 0x48, script: 0x17, flags: 0x0}, - 119: {region: 0x48, script: 0x17, flags: 0x0}, - 120: {region: 0x108, script: 0x52, flags: 0x0}, - 121: {region: 0x5d, script: 0x52, flags: 0x0}, - 122: {region: 0xe7, script: 0x52, flags: 0x0}, - 123: {region: 0x48, script: 0x17, flags: 0x0}, - 124: {region: 0xc2, script: 0x79, flags: 0x0}, - 125: {region: 0xa, script: 0x2, flags: 0x1}, - 126: {region: 0x104, script: 0x1e, flags: 0x0}, - 127: {region: 0x79, script: 0x52, flags: 0x0}, - 128: {region: 0x62, script: 0x52, flags: 0x0}, - 129: {region: 0x132, script: 0x52, flags: 0x0}, - 130: {region: 0x104, script: 0x1e, flags: 0x0}, - 131: {region: 0xa2, script: 0x52, flags: 0x0}, - 132: {region: 0x97, script: 0x5, flags: 0x0}, - 133: {region: 0x5f, script: 0x52, flags: 0x0}, - 134: {region: 0x48, script: 0x52, flags: 0x0}, - 135: {region: 0x48, script: 0x52, flags: 0x0}, - 136: {region: 0xd2, script: 0x52, flags: 0x0}, - 137: {region: 0x4e, script: 0x52, flags: 0x0}, - 138: {region: 0x97, script: 0x5, flags: 0x0}, - 139: {region: 0x5f, script: 0x52, flags: 0x0}, - 140: {region: 0xc1, script: 0x52, flags: 0x0}, - 141: {region: 0xce, script: 0x52, flags: 0x0}, - 142: {region: 0xd9, script: 0x20, flags: 0x0}, - 143: {region: 0x51, script: 0x52, flags: 0x0}, - 144: {region: 0xcb, script: 0xd4, flags: 0x0}, - 145: {region: 0x112, script: 0x52, flags: 0x0}, - 146: {region: 0x36, script: 0x52, flags: 0x0}, - 147: {region: 0x42, script: 
0xd6, flags: 0x0}, - 148: {region: 0xa2, script: 0x52, flags: 0x0}, - 149: {region: 0x7e, script: 0x52, flags: 0x0}, - 150: {region: 0xd4, script: 0x52, flags: 0x0}, - 151: {region: 0x9c, script: 0x52, flags: 0x0}, - 152: {region: 0x6a, script: 0x25, flags: 0x0}, - 153: {region: 0xc2, script: 0x43, flags: 0x0}, - 154: {region: 0x85, script: 0x2d, flags: 0x0}, - 155: {region: 0xc, script: 0x2, flags: 0x1}, - 156: {region: 0x1, script: 0x52, flags: 0x0}, - 157: {region: 0x6d, script: 0x52, flags: 0x0}, - 158: {region: 0x132, script: 0x52, flags: 0x0}, - 159: {region: 0x69, script: 0x52, flags: 0x0}, - 160: {region: 0x9c, script: 0x3e, flags: 0x0}, - 161: {region: 0x6d, script: 0x52, flags: 0x0}, - 162: {region: 0x51, script: 0x52, flags: 0x0}, - 163: {region: 0x6d, script: 0x52, flags: 0x0}, - 164: {region: 0x9a, script: 0x5, flags: 0x0}, - 165: {region: 0x84, script: 0x52, flags: 0x0}, - 166: {region: 0xe, script: 0x2, flags: 0x1}, - 167: {region: 0xc1, script: 0x52, flags: 0x0}, - 168: {region: 0x70, script: 0x52, flags: 0x0}, - 169: {region: 0x109, script: 0x5, flags: 0x0}, - 170: {region: 0xe5, script: 0x52, flags: 0x0}, - 171: {region: 0x10a, script: 0x52, flags: 0x0}, - 172: {region: 0x71, script: 0x52, flags: 0x0}, - 173: {region: 0x74, script: 0x52, flags: 0x0}, - 174: {region: 0x3a, script: 0x52, flags: 0x0}, - 175: {region: 0x76, script: 0x52, flags: 0x0}, - 176: {region: 0x132, script: 0x52, flags: 0x0}, - 177: {region: 0x76, script: 0x52, flags: 0x0}, - 178: {region: 0x5f, script: 0x52, flags: 0x0}, - 179: {region: 0x5f, script: 0x52, flags: 0x0}, - 180: {region: 0x13d, script: 0x52, flags: 0x0}, - 181: {region: 0xd2, script: 0x52, flags: 0x0}, - 182: {region: 0x9c, script: 0x52, flags: 0x0}, - 183: {region: 0xd4, script: 0x52, flags: 0x0}, - 184: {region: 0x109, script: 0x52, flags: 0x0}, - 185: {region: 0xd7, script: 0x52, flags: 0x0}, - 186: {region: 0x94, script: 0x52, flags: 0x0}, - 187: {region: 0x7e, script: 0x52, flags: 0x0}, - 188: {region: 0xba, script: 0x52, flags: 0x0}, - 189: {region: 0x52, script: 0x34, flags: 0x0}, - 190: {region: 0x93, script: 0x52, flags: 0x0}, - 191: {region: 0x97, script: 0x20, flags: 0x0}, - 192: {region: 0x9a, script: 0x5, flags: 0x0}, - 193: {region: 0x7c, script: 0x52, flags: 0x0}, - 194: {region: 0x79, script: 0x52, flags: 0x0}, - 195: {region: 0x6e, script: 0x27, flags: 0x0}, - 196: {region: 0xd9, script: 0x20, flags: 0x0}, - 197: {region: 0xa5, script: 0x52, flags: 0x0}, - 198: {region: 0xe6, script: 0x5, flags: 0x0}, - 199: {region: 0xe6, script: 0x5, flags: 0x0}, - 200: {region: 0x6d, script: 0x52, flags: 0x0}, - 201: {region: 0x9a, script: 0x5, flags: 0x0}, - 202: {region: 0xef, script: 0x52, flags: 0x0}, - 203: {region: 0x97, script: 0x20, flags: 0x0}, - 204: {region: 0x97, script: 0xd0, flags: 0x0}, - 205: {region: 0x93, script: 0x52, flags: 0x0}, - 206: {region: 0xd7, script: 0x52, flags: 0x0}, - 207: {region: 0x12e, script: 0x2b, flags: 0x0}, - 208: {region: 0x10, script: 0x2, flags: 0x1}, - 209: {region: 0x97, script: 0xe, flags: 0x0}, - 210: {region: 0x4d, script: 0x52, flags: 0x0}, - 211: {region: 0x97, script: 0x2e, flags: 0x0}, - 212: {region: 0x40, script: 0x52, flags: 0x0}, - 213: {region: 0x53, script: 0x52, flags: 0x0}, - 214: {region: 0x7e, script: 0x52, flags: 0x0}, - 216: {region: 0xa2, script: 0x52, flags: 0x0}, - 217: {region: 0x96, script: 0x52, flags: 0x0}, - 218: {region: 0xd9, script: 0x20, flags: 0x0}, - 219: {region: 0x48, script: 0x52, flags: 0x0}, - 220: {region: 0x12, script: 0x3, flags: 0x1}, - 221: 
{region: 0x52, script: 0x34, flags: 0x0}, - 222: {region: 0x132, script: 0x52, flags: 0x0}, - 223: {region: 0x23, script: 0x5, flags: 0x0}, - 224: {region: 0x95, script: 0x37, flags: 0x0}, - 225: {region: 0x97, script: 0x20, flags: 0x0}, - 226: {region: 0x71, script: 0x52, flags: 0x0}, - 227: {region: 0xe5, script: 0x52, flags: 0x0}, - 228: {region: 0x129, script: 0x39, flags: 0x0}, - 229: {region: 0x52, script: 0x81, flags: 0x0}, - 230: {region: 0xe6, script: 0x5, flags: 0x0}, - 231: {region: 0x97, script: 0x20, flags: 0x0}, - 232: {region: 0xad, script: 0x3a, flags: 0x0}, - 233: {region: 0xe5, script: 0x52, flags: 0x0}, - 234: {region: 0xe6, script: 0x5, flags: 0x0}, - 235: {region: 0xe4, script: 0x52, flags: 0x0}, - 236: {region: 0x97, script: 0x20, flags: 0x0}, - 237: {region: 0x97, script: 0x20, flags: 0x0}, - 238: {region: 0x8e, script: 0x52, flags: 0x0}, - 239: {region: 0x5f, script: 0x52, flags: 0x0}, - 240: {region: 0x52, script: 0x34, flags: 0x0}, - 241: {region: 0x8f, script: 0x52, flags: 0x0}, - 242: {region: 0x90, script: 0x52, flags: 0x0}, - 243: {region: 0x27, script: 0x8, flags: 0x0}, - 244: {region: 0xd0, script: 0x52, flags: 0x0}, - 245: {region: 0x76, script: 0x52, flags: 0x0}, - 246: {region: 0xce, script: 0x52, flags: 0x0}, - 247: {region: 0xd4, script: 0x52, flags: 0x0}, - 248: {region: 0x93, script: 0x52, flags: 0x0}, - 250: {region: 0xd4, script: 0x52, flags: 0x0}, - 251: {region: 0x52, script: 0xdf, flags: 0x0}, - 252: {region: 0x132, script: 0x52, flags: 0x0}, - 253: {region: 0x48, script: 0x52, flags: 0x0}, - 254: {region: 0xe5, script: 0x52, flags: 0x0}, - 255: {region: 0x93, script: 0x52, flags: 0x0}, - 256: {region: 0x104, script: 0x1e, flags: 0x0}, - 258: {region: 0x9b, script: 0x52, flags: 0x0}, - 259: {region: 0x9c, script: 0x52, flags: 0x0}, - 260: {region: 0x48, script: 0x17, flags: 0x0}, - 261: {region: 0x95, script: 0x37, flags: 0x0}, - 262: {region: 0x104, script: 0x52, flags: 0x0}, - 263: {region: 0xa0, script: 0x41, flags: 0x0}, - 264: {region: 0x9e, script: 0x52, flags: 0x0}, - 266: {region: 0x51, script: 0x52, flags: 0x0}, - 267: {region: 0x12e, script: 0x37, flags: 0x0}, - 268: {region: 0x12d, script: 0x52, flags: 0x0}, - 269: {region: 0xd9, script: 0x20, flags: 0x0}, - 270: {region: 0x62, script: 0x52, flags: 0x0}, - 271: {region: 0x93, script: 0x52, flags: 0x0}, - 272: {region: 0x93, script: 0x52, flags: 0x0}, - 273: {region: 0x7b, script: 0x29, flags: 0x0}, - 274: {region: 0x134, script: 0x1e, flags: 0x0}, - 275: {region: 0x66, script: 0x52, flags: 0x0}, - 276: {region: 0xc2, script: 0x52, flags: 0x0}, - 277: {region: 0xd4, script: 0x52, flags: 0x0}, - 278: {region: 0xa2, script: 0x52, flags: 0x0}, - 279: {region: 0xc1, script: 0x52, flags: 0x0}, - 280: {region: 0x104, script: 0x1e, flags: 0x0}, - 281: {region: 0xd4, script: 0x52, flags: 0x0}, - 282: {region: 0x161, script: 0x52, flags: 0x0}, - 283: {region: 0x12d, script: 0x52, flags: 0x0}, - 284: {region: 0x121, script: 0xd5, flags: 0x0}, - 285: {region: 0x59, script: 0x52, flags: 0x0}, - 286: {region: 0x51, script: 0x52, flags: 0x0}, - 287: {region: 0x4e, script: 0x52, flags: 0x0}, - 288: {region: 0x97, script: 0x20, flags: 0x0}, - 289: {region: 0x97, script: 0x20, flags: 0x0}, - 290: {region: 0x4a, script: 0x52, flags: 0x0}, - 291: {region: 0x93, script: 0x52, flags: 0x0}, - 292: {region: 0x40, script: 0x52, flags: 0x0}, - 293: {region: 0x97, script: 0x52, flags: 0x0}, - 294: {region: 0x52, script: 0xcc, flags: 0x0}, - 295: {region: 0x97, script: 0x20, flags: 0x0}, - 296: {region: 0xc1, 
script: 0x52, flags: 0x0}, - 297: {region: 0x97, script: 0x6b, flags: 0x0}, - 298: {region: 0xe6, script: 0x5, flags: 0x0}, - 299: {region: 0xa2, script: 0x52, flags: 0x0}, - 300: {region: 0x129, script: 0x52, flags: 0x0}, - 301: {region: 0xd0, script: 0x52, flags: 0x0}, - 302: {region: 0xad, script: 0x4f, flags: 0x0}, - 303: {region: 0x15, script: 0x6, flags: 0x1}, - 304: {region: 0x51, script: 0x52, flags: 0x0}, - 305: {region: 0x80, script: 0x52, flags: 0x0}, - 306: {region: 0xa2, script: 0x52, flags: 0x0}, - 307: {region: 0xa4, script: 0x46, flags: 0x0}, - 308: {region: 0x29, script: 0x52, flags: 0x0}, - 309: {region: 0x97, script: 0x4a, flags: 0x0}, - 310: {region: 0xa9, script: 0x4b, flags: 0x0}, - 311: {region: 0x104, script: 0x1e, flags: 0x0}, - 312: {region: 0x97, script: 0x20, flags: 0x0}, - 313: {region: 0x73, script: 0x52, flags: 0x0}, - 314: {region: 0xb2, script: 0x52, flags: 0x0}, - 316: {region: 0x104, script: 0x1e, flags: 0x0}, - 317: {region: 0x110, script: 0x52, flags: 0x0}, - 318: {region: 0xe5, script: 0x52, flags: 0x0}, - 319: {region: 0x104, script: 0x52, flags: 0x0}, - 320: {region: 0x97, script: 0x20, flags: 0x0}, - 321: {region: 0x97, script: 0x5, flags: 0x0}, - 322: {region: 0x12d, script: 0x52, flags: 0x0}, - 323: {region: 0x51, script: 0x52, flags: 0x0}, - 324: {region: 0x5f, script: 0x52, flags: 0x0}, - 325: {region: 0x1b, script: 0x3, flags: 0x1}, - 326: {region: 0x104, script: 0x1e, flags: 0x0}, - 327: {region: 0x104, script: 0x1e, flags: 0x0}, - 328: {region: 0x93, script: 0x52, flags: 0x0}, - 329: {region: 0xe6, script: 0x5, flags: 0x0}, - 330: {region: 0x79, script: 0x52, flags: 0x0}, - 331: {region: 0x121, script: 0xd5, flags: 0x0}, - 332: {region: 0xe6, script: 0x5, flags: 0x0}, - 333: {region: 0x1e, script: 0x5, flags: 0x1}, - 334: {region: 0x135, script: 0x52, flags: 0x0}, - 335: {region: 0x85, script: 0x56, flags: 0x0}, - 336: {region: 0x95, script: 0x37, flags: 0x0}, - 337: {region: 0x12d, script: 0x52, flags: 0x0}, - 338: {region: 0xe6, script: 0x5, flags: 0x0}, - 339: {region: 0x12f, script: 0x52, flags: 0x0}, - 340: {region: 0xb5, script: 0x52, flags: 0x0}, - 341: {region: 0x104, script: 0x1e, flags: 0x0}, - 342: {region: 0x93, script: 0x52, flags: 0x0}, - 343: {region: 0x52, script: 0xd5, flags: 0x0}, - 344: {region: 0x97, script: 0x54, flags: 0x0}, - 345: {region: 0x104, script: 0x1e, flags: 0x0}, - 346: {region: 0x12f, script: 0x52, flags: 0x0}, - 347: {region: 0xd7, script: 0x52, flags: 0x0}, - 348: {region: 0x23, script: 0x2, flags: 0x1}, - 349: {region: 0x9c, script: 0x52, flags: 0x0}, - 350: {region: 0x52, script: 0x58, flags: 0x0}, - 351: {region: 0x93, script: 0x52, flags: 0x0}, - 352: {region: 0x9a, script: 0x5, flags: 0x0}, - 353: {region: 0x132, script: 0x52, flags: 0x0}, - 354: {region: 0x97, script: 0xd0, flags: 0x0}, - 355: {region: 0x9c, script: 0x52, flags: 0x0}, - 356: {region: 0x4a, script: 0x52, flags: 0x0}, - 357: {region: 0xad, script: 0x4f, flags: 0x0}, - 358: {region: 0x4a, script: 0x52, flags: 0x0}, - 359: {region: 0x15f, script: 0x52, flags: 0x0}, - 360: {region: 0x9a, script: 0x5, flags: 0x0}, - 361: {region: 0xb4, script: 0x52, flags: 0x0}, - 362: {region: 0xb6, script: 0x52, flags: 0x0}, - 363: {region: 0x4a, script: 0x52, flags: 0x0}, - 364: {region: 0x4a, script: 0x52, flags: 0x0}, - 365: {region: 0xa2, script: 0x52, flags: 0x0}, - 366: {region: 0xa2, script: 0x52, flags: 0x0}, - 367: {region: 0x9a, script: 0x5, flags: 0x0}, - 368: {region: 0xb6, script: 0x52, flags: 0x0}, - 369: {region: 0x121, script: 0xd5, flags: 
0x0}, - 370: {region: 0x52, script: 0x34, flags: 0x0}, - 371: {region: 0x129, script: 0x52, flags: 0x0}, - 372: {region: 0x93, script: 0x52, flags: 0x0}, - 373: {region: 0x51, script: 0x52, flags: 0x0}, - 374: {region: 0x97, script: 0x20, flags: 0x0}, - 375: {region: 0x97, script: 0x20, flags: 0x0}, - 376: {region: 0x93, script: 0x52, flags: 0x0}, - 377: {region: 0x25, script: 0x3, flags: 0x1}, - 378: {region: 0xa2, script: 0x52, flags: 0x0}, - 379: {region: 0xcd, script: 0x52, flags: 0x0}, - 380: {region: 0x104, script: 0x1e, flags: 0x0}, - 381: {region: 0xe5, script: 0x52, flags: 0x0}, - 382: {region: 0x93, script: 0x52, flags: 0x0}, - 383: {region: 0x110, script: 0x52, flags: 0x0}, - 384: {region: 0xa2, script: 0x52, flags: 0x0}, - 385: {region: 0x121, script: 0x5, flags: 0x0}, - 386: {region: 0xca, script: 0x52, flags: 0x0}, - 387: {region: 0xbd, script: 0x52, flags: 0x0}, - 388: {region: 0xcf, script: 0x52, flags: 0x0}, - 389: {region: 0x51, script: 0x52, flags: 0x0}, - 390: {region: 0xd9, script: 0x20, flags: 0x0}, - 391: {region: 0x12d, script: 0x52, flags: 0x0}, - 392: {region: 0xbe, script: 0x52, flags: 0x0}, - 393: {region: 0xde, script: 0x52, flags: 0x0}, - 394: {region: 0x93, script: 0x52, flags: 0x0}, - 395: {region: 0x99, script: 0x36, flags: 0x0}, - 396: {region: 0xc0, script: 0x1e, flags: 0x0}, - 397: {region: 0x97, script: 0x64, flags: 0x0}, - 398: {region: 0x109, script: 0x52, flags: 0x0}, - 399: {region: 0x28, script: 0x3, flags: 0x1}, - 400: {region: 0x97, script: 0xe, flags: 0x0}, - 401: {region: 0xc2, script: 0x6b, flags: 0x0}, - 403: {region: 0x48, script: 0x52, flags: 0x0}, - 404: {region: 0x48, script: 0x52, flags: 0x0}, - 405: {region: 0x36, script: 0x52, flags: 0x0}, - 406: {region: 0x97, script: 0x20, flags: 0x0}, - 407: {region: 0xd9, script: 0x20, flags: 0x0}, - 408: {region: 0x104, script: 0x1e, flags: 0x0}, - 409: {region: 0x34, script: 0x68, flags: 0x0}, - 410: {region: 0x2b, script: 0x3, flags: 0x1}, - 411: {region: 0xc9, script: 0x52, flags: 0x0}, - 412: {region: 0x97, script: 0x20, flags: 0x0}, - 413: {region: 0x51, script: 0x52, flags: 0x0}, - 415: {region: 0x132, script: 0x52, flags: 0x0}, - 416: {region: 0xe6, script: 0x5, flags: 0x0}, - 417: {region: 0xc1, script: 0x52, flags: 0x0}, - 418: {region: 0x97, script: 0x20, flags: 0x0}, - 419: {region: 0x93, script: 0x52, flags: 0x0}, - 420: {region: 0x161, script: 0x52, flags: 0x0}, - 421: {region: 0xc2, script: 0x6b, flags: 0x0}, - 422: {region: 0x104, script: 0x1e, flags: 0x0}, - 423: {region: 0x12f, script: 0x52, flags: 0x0}, - 424: {region: 0x9a, script: 0x5d, flags: 0x0}, - 425: {region: 0x9a, script: 0x5, flags: 0x0}, - 426: {region: 0xdb, script: 0x52, flags: 0x0}, - 428: {region: 0x52, script: 0x34, flags: 0x0}, - 429: {region: 0x9c, script: 0x52, flags: 0x0}, - 430: {region: 0xd0, script: 0x52, flags: 0x0}, - 431: {region: 0xd8, script: 0x52, flags: 0x0}, - 432: {region: 0xcd, script: 0x52, flags: 0x0}, - 433: {region: 0x161, script: 0x52, flags: 0x0}, - 434: {region: 0xcf, script: 0x52, flags: 0x0}, - 435: {region: 0x5f, script: 0x52, flags: 0x0}, - 436: {region: 0xd9, script: 0x20, flags: 0x0}, - 437: {region: 0xd9, script: 0x20, flags: 0x0}, - 438: {region: 0xd0, script: 0x52, flags: 0x0}, - 439: {region: 0xcf, script: 0x52, flags: 0x0}, - 440: {region: 0xcd, script: 0x52, flags: 0x0}, - 441: {region: 0xcd, script: 0x52, flags: 0x0}, - 442: {region: 0x93, script: 0x52, flags: 0x0}, - 443: {region: 0xdd, script: 0x52, flags: 0x0}, - 444: {region: 0x97, script: 0x52, flags: 0x0}, - 445: {region: 
0xd7, script: 0x52, flags: 0x0}, - 446: {region: 0x51, script: 0x52, flags: 0x0}, - 447: {region: 0xd8, script: 0x52, flags: 0x0}, - 448: {region: 0x51, script: 0x52, flags: 0x0}, - 449: {region: 0xd8, script: 0x52, flags: 0x0}, - 450: {region: 0x121, script: 0x4e, flags: 0x0}, - 451: {region: 0x97, script: 0x20, flags: 0x0}, - 452: {region: 0x10a, script: 0xb7, flags: 0x0}, - 453: {region: 0x82, script: 0x70, flags: 0x0}, - 454: {region: 0x15e, script: 0x52, flags: 0x0}, - 455: {region: 0x48, script: 0x17, flags: 0x0}, - 456: {region: 0x15e, script: 0x52, flags: 0x0}, - 457: {region: 0x115, script: 0x52, flags: 0x0}, - 458: {region: 0x132, script: 0x52, flags: 0x0}, - 459: {region: 0x52, script: 0x52, flags: 0x0}, - 460: {region: 0xcc, script: 0x52, flags: 0x0}, - 461: {region: 0x12d, script: 0x52, flags: 0x0}, - 462: {region: 0x12f, script: 0x52, flags: 0x0}, - 463: {region: 0x7e, script: 0x52, flags: 0x0}, - 464: {region: 0x76, script: 0x52, flags: 0x0}, - 466: {region: 0x6e, script: 0x52, flags: 0x0}, - 467: {region: 0x97, script: 0x75, flags: 0x0}, - 468: {region: 0x7b, script: 0x1e, flags: 0x0}, - 469: {region: 0x132, script: 0x76, flags: 0x0}, - 470: {region: 0xc3, script: 0x74, flags: 0x0}, - 471: {region: 0x2e, script: 0x3, flags: 0x1}, - 472: {region: 0xe5, script: 0x52, flags: 0x0}, - 473: {region: 0x31, script: 0x2, flags: 0x1}, - 474: {region: 0xe5, script: 0x52, flags: 0x0}, - 475: {region: 0x2f, script: 0x52, flags: 0x0}, - 476: {region: 0xee, script: 0x52, flags: 0x0}, - 477: {region: 0x76, script: 0x52, flags: 0x0}, - 478: {region: 0xd4, script: 0x52, flags: 0x0}, - 479: {region: 0x132, script: 0x52, flags: 0x0}, - 480: {region: 0x48, script: 0x52, flags: 0x0}, - 481: {region: 0x9a, script: 0xdd, flags: 0x0}, - 482: {region: 0x5f, script: 0x52, flags: 0x0}, - 483: {region: 0xae, script: 0x7f, flags: 0x0}, - 485: {region: 0x97, script: 0x12, flags: 0x0}, - 486: {region: 0xa2, script: 0x52, flags: 0x0}, - 487: {region: 0xe7, script: 0x52, flags: 0x0}, - 488: {region: 0x9c, script: 0x52, flags: 0x0}, - 489: {region: 0x85, script: 0x2d, flags: 0x0}, - 490: {region: 0x73, script: 0x52, flags: 0x0}, - 491: {region: 0xe6, script: 0x45, flags: 0x0}, - 492: {region: 0x9a, script: 0x5, flags: 0x0}, - 493: {region: 0x1, script: 0x52, flags: 0x0}, - 494: {region: 0x23, script: 0x5, flags: 0x0}, - 495: {region: 0x40, script: 0x52, flags: 0x0}, - 496: {region: 0x78, script: 0x52, flags: 0x0}, - 497: {region: 0xe2, script: 0x52, flags: 0x0}, - 498: {region: 0x87, script: 0x52, flags: 0x0}, - 499: {region: 0x68, script: 0x52, flags: 0x0}, - 500: {region: 0x97, script: 0x20, flags: 0x0}, - 501: {region: 0x100, script: 0x52, flags: 0x0}, - 502: {region: 0x93, script: 0x52, flags: 0x0}, - 503: {region: 0x9c, script: 0x52, flags: 0x0}, - 504: {region: 0x97, script: 0x52, flags: 0x0}, - 505: {region: 0x33, script: 0x2, flags: 0x1}, - 506: {region: 0xd9, script: 0x20, flags: 0x0}, - 507: {region: 0x34, script: 0xe, flags: 0x0}, - 508: {region: 0x4d, script: 0x52, flags: 0x0}, - 509: {region: 0x70, script: 0x52, flags: 0x0}, - 510: {region: 0x4d, script: 0x52, flags: 0x0}, - 511: {region: 0x9a, script: 0x5, flags: 0x0}, - 512: {region: 0x10a, script: 0x52, flags: 0x0}, - 513: {region: 0x39, script: 0x52, flags: 0x0}, - 514: {region: 0xcf, script: 0x52, flags: 0x0}, - 515: {region: 0x102, script: 0x52, flags: 0x0}, - 516: {region: 0x93, script: 0x52, flags: 0x0}, - 517: {region: 0x12d, script: 0x52, flags: 0x0}, - 518: {region: 0x71, script: 0x52, flags: 0x0}, - 519: {region: 0x104, script: 0x1e, 
flags: 0x0}, - 520: {region: 0x12e, script: 0x1e, flags: 0x0}, - 521: {region: 0x107, script: 0x52, flags: 0x0}, - 522: {region: 0x105, script: 0x52, flags: 0x0}, - 523: {region: 0x12d, script: 0x52, flags: 0x0}, - 524: {region: 0xa0, script: 0x44, flags: 0x0}, - 525: {region: 0x97, script: 0x20, flags: 0x0}, - 526: {region: 0x7e, script: 0x52, flags: 0x0}, - 527: {region: 0x104, script: 0x1e, flags: 0x0}, - 528: {region: 0xa2, script: 0x52, flags: 0x0}, - 529: {region: 0x93, script: 0x52, flags: 0x0}, - 530: {region: 0x97, script: 0x52, flags: 0x0}, - 531: {region: 0x97, script: 0xbb, flags: 0x0}, - 532: {region: 0x12d, script: 0x52, flags: 0x0}, - 533: {region: 0x9c, script: 0x52, flags: 0x0}, - 534: {region: 0x97, script: 0x20, flags: 0x0}, - 535: {region: 0x9c, script: 0x52, flags: 0x0}, - 536: {region: 0x79, script: 0x52, flags: 0x0}, - 537: {region: 0x48, script: 0x52, flags: 0x0}, - 538: {region: 0x35, script: 0x4, flags: 0x1}, - 539: {region: 0x9c, script: 0x52, flags: 0x0}, - 540: {region: 0x9a, script: 0x5, flags: 0x0}, - 541: {region: 0xd8, script: 0x52, flags: 0x0}, - 542: {region: 0x4e, script: 0x52, flags: 0x0}, - 543: {region: 0xcf, script: 0x52, flags: 0x0}, - 544: {region: 0xcd, script: 0x52, flags: 0x0}, - 545: {region: 0xc1, script: 0x52, flags: 0x0}, - 546: {region: 0x4b, script: 0x52, flags: 0x0}, - 547: {region: 0x94, script: 0x72, flags: 0x0}, - 548: {region: 0xb4, script: 0x52, flags: 0x0}, - 550: {region: 0xb8, script: 0xd2, flags: 0x0}, - 551: {region: 0xc2, script: 0x6b, flags: 0x0}, - 552: {region: 0xb1, script: 0xc1, flags: 0x0}, - 553: {region: 0x6e, script: 0x52, flags: 0x0}, - 554: {region: 0x10f, script: 0x52, flags: 0x0}, - 555: {region: 0xe6, script: 0x5, flags: 0x0}, - 556: {region: 0x10d, script: 0x52, flags: 0x0}, - 557: {region: 0xe7, script: 0x52, flags: 0x0}, - 558: {region: 0x93, script: 0x52, flags: 0x0}, - 559: {region: 0x13f, script: 0x52, flags: 0x0}, - 560: {region: 0x10a, script: 0x52, flags: 0x0}, - 562: {region: 0x10a, script: 0x52, flags: 0x0}, - 563: {region: 0x70, script: 0x52, flags: 0x0}, - 564: {region: 0x95, script: 0xb8, flags: 0x0}, - 565: {region: 0x70, script: 0x52, flags: 0x0}, - 566: {region: 0x161, script: 0x52, flags: 0x0}, - 567: {region: 0xc1, script: 0x52, flags: 0x0}, - 568: {region: 0x113, script: 0x52, flags: 0x0}, - 569: {region: 0x121, script: 0xd5, flags: 0x0}, - 570: {region: 0x26, script: 0x52, flags: 0x0}, - 571: {region: 0x39, script: 0x5, flags: 0x1}, - 572: {region: 0x97, script: 0xc2, flags: 0x0}, - 573: {region: 0x114, script: 0x52, flags: 0x0}, - 574: {region: 0x112, script: 0x52, flags: 0x0}, - 575: {region: 0x97, script: 0x20, flags: 0x0}, - 576: {region: 0x15e, script: 0x52, flags: 0x0}, - 577: {region: 0x6c, script: 0x52, flags: 0x0}, - 578: {region: 0x15e, script: 0x52, flags: 0x0}, - 579: {region: 0x5f, script: 0x52, flags: 0x0}, - 580: {region: 0x93, script: 0x52, flags: 0x0}, - 581: {region: 0x12d, script: 0x52, flags: 0x0}, - 582: {region: 0x82, script: 0x52, flags: 0x0}, - 583: {region: 0x10a, script: 0x52, flags: 0x0}, - 584: {region: 0x12d, script: 0x52, flags: 0x0}, - 585: {region: 0x15c, script: 0x5, flags: 0x0}, - 586: {region: 0x4a, script: 0x52, flags: 0x0}, - 587: {region: 0x5f, script: 0x52, flags: 0x0}, - 588: {region: 0x97, script: 0x20, flags: 0x0}, - 589: {region: 0x93, script: 0x52, flags: 0x0}, - 590: {region: 0x34, script: 0xe, flags: 0x0}, - 591: {region: 0x99, script: 0xc5, flags: 0x0}, - 592: {region: 0xe7, script: 0x52, flags: 0x0}, - 593: {region: 0x97, script: 0xcd, flags: 
0x0}, - 594: {region: 0xd9, script: 0x20, flags: 0x0}, - 595: {region: 0xe5, script: 0x52, flags: 0x0}, - 596: {region: 0x97, script: 0x4a, flags: 0x0}, - 597: {region: 0x52, script: 0xcb, flags: 0x0}, - 598: {region: 0xd9, script: 0x20, flags: 0x0}, - 599: {region: 0xd9, script: 0x20, flags: 0x0}, - 600: {region: 0x97, script: 0xd0, flags: 0x0}, - 601: {region: 0x110, script: 0x52, flags: 0x0}, - 602: {region: 0x12f, script: 0x52, flags: 0x0}, - 603: {region: 0x124, script: 0x52, flags: 0x0}, - 604: {region: 0x3e, script: 0x3, flags: 0x1}, - 605: {region: 0x121, script: 0xd5, flags: 0x0}, - 606: {region: 0xd9, script: 0x20, flags: 0x0}, - 607: {region: 0xd9, script: 0x20, flags: 0x0}, - 608: {region: 0xd9, script: 0x20, flags: 0x0}, - 609: {region: 0x6e, script: 0x27, flags: 0x0}, - 610: {region: 0x6c, script: 0x27, flags: 0x0}, - 611: {region: 0xd4, script: 0x52, flags: 0x0}, - 612: {region: 0x125, script: 0x52, flags: 0x0}, - 613: {region: 0x123, script: 0x52, flags: 0x0}, - 614: {region: 0x31, script: 0x52, flags: 0x0}, - 615: {region: 0xd9, script: 0x20, flags: 0x0}, - 616: {region: 0xe5, script: 0x52, flags: 0x0}, - 617: {region: 0x31, script: 0x52, flags: 0x0}, - 618: {region: 0xd2, script: 0x52, flags: 0x0}, - 619: {region: 0x15e, script: 0x52, flags: 0x0}, - 620: {region: 0x127, script: 0x52, flags: 0x0}, - 621: {region: 0xcc, script: 0x52, flags: 0x0}, - 622: {region: 0xe4, script: 0x52, flags: 0x0}, - 623: {region: 0x129, script: 0x52, flags: 0x0}, - 624: {region: 0x129, script: 0x52, flags: 0x0}, - 625: {region: 0x12c, script: 0x52, flags: 0x0}, - 626: {region: 0x15e, script: 0x52, flags: 0x0}, - 627: {region: 0x85, script: 0x2d, flags: 0x0}, - 628: {region: 0xd9, script: 0x20, flags: 0x0}, - 629: {region: 0xe5, script: 0x52, flags: 0x0}, - 630: {region: 0x42, script: 0xd6, flags: 0x0}, - 631: {region: 0x104, script: 0x1e, flags: 0x0}, - 632: {region: 0x12f, script: 0x52, flags: 0x0}, - 633: {region: 0x121, script: 0xd5, flags: 0x0}, - 634: {region: 0x31, script: 0x52, flags: 0x0}, - 635: {region: 0xcc, script: 0x52, flags: 0x0}, - 636: {region: 0x12b, script: 0x52, flags: 0x0}, - 638: {region: 0xd2, script: 0x52, flags: 0x0}, - 639: {region: 0x52, script: 0xce, flags: 0x0}, - 640: {region: 0xe3, script: 0x52, flags: 0x0}, - 641: {region: 0x104, script: 0x1e, flags: 0x0}, - 642: {region: 0xb8, script: 0x52, flags: 0x0}, - 643: {region: 0x104, script: 0x1e, flags: 0x0}, - 644: {region: 0x41, script: 0x4, flags: 0x1}, - 645: {region: 0x11a, script: 0xd8, flags: 0x0}, - 646: {region: 0x12e, script: 0x1e, flags: 0x0}, - 647: {region: 0x73, script: 0x52, flags: 0x0}, - 648: {region: 0x29, script: 0x52, flags: 0x0}, - 650: {region: 0x45, script: 0x3, flags: 0x1}, - 651: {region: 0x97, script: 0xe, flags: 0x0}, - 652: {region: 0xe6, script: 0x5, flags: 0x0}, - 653: {region: 0x48, script: 0x4, flags: 0x1}, - 654: {region: 0xb2, script: 0xd9, flags: 0x0}, - 655: {region: 0x15e, script: 0x52, flags: 0x0}, - 656: {region: 0x9c, script: 0x52, flags: 0x0}, - 657: {region: 0x104, script: 0x52, flags: 0x0}, - 658: {region: 0x13b, script: 0x52, flags: 0x0}, - 659: {region: 0x119, script: 0x52, flags: 0x0}, - 660: {region: 0x35, script: 0x52, flags: 0x0}, - 661: {region: 0x5f, script: 0x52, flags: 0x0}, - 662: {region: 0xcf, script: 0x52, flags: 0x0}, - 663: {region: 0x1, script: 0x52, flags: 0x0}, - 664: {region: 0x104, script: 0x52, flags: 0x0}, - 665: {region: 0x69, script: 0x52, flags: 0x0}, - 666: {region: 0x12d, script: 0x52, flags: 0x0}, - 667: {region: 0x35, script: 0x52, flags: 0x0}, - 
668: {region: 0x4d, script: 0x52, flags: 0x0}, - 669: {region: 0x6e, script: 0x27, flags: 0x0}, - 670: {region: 0xe5, script: 0x52, flags: 0x0}, - 671: {region: 0x2e, script: 0x52, flags: 0x0}, - 672: {region: 0x97, script: 0xd0, flags: 0x0}, - 673: {region: 0x97, script: 0x20, flags: 0x0}, - 674: {region: 0x13d, script: 0x52, flags: 0x0}, - 675: {region: 0xa6, script: 0x5, flags: 0x0}, - 676: {region: 0x112, script: 0x52, flags: 0x0}, - 677: {region: 0x97, script: 0x20, flags: 0x0}, - 678: {region: 0x52, script: 0x34, flags: 0x0}, - 679: {region: 0x40, script: 0x52, flags: 0x0}, - 680: {region: 0x129, script: 0x18, flags: 0x0}, - 681: {region: 0x15e, script: 0x52, flags: 0x0}, - 682: {region: 0x129, script: 0x5a, flags: 0x0}, - 683: {region: 0x129, script: 0x5b, flags: 0x0}, - 684: {region: 0x7b, script: 0x29, flags: 0x0}, - 685: {region: 0x52, script: 0x5e, flags: 0x0}, - 686: {region: 0x109, script: 0x62, flags: 0x0}, - 687: {region: 0x106, script: 0x6c, flags: 0x0}, - 688: {region: 0x97, script: 0x20, flags: 0x0}, - 689: {region: 0x12f, script: 0x52, flags: 0x0}, - 690: {region: 0x9a, script: 0x82, flags: 0x0}, - 691: {region: 0x15b, script: 0xba, flags: 0x0}, - 692: {region: 0xd9, script: 0x20, flags: 0x0}, - 693: {region: 0xcf, script: 0x52, flags: 0x0}, - 694: {region: 0x73, script: 0x52, flags: 0x0}, - 695: {region: 0x51, script: 0x52, flags: 0x0}, - 696: {region: 0x51, script: 0x52, flags: 0x0}, - 697: {region: 0x1, script: 0x37, flags: 0x0}, - 698: {region: 0xd4, script: 0x52, flags: 0x0}, - 699: {region: 0x40, script: 0x52, flags: 0x0}, - 700: {region: 0xcd, script: 0x52, flags: 0x0}, - 701: {region: 0x4c, script: 0x3, flags: 0x1}, - 702: {region: 0x52, script: 0x52, flags: 0x0}, - 703: {region: 0x109, script: 0x52, flags: 0x0}, - 705: {region: 0xa6, script: 0x5, flags: 0x0}, - 706: {region: 0xd7, script: 0x52, flags: 0x0}, - 707: {region: 0xb8, script: 0xd2, flags: 0x0}, - 708: {region: 0x4f, script: 0x14, flags: 0x1}, - 709: {region: 0xce, script: 0x52, flags: 0x0}, - 710: {region: 0x15e, script: 0x52, flags: 0x0}, - 712: {region: 0x129, script: 0x52, flags: 0x0}, -} - -// likelyLangList holds lists info associated with likelyLang. 
-// Size: 396 bytes, 99 elements -var likelyLangList = [99]likelyScriptRegion{ - 0: {region: 0x9a, script: 0x7, flags: 0x0}, - 1: {region: 0x9f, script: 0x6d, flags: 0x2}, - 2: {region: 0x11a, script: 0x78, flags: 0x2}, - 3: {region: 0x31, script: 0x52, flags: 0x0}, - 4: {region: 0x99, script: 0x5, flags: 0x4}, - 5: {region: 0x9a, script: 0x5, flags: 0x4}, - 6: {region: 0x104, script: 0x1e, flags: 0x4}, - 7: {region: 0x9a, script: 0x5, flags: 0x2}, - 8: {region: 0x97, script: 0xe, flags: 0x0}, - 9: {region: 0x34, script: 0x16, flags: 0x2}, - 10: {region: 0x104, script: 0x1e, flags: 0x0}, - 11: {region: 0x37, script: 0x2a, flags: 0x2}, - 12: {region: 0x132, script: 0x52, flags: 0x0}, - 13: {region: 0x79, script: 0xbd, flags: 0x2}, - 14: {region: 0x112, script: 0x52, flags: 0x0}, - 15: {region: 0x82, script: 0x1, flags: 0x2}, - 16: {region: 0x5c, script: 0x1d, flags: 0x0}, - 17: {region: 0x85, script: 0x57, flags: 0x2}, - 18: {region: 0xd4, script: 0x52, flags: 0x0}, - 19: {region: 0x51, script: 0x5, flags: 0x4}, - 20: {region: 0x109, script: 0x5, flags: 0x4}, - 21: {region: 0xac, script: 0x1e, flags: 0x0}, - 22: {region: 0x23, script: 0x5, flags: 0x4}, - 23: {region: 0x52, script: 0x5, flags: 0x4}, - 24: {region: 0x9a, script: 0x5, flags: 0x4}, - 25: {region: 0xc3, script: 0x5, flags: 0x4}, - 26: {region: 0x52, script: 0x5, flags: 0x2}, - 27: {region: 0x129, script: 0x52, flags: 0x0}, - 28: {region: 0xae, script: 0x5, flags: 0x4}, - 29: {region: 0x99, script: 0x5, flags: 0x2}, - 30: {region: 0xa3, script: 0x1e, flags: 0x0}, - 31: {region: 0x52, script: 0x5, flags: 0x4}, - 32: {region: 0x129, script: 0x52, flags: 0x4}, - 33: {region: 0x52, script: 0x5, flags: 0x2}, - 34: {region: 0x129, script: 0x52, flags: 0x2}, - 35: {region: 0xd9, script: 0x20, flags: 0x0}, - 36: {region: 0x97, script: 0x55, flags: 0x2}, - 37: {region: 0x81, script: 0x52, flags: 0x0}, - 38: {region: 0x82, script: 0x70, flags: 0x4}, - 39: {region: 0x82, script: 0x70, flags: 0x2}, - 40: {region: 0xc3, script: 0x1e, flags: 0x0}, - 41: {region: 0x52, script: 0x66, flags: 0x4}, - 42: {region: 0x52, script: 0x66, flags: 0x2}, - 43: {region: 0xce, script: 0x52, flags: 0x0}, - 44: {region: 0x49, script: 0x5, flags: 0x4}, - 45: {region: 0x93, script: 0x5, flags: 0x4}, - 46: {region: 0x97, script: 0x2f, flags: 0x0}, - 47: {region: 0xe6, script: 0x5, flags: 0x4}, - 48: {region: 0xe6, script: 0x5, flags: 0x2}, - 49: {region: 0x9a, script: 0x7c, flags: 0x0}, - 50: {region: 0x52, script: 0x7d, flags: 0x2}, - 51: {region: 0xb8, script: 0xd2, flags: 0x0}, - 52: {region: 0xd7, script: 0x52, flags: 0x4}, - 53: {region: 0xe6, script: 0x5, flags: 0x0}, - 54: {region: 0x97, script: 0x20, flags: 0x2}, - 55: {region: 0x97, script: 0x47, flags: 0x2}, - 56: {region: 0x97, script: 0xc0, flags: 0x2}, - 57: {region: 0x103, script: 0x1e, flags: 0x0}, - 58: {region: 0xbb, script: 0x52, flags: 0x4}, - 59: {region: 0x102, script: 0x52, flags: 0x4}, - 60: {region: 0x104, script: 0x52, flags: 0x4}, - 61: {region: 0x129, script: 0x52, flags: 0x4}, - 62: {region: 0x122, script: 0x1e, flags: 0x0}, - 63: {region: 0xe6, script: 0x5, flags: 0x4}, - 64: {region: 0xe6, script: 0x5, flags: 0x2}, - 65: {region: 0x52, script: 0x5, flags: 0x0}, - 66: {region: 0xac, script: 0x1e, flags: 0x4}, - 67: {region: 0xc3, script: 0x1e, flags: 0x4}, - 68: {region: 0xac, script: 0x1e, flags: 0x2}, - 69: {region: 0x97, script: 0xe, flags: 0x0}, - 70: {region: 0xd9, script: 0x20, flags: 0x4}, - 71: {region: 0xd9, script: 0x20, flags: 0x2}, - 72: {region: 0x134, script: 0x52, 
flags: 0x0}, - 73: {region: 0x23, script: 0x5, flags: 0x4}, - 74: {region: 0x52, script: 0x1e, flags: 0x4}, - 75: {region: 0x23, script: 0x5, flags: 0x2}, - 76: {region: 0x8b, script: 0x35, flags: 0x0}, - 77: {region: 0x52, script: 0x34, flags: 0x4}, - 78: {region: 0x52, script: 0x34, flags: 0x2}, - 79: {region: 0x52, script: 0x34, flags: 0x0}, - 80: {region: 0x2e, script: 0x35, flags: 0x4}, - 81: {region: 0x3d, script: 0x35, flags: 0x4}, - 82: {region: 0x79, script: 0x35, flags: 0x4}, - 83: {region: 0x7c, script: 0x35, flags: 0x4}, - 84: {region: 0x8b, script: 0x35, flags: 0x4}, - 85: {region: 0x93, script: 0x35, flags: 0x4}, - 86: {region: 0xc4, script: 0x35, flags: 0x4}, - 87: {region: 0xce, script: 0x35, flags: 0x4}, - 88: {region: 0xe0, script: 0x35, flags: 0x4}, - 89: {region: 0xe3, script: 0x35, flags: 0x4}, - 90: {region: 0xe5, script: 0x35, flags: 0x4}, - 91: {region: 0x114, script: 0x35, flags: 0x4}, - 92: {region: 0x121, script: 0x35, flags: 0x4}, - 93: {region: 0x12c, script: 0x35, flags: 0x4}, - 94: {region: 0x132, script: 0x35, flags: 0x4}, - 95: {region: 0x13b, script: 0x35, flags: 0x4}, - 96: {region: 0x12c, script: 0x11, flags: 0x2}, - 97: {region: 0x12c, script: 0x30, flags: 0x2}, - 98: {region: 0x12c, script: 0x35, flags: 0x2}, -} - -type likelyLangScript struct { - lang uint16 - script uint8 - flags uint8 -} - -// likelyRegion is a lookup table, indexed by regionID, for the most likely -// languages and scripts given incomplete information. If more entries exist -// for a given regionID, lang and script are the index and size respectively -// of the list in likelyRegionList. -// TODO: exclude containers and user-definable regions from the list. -// Size: 1420 bytes, 355 elements -var likelyRegion = [355]likelyLangScript{ - 33: {lang: 0x61, script: 0x52, flags: 0x0}, - 34: {lang: 0x15, script: 0x5, flags: 0x0}, - 35: {lang: 0x0, script: 0x2, flags: 0x1}, - 38: {lang: 0x2, script: 0x2, flags: 0x1}, - 39: {lang: 0x4, script: 0x2, flags: 0x1}, - 41: {lang: 0x1ef, script: 0x52, flags: 0x0}, - 42: {lang: 0x0, script: 0x52, flags: 0x0}, - 43: {lang: 0x9d, script: 0x52, flags: 0x0}, - 44: {lang: 0x22f, script: 0x52, flags: 0x0}, - 45: {lang: 0x85, script: 0x52, flags: 0x0}, - 47: {lang: 0x1bd, script: 0x52, flags: 0x0}, - 48: {lang: 0x247, script: 0x52, flags: 0x0}, - 49: {lang: 0x24, script: 0x52, flags: 0x0}, - 50: {lang: 0x6, script: 0x2, flags: 0x1}, - 52: {lang: 0x4b, script: 0xe, flags: 0x0}, - 53: {lang: 0x1bd, script: 0x52, flags: 0x0}, - 54: {lang: 0xaf, script: 0x52, flags: 0x0}, - 55: {lang: 0x38, script: 0x1e, flags: 0x0}, - 56: {lang: 0x15, script: 0x5, flags: 0x0}, - 57: {lang: 0x201, script: 0x52, flags: 0x0}, - 58: {lang: 0xaf, script: 0x52, flags: 0x0}, - 59: {lang: 0xaf, script: 0x52, flags: 0x0}, - 61: {lang: 0x19a, script: 0x52, flags: 0x0}, - 62: {lang: 0x9d, script: 0x52, flags: 0x0}, - 63: {lang: 0x1db, script: 0x52, flags: 0x0}, - 64: {lang: 0x1ef, script: 0x52, flags: 0x0}, - 66: {lang: 0x8, script: 0x2, flags: 0x1}, - 68: {lang: 0x0, script: 0x52, flags: 0x0}, - 70: {lang: 0x2f, script: 0x1e, flags: 0x0}, - 72: {lang: 0x2b9, script: 0x37, flags: 0x2}, - 73: {lang: 0x19a, script: 0x5, flags: 0x2}, - 74: {lang: 0x248, script: 0x52, flags: 0x0}, - 75: {lang: 0xaf, script: 0x52, flags: 0x0}, - 76: {lang: 0xaf, script: 0x52, flags: 0x0}, - 77: {lang: 0x85, script: 0x52, flags: 0x0}, - 78: {lang: 0xaf, script: 0x52, flags: 0x0}, - 80: {lang: 0x9d, script: 0x52, flags: 0x0}, - 81: {lang: 0xaf, script: 0x52, flags: 0x0}, - 82: {lang: 0xa, script: 0x5, flags: 
0x1}, - 83: {lang: 0x9d, script: 0x52, flags: 0x0}, - 84: {lang: 0x0, script: 0x52, flags: 0x0}, - 85: {lang: 0x9d, script: 0x52, flags: 0x0}, - 88: {lang: 0x9d, script: 0x52, flags: 0x0}, - 89: {lang: 0x1ef, script: 0x52, flags: 0x0}, - 90: {lang: 0x1db, script: 0x52, flags: 0x0}, - 92: {lang: 0xf, script: 0x2, flags: 0x1}, - 93: {lang: 0x79, script: 0x52, flags: 0x0}, - 95: {lang: 0x85, script: 0x52, flags: 0x0}, - 97: {lang: 0x1, script: 0x52, flags: 0x0}, - 98: {lang: 0x80, script: 0x52, flags: 0x0}, - 100: {lang: 0x9d, script: 0x52, flags: 0x0}, - 102: {lang: 0x11, script: 0x2, flags: 0x1}, - 103: {lang: 0x9d, script: 0x52, flags: 0x0}, - 104: {lang: 0x9d, script: 0x52, flags: 0x0}, - 105: {lang: 0x9f, script: 0x52, flags: 0x0}, - 106: {lang: 0x15, script: 0x5, flags: 0x0}, - 107: {lang: 0x15, script: 0x5, flags: 0x0}, - 108: {lang: 0x261, script: 0x27, flags: 0x0}, - 109: {lang: 0x9d, script: 0x52, flags: 0x0}, - 110: {lang: 0x13, script: 0x2, flags: 0x1}, - 112: {lang: 0xa8, script: 0x52, flags: 0x0}, - 113: {lang: 0xe2, script: 0x20, flags: 0x2}, - 116: {lang: 0xad, script: 0x52, flags: 0x0}, - 118: {lang: 0xaf, script: 0x52, flags: 0x0}, - 120: {lang: 0xaf, script: 0x52, flags: 0x0}, - 121: {lang: 0x15, script: 0x2, flags: 0x1}, - 123: {lang: 0x17, script: 0x3, flags: 0x1}, - 124: {lang: 0xaf, script: 0x52, flags: 0x0}, - 126: {lang: 0xd, script: 0x52, flags: 0x0}, - 128: {lang: 0x131, script: 0x52, flags: 0x0}, - 130: {lang: 0xaf, script: 0x52, flags: 0x0}, - 131: {lang: 0xaf, script: 0x52, flags: 0x0}, - 132: {lang: 0x9d, script: 0x52, flags: 0x0}, - 133: {lang: 0x1a, script: 0x2, flags: 0x1}, - 134: {lang: 0x0, script: 0x52, flags: 0x0}, - 135: {lang: 0x9d, script: 0x52, flags: 0x0}, - 137: {lang: 0x1ef, script: 0x52, flags: 0x0}, - 139: {lang: 0x2c4, script: 0x35, flags: 0x0}, - 140: {lang: 0x0, script: 0x52, flags: 0x0}, - 141: {lang: 0x9d, script: 0x52, flags: 0x0}, - 142: {lang: 0xee, script: 0x52, flags: 0x0}, - 143: {lang: 0xf1, script: 0x52, flags: 0x0}, - 144: {lang: 0xf2, script: 0x52, flags: 0x0}, - 146: {lang: 0x9d, script: 0x52, flags: 0x0}, - 147: {lang: 0x1c, script: 0x2, flags: 0x1}, - 149: {lang: 0xe0, script: 0x37, flags: 0x0}, - 151: {lang: 0x1e, script: 0x3, flags: 0x1}, - 153: {lang: 0x15, script: 0x5, flags: 0x0}, - 154: {lang: 0x21, script: 0x2, flags: 0x1}, - 155: {lang: 0x102, script: 0x52, flags: 0x0}, - 156: {lang: 0x103, script: 0x52, flags: 0x0}, - 159: {lang: 0x15, script: 0x5, flags: 0x0}, - 160: {lang: 0x107, script: 0x41, flags: 0x0}, - 162: {lang: 0x248, script: 0x52, flags: 0x0}, - 163: {lang: 0x14d, script: 0x1e, flags: 0x0}, - 164: {lang: 0x23, script: 0x3, flags: 0x1}, - 166: {lang: 0x26, script: 0x2, flags: 0x1}, - 168: {lang: 0x136, script: 0x4b, flags: 0x0}, - 169: {lang: 0x136, script: 0x4b, flags: 0x0}, - 170: {lang: 0x15, script: 0x5, flags: 0x0}, - 172: {lang: 0x207, script: 0x1e, flags: 0x0}, - 173: {lang: 0x28, script: 0x2, flags: 0x1}, - 174: {lang: 0x15, script: 0x5, flags: 0x0}, - 176: {lang: 0x85, script: 0x52, flags: 0x0}, - 177: {lang: 0x228, script: 0xc1, flags: 0x0}, - 179: {lang: 0x242, script: 0x52, flags: 0x0}, - 180: {lang: 0x169, script: 0x52, flags: 0x0}, - 181: {lang: 0xaf, script: 0x52, flags: 0x0}, - 182: {lang: 0x170, script: 0x52, flags: 0x0}, - 183: {lang: 0x15, script: 0x5, flags: 0x0}, - 184: {lang: 0x2a, script: 0x2, flags: 0x1}, - 185: {lang: 0xaf, script: 0x52, flags: 0x0}, - 186: {lang: 0x2c, script: 0x2, flags: 0x1}, - 187: {lang: 0x23b, script: 0x52, flags: 0x0}, - 188: {lang: 0xaf, script: 0x52, flags: 
0x0}, - 189: {lang: 0x183, script: 0x52, flags: 0x0}, - 192: {lang: 0x2e, script: 0x2, flags: 0x1}, - 193: {lang: 0x49, script: 0x52, flags: 0x0}, - 194: {lang: 0x30, script: 0x2, flags: 0x1}, - 195: {lang: 0x32, script: 0x2, flags: 0x1}, - 196: {lang: 0x34, script: 0x2, flags: 0x1}, - 198: {lang: 0xaf, script: 0x52, flags: 0x0}, - 199: {lang: 0x36, script: 0x2, flags: 0x1}, - 201: {lang: 0x19b, script: 0x52, flags: 0x0}, - 202: {lang: 0x38, script: 0x3, flags: 0x1}, - 203: {lang: 0x90, script: 0xd4, flags: 0x0}, - 205: {lang: 0x9d, script: 0x52, flags: 0x0}, - 206: {lang: 0x19a, script: 0x52, flags: 0x0}, - 207: {lang: 0x1ef, script: 0x52, flags: 0x0}, - 208: {lang: 0xa, script: 0x52, flags: 0x0}, - 209: {lang: 0xaf, script: 0x52, flags: 0x0}, - 210: {lang: 0xdc, script: 0x52, flags: 0x0}, - 212: {lang: 0xdc, script: 0x5, flags: 0x2}, - 214: {lang: 0x9d, script: 0x52, flags: 0x0}, - 215: {lang: 0x1bd, script: 0x52, flags: 0x0}, - 216: {lang: 0x1af, script: 0x52, flags: 0x0}, - 217: {lang: 0x1b4, script: 0x20, flags: 0x0}, - 223: {lang: 0x15, script: 0x5, flags: 0x0}, - 224: {lang: 0x9d, script: 0x52, flags: 0x0}, - 226: {lang: 0x9d, script: 0x52, flags: 0x0}, - 227: {lang: 0xaf, script: 0x52, flags: 0x0}, - 228: {lang: 0x26e, script: 0x52, flags: 0x0}, - 229: {lang: 0xaa, script: 0x52, flags: 0x0}, - 230: {lang: 0x3b, script: 0x3, flags: 0x1}, - 231: {lang: 0x3e, script: 0x2, flags: 0x1}, - 232: {lang: 0xaf, script: 0x52, flags: 0x0}, - 234: {lang: 0x9d, script: 0x52, flags: 0x0}, - 235: {lang: 0x15, script: 0x5, flags: 0x0}, - 236: {lang: 0x1ef, script: 0x52, flags: 0x0}, - 238: {lang: 0x1dc, script: 0x52, flags: 0x0}, - 239: {lang: 0xca, script: 0x52, flags: 0x0}, - 241: {lang: 0x15, script: 0x5, flags: 0x0}, - 256: {lang: 0xaf, script: 0x52, flags: 0x0}, - 258: {lang: 0x40, script: 0x2, flags: 0x1}, - 259: {lang: 0x23b, script: 0x1e, flags: 0x0}, - 260: {lang: 0x42, script: 0x2, flags: 0x1}, - 261: {lang: 0x20a, script: 0x52, flags: 0x0}, - 262: {lang: 0x15, script: 0x5, flags: 0x0}, - 264: {lang: 0xaf, script: 0x52, flags: 0x0}, - 265: {lang: 0x15, script: 0x5, flags: 0x0}, - 266: {lang: 0x44, script: 0x2, flags: 0x1}, - 269: {lang: 0x22c, script: 0x52, flags: 0x0}, - 270: {lang: 0x1af, script: 0x52, flags: 0x0}, - 271: {lang: 0x46, script: 0x2, flags: 0x1}, - 273: {lang: 0x103, script: 0x52, flags: 0x0}, - 274: {lang: 0xaf, script: 0x52, flags: 0x0}, - 275: {lang: 0x238, script: 0x52, flags: 0x0}, - 276: {lang: 0x1bd, script: 0x52, flags: 0x0}, - 278: {lang: 0x1ef, script: 0x52, flags: 0x0}, - 280: {lang: 0x9d, script: 0x52, flags: 0x0}, - 282: {lang: 0x48, script: 0x2, flags: 0x1}, - 286: {lang: 0xaf, script: 0x52, flags: 0x0}, - 287: {lang: 0xaf, script: 0x52, flags: 0x0}, - 288: {lang: 0xaf, script: 0x52, flags: 0x0}, - 289: {lang: 0x4a, script: 0x3, flags: 0x1}, - 290: {lang: 0x4d, script: 0x2, flags: 0x1}, - 291: {lang: 0x265, script: 0x52, flags: 0x0}, - 292: {lang: 0x1ef, script: 0x52, flags: 0x0}, - 293: {lang: 0x264, script: 0x52, flags: 0x0}, - 294: {lang: 0x4f, script: 0x2, flags: 0x1}, - 295: {lang: 0x26c, script: 0x52, flags: 0x0}, - 297: {lang: 0x51, script: 0x4, flags: 0x1}, - 299: {lang: 0x27c, script: 0x52, flags: 0x0}, - 300: {lang: 0x55, script: 0x2, flags: 0x1}, - 301: {lang: 0x248, script: 0x52, flags: 0x0}, - 302: {lang: 0x57, script: 0x3, flags: 0x1}, - 303: {lang: 0x248, script: 0x52, flags: 0x0}, - 306: {lang: 0x2b9, script: 0x37, flags: 0x2}, - 307: {lang: 0x9d, script: 0x52, flags: 0x0}, - 308: {lang: 0x28d, script: 0x52, flags: 0x0}, - 309: {lang: 0x103, 
script: 0x52, flags: 0x0}, - 312: {lang: 0x9d, script: 0x52, flags: 0x0}, - 315: {lang: 0x292, script: 0x52, flags: 0x0}, - 316: {lang: 0x41, script: 0x52, flags: 0x0}, - 317: {lang: 0xaf, script: 0x52, flags: 0x0}, - 319: {lang: 0x22f, script: 0x52, flags: 0x0}, - 330: {lang: 0x5a, script: 0x2, flags: 0x1}, - 347: {lang: 0x15, script: 0x5, flags: 0x0}, - 348: {lang: 0x5c, script: 0x2, flags: 0x1}, - 353: {lang: 0x236, script: 0x52, flags: 0x0}, -} - -// likelyRegionList holds lists info associated with likelyRegion. -// Size: 376 bytes, 94 elements -var likelyRegionList = [94]likelyLangScript{ - 0: {lang: 0xa4, script: 0x5, flags: 0x0}, - 1: {lang: 0x264, script: 0x52, flags: 0x0}, - 2: {lang: 0x23a, script: 0x52, flags: 0x0}, - 3: {lang: 0x18c, script: 0x1e, flags: 0x0}, - 4: {lang: 0xf3, script: 0x8, flags: 0x0}, - 5: {lang: 0x145, script: 0x52, flags: 0x0}, - 6: {lang: 0x54, script: 0x52, flags: 0x0}, - 7: {lang: 0x23b, script: 0x1e, flags: 0x0}, - 8: {lang: 0x93, script: 0xd6, flags: 0x0}, - 9: {lang: 0x1b4, script: 0x20, flags: 0x0}, - 10: {lang: 0x2c4, script: 0x34, flags: 0x0}, - 11: {lang: 0x284, script: 0x5, flags: 0x0}, - 12: {lang: 0x2bd, script: 0x35, flags: 0x0}, - 13: {lang: 0x2be, script: 0x52, flags: 0x0}, - 14: {lang: 0x157, script: 0xd5, flags: 0x0}, - 15: {lang: 0x9a, script: 0x2d, flags: 0x0}, - 16: {lang: 0x26f, script: 0x52, flags: 0x0}, - 17: {lang: 0x15, script: 0x5, flags: 0x0}, - 18: {lang: 0xaf, script: 0x52, flags: 0x0}, - 19: {lang: 0x11, script: 0x27, flags: 0x0}, - 20: {lang: 0x9b, script: 0x52, flags: 0x0}, - 21: {lang: 0x141, script: 0x5, flags: 0x2}, - 22: {lang: 0x2b9, script: 0x37, flags: 0x2}, - 23: {lang: 0x111, script: 0x29, flags: 0x0}, - 24: {lang: 0x2, script: 0x1e, flags: 0x0}, - 25: {lang: 0x145, script: 0x52, flags: 0x0}, - 26: {lang: 0x9a, script: 0x2d, flags: 0x0}, - 27: {lang: 0x18c, script: 0x1e, flags: 0x0}, - 28: {lang: 0xf8, script: 0x52, flags: 0x0}, - 29: {lang: 0x19a, script: 0x5, flags: 0x0}, - 30: {lang: 0xe1, script: 0x20, flags: 0x0}, - 31: {lang: 0x28c, script: 0x5, flags: 0x0}, - 32: {lang: 0x129, script: 0x6b, flags: 0x0}, - 33: {lang: 0xa4, script: 0x5, flags: 0x0}, - 34: {lang: 0x264, script: 0x52, flags: 0x0}, - 35: {lang: 0x133, script: 0x46, flags: 0x0}, - 36: {lang: 0x6d, script: 0x5, flags: 0x0}, - 37: {lang: 0x11c, script: 0xd5, flags: 0x0}, - 38: {lang: 0x15, script: 0x5, flags: 0x0}, - 39: {lang: 0xaf, script: 0x52, flags: 0x0}, - 40: {lang: 0x165, script: 0x4f, flags: 0x0}, - 41: {lang: 0x11c, script: 0xd5, flags: 0x0}, - 42: {lang: 0x15, script: 0x5, flags: 0x0}, - 43: {lang: 0xaf, script: 0x52, flags: 0x0}, - 44: {lang: 0x203, script: 0x52, flags: 0x0}, - 45: {lang: 0x286, script: 0x1e, flags: 0x0}, - 46: {lang: 0x18c, script: 0x1e, flags: 0x0}, - 47: {lang: 0x23a, script: 0x52, flags: 0x0}, - 48: {lang: 0x1a5, script: 0x6b, flags: 0x0}, - 49: {lang: 0x114, script: 0x52, flags: 0x0}, - 50: {lang: 0x18f, script: 0x1e, flags: 0x0}, - 51: {lang: 0x12f, script: 0x5, flags: 0x0}, - 52: {lang: 0x2c4, script: 0x35, flags: 0x0}, - 53: {lang: 0x1ef, script: 0x52, flags: 0x0}, - 54: {lang: 0x15, script: 0x5, flags: 0x0}, - 55: {lang: 0xaf, script: 0x52, flags: 0x0}, - 56: {lang: 0x182, script: 0x52, flags: 0x0}, - 57: {lang: 0x28c, script: 0x5, flags: 0x0}, - 58: {lang: 0x40, script: 0x20, flags: 0x0}, - 59: {lang: 0x28c, script: 0x5, flags: 0x0}, - 60: {lang: 0x28c, script: 0x5, flags: 0x0}, - 61: {lang: 0x58, script: 0x20, flags: 0x0}, - 62: {lang: 0x1e7, script: 0x52, flags: 0x0}, - 63: {lang: 0x2f, script: 0x1e, flags: 
0x0}, - 64: {lang: 0x203, script: 0x52, flags: 0x0}, - 65: {lang: 0x38, script: 0x1e, flags: 0x0}, - 66: {lang: 0x207, script: 0x1e, flags: 0x0}, - 67: {lang: 0x13f, script: 0x52, flags: 0x0}, - 68: {lang: 0x247, script: 0x52, flags: 0x0}, - 69: {lang: 0x2b9, script: 0x37, flags: 0x0}, - 70: {lang: 0x22a, script: 0x52, flags: 0x0}, - 71: {lang: 0x286, script: 0x1e, flags: 0x0}, - 72: {lang: 0x15, script: 0x5, flags: 0x0}, - 73: {lang: 0xaf, script: 0x52, flags: 0x0}, - 74: {lang: 0x25d, script: 0xd5, flags: 0x0}, - 75: {lang: 0x181, script: 0x5, flags: 0x0}, - 76: {lang: 0x191, script: 0x6b, flags: 0x0}, - 77: {lang: 0x25c, script: 0x1e, flags: 0x0}, - 78: {lang: 0xa4, script: 0x5, flags: 0x0}, - 79: {lang: 0x15, script: 0x5, flags: 0x0}, - 80: {lang: 0xaf, script: 0x52, flags: 0x0}, - 81: {lang: 0x26f, script: 0x52, flags: 0x0}, - 82: {lang: 0x24, script: 0x5, flags: 0x0}, - 83: {lang: 0x118, script: 0x1e, flags: 0x0}, - 84: {lang: 0x3b, script: 0x2d, flags: 0x0}, - 85: {lang: 0x2c4, script: 0x35, flags: 0x0}, - 86: {lang: 0x271, script: 0x52, flags: 0x0}, - 87: {lang: 0x286, script: 0x1e, flags: 0x0}, - 88: {lang: 0x2b9, script: 0x37, flags: 0x0}, - 89: {lang: 0x1e7, script: 0x52, flags: 0x0}, - 90: {lang: 0x23a, script: 0x52, flags: 0x0}, - 91: {lang: 0x23b, script: 0x1e, flags: 0x0}, - 92: {lang: 0xaf, script: 0x52, flags: 0x0}, - 93: {lang: 0x249, script: 0x5, flags: 0x0}, -} - -type likelyTag struct { - lang uint16 - region uint16 - script uint8 -} - -// Size: 192 bytes, 32 elements -var likelyRegionGroup = [32]likelyTag{ - 1: {lang: 0x9b, region: 0xd4, script: 0x52}, - 2: {lang: 0x9b, region: 0x132, script: 0x52}, - 3: {lang: 0x1ef, region: 0x40, script: 0x52}, - 4: {lang: 0x9b, region: 0x2e, script: 0x52}, - 5: {lang: 0x9b, region: 0xd4, script: 0x52}, - 6: {lang: 0x9d, region: 0xcd, script: 0x52}, - 7: {lang: 0x248, region: 0x12d, script: 0x52}, - 8: {lang: 0x15, region: 0x6a, script: 0x5}, - 9: {lang: 0x248, region: 0x4a, script: 0x52}, - 10: {lang: 0x9b, region: 0x15e, script: 0x52}, - 11: {lang: 0x9b, region: 0x132, script: 0x52}, - 12: {lang: 0x9b, region: 0x132, script: 0x52}, - 13: {lang: 0x9d, region: 0x58, script: 0x52}, - 14: {lang: 0x2c4, region: 0x52, script: 0x34}, - 15: {lang: 0xe1, region: 0x97, script: 0x20}, - 16: {lang: 0xf8, region: 0x93, script: 0x52}, - 17: {lang: 0x103, region: 0x9c, script: 0x52}, - 18: {lang: 0x9b, region: 0x2e, script: 0x52}, - 19: {lang: 0x9b, region: 0xe4, script: 0x52}, - 20: {lang: 0x9b, region: 0x88, script: 0x52}, - 21: {lang: 0x22f, region: 0x13f, script: 0x52}, - 22: {lang: 0x2c4, region: 0x52, script: 0x34}, - 23: {lang: 0x28d, region: 0x134, script: 0x52}, - 24: {lang: 0x15, region: 0x106, script: 0x5}, - 25: {lang: 0x207, region: 0x104, script: 0x1e}, - 26: {lang: 0x207, region: 0x104, script: 0x1e}, - 27: {lang: 0x9b, region: 0x79, script: 0x52}, - 28: {lang: 0x85, region: 0x5f, script: 0x52}, - 29: {lang: 0x9d, region: 0x1e, script: 0x52}, - 30: {lang: 0x9b, region: 0x98, script: 0x52}, - 31: {lang: 0x9b, region: 0x79, script: 0x52}, -} - -type mutualIntelligibility struct { - want uint16 - have uint16 - conf uint8 - oneway bool -} - -type scriptIntelligibility struct { - lang uint16 - want uint8 - have uint8 - conf uint8 -} - -// matchLang holds pairs of langIDs of base languages that are typically -// mutually intelligible. Each pair is associated with a confidence and -// whether the intelligibility goes one or both ways. 
-// Size: 708 bytes, 118 elements -var matchLang = [118]mutualIntelligibility{ - 0: {want: 0x1c1, have: 0x1af, conf: 0x2, oneway: false}, - 1: {want: 0x145, have: 0x6f, conf: 0x2, oneway: false}, - 2: {want: 0xee, have: 0x54, conf: 0x2, oneway: false}, - 3: {want: 0x225, have: 0x54, conf: 0x2, oneway: false}, - 4: {want: 0x23b, have: 0x54, conf: 0x2, oneway: false}, - 5: {want: 0x225, have: 0xee, conf: 0x2, oneway: false}, - 6: {want: 0x23b, have: 0xee, conf: 0x2, oneway: false}, - 7: {want: 0x225, have: 0x23b, conf: 0x2, oneway: false}, - 8: {want: 0x241, have: 0x1, conf: 0x2, oneway: false}, - 9: {want: 0xd2, have: 0x85, conf: 0x2, oneway: true}, - 10: {want: 0x154, have: 0x85, conf: 0x2, oneway: true}, - 11: {want: 0x80, have: 0x1c1, conf: 0x2, oneway: false}, - 12: {want: 0x80, have: 0x1af, conf: 0x2, oneway: false}, - 13: {want: 0x6f, have: 0x145, conf: 0x2, oneway: false}, - 14: {want: 0x2, have: 0x207, conf: 0x2, oneway: true}, - 15: {want: 0x5, have: 0x9b, conf: 0x2, oneway: true}, - 16: {want: 0xa, have: 0x1bd, conf: 0x2, oneway: true}, - 17: {want: 0xd, have: 0x9b, conf: 0x2, oneway: true}, - 18: {want: 0x23, have: 0x9d, conf: 0x2, oneway: true}, - 19: {want: 0x24, have: 0x207, conf: 0x2, oneway: true}, - 20: {want: 0x2f, have: 0x207, conf: 0x2, oneway: true}, - 21: {want: 0x31, have: 0x9b, conf: 0x2, oneway: true}, - 22: {want: 0x3c, have: 0xe1, conf: 0x2, oneway: true}, - 23: {want: 0x4b, have: 0x9b, conf: 0x2, oneway: true}, - 24: {want: 0x50, have: 0xaf, conf: 0x2, oneway: true}, - 25: {want: 0x65, have: 0xaa, conf: 0x2, oneway: true}, - 26: {want: 0x6c, have: 0x9b, conf: 0x2, oneway: true}, - 27: {want: 0x6f, have: 0x15, conf: 0x2, oneway: true}, - 28: {want: 0x70, have: 0xaf, conf: 0x2, oneway: true}, - 29: {want: 0x78, have: 0xaf, conf: 0x2, oneway: true}, - 30: {want: 0x7f, have: 0x9b, conf: 0x2, oneway: true}, - 31: {want: 0x95, have: 0x9b, conf: 0x2, oneway: true}, - 32: {want: 0x9c, have: 0x9b, conf: 0x2, oneway: true}, - 33: {want: 0x9f, have: 0xa8, conf: 0x2, oneway: true}, - 34: {want: 0xa1, have: 0x9d, conf: 0x2, oneway: true}, - 35: {want: 0xad, have: 0x80, conf: 0x2, oneway: true}, - 36: {want: 0xb9, have: 0x1bd, conf: 0x2, oneway: true}, - 37: {want: 0xba, have: 0x9b, conf: 0x2, oneway: true}, - 38: {want: 0xbb, have: 0x9b, conf: 0x2, oneway: true}, - 39: {want: 0xc2, have: 0x9b, conf: 0x2, oneway: true}, - 40: {want: 0xc8, have: 0x9d, conf: 0x2, oneway: true}, - 41: {want: 0xca, have: 0x9d, conf: 0x2, oneway: true}, - 42: {want: 0xd3, have: 0xe1, conf: 0x2, oneway: true}, - 43: {want: 0xdc, have: 0x9b, conf: 0x2, oneway: true}, - 44: {want: 0xde, have: 0x9b, conf: 0x2, oneway: true}, - 45: {want: 0xf1, have: 0xaf, conf: 0x2, oneway: true}, - 46: {want: 0xf3, have: 0x207, conf: 0x2, oneway: true}, - 47: {want: 0xf5, have: 0x9b, conf: 0x2, oneway: true}, - 48: {want: 0xfa, have: 0x9b, conf: 0x2, oneway: true}, - 49: {want: 0x102, have: 0x9b, conf: 0x2, oneway: true}, - 50: {want: 0x10f, have: 0xf8, conf: 0x2, oneway: true}, - 51: {want: 0x111, have: 0x9b, conf: 0x2, oneway: true}, - 52: {want: 0x122, have: 0xaf, conf: 0x2, oneway: true}, - 53: {want: 0x12f, have: 0x207, conf: 0x2, oneway: true}, - 54: {want: 0x133, have: 0x9b, conf: 0x2, oneway: true}, - 55: {want: 0x135, have: 0x9b, conf: 0x2, oneway: true}, - 56: {want: 0x13d, have: 0x9b, conf: 0x2, oneway: true}, - 57: {want: 0x145, have: 0x26f, conf: 0x2, oneway: true}, - 58: {want: 0x14d, have: 0x207, conf: 0x2, oneway: true}, - 59: {want: 0x14e, have: 0x103, conf: 0x2, oneway: true}, - 60: {want: 0x15a, 
have: 0x9b, conf: 0x2, oneway: true}, - 61: {want: 0x164, have: 0xaf, conf: 0x2, oneway: true}, - 62: {want: 0x165, have: 0x9b, conf: 0x2, oneway: true}, - 63: {want: 0x167, have: 0x9b, conf: 0x2, oneway: true}, - 64: {want: 0x16c, have: 0xaf, conf: 0x2, oneway: true}, - 65: {want: 0x182, have: 0x9b, conf: 0x2, oneway: true}, - 66: {want: 0x183, have: 0xaf, conf: 0x2, oneway: true}, - 67: {want: 0x189, have: 0x9b, conf: 0x2, oneway: true}, - 68: {want: 0x18c, have: 0x38, conf: 0x2, oneway: true}, - 69: {want: 0x18d, have: 0x9b, conf: 0x2, oneway: true}, - 70: {want: 0x18f, have: 0x207, conf: 0x2, oneway: true}, - 71: {want: 0x196, have: 0xe1, conf: 0x2, oneway: true}, - 72: {want: 0x19a, have: 0xf8, conf: 0x2, oneway: true}, - 73: {want: 0x19b, have: 0x9b, conf: 0x2, oneway: true}, - 74: {want: 0x1a5, have: 0x9b, conf: 0x2, oneway: true}, - 75: {want: 0x1b4, have: 0x9b, conf: 0x2, oneway: true}, - 76: {want: 0x1bf, have: 0x1af, conf: 0x2, oneway: false}, - 77: {want: 0x1bf, have: 0x1c1, conf: 0x2, oneway: true}, - 78: {want: 0x1c8, have: 0x9b, conf: 0x2, oneway: true}, - 79: {want: 0x1cc, have: 0x9b, conf: 0x2, oneway: true}, - 80: {want: 0x1ce, have: 0x9b, conf: 0x2, oneway: true}, - 81: {want: 0x1d0, have: 0xaf, conf: 0x2, oneway: true}, - 82: {want: 0x1d2, have: 0x9b, conf: 0x2, oneway: true}, - 83: {want: 0x1d3, have: 0x9b, conf: 0x2, oneway: true}, - 84: {want: 0x1d7, have: 0x9b, conf: 0x2, oneway: true}, - 85: {want: 0x1de, have: 0x9b, conf: 0x2, oneway: true}, - 86: {want: 0x1ee, have: 0x9b, conf: 0x2, oneway: true}, - 87: {want: 0x1f1, have: 0x9d, conf: 0x2, oneway: true}, - 88: {want: 0x1fc, have: 0x85, conf: 0x2, oneway: true}, - 89: {want: 0x201, have: 0x9b, conf: 0x2, oneway: true}, - 90: {want: 0x20a, have: 0xaf, conf: 0x2, oneway: true}, - 91: {want: 0x20d, have: 0xe1, conf: 0x2, oneway: true}, - 92: {want: 0x21a, have: 0x9b, conf: 0x2, oneway: true}, - 93: {want: 0x228, have: 0x9b, conf: 0x2, oneway: true}, - 94: {want: 0x236, have: 0x9b, conf: 0x2, oneway: true}, - 95: {want: 0x238, have: 0x9b, conf: 0x2, oneway: true}, - 96: {want: 0x23a, have: 0x9b, conf: 0x2, oneway: true}, - 97: {want: 0x242, have: 0x9b, conf: 0x2, oneway: true}, - 98: {want: 0x244, have: 0xf8, conf: 0x2, oneway: true}, - 99: {want: 0x248, have: 0x9b, conf: 0x2, oneway: true}, - 100: {want: 0x251, have: 0x9b, conf: 0x2, oneway: true}, - 101: {want: 0x258, have: 0x9b, conf: 0x2, oneway: true}, - 102: {want: 0x25c, have: 0x207, conf: 0x2, oneway: true}, - 103: {want: 0x261, have: 0x9b, conf: 0x2, oneway: true}, - 104: {want: 0x264, have: 0x207, conf: 0x2, oneway: true}, - 105: {want: 0x361a, have: 0x9b, conf: 0x2, oneway: true}, - 106: {want: 0x26b, have: 0x9b, conf: 0x2, oneway: true}, - 107: {want: 0x26c, have: 0x9b, conf: 0x2, oneway: true}, - 108: {want: 0x277, have: 0x207, conf: 0x2, oneway: true}, - 109: {want: 0x27b, have: 0x9b, conf: 0x2, oneway: true}, - 110: {want: 0x284, have: 0x2c4, conf: 0x2, oneway: true}, - 111: {want: 0x28c, have: 0x9b, conf: 0x2, oneway: true}, - 112: {want: 0x28d, have: 0x207, conf: 0x2, oneway: true}, - 113: {want: 0x2a4, have: 0xaf, conf: 0x2, oneway: true}, - 114: {want: 0x2a9, have: 0x9b, conf: 0x2, oneway: true}, - 115: {want: 0x2b9, have: 0x9b, conf: 0x2, oneway: true}, - 116: {want: 0x2ba, have: 0x9b, conf: 0x2, oneway: true}, - 117: {want: 0x2c6, have: 0x9b, conf: 0x2, oneway: true}, -} - -// matchScript holds pairs of scriptIDs where readers of one script -// can typically also read the other. Each is associated with a confidence. 
-// Size: 24 bytes, 4 elements -var matchScript = [4]scriptIntelligibility{ - 0: {lang: 0x23b, want: 0x52, have: 0x1e, conf: 0x2}, - 1: {lang: 0x23b, want: 0x1e, have: 0x52, conf: 0x2}, - 2: {lang: 0x0, want: 0x34, have: 0x35, conf: 0x1}, - 3: {lang: 0x0, want: 0x35, have: 0x34, conf: 0x1}, -} - -// Size: 128 bytes, 32 elements -var regionContainment = [32]uint32{ - 0xffffffff, 0x000007a2, 0x00003044, 0x00000008, - 0x403c0010, 0x00000020, 0x00000040, 0x00000080, - 0x00000100, 0x00000200, 0x00000400, 0x2000384c, - 0x00001000, 0x00002000, 0x00004000, 0x00008000, - 0x00010000, 0x00020000, 0x00040000, 0x00080000, - 0x00100000, 0x00200000, 0x01c1c000, 0x00800000, - 0x01000000, 0x1e020000, 0x04000000, 0x08000000, - 0x10000000, 0x20002048, 0x40000000, 0x80000000, -} - -// regionInclusion maps region identifiers to sets of regions in regionInclusionBits, -// where each set holds all groupings that are directly connected in a region -// containment graph. -// Size: 355 bytes, 355 elements -var regionInclusion = [355]uint8{ - // Entry 0 - 3F - 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, - 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, - 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, - 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x20, - 0x21, 0x22, 0x23, 0x24, 0x25, 0x25, 0x22, 0x23, - 0x25, 0x26, 0x21, 0x27, 0x28, 0x29, 0x2a, 0x25, - 0x2b, 0x23, 0x22, 0x25, 0x24, 0x29, 0x2c, 0x2d, - 0x23, 0x2e, 0x2c, 0x25, 0x2f, 0x30, 0x27, 0x25, - // Entry 40 - 7F - 0x27, 0x25, 0x24, 0x30, 0x21, 0x31, 0x32, 0x33, - 0x2f, 0x21, 0x26, 0x26, 0x26, 0x34, 0x2c, 0x28, - 0x27, 0x26, 0x35, 0x27, 0x21, 0x33, 0x22, 0x20, - 0x25, 0x2c, 0x25, 0x21, 0x36, 0x2d, 0x34, 0x29, - 0x21, 0x2e, 0x37, 0x25, 0x25, 0x20, 0x38, 0x38, - 0x27, 0x37, 0x38, 0x38, 0x2e, 0x39, 0x2e, 0x1f, - 0x37, 0x3a, 0x27, 0x3b, 0x2b, 0x20, 0x29, 0x34, - 0x26, 0x37, 0x25, 0x23, 0x27, 0x2b, 0x2c, 0x22, - // Entry 80 - BF - 0x2f, 0x2c, 0x2c, 0x25, 0x26, 0x39, 0x21, 0x33, - 0x3b, 0x2c, 0x27, 0x35, 0x21, 0x33, 0x39, 0x25, - 0x2d, 0x20, 0x38, 0x30, 0x37, 0x23, 0x2b, 0x24, - 0x21, 0x23, 0x24, 0x2b, 0x39, 0x2b, 0x25, 0x23, - 0x35, 0x20, 0x2e, 0x3c, 0x30, 0x3b, 0x2e, 0x25, - 0x35, 0x35, 0x23, 0x25, 0x3c, 0x30, 0x23, 0x25, - 0x34, 0x24, 0x2c, 0x31, 0x37, 0x29, 0x37, 0x38, - 0x38, 0x34, 0x32, 0x22, 0x25, 0x2e, 0x3b, 0x20, - // Entry C0 - FF - 0x22, 0x2c, 0x30, 0x35, 0x35, 0x3b, 0x25, 0x2c, - 0x25, 0x39, 0x2e, 0x24, 0x2e, 0x33, 0x30, 0x2e, - 0x31, 0x3a, 0x2c, 0x2a, 0x2c, 0x20, 0x33, 0x29, - 0x2b, 0x24, 0x20, 0x3b, 0x23, 0x28, 0x2a, 0x23, - 0x33, 0x20, 0x27, 0x28, 0x3a, 0x30, 0x24, 0x2d, - 0x2f, 0x28, 0x25, 0x23, 0x39, 0x20, 0x3b, 0x27, - 0x20, 0x23, 0x20, 0x20, 0x1e, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - // Entry 100 - 13F - 0x2e, 0x20, 0x2d, 0x22, 0x32, 0x2e, 0x23, 0x3a, - 0x2e, 0x38, 0x37, 0x30, 0x2c, 0x39, 0x2b, 0x2d, - 0x2c, 0x22, 0x2c, 0x2e, 0x27, 0x2e, 0x26, 0x32, - 0x33, 0x25, 0x23, 0x31, 0x21, 0x25, 0x26, 0x21, - 0x2c, 0x30, 0x3c, 0x28, 0x30, 0x3c, 0x38, 0x28, - 0x30, 0x23, 0x25, 0x28, 0x35, 0x2e, 0x32, 0x2e, - 0x20, 0x21, 0x2f, 0x27, 0x3c, 0x22, 0x25, 0x20, - 0x27, 0x25, 0x25, 0x30, 0x3a, 0x28, 0x20, 0x28, - // Entry 140 - 17F - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x22, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x23, 0x23, 0x2e, 0x22, 0x31, 0x2e, - 0x26, 0x2e, 0x20, -} - -// regionInclusionBits is an array of bit vectors where every vector represents -// a set of region groupings. 
These sets are used to compute the distance -// between two regions for the purpose of language matching. -// Size: 288 bytes, 72 elements -var regionInclusionBits = [72]uint32{ - // Entry 0 - 1F - 0x82400813, 0x000007a3, 0x00003844, 0x20000808, - 0x403c0011, 0x00000022, 0x20000844, 0x00000082, - 0x00000102, 0x00000202, 0x00000402, 0x2000384d, - 0x00001804, 0x20002804, 0x00404000, 0x00408000, - 0x00410000, 0x02020000, 0x00040010, 0x00080010, - 0x00100010, 0x00200010, 0x01c1c001, 0x00c00000, - 0x01400000, 0x1e020001, 0x06000000, 0x0a000000, - 0x12000000, 0x20002848, 0x40000010, 0x80000001, - // Entry 20 - 3F - 0x00000001, 0x40000000, 0x00020000, 0x01000000, - 0x00008000, 0x00002000, 0x00000200, 0x00000008, - 0x00200000, 0x90000000, 0x00040000, 0x08000000, - 0x00000020, 0x84000000, 0x00000080, 0x00001000, - 0x00010000, 0x00000400, 0x04000000, 0x00000040, - 0x10000000, 0x00004000, 0x81000000, 0x88000000, - 0x00000100, 0x80020000, 0x00080000, 0x00100000, - 0x00800000, 0xffffffff, 0x82400fb3, 0xc27c0813, - // Entry 40 - 5F - 0xa240385f, 0x83c1c813, 0x9e420813, 0x92000001, - 0x86000001, 0x81400001, 0x8a000001, 0x82020001, -} - -// regionInclusionNext marks, for each entry in regionInclusionBits, the set of -// all groups that are reachable from the groups set in the respective entry. -// Size: 72 bytes, 72 elements -var regionInclusionNext = [72]uint8{ - // Entry 0 - 3F - 0x3d, 0x3e, 0x0b, 0x0b, 0x3f, 0x01, 0x0b, 0x01, - 0x01, 0x01, 0x01, 0x40, 0x0b, 0x0b, 0x16, 0x16, - 0x16, 0x19, 0x04, 0x04, 0x04, 0x04, 0x41, 0x16, - 0x16, 0x42, 0x19, 0x19, 0x19, 0x0b, 0x04, 0x00, - 0x00, 0x1e, 0x11, 0x18, 0x0f, 0x0d, 0x09, 0x03, - 0x15, 0x43, 0x12, 0x1b, 0x05, 0x44, 0x07, 0x0c, - 0x10, 0x0a, 0x1a, 0x06, 0x1c, 0x0e, 0x45, 0x46, - 0x08, 0x47, 0x13, 0x14, 0x17, 0x3d, 0x3d, 0x3d, - // Entry 40 - 7F - 0x3d, 0x3d, 0x3d, 0x42, 0x42, 0x41, 0x42, 0x42, -} - -type parentRel struct { - lang uint16 - script uint8 - maxScript uint8 - toRegion uint16 - fromRegion []uint16 -} - -// Size: 412 bytes, 5 elements -var parents = [5]parentRel{ - 0: {lang: 0x9b, script: 0x0, maxScript: 0x52, toRegion: 0x1, fromRegion: []uint16{0x1a, 0x24, 0x25, 0x2e, 0x33, 0x35, 0x3c, 0x41, 0x45, 0x47, 0x48, 0x49, 0x4f, 0x51, 0x5b, 0x5c, 0x60, 0x63, 0x6c, 0x71, 0x72, 0x73, 0x79, 0x7a, 0x7d, 0x7e, 0x7f, 0x81, 0x8a, 0x8b, 0x94, 0x95, 0x96, 0x97, 0x98, 0x9d, 0x9e, 0xa2, 0xa5, 0xa7, 0xab, 0xaf, 0xb2, 0xb3, 0xbd, 0xc4, 0xc8, 0xc9, 0xca, 0xcc, 0xce, 0xd0, 0xd3, 0xd4, 0xdb, 0xdd, 0xde, 0xe4, 0xe5, 0xe6, 0xe9, 0xee, 0x105, 0x107, 0x108, 0x109, 0x10b, 0x10c, 0x110, 0x115, 0x119, 0x11b, 0x11d, 0x123, 0x127, 0x12a, 0x12b, 0x12d, 0x12f, 0x136, 0x139, 0x13c, 0x13f, 0x15e, 0x15f, 0x161}}, - 1: {lang: 0x9b, script: 0x0, maxScript: 0x52, toRegion: 0x1a, fromRegion: []uint16{0x2d, 0x4d, 0x5f, 0x62, 0x70, 0xd7, 0x10a, 0x10d}}, - 2: {lang: 0x9d, script: 0x0, maxScript: 0x52, toRegion: 0x1e, fromRegion: []uint16{0x2b, 0x3e, 0x40, 0x50, 0x53, 0x55, 0x58, 0x64, 0x68, 0x87, 0x8d, 0xcd, 0xd6, 0xe0, 0xe2, 0xea, 0xef, 0x118, 0x132, 0x133, 0x138}}, - 3: {lang: 0x1ef, script: 0x0, maxScript: 0x52, toRegion: 0xec, fromRegion: []uint16{0x29, 0x4d, 0x59, 0x84, 0x89, 0xb5, 0xc4, 0xcf, 0x116, 0x124}}, - 4: {lang: 0x2c4, script: 0x35, maxScript: 0x35, toRegion: 0x8b, fromRegion: []uint16{0xc4}}, -} - -// Total table size 20315 bytes (19KiB); checksum: C16EF251 diff --git a/vendor/golang.org/x/text/language/tags.go b/vendor/golang.org/x/text/language/tags.go deleted file mode 100644 index de30155a2..000000000 --- a/vendor/golang.org/x/text/language/tags.go +++ /dev/null @@ -1,143 
+0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package language - -// TODO: Various sets of commonly use tags and regions. - -// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed. -// It simplifies safe initialization of Tag values. -func MustParse(s string) Tag { - t, err := Parse(s) - if err != nil { - panic(err) - } - return t -} - -// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed. -// It simplifies safe initialization of Tag values. -func (c CanonType) MustParse(s string) Tag { - t, err := c.Parse(s) - if err != nil { - panic(err) - } - return t -} - -// MustParseBase is like ParseBase, but panics if the given base cannot be parsed. -// It simplifies safe initialization of Base values. -func MustParseBase(s string) Base { - b, err := ParseBase(s) - if err != nil { - panic(err) - } - return b -} - -// MustParseScript is like ParseScript, but panics if the given script cannot be -// parsed. It simplifies safe initialization of Script values. -func MustParseScript(s string) Script { - scr, err := ParseScript(s) - if err != nil { - panic(err) - } - return scr -} - -// MustParseRegion is like ParseRegion, but panics if the given region cannot be -// parsed. It simplifies safe initialization of Region values. -func MustParseRegion(s string) Region { - r, err := ParseRegion(s) - if err != nil { - panic(err) - } - return r -} - -var ( - und = Tag{} - - Und Tag = Tag{} - - Afrikaans Tag = Tag{lang: _af} // af - Amharic Tag = Tag{lang: _am} // am - Arabic Tag = Tag{lang: _ar} // ar - ModernStandardArabic Tag = Tag{lang: _ar, region: _001} // ar-001 - Azerbaijani Tag = Tag{lang: _az} // az - Bulgarian Tag = Tag{lang: _bg} // bg - Bengali Tag = Tag{lang: _bn} // bn - Catalan Tag = Tag{lang: _ca} // ca - Czech Tag = Tag{lang: _cs} // cs - Danish Tag = Tag{lang: _da} // da - German Tag = Tag{lang: _de} // de - Greek Tag = Tag{lang: _el} // el - English Tag = Tag{lang: _en} // en - AmericanEnglish Tag = Tag{lang: _en, region: _US} // en-US - BritishEnglish Tag = Tag{lang: _en, region: _GB} // en-GB - Spanish Tag = Tag{lang: _es} // es - EuropeanSpanish Tag = Tag{lang: _es, region: _ES} // es-ES - LatinAmericanSpanish Tag = Tag{lang: _es, region: _419} // es-419 - Estonian Tag = Tag{lang: _et} // et - Persian Tag = Tag{lang: _fa} // fa - Finnish Tag = Tag{lang: _fi} // fi - Filipino Tag = Tag{lang: _fil} // fil - French Tag = Tag{lang: _fr} // fr - CanadianFrench Tag = Tag{lang: _fr, region: _CA} // fr-CA - Gujarati Tag = Tag{lang: _gu} // gu - Hebrew Tag = Tag{lang: _he} // he - Hindi Tag = Tag{lang: _hi} // hi - Croatian Tag = Tag{lang: _hr} // hr - Hungarian Tag = Tag{lang: _hu} // hu - Armenian Tag = Tag{lang: _hy} // hy - Indonesian Tag = Tag{lang: _id} // id - Icelandic Tag = Tag{lang: _is} // is - Italian Tag = Tag{lang: _it} // it - Japanese Tag = Tag{lang: _ja} // ja - Georgian Tag = Tag{lang: _ka} // ka - Kazakh Tag = Tag{lang: _kk} // kk - Khmer Tag = Tag{lang: _km} // km - Kannada Tag = Tag{lang: _kn} // kn - Korean Tag = Tag{lang: _ko} // ko - Kirghiz Tag = Tag{lang: _ky} // ky - Lao Tag = Tag{lang: _lo} // lo - Lithuanian Tag = Tag{lang: _lt} // lt - Latvian Tag = Tag{lang: _lv} // lv - Macedonian Tag = Tag{lang: _mk} // mk - Malayalam Tag = Tag{lang: _ml} // ml - Mongolian Tag = Tag{lang: _mn} // mn - Marathi Tag = Tag{lang: _mr} // mr - Malay Tag = Tag{lang: _ms} // ms - Burmese Tag = Tag{lang: _my} // my 
- Nepali Tag = Tag{lang: _ne} // ne - Dutch Tag = Tag{lang: _nl} // nl - Norwegian Tag = Tag{lang: _no} // no - Punjabi Tag = Tag{lang: _pa} // pa - Polish Tag = Tag{lang: _pl} // pl - Portuguese Tag = Tag{lang: _pt} // pt - BrazilianPortuguese Tag = Tag{lang: _pt, region: _BR} // pt-BR - EuropeanPortuguese Tag = Tag{lang: _pt, region: _PT} // pt-PT - Romanian Tag = Tag{lang: _ro} // ro - Russian Tag = Tag{lang: _ru} // ru - Sinhala Tag = Tag{lang: _si} // si - Slovak Tag = Tag{lang: _sk} // sk - Slovenian Tag = Tag{lang: _sl} // sl - Albanian Tag = Tag{lang: _sq} // sq - Serbian Tag = Tag{lang: _sr} // sr - SerbianLatin Tag = Tag{lang: _sr, script: _Latn} // sr-Latn - Swedish Tag = Tag{lang: _sv} // sv - Swahili Tag = Tag{lang: _sw} // sw - Tamil Tag = Tag{lang: _ta} // ta - Telugu Tag = Tag{lang: _te} // te - Thai Tag = Tag{lang: _th} // th - Turkish Tag = Tag{lang: _tr} // tr - Ukrainian Tag = Tag{lang: _uk} // uk - Urdu Tag = Tag{lang: _ur} // ur - Uzbek Tag = Tag{lang: _uz} // uz - Vietnamese Tag = Tag{lang: _vi} // vi - Chinese Tag = Tag{lang: _zh} // zh - SimplifiedChinese Tag = Tag{lang: _zh, script: _Hans} // zh-Hans - TraditionalChinese Tag = Tag{lang: _zh, script: _Hant} // zh-Hant - Zulu Tag = Tag{lang: _zu} // zu -) diff --git a/vendor/golang.org/x/text/runes/cond.go b/vendor/golang.org/x/text/runes/cond.go deleted file mode 100644 index ae7a92158..000000000 --- a/vendor/golang.org/x/text/runes/cond.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package runes - -import ( - "unicode/utf8" - - "golang.org/x/text/transform" -) - -// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is. -// This is done for various reasons: -// - To retain the semantics of the Nop transformer: if input is passed to a Nop -// one would expect it to be unchanged. -// - It would be very expensive to pass a converted RuneError to a transformer: -// a transformer might need more source bytes after RuneError, meaning that -// the only way to pass it safely is to create a new buffer and manage the -// intermingling of RuneErrors and normal input. -// - Many transformers leave ill-formed UTF-8 as is, so this is not -// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a -// logical consequence of the operation (as for Map) or if it otherwise would -// pose security concerns (as for Remove). -// - An alternative would be to return an error on ill-formed UTF-8, but this -// would be inconsistent with other operations. - -// If returns a transformer that applies tIn to consecutive runes for which -// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset -// is called on tIn and tNotIn at the start of each run. A Nop transformer will -// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated -// to RuneError to determine which transformer to apply, but is passed as is to -// the respective transformer. 
-func If(s Set, tIn, tNotIn transform.Transformer) Transformer { - if tIn == nil && tNotIn == nil { - return Transformer{transform.Nop} - } - if tIn == nil { - tIn = transform.Nop - } - if tNotIn == nil { - tNotIn = transform.Nop - } - a := &cond{ - tIn: tIn, - tNotIn: tNotIn, - f: s.Contains, - } - a.Reset() - return Transformer{a} -} - -type cond struct { - tIn, tNotIn transform.Transformer - f func(rune) bool - check func(rune) bool // current check to perform - t transform.Transformer // current transformer to use -} - -// Reset implements transform.Transformer. -func (t *cond) Reset() { - t.check = t.is - t.t = t.tIn - t.t.Reset() // notIn will be reset on first usage. -} - -func (t *cond) is(r rune) bool { - if t.f(r) { - return true - } - t.check = t.isNot - t.t = t.tNotIn - t.tNotIn.Reset() - return false -} - -func (t *cond) isNot(r rune) bool { - if !t.f(r) { - return true - } - t.check = t.is - t.t = t.tIn - t.tIn.Reset() - return false -} - -func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - p := 0 - for nSrc < len(src) && err == nil { - // Don't process too much at a time, as the work might be wasted if the - // destination buffer isn't large enough to hold the result or a - // transform returns an error early. - const maxChunk = 4096 - max := len(src) - if n := nSrc + maxChunk; n < len(src) { - max = n - } - atEnd := false - size := 0 - current := t.t - for ; p < max; p += size { - var r rune - r, size = utf8.DecodeRune(src[p:]) - if r == utf8.RuneError && size == 1 { - if !atEOF && !utf8.FullRune(src[p:]) { - err = transform.ErrShortSrc - break - } - } - if !t.check(r) { - // The next rune will be the start of a new run. - atEnd = true - break - } - } - nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src))) - nDst += nDst2 - nSrc += nSrc2 - if err2 != nil { - return nDst, nSrc, err2 - } - // At this point either err != nil or t.check will pass for the rune at p. - p = nSrc + size - } - return nDst, nSrc, err -} diff --git a/vendor/golang.org/x/text/runes/runes.go b/vendor/golang.org/x/text/runes/runes.go deleted file mode 100644 index 291de656b..000000000 --- a/vendor/golang.org/x/text/runes/runes.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package runes provide transforms for UTF-8 encoded text. -package runes - -import ( - "unicode" - "unicode/utf8" - - "golang.org/x/text/transform" -) - -// A Set is a collection of runes. -type Set interface { - // Contains returns true if r is contained in the set. - Contains(r rune) bool -} - -type setFunc func(rune) bool - -func (s setFunc) Contains(r rune) bool { - return s(r) -} - -// Note: using funcs here instead of wrapping types result in cleaner -// documentation and a smaller API. - -// In creates a Set with a Contains method that returns true for all runes in -// the given RangeTable. -func In(rt *unicode.RangeTable) Set { - return setFunc(func(r rune) bool { return unicode.Is(rt, r) }) -} - -// In creates a Set with a Contains method that returns true for all runes not -// in the given RangeTable. -func NotIn(rt *unicode.RangeTable) Set { - return setFunc(func(r rune) bool { return !unicode.Is(rt, r) }) -} - -// Predicate creates a Set with a Contains method that returns f(r). 
-func Predicate(f func(rune) bool) Set { - return setFunc(f) -} - -// Transformer implements the transform.Transformer interface. -type Transformer struct { - transform.Transformer -} - -// Bytes returns a new byte slice with the result of converting b using t. It -// calls Reset on t. It returns nil if any error was found. This can only happen -// if an error-producing Transformer is passed to If. -func (t Transformer) Bytes(b []byte) []byte { - b, _, err := transform.Bytes(t, b) - if err != nil { - return nil - } - return b -} - -// String returns a string with the result of converting s using t. It calls -// Reset on t. It returns the empty string if any error was found. This can only -// happen if an error-producing Transformer is passed to If. -func (t Transformer) String(s string) string { - s, _, err := transform.String(t, s) - if err != nil { - return "" - } - return s -} - -// TODO: -// - Copy: copying strings and bytes in whole-rune units. -// - Validation (maybe) -// - Well-formed-ness (maybe) - -const runeErrorString = string(utf8.RuneError) - -// Remove returns a Transformer that removes runes r for which s.Contains(r). -// Illegal input bytes are replaced by RuneError before being passed to f. -func Remove(s Set) Transformer { - if f, ok := s.(setFunc); ok { - // This little trick cuts the running time of BenchmarkRemove for sets - // created by Predicate roughly in half. - // TODO: special-case RangeTables as well. - return Transformer{remove(f)} - } - return Transformer{remove(s.Contains)} -} - -// TODO: remove transform.RemoveFunc. - -type remove func(r rune) bool - -func (remove) Reset() {} - -// Transform implements transform.Transformer. -func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for r, size := rune(0), 0; nSrc < len(src); { - if r = rune(src[nSrc]); r < utf8.RuneSelf { - size = 1 - } else { - r, size = utf8.DecodeRune(src[nSrc:]) - - if size == 1 { - // Invalid rune. - if !atEOF && !utf8.FullRune(src[nSrc:]) { - err = transform.ErrShortSrc - break - } - // We replace illegal bytes with RuneError. Not doing so might - // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. - // The resulting byte sequence may subsequently contain runes - // for which t(r) is true that were passed unnoticed. - if !t(utf8.RuneError) { - if nDst+3 > len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst+0] = runeErrorString[0] - dst[nDst+1] = runeErrorString[1] - dst[nDst+2] = runeErrorString[2] - nDst += 3 - } - nSrc++ - continue - } - } - - if t(r) { - nSrc += size - continue - } - if nDst+size > len(dst) { - err = transform.ErrShortDst - break - } - for i := 0; i < size; i++ { - dst[nDst] = src[nSrc] - nDst++ - nSrc++ - } - } - return -} - -// Map returns a Transformer that maps the runes in the input using the given -// mapping. Illegal bytes in the input are converted to utf8.RuneError before -// being passed to the mapping func. -func Map(mapping func(rune) rune) Transformer { - return Transformer{mapper(mapping)} -} - -type mapper func(rune) rune - -func (mapper) Reset() {} - -// Transform implements transform.Transformer. 
-func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - var replacement rune - var b [utf8.UTFMax]byte - - for r, size := rune(0), 0; nSrc < len(src); { - if r = rune(src[nSrc]); r < utf8.RuneSelf { - if replacement = t(r); replacement < utf8.RuneSelf { - if nDst == len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst] = byte(replacement) - nDst++ - nSrc++ - continue - } - size = 1 - } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { - // Invalid rune. - if !atEOF && !utf8.FullRune(src[nSrc:]) { - err = transform.ErrShortSrc - break - } - - if replacement = t(utf8.RuneError); replacement == utf8.RuneError { - if nDst+3 > len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst+0] = runeErrorString[0] - dst[nDst+1] = runeErrorString[1] - dst[nDst+2] = runeErrorString[2] - nDst += 3 - nSrc++ - continue - } - } else if replacement = t(r); replacement == r { - if nDst+size > len(dst) { - err = transform.ErrShortDst - break - } - for i := 0; i < size; i++ { - dst[nDst] = src[nSrc] - nDst++ - nSrc++ - } - continue - } - - n := utf8.EncodeRune(b[:], replacement) - - if nDst+n > len(dst) { - err = transform.ErrShortDst - break - } - for i := 0; i < n; i++ { - dst[nDst] = b[i] - nDst++ - } - nSrc += size - } - return -} - -// ReplaceIllFormed returns a transformer that replaces all input bytes that are -// not part of a well-formed UTF-8 code sequence with utf8.RuneError. -func ReplaceIllFormed() Transformer { - return Transformer{&replaceIllFormed{}} -} - -type replaceIllFormed struct{ transform.NopResetter } - -func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for nSrc < len(src) { - r, size := utf8.DecodeRune(src[nSrc:]) - - // Look for an ASCII rune. - if r < utf8.RuneSelf { - if nDst == len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst] = byte(r) - nDst++ - nSrc++ - continue - } - - // Look for a valid non-ASCII rune. - if r != utf8.RuneError || size != 1 { - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - err = transform.ErrShortDst - break - } - nDst += size - nSrc += size - continue - } - - // Look for short source data. - if !atEOF && !utf8.FullRune(src[nSrc:]) { - err = transform.ErrShortSrc - break - } - - // We have an invalid rune. - if nDst+3 > len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst+0] = runeErrorString[0] - dst[nDst+1] = runeErrorString[1] - dst[nDst+2] = runeErrorString[2] - nDst += 3 - nSrc++ - } - return nDst, nSrc, err -} diff --git a/vendor/golang.org/x/text/secure/precis/class.go b/vendor/golang.org/x/text/secure/precis/class.go deleted file mode 100644 index f6b56413b..000000000 --- a/vendor/golang.org/x/text/secure/precis/class.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package precis - -import ( - "unicode/utf8" -) - -// TODO: Add contextual character rules from Appendix A of RFC5892. - -// A class is a set of characters that match certain derived properties. The -// PRECIS framework defines two classes: The Freeform class and the Identifier -// class. The freeform class should be used for profiles where expressiveness is -// prioritized over safety such as nicknames or passwords. The identifier class -// should be used for profiles where safety is the first priority such as -// addressable network labels and usernames. 
-type class struct { - validFrom property -} - -// Contains satisfies the runes.Set interface and returns whether the given rune -// is a member of the class. -func (c class) Contains(r rune) bool { - b := make([]byte, 4) - n := utf8.EncodeRune(b, r) - - trieval, _ := dpTrie.lookup(b[:n]) - return c.validFrom <= property(trieval) -} - -var ( - identifier = &class{validFrom: pValid} - freeform = &class{validFrom: idDisOrFreePVal} -) diff --git a/vendor/golang.org/x/text/secure/precis/context.go b/vendor/golang.org/x/text/secure/precis/context.go deleted file mode 100644 index 2dcaf29d7..000000000 --- a/vendor/golang.org/x/text/secure/precis/context.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package precis - -import "errors" - -// This file contains tables and code related to context rules. - -type catBitmap uint16 - -const ( - // These bits, once set depending on the current value, are never unset. - bJapanese catBitmap = 1 << iota - bArabicIndicDigit - bExtendedArabicIndicDigit - - // These bits are set on each iteration depending on the current value. - bJoinStart - bJoinMid - bJoinEnd - bVirama - bLatinSmallL - bGreek - bHebrew - - // These bits indicated which of the permanent bits need to be set at the - // end of the checks. - bMustHaveJapn - - permanent = bJapanese | bArabicIndicDigit | bExtendedArabicIndicDigit | bMustHaveJapn -) - -const finalShift = 10 - -var errContext = errors.New("precis: contextual rule violated") - -func init() { - // Programmatically set these required bits as, manually setting them seems - // too error prone. - for i, ct := range categoryTransitions { - categoryTransitions[i].keep |= permanent - categoryTransitions[i].accept |= ct.term - } -} - -var categoryTransitions = []struct { - keep catBitmap // mask selecting which bits to keep from the previous state - set catBitmap // mask for which bits to set for this transition - - // These bitmaps are used for rules that require lookahead. - // term&accept == term must be true, which is enforced programmatically. - term catBitmap // bits accepted as termination condition - accept catBitmap // bits that pass, but not sufficient as termination - - // The rule function cannot take a *context as an argument, as it would - // cause the context to escape, adding significant overhead. 
- rule func(beforeBits catBitmap) (doLookahead bool, err error) -}{ - joiningL: {set: bJoinStart}, - joiningD: {set: bJoinStart | bJoinEnd}, - joiningT: {keep: bJoinStart, set: bJoinMid}, - joiningR: {set: bJoinEnd}, - viramaModifier: {set: bVirama}, - viramaJoinT: {set: bVirama | bJoinMid}, - latinSmallL: {set: bLatinSmallL}, - greek: {set: bGreek}, - greekJoinT: {set: bGreek | bJoinMid}, - hebrew: {set: bHebrew}, - hebrewJoinT: {set: bHebrew | bJoinMid}, - japanese: {set: bJapanese}, - katakanaMiddleDot: {set: bMustHaveJapn}, - - zeroWidthNonJoiner: { - term: bJoinEnd, - accept: bJoinMid, - rule: func(before catBitmap) (doLookAhead bool, err error) { - if before&bVirama != 0 { - return false, nil - } - if before&bJoinStart == 0 { - return false, errContext - } - return true, nil - }, - }, - zeroWidthJoiner: { - rule: func(before catBitmap) (doLookAhead bool, err error) { - if before&bVirama == 0 { - err = errContext - } - return false, err - }, - }, - middleDot: { - term: bLatinSmallL, - rule: func(before catBitmap) (doLookAhead bool, err error) { - if before&bLatinSmallL == 0 { - return false, errContext - } - return true, nil - }, - }, - greekLowerNumeralSign: { - set: bGreek, - term: bGreek, - rule: func(before catBitmap) (doLookAhead bool, err error) { - return true, nil - }, - }, - hebrewPreceding: { - set: bHebrew, - rule: func(before catBitmap) (doLookAhead bool, err error) { - if before&bHebrew == 0 { - err = errContext - } - return false, err - }, - }, - arabicIndicDigit: { - set: bArabicIndicDigit, - rule: func(before catBitmap) (doLookAhead bool, err error) { - if before&bExtendedArabicIndicDigit != 0 { - err = errContext - } - return false, err - }, - }, - extendedArabicIndicDigit: { - set: bExtendedArabicIndicDigit, - rule: func(before catBitmap) (doLookAhead bool, err error) { - if before&bArabicIndicDigit != 0 { - err = errContext - } - return false, err - }, - }, -} diff --git a/vendor/golang.org/x/text/secure/precis/doc.go b/vendor/golang.org/x/text/secure/precis/doc.go deleted file mode 100644 index aa76205e6..000000000 --- a/vendor/golang.org/x/text/secure/precis/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package precis contains types and functions for the preparation, -// enforcement, and comparison of internationalized strings ("PRECIS") as -// defined in RFC 7564. It also contains several pre-defined profiles for -// passwords, nicknames, and usernames as defined in RFC 7613 and RFC 7700. -// -// BE ADVISED: This package is under construction and the API may change in -// backwards incompatible ways and without notice. -package precis - -//go:generate go run gen.go gen_trieval.go diff --git a/vendor/golang.org/x/text/secure/precis/gen.go b/vendor/golang.org/x/text/secure/precis/gen.go deleted file mode 100644 index dba9004a6..000000000 --- a/vendor/golang.org/x/text/secure/precis/gen.go +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Unicode table generator. -// Data read from the web. 
- -// +build ignore - -package main - -import ( - "flag" - "log" - "unicode" - "unicode/utf8" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" - "golang.org/x/text/unicode/norm" - "golang.org/x/text/unicode/rangetable" -) - -var outputFile = flag.String("output", "tables.go", "output file for generated tables; default tables.go") - -var assigned, disallowedRunes *unicode.RangeTable - -var runeCategory = map[rune]category{} - -var overrides = map[category]category{ - viramaModifier: viramaJoinT, - greek: greekJoinT, - hebrew: hebrewJoinT, -} - -func setCategory(r rune, cat category) { - if c, ok := runeCategory[r]; ok { - if override, ok := overrides[c]; cat == joiningT && ok { - cat = override - } else { - log.Fatalf("%U: multiple categories for rune (%v and %v)", r, c, cat) - } - } - runeCategory[r] = cat -} - -func init() { - if numCategories > 1<<propShift { - log.Fatalf("Number of categories is %d; may at most be %d", numCategories, 1<<propShift) - } -} - -func main() { - gen.Init() - - // Load data - runes := []rune{} - // PrecisIgnorableProperties: https://tools.ietf.org/html/rfc7564#section-9.13 - ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { - if p.String(1) == "Default_Ignorable_Code_Point" { - runes = append(runes, p.Rune(0)) - } - }) - ucd.Parse(gen.OpenUCDFile("PropList.txt"), func(p *ucd.Parser) { - switch p.String(1) { - case "Noncharacter_Code_Point": - runes = append(runes, p.Rune(0)) - } - }) - // OldHangulJamo: https://tools.ietf.org/html/rfc5892#section-2.9 - ucd.Parse(gen.OpenUCDFile("HangulSyllableType.txt"), func(p *ucd.Parser) { - switch p.String(1) { - case "L", "V", "T": - runes = append(runes, p.Rune(0)) - } - }) - - disallowedRunes = rangetable.New(runes...) - assigned = rangetable.Assigned(unicode.Version) - - // Load category data. - runeCategory['l'] = latinSmallL - ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) { - const cccVirama = 9 - if p.Int(ucd.CanonicalCombiningClass) == cccVirama { - setCategory(p.Rune(0), viramaModifier) - } - }) - ucd.Parse(gen.OpenUCDFile("Scripts.txt"), func(p *ucd.Parser) { - switch p.String(1) { - case "Greek": - setCategory(p.Rune(0), greek) - case "Hebrew": - setCategory(p.Rune(0), hebrew) - case "Hiragana", "Katakana", "Han": - setCategory(p.Rune(0), japanese) - } - }) - - // Set the rule categories associated with exceptions. This overrides any - // previously set categories. The original categories are manually - // reintroduced in the categoryTransitions table. - for r, e := range exceptions { - if e.cat != 0 { - runeCategory[r] = e.cat - } - } - cat := map[string]category{ - "L": joiningL, - "D": joiningD, - "T": joiningT, - - "R": joiningR, - } - ucd.Parse(gen.OpenUCDFile("extracted/DerivedJoiningType.txt"), func(p *ucd.Parser) { - switch v := p.String(1); v { - case "L", "D", "T", "R": - setCategory(p.Rune(0), cat[v]) - } - }) - - writeTables() - gen.Repackage("gen_trieval.go", "trieval.go", "precis") -} - -type exception struct { - prop property - cat category -} - -func init() { - // Programmatically add the Arabic and Indic digits to the exceptions map. - // See comment in the exceptions map below why these are marked disallowed. 
- for i := rune(0); i <= 9; i++ { - exceptions[0x0660+i] = exception{ - prop: disallowed, - cat: arabicIndicDigit, - } - exceptions[0x06F0+i] = exception{ - prop: disallowed, - cat: extendedArabicIndicDigit, - } - } -} - -// The Exceptions class as defined in RFC 5892 -// https://tools.ietf.org/html/rfc5892#section-2.6 -var exceptions = map[rune]exception{ - 0x00DF: {prop: pValid}, - 0x03C2: {prop: pValid}, - 0x06FD: {prop: pValid}, - 0x06FE: {prop: pValid}, - 0x0F0B: {prop: pValid}, - 0x3007: {prop: pValid}, - - // ContextO|J rules are marked as disallowed, taking a "guilty until proven - // innocent" approach. The main reason for this is that the check for - // whether a context rule should be applied can be moved to the logic for - // handing disallowed runes, taken it off the common path. The exception to - // this rule is for katakanaMiddleDot, as the rule logic is handled without - // using a rule function. - - // ContextJ (Join control) - 0x200C: {prop: disallowed, cat: zeroWidthNonJoiner}, - 0x200D: {prop: disallowed, cat: zeroWidthJoiner}, - - // ContextO - 0x00B7: {prop: disallowed, cat: middleDot}, - 0x0375: {prop: disallowed, cat: greekLowerNumeralSign}, - 0x05F3: {prop: disallowed, cat: hebrewPreceding}, // punctuation Geresh - 0x05F4: {prop: disallowed, cat: hebrewPreceding}, // punctuation Gershayim - 0x30FB: {prop: pValid, cat: katakanaMiddleDot}, - - // These are officially ContextO, but the implementation does not require - // special treatment of these, so we simply mark them as valid. - 0x0660: {prop: pValid}, - 0x0661: {prop: pValid}, - 0x0662: {prop: pValid}, - 0x0663: {prop: pValid}, - 0x0664: {prop: pValid}, - 0x0665: {prop: pValid}, - 0x0666: {prop: pValid}, - 0x0667: {prop: pValid}, - 0x0668: {prop: pValid}, - 0x0669: {prop: pValid}, - 0x06F0: {prop: pValid}, - 0x06F1: {prop: pValid}, - 0x06F2: {prop: pValid}, - 0x06F3: {prop: pValid}, - 0x06F4: {prop: pValid}, - 0x06F5: {prop: pValid}, - 0x06F6: {prop: pValid}, - 0x06F7: {prop: pValid}, - 0x06F8: {prop: pValid}, - 0x06F9: {prop: pValid}, - - 0x0640: {prop: disallowed}, - 0x07FA: {prop: disallowed}, - 0x302E: {prop: disallowed}, - 0x302F: {prop: disallowed}, - 0x3031: {prop: disallowed}, - 0x3032: {prop: disallowed}, - 0x3033: {prop: disallowed}, - 0x3034: {prop: disallowed}, - 0x3035: {prop: disallowed}, - 0x303B: {prop: disallowed}, -} - -// LetterDigits: https://tools.ietf.org/html/rfc5892#section-2.1 -// r in {Ll, Lu, Lo, Nd, Lm, Mn, Mc}. 
-func isLetterDigits(r rune) bool { - return unicode.In(r, - unicode.Ll, unicode.Lu, unicode.Lm, unicode.Lo, // Letters - unicode.Mn, unicode.Mc, // Modifiers - unicode.Nd, // Digits - ) -} - -func isIdDisAndFreePVal(r rune) bool { - return unicode.In(r, - // OtherLetterDigits: https://tools.ietf.org/html/rfc7564#section-9.18 - // r in in {Lt, Nl, No, Me} - unicode.Lt, unicode.Nl, unicode.No, // Other letters / numbers - unicode.Me, // Modifiers - - // Spaces: https://tools.ietf.org/html/rfc7564#section-9.14 - // r in in {Zs} - unicode.Zs, - - // Symbols: https://tools.ietf.org/html/rfc7564#section-9.15 - // r in {Sm, Sc, Sk, So} - unicode.Sm, unicode.Sc, unicode.Sk, unicode.So, - - // Punctuation: https://tools.ietf.org/html/rfc7564#section-9.16 - // r in {Pc, Pd, Ps, Pe, Pi, Pf, Po} - unicode.Pc, unicode.Pd, unicode.Ps, unicode.Pe, - unicode.Pi, unicode.Pf, unicode.Po, - ) -} - -// HasCompat: https://tools.ietf.org/html/rfc7564#section-9.17 -func hasCompat(r rune) bool { - return !norm.NFKC.IsNormalString(string(r)) -} - -// From https://tools.ietf.org/html/rfc5892: -// -// If .cp. .in. Exceptions Then Exceptions(cp); -// Else If .cp. .in. BackwardCompatible Then BackwardCompatible(cp); -// Else If .cp. .in. Unassigned Then UNASSIGNED; -// Else If .cp. .in. ASCII7 Then PVALID; -// Else If .cp. .in. JoinControl Then CONTEXTJ; -// Else If .cp. .in. OldHangulJamo Then DISALLOWED; -// Else If .cp. .in. PrecisIgnorableProperties Then DISALLOWED; -// Else If .cp. .in. Controls Then DISALLOWED; -// Else If .cp. .in. HasCompat Then ID_DIS or FREE_PVAL; -// Else If .cp. .in. LetterDigits Then PVALID; -// Else If .cp. .in. OtherLetterDigits Then ID_DIS or FREE_PVAL; -// Else If .cp. .in. Spaces Then ID_DIS or FREE_PVAL; -// Else If .cp. .in. Symbols Then ID_DIS or FREE_PVAL; -// Else If .cp. .in. Punctuation Then ID_DIS or FREE_PVAL; -// Else DISALLOWED; - -func writeTables() { - propTrie := triegen.NewTrie("derivedProperties") - w := gen.NewCodeWriter() - defer w.WriteGoFile(*outputFile, "precis") - gen.WriteUnicodeVersion(w) - - // Iterate over all the runes... - for i := rune(0); i < unicode.MaxRune; i++ { - r := rune(i) - - if !utf8.ValidRune(r) { - continue - } - - e, ok := exceptions[i] - p := e.prop - switch { - case ok: - case !unicode.In(r, assigned): - p = unassigned - case r >= 0x0021 && r <= 0x007e: // Is ASCII 7 - p = pValid - case unicode.In(r, disallowedRunes, unicode.Cc): - p = disallowed - case hasCompat(r): - p = idDisOrFreePVal - case isLetterDigits(r): - p = pValid - case isIdDisAndFreePVal(r): - p = idDisOrFreePVal - default: - p = disallowed - } - cat := runeCategory[r] - // Don't set category for runes that are disallowed. - if p == disallowed { - cat = exceptions[r].cat - } - propTrie.Insert(r, uint64(p)|uint64(cat)) - } - sz, err := propTrie.Gen(w) - if err != nil { - log.Fatal(err) - } - w.Size += sz -} diff --git a/vendor/golang.org/x/text/secure/precis/gen_trieval.go b/vendor/golang.org/x/text/secure/precis/gen_trieval.go deleted file mode 100644 index 308510c9a..000000000 --- a/vendor/golang.org/x/text/secure/precis/gen_trieval.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -package main - -// entry is the entry of a trie table -// 7..6 property (unassigned, disallowed, maybe, valid) -// 5..0 category -type entry uint8 - -const ( - propShift = 6 - propMask = 0xc0 - catMask = 0x3f -) - -func (e entry) property() property { return property(e & propMask) } -func (e entry) category() category { return category(e & catMask) } - -type property uint8 - -// The order of these constants matter. A Profile may consider runes to be -// allowed either from pValid or idDisOrFreePVal. -const ( - unassigned property = iota << propShift - disallowed - idDisOrFreePVal // disallowed for Identifier, pValid for FreeForm - pValid -) - -// compute permutations of all properties and specialCategories. -type category uint8 - -const ( - other category = iota - - // Special rune types - joiningL - joiningD - joiningT - joiningR - viramaModifier - viramaJoinT // Virama + JoiningT - latinSmallL // U+006c - greek - greekJoinT // Greek + JoiningT - hebrew - hebrewJoinT // Hebrew + JoiningT - japanese // hirigana, katakana, han - - // Special rune types associated with contextual rules defined in - // https://tools.ietf.org/html/rfc5892#appendix-A. - // ContextO - zeroWidthNonJoiner // rule 1 - zeroWidthJoiner // rule 2 - // ContextJ - middleDot // rule 3 - greekLowerNumeralSign // rule 4 - hebrewPreceding // rule 5 and 6 - katakanaMiddleDot // rule 7 - arabicIndicDigit // rule 8 - extendedArabicIndicDigit // rule 9 - - numCategories -) diff --git a/vendor/golang.org/x/text/secure/precis/nickname.go b/vendor/golang.org/x/text/secure/precis/nickname.go deleted file mode 100644 index cd54b9e69..000000000 --- a/vendor/golang.org/x/text/secure/precis/nickname.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package precis - -import ( - "unicode" - "unicode/utf8" - - "golang.org/x/text/transform" -) - -type nickAdditionalMapping struct { - // TODO: This transformer needs to be stateless somehow… - notStart bool - prevSpace bool -} - -func (t *nickAdditionalMapping) Reset() { - t.prevSpace = false - t.notStart = false -} - -func (t *nickAdditionalMapping) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - // RFC 7700 §2.1. Rules - // - // 2. Additional Mapping Rule: The additional mapping rule consists of - // the following sub-rules. - // - // 1. Any instances of non-ASCII space MUST be mapped to ASCII - // space (U+0020); a non-ASCII space is any Unicode code point - // having a general category of "Zs", naturally with the - // exception of U+0020. - // - // 2. Any instances of the ASCII space character at the beginning - // or end of a nickname MUST be removed (e.g., "stpeter " is - // mapped to "stpeter"). - // - // 3. Interior sequences of more than one ASCII space character - // MUST be mapped to a single ASCII space character (e.g., - // "St Peter" is mapped to "St Peter"). 
- - for nSrc < len(src) { - r, size := utf8.DecodeRune(src[nSrc:]) - if size == 0 { // Incomplete UTF-8 encoding - if !atEOF { - return nDst, nSrc, transform.ErrShortSrc - } - size = 1 - } - if unicode.Is(unicode.Zs, r) { - t.prevSpace = true - } else { - if t.prevSpace && t.notStart { - dst[nDst] = ' ' - nDst += 1 - } - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - nDst += size - return nDst, nSrc, transform.ErrShortDst - } - nDst += size - t.prevSpace = false - t.notStart = true - } - nSrc += size - } - return nDst, nSrc, nil -} diff --git a/vendor/golang.org/x/text/secure/precis/options.go b/vendor/golang.org/x/text/secure/precis/options.go deleted file mode 100644 index ec63783ef..000000000 --- a/vendor/golang.org/x/text/secure/precis/options.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package precis - -import ( - "golang.org/x/text/cases" - "golang.org/x/text/runes" - "golang.org/x/text/transform" - "golang.org/x/text/unicode/norm" - "golang.org/x/text/width" -) - -// An Option is used to define the behavior and rules of a Profile. -type Option func(*options) - -type options struct { - // Preparation options - foldWidth bool - - // Enforcement options - cases transform.Transformer - disallow runes.Set - norm norm.Form - additional []func() transform.Transformer - width *width.Transformer - disallowEmpty bool - bidiRule bool - - // Comparison options - ignorecase bool -} - -func getOpts(o ...Option) (res options) { - for _, f := range o { - f(&res) - } - return -} - -var ( - // The IgnoreCase option causes the profile to perform a case insensitive - // comparison during the PRECIS comparison step. - IgnoreCase Option = ignoreCase - - // The FoldWidth option causes the profile to map non-canonical wide and - // narrow variants to their decomposition mapping. This is useful for - // profiles that are based on the identifier class which would otherwise - // disallow such characters. - FoldWidth Option = foldWidth - - // The DisallowEmpty option causes the enforcement step to return an error if - // the resulting string would be empty. - DisallowEmpty Option = disallowEmpty - - // The BidiRule option causes the Bidi Rule defined in RFC 5893 to be - // applied. - BidiRule Option = bidiRule -) - -var ( - ignoreCase = func(o *options) { - o.ignorecase = true - } - foldWidth = func(o *options) { - o.foldWidth = true - } - disallowEmpty = func(o *options) { - o.disallowEmpty = true - } - bidiRule = func(o *options) { - o.bidiRule = true - } -) - -// The AdditionalMapping option defines the additional mapping rule for the -// Profile by applying Transformer's in sequence. -func AdditionalMapping(t ...func() transform.Transformer) Option { - return func(o *options) { - o.additional = t - } -} - -// The Norm option defines a Profile's normalization rule. Defaults to NFC. -func Norm(f norm.Form) Option { - return func(o *options) { - o.norm = f - } -} - -// The FoldCase option defines a Profile's case mapping rule. Options can be -// provided to determine the type of case folding used. -func FoldCase(opts ...cases.Option) Option { - return func(o *options) { - o.cases = cases.Fold(opts...) - } -} - -// The Disallow option further restricts a Profile's allowed characters beyond -// what is disallowed by the underlying string class. 
-func Disallow(set runes.Set) Option { - return func(o *options) { - o.disallow = set - } -} diff --git a/vendor/golang.org/x/text/secure/precis/profile.go b/vendor/golang.org/x/text/secure/precis/profile.go deleted file mode 100644 index fd5c422fb..000000000 --- a/vendor/golang.org/x/text/secure/precis/profile.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package precis - -import ( - "errors" - "unicode/utf8" - - "golang.org/x/text/runes" - "golang.org/x/text/secure/bidirule" - "golang.org/x/text/transform" - "golang.org/x/text/width" -) - -var ( - errDisallowedRune = errors.New("precis: disallowed rune encountered") -) - -var dpTrie = newDerivedPropertiesTrie(0) - -// A Profile represents a set of rules for normalizing and validating strings in -// the PRECIS framework. -type Profile struct { - options - class *class -} - -// NewIdentifier creates a new PRECIS profile based on the Identifier string -// class. Profiles created from this class are suitable for use where safety is -// prioritized over expressiveness like network identifiers, user accounts, chat -// rooms, and file names. -func NewIdentifier(opts ...Option) *Profile { - return &Profile{ - options: getOpts(opts...), - class: identifier, - } -} - -// NewFreeform creates a new PRECIS profile based on the Freeform string class. -// Profiles created from this class are suitable for use where expressiveness is -// prioritized over safety like passwords, and display-elements such as -// nicknames in a chat room. -func NewFreeform(opts ...Option) *Profile { - return &Profile{ - options: getOpts(opts...), - class: freeform, - } -} - -// NewTransformer creates a new transform.Transformer that performs the PRECIS -// preparation and enforcement steps on the given UTF-8 encoded bytes. -func (p *Profile) NewTransformer() *Transformer { - var ts []transform.Transformer - - // These transforms are applied in the order defined in - // https://tools.ietf.org/html/rfc7564#section-7 - - if p.options.foldWidth { - ts = append(ts, width.Fold) - } - - for _, f := range p.options.additional { - ts = append(ts, f()) - } - - if p.options.cases != nil { - ts = append(ts, p.options.cases) - } - - ts = append(ts, p.options.norm) - - if p.options.bidiRule { - ts = append(ts, bidirule.New()) - } - - ts = append(ts, &checker{p: p, allowed: p.Allowed()}) - - // TODO: Add the disallow empty rule with a dummy transformer? - - return &Transformer{transform.Chain(ts...)} -} - -var errEmptyString = errors.New("precis: transformation resulted in empty string") - -type buffers struct { - src []byte - buf [2][]byte - next int -} - -func (b *buffers) init(n int) { - b.buf[0] = make([]byte, 0, n) - b.buf[1] = make([]byte, 0, n) -} - -func (b *buffers) apply(t transform.Transformer) (err error) { - // TODO: use Span, once available. - x := b.next & 1 - b.src, _, err = transform.Append(t, b.buf[x][:0], b.src) - b.buf[x] = b.src - b.next++ - return err -} - -func (b *buffers) enforce(p *Profile, src []byte) (str []byte, err error) { - b.src = src - - // These transforms are applied in the order defined in - // https://tools.ietf.org/html/rfc7564#section-7 - - // TODO: allow different width transforms options. - if p.options.foldWidth { - // TODO: use Span, once available. 
- if err = b.apply(width.Fold); err != nil { - return nil, err - } - } - for _, f := range p.options.additional { - if err = b.apply(f()); err != nil { - return nil, err - } - } - if p.options.cases != nil { - if err = b.apply(p.options.cases); err != nil { - return nil, err - } - } - if n := p.norm.QuickSpan(b.src); n < len(b.src) { - x := b.next & 1 - n = copy(b.buf[x], b.src[:n]) - b.src, _, err = transform.Append(p.norm, b.buf[x][:n], b.src[n:]) - b.buf[x] = b.src - b.next++ - if err != nil { - return nil, err - } - } - if p.options.bidiRule { - if err := b.apply(bidirule.New()); err != nil { - return nil, err - } - } - c := checker{p: p} - if _, err := c.span(b.src, true); err != nil { - return nil, err - } - if p.disallow != nil { - for i := 0; i < len(b.src); { - r, size := utf8.DecodeRune(b.src[i:]) - if p.disallow.Contains(r) { - return nil, errDisallowedRune - } - i += size - } - } - - // TODO: Add the disallow empty rule with a dummy transformer? - - if p.options.disallowEmpty && len(b.src) == 0 { - return nil, errEmptyString - } - return b.src, nil -} - -// Append appends the result of applying p to src writing the result to dst. -// It returns an error if the input string is invalid. -func (p *Profile) Append(dst, src []byte) ([]byte, error) { - var buf buffers - buf.init(8 + len(src) + len(src)>>2) - b, err := buf.enforce(p, src) - if err != nil { - return nil, err - } - return append(dst, b...), nil -} - -// Bytes returns a new byte slice with the result of applying the profile to b. -func (p *Profile) Bytes(b []byte) ([]byte, error) { - var buf buffers - buf.init(8 + len(b) + len(b)>>2) - b, err := buf.enforce(p, b) - if err != nil { - return nil, err - } - if buf.next == 0 { - c := make([]byte, len(b)) - copy(c, b) - return c, nil - } - return b, nil -} - -// String returns a string with the result of applying the profile to s. -func (p *Profile) String(s string) (string, error) { - var buf buffers - buf.init(8 + len(s) + len(s)>>2) - b, err := buf.enforce(p, []byte(s)) - if err != nil { - return "", err - } - return string(b), nil -} - -// Compare enforces both strings, and then compares them for bit-string identity -// (byte-for-byte equality). If either string cannot be enforced, the comparison -// is false. -func (p *Profile) Compare(a, b string) bool { - a, err := p.String(a) - if err != nil { - return false - } - b, err = p.String(b) - if err != nil { - return false - } - - // TODO: This is out of order. Need to extract the transformation logic and - // put this in where the normal case folding would go (but only for - // comparison). - if p.options.ignorecase { - a = width.Fold.String(a) - b = width.Fold.String(a) - } - - return a == b -} - -// Allowed returns a runes.Set containing every rune that is a member of the -// underlying profile's string class and not disallowed by any profile specific -// rules. 
-func (p *Profile) Allowed() runes.Set { - if p.options.disallow != nil { - return runes.Predicate(func(r rune) bool { - return p.class.Contains(r) && !p.options.disallow.Contains(r) - }) - } - return p.class -} - -type checker struct { - p *Profile - allowed runes.Set - - beforeBits catBitmap - termBits catBitmap - acceptBits catBitmap -} - -func (c *checker) Reset() { - c.beforeBits = 0 - c.termBits = 0 - c.acceptBits = 0 -} - -func (c *checker) span(src []byte, atEOF bool) (n int, err error) { - for n < len(src) { - e, sz := dpTrie.lookup(src[n:]) - d := categoryTransitions[category(e&catMask)] - if sz == 0 { - if !atEOF { - return n, transform.ErrShortSrc - } - return n, errDisallowedRune - } - if property(e) < c.p.class.validFrom { - if d.rule == nil { - return n, errDisallowedRune - } - doLookAhead, err := d.rule(c.beforeBits) - if err != nil { - return n, err - } - if doLookAhead { - c.beforeBits &= d.keep - c.beforeBits |= d.set - // We may still have a lookahead rule which we will require to - // complete (by checking termBits == 0) before setting the new - // bits. - if c.termBits != 0 && (!c.checkLookahead() || c.termBits == 0) { - return n, err - } - c.termBits = d.term - c.acceptBits = d.accept - n += sz - continue - } - } - c.beforeBits &= d.keep - c.beforeBits |= d.set - if c.termBits != 0 && !c.checkLookahead() { - return n, errContext - } - n += sz - } - if m := c.beforeBits >> finalShift; c.beforeBits&m != m || c.termBits != 0 { - err = errContext - } - return n, err -} - -func (c *checker) checkLookahead() bool { - switch { - case c.beforeBits&c.termBits != 0: - c.termBits = 0 - c.acceptBits = 0 - case c.beforeBits&c.acceptBits != 0: - default: - return false - } - return true -} - -// TODO: we may get rid of this transform if transform.Chain understands -// something like a Spanner interface. -func (c checker) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - short := false - if len(dst) < len(src) { - src = src[:len(dst)] - atEOF = false - short = true - } - nSrc, err = c.span(src, atEOF) - nDst = copy(dst, src[:nSrc]) - if short && (err == transform.ErrShortSrc || err == nil) { - err = transform.ErrShortDst - } - return nDst, nSrc, err -} diff --git a/vendor/golang.org/x/text/secure/precis/profiles.go b/vendor/golang.org/x/text/secure/precis/profiles.go deleted file mode 100644 index ad50ae857..000000000 --- a/vendor/golang.org/x/text/secure/precis/profiles.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package precis - -import ( - "unicode" - - "golang.org/x/text/runes" - "golang.org/x/text/transform" - "golang.org/x/text/unicode/norm" -) - -var ( - Nickname *Profile = nickname // Implements the Nickname profile specified in RFC 7700. - UsernameCaseMapped *Profile = usernameCaseMap // Implements the UsernameCaseMapped profile specified in RFC 7613. - UsernameCasePreserved *Profile = usernameNoCaseMap // Implements the UsernameCasePreserved profile specified in RFC 7613. - OpaqueString *Profile = opaquestring // Implements the OpaqueString profile defined in RFC 7613 for passwords and other secure labels. -) - -// TODO: mvl: "Ultimately, I would manually define the structs for the internal -// profiles. This avoid pulling in unneeded tables when they are not used." 
-var ( - nickname = NewFreeform( - AdditionalMapping(func() transform.Transformer { - return &nickAdditionalMapping{} - }), - IgnoreCase, - Norm(norm.NFKC), - DisallowEmpty, - ) - usernameCaseMap = NewIdentifier( - FoldWidth, - FoldCase(), - Norm(norm.NFC), - BidiRule, - ) - usernameNoCaseMap = NewIdentifier( - FoldWidth, - Norm(norm.NFC), - BidiRule, - ) - opaquestring = NewFreeform( - AdditionalMapping(func() transform.Transformer { - return runes.Map(func(r rune) rune { - if unicode.Is(unicode.Zs, r) { - return ' ' - } - return r - }) - }), - Norm(norm.NFC), - DisallowEmpty, - ) -) diff --git a/vendor/golang.org/x/text/secure/precis/tables.go b/vendor/golang.org/x/text/secure/precis/tables.go deleted file mode 100644 index a9b500deb..000000000 --- a/vendor/golang.org/x/text/secure/precis/tables.go +++ /dev/null @@ -1,3788 +0,0 @@ -// This file was generated by go generate; DO NOT EDIT - -package precis - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "9.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *derivedPropertiesTrie) lookup(s []byte) (v uint8, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return derivedPropertiesValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := derivedPropertiesIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := derivedPropertiesIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = derivedPropertiesIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := derivedPropertiesIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = derivedPropertiesIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = derivedPropertiesIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *derivedPropertiesTrie) lookupUnsafe(s []byte) uint8 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return derivedPropertiesValues[c0] - } - i := derivedPropertiesIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = derivedPropertiesIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = derivedPropertiesIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *derivedPropertiesTrie) lookupString(s string) (v uint8, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return derivedPropertiesValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := derivedPropertiesIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := derivedPropertiesIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = derivedPropertiesIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := derivedPropertiesIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = derivedPropertiesIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = derivedPropertiesIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *derivedPropertiesTrie) lookupStringUnsafe(s string) uint8 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return derivedPropertiesValues[c0] - } - i := derivedPropertiesIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = derivedPropertiesIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = derivedPropertiesIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// derivedPropertiesTrie. Total size: 25344 bytes (24.75 KiB). Checksum: c5b977d76d42d8a. -type derivedPropertiesTrie struct{} - -func newDerivedPropertiesTrie(i int) *derivedPropertiesTrie { - return &derivedPropertiesTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. 
-func (t *derivedPropertiesTrie) lookupValue(n uint32, b byte) uint8 { - switch { - default: - return uint8(derivedPropertiesValues[n<<6+uint32(b)]) - } -} - -// derivedPropertiesValues: 324 blocks, 20736 entries, 20736 bytes -// The third block is the zero block. -var derivedPropertiesValues = [20736]uint8{ - // Block 0x0, offset 0x0 - 0x00: 0x0040, 0x01: 0x0040, 0x02: 0x0040, 0x03: 0x0040, 0x04: 0x0040, 0x05: 0x0040, - 0x06: 0x0040, 0x07: 0x0040, 0x08: 0x0040, 0x09: 0x0040, 0x0a: 0x0040, 0x0b: 0x0040, - 0x0c: 0x0040, 0x0d: 0x0040, 0x0e: 0x0040, 0x0f: 0x0040, 0x10: 0x0040, 0x11: 0x0040, - 0x12: 0x0040, 0x13: 0x0040, 0x14: 0x0040, 0x15: 0x0040, 0x16: 0x0040, 0x17: 0x0040, - 0x18: 0x0040, 0x19: 0x0040, 0x1a: 0x0040, 0x1b: 0x0040, 0x1c: 0x0040, 0x1d: 0x0040, - 0x1e: 0x0040, 0x1f: 0x0040, 0x20: 0x0080, 0x21: 0x00c0, 0x22: 0x00c0, 0x23: 0x00c0, - 0x24: 0x00c0, 0x25: 0x00c0, 0x26: 0x00c0, 0x27: 0x00c0, 0x28: 0x00c0, 0x29: 0x00c0, - 0x2a: 0x00c0, 0x2b: 0x00c0, 0x2c: 0x00c0, 0x2d: 0x00c0, 0x2e: 0x00c0, 0x2f: 0x00c0, - 0x30: 0x00c0, 0x31: 0x00c0, 0x32: 0x00c0, 0x33: 0x00c0, 0x34: 0x00c0, 0x35: 0x00c0, - 0x36: 0x00c0, 0x37: 0x00c0, 0x38: 0x00c0, 0x39: 0x00c0, 0x3a: 0x00c0, 0x3b: 0x00c0, - 0x3c: 0x00c0, 0x3d: 0x00c0, 0x3e: 0x00c0, 0x3f: 0x00c0, - // Block 0x1, offset 0x40 - 0x40: 0x00c0, 0x41: 0x00c0, 0x42: 0x00c0, 0x43: 0x00c0, 0x44: 0x00c0, 0x45: 0x00c0, - 0x46: 0x00c0, 0x47: 0x00c0, 0x48: 0x00c0, 0x49: 0x00c0, 0x4a: 0x00c0, 0x4b: 0x00c0, - 0x4c: 0x00c0, 0x4d: 0x00c0, 0x4e: 0x00c0, 0x4f: 0x00c0, 0x50: 0x00c0, 0x51: 0x00c0, - 0x52: 0x00c0, 0x53: 0x00c0, 0x54: 0x00c0, 0x55: 0x00c0, 0x56: 0x00c0, 0x57: 0x00c0, - 0x58: 0x00c0, 0x59: 0x00c0, 0x5a: 0x00c0, 0x5b: 0x00c0, 0x5c: 0x00c0, 0x5d: 0x00c0, - 0x5e: 0x00c0, 0x5f: 0x00c0, 0x60: 0x00c0, 0x61: 0x00c0, 0x62: 0x00c0, 0x63: 0x00c0, - 0x64: 0x00c0, 0x65: 0x00c0, 0x66: 0x00c0, 0x67: 0x00c0, 0x68: 0x00c0, 0x69: 0x00c0, - 0x6a: 0x00c0, 0x6b: 0x00c0, 0x6c: 0x00c7, 0x6d: 0x00c0, 0x6e: 0x00c0, 0x6f: 0x00c0, - 0x70: 0x00c0, 0x71: 0x00c0, 0x72: 0x00c0, 0x73: 0x00c0, 0x74: 0x00c0, 0x75: 0x00c0, - 0x76: 0x00c0, 0x77: 0x00c0, 0x78: 0x00c0, 0x79: 0x00c0, 0x7a: 0x00c0, 0x7b: 0x00c0, - 0x7c: 0x00c0, 0x7d: 0x00c0, 0x7e: 0x00c0, 0x7f: 0x0040, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, - 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, - 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, - 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, - 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, - 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x0080, 0xe1: 0x0080, 0xe2: 0x0080, 0xe3: 0x0080, - 0xe4: 0x0080, 0xe5: 0x0080, 0xe6: 0x0080, 0xe7: 0x0080, 0xe8: 0x0080, 0xe9: 0x0080, - 0xea: 0x0080, 0xeb: 0x0080, 0xec: 0x0080, 0xed: 0x0040, 0xee: 0x0080, 0xef: 0x0080, - 0xf0: 0x0080, 0xf1: 0x0080, 0xf2: 0x0080, 0xf3: 0x0080, 0xf4: 0x0080, 0xf5: 0x0080, - 0xf6: 0x0080, 0xf7: 0x004f, 0xf8: 0x0080, 0xf9: 0x0080, 0xfa: 0x0080, 0xfb: 0x0080, - 0xfc: 0x0080, 0xfd: 0x0080, 0xfe: 0x0080, 0xff: 0x0080, - // Block 0x4, offset 0x100 - 0x100: 0x00c0, 0x101: 0x00c0, 0x102: 0x00c0, 0x103: 0x00c0, 0x104: 0x00c0, 0x105: 0x00c0, - 0x106: 0x00c0, 0x107: 0x00c0, 0x108: 0x00c0, 0x109: 0x00c0, 0x10a: 0x00c0, 0x10b: 0x00c0, - 0x10c: 0x00c0, 0x10d: 0x00c0, 0x10e: 0x00c0, 0x10f: 0x00c0, 0x110: 0x00c0, 0x111: 0x00c0, - 0x112: 0x00c0, 0x113: 0x00c0, 0x114: 0x00c0, 0x115: 0x00c0, 0x116: 
0x00c0, 0x117: 0x0080, - 0x118: 0x00c0, 0x119: 0x00c0, 0x11a: 0x00c0, 0x11b: 0x00c0, 0x11c: 0x00c0, 0x11d: 0x00c0, - 0x11e: 0x00c0, 0x11f: 0x00c0, 0x120: 0x00c0, 0x121: 0x00c0, 0x122: 0x00c0, 0x123: 0x00c0, - 0x124: 0x00c0, 0x125: 0x00c0, 0x126: 0x00c0, 0x127: 0x00c0, 0x128: 0x00c0, 0x129: 0x00c0, - 0x12a: 0x00c0, 0x12b: 0x00c0, 0x12c: 0x00c0, 0x12d: 0x00c0, 0x12e: 0x00c0, 0x12f: 0x00c0, - 0x130: 0x00c0, 0x131: 0x00c0, 0x132: 0x00c0, 0x133: 0x00c0, 0x134: 0x00c0, 0x135: 0x00c0, - 0x136: 0x00c0, 0x137: 0x0080, 0x138: 0x00c0, 0x139: 0x00c0, 0x13a: 0x00c0, 0x13b: 0x00c0, - 0x13c: 0x00c0, 0x13d: 0x00c0, 0x13e: 0x00c0, 0x13f: 0x00c0, - // Block 0x5, offset 0x140 - 0x140: 0x00c0, 0x141: 0x00c0, 0x142: 0x00c0, 0x143: 0x00c0, 0x144: 0x00c0, 0x145: 0x00c0, - 0x146: 0x00c0, 0x147: 0x00c0, 0x148: 0x00c0, 0x149: 0x00c0, 0x14a: 0x00c0, 0x14b: 0x00c0, - 0x14c: 0x00c0, 0x14d: 0x00c0, 0x14e: 0x00c0, 0x14f: 0x00c0, 0x150: 0x00c0, 0x151: 0x00c0, - 0x152: 0x00c0, 0x153: 0x00c0, 0x154: 0x00c0, 0x155: 0x00c0, 0x156: 0x00c0, 0x157: 0x00c0, - 0x158: 0x00c0, 0x159: 0x00c0, 0x15a: 0x00c0, 0x15b: 0x00c0, 0x15c: 0x00c0, 0x15d: 0x00c0, - 0x15e: 0x00c0, 0x15f: 0x00c0, 0x160: 0x00c0, 0x161: 0x00c0, 0x162: 0x00c0, 0x163: 0x00c0, - 0x164: 0x00c0, 0x165: 0x00c0, 0x166: 0x00c0, 0x167: 0x00c0, 0x168: 0x00c0, 0x169: 0x00c0, - 0x16a: 0x00c0, 0x16b: 0x00c0, 0x16c: 0x00c0, 0x16d: 0x00c0, 0x16e: 0x00c0, 0x16f: 0x00c0, - 0x170: 0x00c0, 0x171: 0x00c0, 0x172: 0x0080, 0x173: 0x0080, 0x174: 0x00c0, 0x175: 0x00c0, - 0x176: 0x00c0, 0x177: 0x00c0, 0x178: 0x00c0, 0x179: 0x00c0, 0x17a: 0x00c0, 0x17b: 0x00c0, - 0x17c: 0x00c0, 0x17d: 0x00c0, 0x17e: 0x00c0, 0x17f: 0x0080, - // Block 0x6, offset 0x180 - 0x180: 0x0080, 0x181: 0x00c0, 0x182: 0x00c0, 0x183: 0x00c0, 0x184: 0x00c0, 0x185: 0x00c0, - 0x186: 0x00c0, 0x187: 0x00c0, 0x188: 0x00c0, 0x189: 0x0080, 0x18a: 0x00c0, 0x18b: 0x00c0, - 0x18c: 0x00c0, 0x18d: 0x00c0, 0x18e: 0x00c0, 0x18f: 0x00c0, 0x190: 0x00c0, 0x191: 0x00c0, - 0x192: 0x00c0, 0x193: 0x00c0, 0x194: 0x00c0, 0x195: 0x00c0, 0x196: 0x00c0, 0x197: 0x00c0, - 0x198: 0x00c0, 0x199: 0x00c0, 0x19a: 0x00c0, 0x19b: 0x00c0, 0x19c: 0x00c0, 0x19d: 0x00c0, - 0x19e: 0x00c0, 0x19f: 0x00c0, 0x1a0: 0x00c0, 0x1a1: 0x00c0, 0x1a2: 0x00c0, 0x1a3: 0x00c0, - 0x1a4: 0x00c0, 0x1a5: 0x00c0, 0x1a6: 0x00c0, 0x1a7: 0x00c0, 0x1a8: 0x00c0, 0x1a9: 0x00c0, - 0x1aa: 0x00c0, 0x1ab: 0x00c0, 0x1ac: 0x00c0, 0x1ad: 0x00c0, 0x1ae: 0x00c0, 0x1af: 0x00c0, - 0x1b0: 0x00c0, 0x1b1: 0x00c0, 0x1b2: 0x00c0, 0x1b3: 0x00c0, 0x1b4: 0x00c0, 0x1b5: 0x00c0, - 0x1b6: 0x00c0, 0x1b7: 0x00c0, 0x1b8: 0x00c0, 0x1b9: 0x00c0, 0x1ba: 0x00c0, 0x1bb: 0x00c0, - 0x1bc: 0x00c0, 0x1bd: 0x00c0, 0x1be: 0x00c0, 0x1bf: 0x0080, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x00c0, 0x1c1: 0x00c0, 0x1c2: 0x00c0, 0x1c3: 0x00c0, 0x1c4: 0x00c0, 0x1c5: 0x00c0, - 0x1c6: 0x00c0, 0x1c7: 0x00c0, 0x1c8: 0x00c0, 0x1c9: 0x00c0, 0x1ca: 0x00c0, 0x1cb: 0x00c0, - 0x1cc: 0x00c0, 0x1cd: 0x00c0, 0x1ce: 0x00c0, 0x1cf: 0x00c0, 0x1d0: 0x00c0, 0x1d1: 0x00c0, - 0x1d2: 0x00c0, 0x1d3: 0x00c0, 0x1d4: 0x00c0, 0x1d5: 0x00c0, 0x1d6: 0x00c0, 0x1d7: 0x00c0, - 0x1d8: 0x00c0, 0x1d9: 0x00c0, 0x1da: 0x00c0, 0x1db: 0x00c0, 0x1dc: 0x00c0, 0x1dd: 0x00c0, - 0x1de: 0x00c0, 0x1df: 0x00c0, 0x1e0: 0x00c0, 0x1e1: 0x00c0, 0x1e2: 0x00c0, 0x1e3: 0x00c0, - 0x1e4: 0x00c0, 0x1e5: 0x00c0, 0x1e6: 0x00c0, 0x1e7: 0x00c0, 0x1e8: 0x00c0, 0x1e9: 0x00c0, - 0x1ea: 0x00c0, 0x1eb: 0x00c0, 0x1ec: 0x00c0, 0x1ed: 0x00c0, 0x1ee: 0x00c0, 0x1ef: 0x00c0, - 0x1f0: 0x00c0, 0x1f1: 0x00c0, 0x1f2: 0x00c0, 0x1f3: 0x00c0, 0x1f4: 0x00c0, 0x1f5: 0x00c0, - 0x1f6: 0x00c0, 0x1f7: 0x00c0, 0x1f8: 
0x00c0, 0x1f9: 0x00c0, 0x1fa: 0x00c0, 0x1fb: 0x00c0, - 0x1fc: 0x00c0, 0x1fd: 0x00c0, 0x1fe: 0x00c0, 0x1ff: 0x00c0, - // Block 0x8, offset 0x200 - 0x200: 0x00c0, 0x201: 0x00c0, 0x202: 0x00c0, 0x203: 0x00c0, 0x204: 0x0080, 0x205: 0x0080, - 0x206: 0x0080, 0x207: 0x0080, 0x208: 0x0080, 0x209: 0x0080, 0x20a: 0x0080, 0x20b: 0x0080, - 0x20c: 0x0080, 0x20d: 0x00c0, 0x20e: 0x00c0, 0x20f: 0x00c0, 0x210: 0x00c0, 0x211: 0x00c0, - 0x212: 0x00c0, 0x213: 0x00c0, 0x214: 0x00c0, 0x215: 0x00c0, 0x216: 0x00c0, 0x217: 0x00c0, - 0x218: 0x00c0, 0x219: 0x00c0, 0x21a: 0x00c0, 0x21b: 0x00c0, 0x21c: 0x00c0, 0x21d: 0x00c0, - 0x21e: 0x00c0, 0x21f: 0x00c0, 0x220: 0x00c0, 0x221: 0x00c0, 0x222: 0x00c0, 0x223: 0x00c0, - 0x224: 0x00c0, 0x225: 0x00c0, 0x226: 0x00c0, 0x227: 0x00c0, 0x228: 0x00c0, 0x229: 0x00c0, - 0x22a: 0x00c0, 0x22b: 0x00c0, 0x22c: 0x00c0, 0x22d: 0x00c0, 0x22e: 0x00c0, 0x22f: 0x00c0, - 0x230: 0x00c0, 0x231: 0x0080, 0x232: 0x0080, 0x233: 0x0080, 0x234: 0x00c0, 0x235: 0x00c0, - 0x236: 0x00c0, 0x237: 0x00c0, 0x238: 0x00c0, 0x239: 0x00c0, 0x23a: 0x00c0, 0x23b: 0x00c0, - 0x23c: 0x00c0, 0x23d: 0x00c0, 0x23e: 0x00c0, 0x23f: 0x00c0, - // Block 0x9, offset 0x240 - 0x240: 0x00c0, 0x241: 0x00c0, 0x242: 0x00c0, 0x243: 0x00c0, 0x244: 0x00c0, 0x245: 0x00c0, - 0x246: 0x00c0, 0x247: 0x00c0, 0x248: 0x00c0, 0x249: 0x00c0, 0x24a: 0x00c0, 0x24b: 0x00c0, - 0x24c: 0x00c0, 0x24d: 0x00c0, 0x24e: 0x00c0, 0x24f: 0x00c0, 0x250: 0x00c0, 0x251: 0x00c0, - 0x252: 0x00c0, 0x253: 0x00c0, 0x254: 0x00c0, 0x255: 0x00c0, 0x256: 0x00c0, 0x257: 0x00c0, - 0x258: 0x00c0, 0x259: 0x00c0, 0x25a: 0x00c0, 0x25b: 0x00c0, 0x25c: 0x00c0, 0x25d: 0x00c0, - 0x25e: 0x00c0, 0x25f: 0x00c0, 0x260: 0x00c0, 0x261: 0x00c0, 0x262: 0x00c0, 0x263: 0x00c0, - 0x264: 0x00c0, 0x265: 0x00c0, 0x266: 0x00c0, 0x267: 0x00c0, 0x268: 0x00c0, 0x269: 0x00c0, - 0x26a: 0x00c0, 0x26b: 0x00c0, 0x26c: 0x00c0, 0x26d: 0x00c0, 0x26e: 0x00c0, 0x26f: 0x00c0, - 0x270: 0x0080, 0x271: 0x0080, 0x272: 0x0080, 0x273: 0x0080, 0x274: 0x0080, 0x275: 0x0080, - 0x276: 0x0080, 0x277: 0x0080, 0x278: 0x0080, 0x279: 0x00c0, 0x27a: 0x00c0, 0x27b: 0x00c0, - 0x27c: 0x00c0, 0x27d: 0x00c0, 0x27e: 0x00c0, 0x27f: 0x00c0, - // Block 0xa, offset 0x280 - 0x280: 0x00c0, 0x281: 0x00c0, 0x282: 0x0080, 0x283: 0x0080, 0x284: 0x0080, 0x285: 0x0080, - 0x286: 0x00c0, 0x287: 0x00c0, 0x288: 0x00c0, 0x289: 0x00c0, 0x28a: 0x00c0, 0x28b: 0x00c0, - 0x28c: 0x00c0, 0x28d: 0x00c0, 0x28e: 0x00c0, 0x28f: 0x00c0, 0x290: 0x00c0, 0x291: 0x00c0, - 0x292: 0x0080, 0x293: 0x0080, 0x294: 0x0080, 0x295: 0x0080, 0x296: 0x0080, 0x297: 0x0080, - 0x298: 0x0080, 0x299: 0x0080, 0x29a: 0x0080, 0x29b: 0x0080, 0x29c: 0x0080, 0x29d: 0x0080, - 0x29e: 0x0080, 0x29f: 0x0080, 0x2a0: 0x0080, 0x2a1: 0x0080, 0x2a2: 0x0080, 0x2a3: 0x0080, - 0x2a4: 0x0080, 0x2a5: 0x0080, 0x2a6: 0x0080, 0x2a7: 0x0080, 0x2a8: 0x0080, 0x2a9: 0x0080, - 0x2aa: 0x0080, 0x2ab: 0x0080, 0x2ac: 0x00c0, 0x2ad: 0x0080, 0x2ae: 0x00c0, 0x2af: 0x0080, - 0x2b0: 0x0080, 0x2b1: 0x0080, 0x2b2: 0x0080, 0x2b3: 0x0080, 0x2b4: 0x0080, 0x2b5: 0x0080, - 0x2b6: 0x0080, 0x2b7: 0x0080, 0x2b8: 0x0080, 0x2b9: 0x0080, 0x2ba: 0x0080, 0x2bb: 0x0080, - 0x2bc: 0x0080, 0x2bd: 0x0080, 0x2be: 0x0080, 0x2bf: 0x0080, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x00c3, 0x2c1: 0x00c3, 0x2c2: 0x00c3, 0x2c3: 0x00c3, 0x2c4: 0x00c3, 0x2c5: 0x00c3, - 0x2c6: 0x00c3, 0x2c7: 0x00c3, 0x2c8: 0x00c3, 0x2c9: 0x00c3, 0x2ca: 0x00c3, 0x2cb: 0x00c3, - 0x2cc: 0x00c3, 0x2cd: 0x00c3, 0x2ce: 0x00c3, 0x2cf: 0x00c3, 0x2d0: 0x00c3, 0x2d1: 0x00c3, - 0x2d2: 0x00c3, 0x2d3: 0x00c3, 0x2d4: 0x00c3, 0x2d5: 0x00c3, 0x2d6: 0x00c3, 0x2d7: 0x00c3, - 0x2d8: 
0x00c3, 0x2d9: 0x00c3, 0x2da: 0x00c3, 0x2db: 0x00c3, 0x2dc: 0x00c3, 0x2dd: 0x00c3, - 0x2de: 0x00c3, 0x2df: 0x00c3, 0x2e0: 0x00c3, 0x2e1: 0x00c3, 0x2e2: 0x00c3, 0x2e3: 0x00c3, - 0x2e4: 0x00c3, 0x2e5: 0x00c3, 0x2e6: 0x00c3, 0x2e7: 0x00c3, 0x2e8: 0x00c3, 0x2e9: 0x00c3, - 0x2ea: 0x00c3, 0x2eb: 0x00c3, 0x2ec: 0x00c3, 0x2ed: 0x00c3, 0x2ee: 0x00c3, 0x2ef: 0x00c3, - 0x2f0: 0x00c3, 0x2f1: 0x00c3, 0x2f2: 0x00c3, 0x2f3: 0x00c3, 0x2f4: 0x00c3, 0x2f5: 0x00c3, - 0x2f6: 0x00c3, 0x2f7: 0x00c3, 0x2f8: 0x00c3, 0x2f9: 0x00c3, 0x2fa: 0x00c3, 0x2fb: 0x00c3, - 0x2fc: 0x00c3, 0x2fd: 0x00c3, 0x2fe: 0x00c3, 0x2ff: 0x00c3, - // Block 0xc, offset 0x300 - 0x300: 0x0083, 0x301: 0x0083, 0x302: 0x00c3, 0x303: 0x0083, 0x304: 0x0083, 0x305: 0x00c3, - 0x306: 0x00c3, 0x307: 0x00c3, 0x308: 0x00c3, 0x309: 0x00c3, 0x30a: 0x00c3, 0x30b: 0x00c3, - 0x30c: 0x00c3, 0x30d: 0x00c3, 0x30e: 0x00c3, 0x30f: 0x0040, 0x310: 0x00c3, 0x311: 0x00c3, - 0x312: 0x00c3, 0x313: 0x00c3, 0x314: 0x00c3, 0x315: 0x00c3, 0x316: 0x00c3, 0x317: 0x00c3, - 0x318: 0x00c3, 0x319: 0x00c3, 0x31a: 0x00c3, 0x31b: 0x00c3, 0x31c: 0x00c3, 0x31d: 0x00c3, - 0x31e: 0x00c3, 0x31f: 0x00c3, 0x320: 0x00c3, 0x321: 0x00c3, 0x322: 0x00c3, 0x323: 0x00c3, - 0x324: 0x00c3, 0x325: 0x00c3, 0x326: 0x00c3, 0x327: 0x00c3, 0x328: 0x00c3, 0x329: 0x00c3, - 0x32a: 0x00c3, 0x32b: 0x00c3, 0x32c: 0x00c3, 0x32d: 0x00c3, 0x32e: 0x00c3, 0x32f: 0x00c3, - 0x330: 0x00c8, 0x331: 0x00c8, 0x332: 0x00c8, 0x333: 0x00c8, 0x334: 0x0080, 0x335: 0x0050, - 0x336: 0x00c8, 0x337: 0x00c8, 0x33a: 0x0088, 0x33b: 0x00c8, - 0x33c: 0x00c8, 0x33d: 0x00c8, 0x33e: 0x0080, 0x33f: 0x00c8, - // Block 0xd, offset 0x340 - 0x344: 0x0088, 0x345: 0x0080, - 0x346: 0x00c8, 0x347: 0x0080, 0x348: 0x00c8, 0x349: 0x00c8, 0x34a: 0x00c8, - 0x34c: 0x00c8, 0x34e: 0x00c8, 0x34f: 0x00c8, 0x350: 0x00c8, 0x351: 0x00c8, - 0x352: 0x00c8, 0x353: 0x00c8, 0x354: 0x00c8, 0x355: 0x00c8, 0x356: 0x00c8, 0x357: 0x00c8, - 0x358: 0x00c8, 0x359: 0x00c8, 0x35a: 0x00c8, 0x35b: 0x00c8, 0x35c: 0x00c8, 0x35d: 0x00c8, - 0x35e: 0x00c8, 0x35f: 0x00c8, 0x360: 0x00c8, 0x361: 0x00c8, 0x363: 0x00c8, - 0x364: 0x00c8, 0x365: 0x00c8, 0x366: 0x00c8, 0x367: 0x00c8, 0x368: 0x00c8, 0x369: 0x00c8, - 0x36a: 0x00c8, 0x36b: 0x00c8, 0x36c: 0x00c8, 0x36d: 0x00c8, 0x36e: 0x00c8, 0x36f: 0x00c8, - 0x370: 0x00c8, 0x371: 0x00c8, 0x372: 0x00c8, 0x373: 0x00c8, 0x374: 0x00c8, 0x375: 0x00c8, - 0x376: 0x00c8, 0x377: 0x00c8, 0x378: 0x00c8, 0x379: 0x00c8, 0x37a: 0x00c8, 0x37b: 0x00c8, - 0x37c: 0x00c8, 0x37d: 0x00c8, 0x37e: 0x00c8, 0x37f: 0x00c8, - // Block 0xe, offset 0x380 - 0x380: 0x00c8, 0x381: 0x00c8, 0x382: 0x00c8, 0x383: 0x00c8, 0x384: 0x00c8, 0x385: 0x00c8, - 0x386: 0x00c8, 0x387: 0x00c8, 0x388: 0x00c8, 0x389: 0x00c8, 0x38a: 0x00c8, 0x38b: 0x00c8, - 0x38c: 0x00c8, 0x38d: 0x00c8, 0x38e: 0x00c8, 0x38f: 0x00c8, 0x390: 0x0088, 0x391: 0x0088, - 0x392: 0x0088, 0x393: 0x0088, 0x394: 0x0088, 0x395: 0x0088, 0x396: 0x0088, 0x397: 0x00c8, - 0x398: 0x00c8, 0x399: 0x00c8, 0x39a: 0x00c8, 0x39b: 0x00c8, 0x39c: 0x00c8, 0x39d: 0x00c8, - 0x39e: 0x00c8, 0x39f: 0x00c8, 0x3a0: 0x00c8, 0x3a1: 0x00c8, 0x3a2: 0x00c0, 0x3a3: 0x00c0, - 0x3a4: 0x00c0, 0x3a5: 0x00c0, 0x3a6: 0x00c0, 0x3a7: 0x00c0, 0x3a8: 0x00c0, 0x3a9: 0x00c0, - 0x3aa: 0x00c0, 0x3ab: 0x00c0, 0x3ac: 0x00c0, 0x3ad: 0x00c0, 0x3ae: 0x00c0, 0x3af: 0x00c0, - 0x3b0: 0x0088, 0x3b1: 0x0088, 0x3b2: 0x0088, 0x3b3: 0x00c8, 0x3b4: 0x0088, 0x3b5: 0x0088, - 0x3b6: 0x0088, 0x3b7: 0x00c8, 0x3b8: 0x00c8, 0x3b9: 0x0088, 0x3ba: 0x00c8, 0x3bb: 0x00c8, - 0x3bc: 0x00c8, 0x3bd: 0x00c8, 0x3be: 0x00c8, 0x3bf: 0x00c8, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x00c0, 
0x3c1: 0x00c0, 0x3c2: 0x0080, 0x3c3: 0x00c3, 0x3c4: 0x00c3, 0x3c5: 0x00c3, - 0x3c6: 0x00c3, 0x3c7: 0x00c3, 0x3c8: 0x0083, 0x3c9: 0x0083, 0x3ca: 0x00c0, 0x3cb: 0x00c0, - 0x3cc: 0x00c0, 0x3cd: 0x00c0, 0x3ce: 0x00c0, 0x3cf: 0x00c0, 0x3d0: 0x00c0, 0x3d1: 0x00c0, - 0x3d2: 0x00c0, 0x3d3: 0x00c0, 0x3d4: 0x00c0, 0x3d5: 0x00c0, 0x3d6: 0x00c0, 0x3d7: 0x00c0, - 0x3d8: 0x00c0, 0x3d9: 0x00c0, 0x3da: 0x00c0, 0x3db: 0x00c0, 0x3dc: 0x00c0, 0x3dd: 0x00c0, - 0x3de: 0x00c0, 0x3df: 0x00c0, 0x3e0: 0x00c0, 0x3e1: 0x00c0, 0x3e2: 0x00c0, 0x3e3: 0x00c0, - 0x3e4: 0x00c0, 0x3e5: 0x00c0, 0x3e6: 0x00c0, 0x3e7: 0x00c0, 0x3e8: 0x00c0, 0x3e9: 0x00c0, - 0x3ea: 0x00c0, 0x3eb: 0x00c0, 0x3ec: 0x00c0, 0x3ed: 0x00c0, 0x3ee: 0x00c0, 0x3ef: 0x00c0, - 0x3f0: 0x00c0, 0x3f1: 0x00c0, 0x3f2: 0x00c0, 0x3f3: 0x00c0, 0x3f4: 0x00c0, 0x3f5: 0x00c0, - 0x3f6: 0x00c0, 0x3f7: 0x00c0, 0x3f8: 0x00c0, 0x3f9: 0x00c0, 0x3fa: 0x00c0, 0x3fb: 0x00c0, - 0x3fc: 0x00c0, 0x3fd: 0x00c0, 0x3fe: 0x00c0, 0x3ff: 0x00c0, - // Block 0x10, offset 0x400 - 0x400: 0x00c0, 0x401: 0x00c0, 0x402: 0x00c0, 0x403: 0x00c0, 0x404: 0x00c0, 0x405: 0x00c0, - 0x406: 0x00c0, 0x407: 0x00c0, 0x408: 0x00c0, 0x409: 0x00c0, 0x40a: 0x00c0, 0x40b: 0x00c0, - 0x40c: 0x00c0, 0x40d: 0x00c0, 0x40e: 0x00c0, 0x40f: 0x00c0, 0x410: 0x00c0, 0x411: 0x00c0, - 0x412: 0x00c0, 0x413: 0x00c0, 0x414: 0x00c0, 0x415: 0x00c0, 0x416: 0x00c0, 0x417: 0x00c0, - 0x418: 0x00c0, 0x419: 0x00c0, 0x41a: 0x00c0, 0x41b: 0x00c0, 0x41c: 0x00c0, 0x41d: 0x00c0, - 0x41e: 0x00c0, 0x41f: 0x00c0, 0x420: 0x00c0, 0x421: 0x00c0, 0x422: 0x00c0, 0x423: 0x00c0, - 0x424: 0x00c0, 0x425: 0x00c0, 0x426: 0x00c0, 0x427: 0x00c0, 0x428: 0x00c0, 0x429: 0x00c0, - 0x42a: 0x00c0, 0x42b: 0x00c0, 0x42c: 0x00c0, 0x42d: 0x00c0, 0x42e: 0x00c0, 0x42f: 0x00c0, - 0x431: 0x00c0, 0x432: 0x00c0, 0x433: 0x00c0, 0x434: 0x00c0, 0x435: 0x00c0, - 0x436: 0x00c0, 0x437: 0x00c0, 0x438: 0x00c0, 0x439: 0x00c0, 0x43a: 0x00c0, 0x43b: 0x00c0, - 0x43c: 0x00c0, 0x43d: 0x00c0, 0x43e: 0x00c0, 0x43f: 0x00c0, - // Block 0x11, offset 0x440 - 0x440: 0x00c0, 0x441: 0x00c0, 0x442: 0x00c0, 0x443: 0x00c0, 0x444: 0x00c0, 0x445: 0x00c0, - 0x446: 0x00c0, 0x447: 0x00c0, 0x448: 0x00c0, 0x449: 0x00c0, 0x44a: 0x00c0, 0x44b: 0x00c0, - 0x44c: 0x00c0, 0x44d: 0x00c0, 0x44e: 0x00c0, 0x44f: 0x00c0, 0x450: 0x00c0, 0x451: 0x00c0, - 0x452: 0x00c0, 0x453: 0x00c0, 0x454: 0x00c0, 0x455: 0x00c0, 0x456: 0x00c0, - 0x459: 0x00c0, 0x45a: 0x0080, 0x45b: 0x0080, 0x45c: 0x0080, 0x45d: 0x0080, - 0x45e: 0x0080, 0x45f: 0x0080, 0x461: 0x00c0, 0x462: 0x00c0, 0x463: 0x00c0, - 0x464: 0x00c0, 0x465: 0x00c0, 0x466: 0x00c0, 0x467: 0x00c0, 0x468: 0x00c0, 0x469: 0x00c0, - 0x46a: 0x00c0, 0x46b: 0x00c0, 0x46c: 0x00c0, 0x46d: 0x00c0, 0x46e: 0x00c0, 0x46f: 0x00c0, - 0x470: 0x00c0, 0x471: 0x00c0, 0x472: 0x00c0, 0x473: 0x00c0, 0x474: 0x00c0, 0x475: 0x00c0, - 0x476: 0x00c0, 0x477: 0x00c0, 0x478: 0x00c0, 0x479: 0x00c0, 0x47a: 0x00c0, 0x47b: 0x00c0, - 0x47c: 0x00c0, 0x47d: 0x00c0, 0x47e: 0x00c0, 0x47f: 0x00c0, - // Block 0x12, offset 0x480 - 0x480: 0x00c0, 0x481: 0x00c0, 0x482: 0x00c0, 0x483: 0x00c0, 0x484: 0x00c0, 0x485: 0x00c0, - 0x486: 0x00c0, 0x487: 0x0080, 0x489: 0x0080, 0x48a: 0x0080, - 0x48d: 0x0080, 0x48e: 0x0080, 0x48f: 0x0080, 0x491: 0x00cb, - 0x492: 0x00cb, 0x493: 0x00cb, 0x494: 0x00cb, 0x495: 0x00cb, 0x496: 0x00cb, 0x497: 0x00cb, - 0x498: 0x00cb, 0x499: 0x00cb, 0x49a: 0x00cb, 0x49b: 0x00cb, 0x49c: 0x00cb, 0x49d: 0x00cb, - 0x49e: 0x00cb, 0x49f: 0x00cb, 0x4a0: 0x00cb, 0x4a1: 0x00cb, 0x4a2: 0x00cb, 0x4a3: 0x00cb, - 0x4a4: 0x00cb, 0x4a5: 0x00cb, 0x4a6: 0x00cb, 0x4a7: 0x00cb, 0x4a8: 0x00cb, 0x4a9: 0x00cb, - 0x4aa: 
0x00cb, 0x4ab: 0x00cb, 0x4ac: 0x00cb, 0x4ad: 0x00cb, 0x4ae: 0x00cb, 0x4af: 0x00cb, - 0x4b0: 0x00cb, 0x4b1: 0x00cb, 0x4b2: 0x00cb, 0x4b3: 0x00cb, 0x4b4: 0x00cb, 0x4b5: 0x00cb, - 0x4b6: 0x00cb, 0x4b7: 0x00cb, 0x4b8: 0x00cb, 0x4b9: 0x00cb, 0x4ba: 0x00cb, 0x4bb: 0x00cb, - 0x4bc: 0x00cb, 0x4bd: 0x00cb, 0x4be: 0x008a, 0x4bf: 0x00cb, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x008a, 0x4c1: 0x00cb, 0x4c2: 0x00cb, 0x4c3: 0x008a, 0x4c4: 0x00cb, 0x4c5: 0x00cb, - 0x4c6: 0x008a, 0x4c7: 0x00cb, - 0x4d0: 0x00ca, 0x4d1: 0x00ca, - 0x4d2: 0x00ca, 0x4d3: 0x00ca, 0x4d4: 0x00ca, 0x4d5: 0x00ca, 0x4d6: 0x00ca, 0x4d7: 0x00ca, - 0x4d8: 0x00ca, 0x4d9: 0x00ca, 0x4da: 0x00ca, 0x4db: 0x00ca, 0x4dc: 0x00ca, 0x4dd: 0x00ca, - 0x4de: 0x00ca, 0x4df: 0x00ca, 0x4e0: 0x00ca, 0x4e1: 0x00ca, 0x4e2: 0x00ca, 0x4e3: 0x00ca, - 0x4e4: 0x00ca, 0x4e5: 0x00ca, 0x4e6: 0x00ca, 0x4e7: 0x00ca, 0x4e8: 0x00ca, 0x4e9: 0x00ca, - 0x4ea: 0x00ca, - 0x4f0: 0x00ca, 0x4f1: 0x00ca, 0x4f2: 0x00ca, 0x4f3: 0x0051, 0x4f4: 0x0051, - // Block 0x14, offset 0x500 - 0x500: 0x0040, 0x501: 0x0040, 0x502: 0x0040, 0x503: 0x0040, 0x504: 0x0040, 0x505: 0x0040, - 0x506: 0x0080, 0x507: 0x0080, 0x508: 0x0080, 0x509: 0x0080, 0x50a: 0x0080, 0x50b: 0x0080, - 0x50c: 0x0080, 0x50d: 0x0080, 0x50e: 0x0080, 0x50f: 0x0080, 0x510: 0x00c3, 0x511: 0x00c3, - 0x512: 0x00c3, 0x513: 0x00c3, 0x514: 0x00c3, 0x515: 0x00c3, 0x516: 0x00c3, 0x517: 0x00c3, - 0x518: 0x00c3, 0x519: 0x00c3, 0x51a: 0x00c3, 0x51b: 0x0080, 0x51c: 0x0040, - 0x51e: 0x0080, 0x51f: 0x0080, 0x520: 0x00c2, 0x521: 0x00c0, 0x522: 0x00c4, 0x523: 0x00c4, - 0x524: 0x00c4, 0x525: 0x00c4, 0x526: 0x00c2, 0x527: 0x00c4, 0x528: 0x00c2, 0x529: 0x00c4, - 0x52a: 0x00c2, 0x52b: 0x00c2, 0x52c: 0x00c2, 0x52d: 0x00c2, 0x52e: 0x00c2, 0x52f: 0x00c4, - 0x530: 0x00c4, 0x531: 0x00c4, 0x532: 0x00c4, 0x533: 0x00c2, 0x534: 0x00c2, 0x535: 0x00c2, - 0x536: 0x00c2, 0x537: 0x00c2, 0x538: 0x00c2, 0x539: 0x00c2, 0x53a: 0x00c2, 0x53b: 0x00c2, - 0x53c: 0x00c2, 0x53d: 0x00c2, 0x53e: 0x00c2, 0x53f: 0x00c2, - // Block 0x15, offset 0x540 - 0x540: 0x0040, 0x541: 0x00c2, 0x542: 0x00c2, 0x543: 0x00c2, 0x544: 0x00c2, 0x545: 0x00c2, - 0x546: 0x00c2, 0x547: 0x00c2, 0x548: 0x00c4, 0x549: 0x00c2, 0x54a: 0x00c2, 0x54b: 0x00c3, - 0x54c: 0x00c3, 0x54d: 0x00c3, 0x54e: 0x00c3, 0x54f: 0x00c3, 0x550: 0x00c3, 0x551: 0x00c3, - 0x552: 0x00c3, 0x553: 0x00c3, 0x554: 0x00c3, 0x555: 0x00c3, 0x556: 0x00c3, 0x557: 0x00c3, - 0x558: 0x00c3, 0x559: 0x00c3, 0x55a: 0x00c3, 0x55b: 0x00c3, 0x55c: 0x00c3, 0x55d: 0x00c3, - 0x55e: 0x00c3, 0x55f: 0x00c3, 0x560: 0x0053, 0x561: 0x0053, 0x562: 0x0053, 0x563: 0x0053, - 0x564: 0x0053, 0x565: 0x0053, 0x566: 0x0053, 0x567: 0x0053, 0x568: 0x0053, 0x569: 0x0053, - 0x56a: 0x0080, 0x56b: 0x0080, 0x56c: 0x0080, 0x56d: 0x0080, 0x56e: 0x00c2, 0x56f: 0x00c2, - 0x570: 0x00c3, 0x571: 0x00c4, 0x572: 0x00c4, 0x573: 0x00c4, 0x574: 0x00c0, 0x575: 0x0084, - 0x576: 0x0084, 0x577: 0x0084, 0x578: 0x0082, 0x579: 0x00c2, 0x57a: 0x00c2, 0x57b: 0x00c2, - 0x57c: 0x00c2, 0x57d: 0x00c2, 0x57e: 0x00c2, 0x57f: 0x00c2, - // Block 0x16, offset 0x580 - 0x580: 0x00c2, 0x581: 0x00c2, 0x582: 0x00c2, 0x583: 0x00c2, 0x584: 0x00c2, 0x585: 0x00c2, - 0x586: 0x00c2, 0x587: 0x00c2, 0x588: 0x00c4, 0x589: 0x00c4, 0x58a: 0x00c4, 0x58b: 0x00c4, - 0x58c: 0x00c4, 0x58d: 0x00c4, 0x58e: 0x00c4, 0x58f: 0x00c4, 0x590: 0x00c4, 0x591: 0x00c4, - 0x592: 0x00c4, 0x593: 0x00c4, 0x594: 0x00c4, 0x595: 0x00c4, 0x596: 0x00c4, 0x597: 0x00c4, - 0x598: 0x00c4, 0x599: 0x00c4, 0x59a: 0x00c2, 0x59b: 0x00c2, 0x59c: 0x00c2, 0x59d: 0x00c2, - 0x59e: 0x00c2, 0x59f: 0x00c2, 0x5a0: 0x00c2, 0x5a1: 0x00c2, 0x5a2: 0x00c2, 
0x5a3: 0x00c2, - 0x5a4: 0x00c2, 0x5a5: 0x00c2, 0x5a6: 0x00c2, 0x5a7: 0x00c2, 0x5a8: 0x00c2, 0x5a9: 0x00c2, - 0x5aa: 0x00c2, 0x5ab: 0x00c2, 0x5ac: 0x00c2, 0x5ad: 0x00c2, 0x5ae: 0x00c2, 0x5af: 0x00c2, - 0x5b0: 0x00c2, 0x5b1: 0x00c2, 0x5b2: 0x00c2, 0x5b3: 0x00c2, 0x5b4: 0x00c2, 0x5b5: 0x00c2, - 0x5b6: 0x00c2, 0x5b7: 0x00c2, 0x5b8: 0x00c2, 0x5b9: 0x00c2, 0x5ba: 0x00c2, 0x5bb: 0x00c2, - 0x5bc: 0x00c2, 0x5bd: 0x00c2, 0x5be: 0x00c2, 0x5bf: 0x00c2, - // Block 0x17, offset 0x5c0 - 0x5c0: 0x00c4, 0x5c1: 0x00c2, 0x5c2: 0x00c2, 0x5c3: 0x00c4, 0x5c4: 0x00c4, 0x5c5: 0x00c4, - 0x5c6: 0x00c4, 0x5c7: 0x00c4, 0x5c8: 0x00c4, 0x5c9: 0x00c4, 0x5ca: 0x00c4, 0x5cb: 0x00c4, - 0x5cc: 0x00c2, 0x5cd: 0x00c4, 0x5ce: 0x00c2, 0x5cf: 0x00c4, 0x5d0: 0x00c2, 0x5d1: 0x00c2, - 0x5d2: 0x00c4, 0x5d3: 0x00c4, 0x5d4: 0x0080, 0x5d5: 0x00c4, 0x5d6: 0x00c3, 0x5d7: 0x00c3, - 0x5d8: 0x00c3, 0x5d9: 0x00c3, 0x5da: 0x00c3, 0x5db: 0x00c3, 0x5dc: 0x00c3, 0x5dd: 0x0040, - 0x5de: 0x0080, 0x5df: 0x00c3, 0x5e0: 0x00c3, 0x5e1: 0x00c3, 0x5e2: 0x00c3, 0x5e3: 0x00c3, - 0x5e4: 0x00c3, 0x5e5: 0x00c0, 0x5e6: 0x00c0, 0x5e7: 0x00c3, 0x5e8: 0x00c3, 0x5e9: 0x0080, - 0x5ea: 0x00c3, 0x5eb: 0x00c3, 0x5ec: 0x00c3, 0x5ed: 0x00c3, 0x5ee: 0x00c4, 0x5ef: 0x00c4, - 0x5f0: 0x0054, 0x5f1: 0x0054, 0x5f2: 0x0054, 0x5f3: 0x0054, 0x5f4: 0x0054, 0x5f5: 0x0054, - 0x5f6: 0x0054, 0x5f7: 0x0054, 0x5f8: 0x0054, 0x5f9: 0x0054, 0x5fa: 0x00c2, 0x5fb: 0x00c2, - 0x5fc: 0x00c2, 0x5fd: 0x00c0, 0x5fe: 0x00c0, 0x5ff: 0x00c2, - // Block 0x18, offset 0x600 - 0x600: 0x0080, 0x601: 0x0080, 0x602: 0x0080, 0x603: 0x0080, 0x604: 0x0080, 0x605: 0x0080, - 0x606: 0x0080, 0x607: 0x0080, 0x608: 0x0080, 0x609: 0x0080, 0x60a: 0x0080, 0x60b: 0x0080, - 0x60c: 0x0080, 0x60d: 0x0080, 0x60f: 0x0040, 0x610: 0x00c4, 0x611: 0x00c3, - 0x612: 0x00c2, 0x613: 0x00c2, 0x614: 0x00c2, 0x615: 0x00c4, 0x616: 0x00c4, 0x617: 0x00c4, - 0x618: 0x00c4, 0x619: 0x00c4, 0x61a: 0x00c2, 0x61b: 0x00c2, 0x61c: 0x00c2, 0x61d: 0x00c2, - 0x61e: 0x00c4, 0x61f: 0x00c2, 0x620: 0x00c2, 0x621: 0x00c2, 0x622: 0x00c2, 0x623: 0x00c2, - 0x624: 0x00c2, 0x625: 0x00c2, 0x626: 0x00c2, 0x627: 0x00c2, 0x628: 0x00c4, 0x629: 0x00c2, - 0x62a: 0x00c4, 0x62b: 0x00c2, 0x62c: 0x00c4, 0x62d: 0x00c2, 0x62e: 0x00c2, 0x62f: 0x00c4, - 0x630: 0x00c3, 0x631: 0x00c3, 0x632: 0x00c3, 0x633: 0x00c3, 0x634: 0x00c3, 0x635: 0x00c3, - 0x636: 0x00c3, 0x637: 0x00c3, 0x638: 0x00c3, 0x639: 0x00c3, 0x63a: 0x00c3, 0x63b: 0x00c3, - 0x63c: 0x00c3, 0x63d: 0x00c3, 0x63e: 0x00c3, 0x63f: 0x00c3, - // Block 0x19, offset 0x640 - 0x640: 0x00c3, 0x641: 0x00c3, 0x642: 0x00c3, 0x643: 0x00c3, 0x644: 0x00c3, 0x645: 0x00c3, - 0x646: 0x00c3, 0x647: 0x00c3, 0x648: 0x00c3, 0x649: 0x00c3, 0x64a: 0x00c3, - 0x64d: 0x00c4, 0x64e: 0x00c2, 0x64f: 0x00c2, 0x650: 0x00c2, 0x651: 0x00c2, - 0x652: 0x00c2, 0x653: 0x00c2, 0x654: 0x00c2, 0x655: 0x00c2, 0x656: 0x00c2, 0x657: 0x00c2, - 0x658: 0x00c2, 0x659: 0x00c4, 0x65a: 0x00c4, 0x65b: 0x00c4, 0x65c: 0x00c2, 0x65d: 0x00c2, - 0x65e: 0x00c2, 0x65f: 0x00c2, 0x660: 0x00c2, 0x661: 0x00c2, 0x662: 0x00c2, 0x663: 0x00c2, - 0x664: 0x00c2, 0x665: 0x00c2, 0x666: 0x00c2, 0x667: 0x00c2, 0x668: 0x00c2, 0x669: 0x00c2, - 0x66a: 0x00c2, 0x66b: 0x00c4, 0x66c: 0x00c4, 0x66d: 0x00c2, 0x66e: 0x00c2, 0x66f: 0x00c2, - 0x670: 0x00c2, 0x671: 0x00c4, 0x672: 0x00c2, 0x673: 0x00c4, 0x674: 0x00c4, 0x675: 0x00c2, - 0x676: 0x00c2, 0x677: 0x00c2, 0x678: 0x00c4, 0x679: 0x00c4, 0x67a: 0x00c2, 0x67b: 0x00c2, - 0x67c: 0x00c2, 0x67d: 0x00c2, 0x67e: 0x00c2, 0x67f: 0x00c2, - // Block 0x1a, offset 0x680 - 0x680: 0x00c0, 0x681: 0x00c0, 0x682: 0x00c0, 0x683: 0x00c0, 0x684: 0x00c0, 0x685: 
- [Elided: a long run of deleted (-) hunk lines from a vendored, auto-generated lookup table. The span covers the generated blocks from roughly offset 0x686 in Block 0x1a through "// Block 0x6b, offset 0x1ac0" and consists solely of machine-generated category values (0x0040, 0x0080, 0x0082, 0x0083, 0x0088, 0x00c0, 0x00c2, 0x00c3, 0x00c5, 0x00c6, 0x00c8) keyed by rune offset; the original per-line diff structure was lost in extraction and the hunk carries no hand-written changes.]
0x1ae8: 0x00c8, 0x1ae9: 0x00c8, - 0x1aea: 0x00c8, 0x1aeb: 0x0088, 0x1aec: 0x00c8, 0x1aed: 0x0088, 0x1aee: 0x0088, 0x1aef: 0x0088, - 0x1af2: 0x00c8, 0x1af3: 0x00c8, 0x1af4: 0x00c8, - 0x1af6: 0x00c8, 0x1af7: 0x00c8, 0x1af8: 0x00c8, 0x1af9: 0x0088, 0x1afa: 0x00c8, 0x1afb: 0x0088, - 0x1afc: 0x0088, 0x1afd: 0x0088, 0x1afe: 0x0088, - // Block 0x6c, offset 0x1b00 - 0x1b00: 0x0080, 0x1b01: 0x0080, 0x1b02: 0x0080, 0x1b03: 0x0080, 0x1b04: 0x0080, 0x1b05: 0x0080, - 0x1b06: 0x0080, 0x1b07: 0x0080, 0x1b08: 0x0080, 0x1b09: 0x0080, 0x1b0a: 0x0080, 0x1b0b: 0x0040, - 0x1b0c: 0x004d, 0x1b0d: 0x004e, 0x1b0e: 0x0040, 0x1b0f: 0x0040, 0x1b10: 0x0080, 0x1b11: 0x0080, - 0x1b12: 0x0080, 0x1b13: 0x0080, 0x1b14: 0x0080, 0x1b15: 0x0080, 0x1b16: 0x0080, 0x1b17: 0x0080, - 0x1b18: 0x0080, 0x1b19: 0x0080, 0x1b1a: 0x0080, 0x1b1b: 0x0080, 0x1b1c: 0x0080, 0x1b1d: 0x0080, - 0x1b1e: 0x0080, 0x1b1f: 0x0080, 0x1b20: 0x0080, 0x1b21: 0x0080, 0x1b22: 0x0080, 0x1b23: 0x0080, - 0x1b24: 0x0080, 0x1b25: 0x0080, 0x1b26: 0x0080, 0x1b27: 0x0080, 0x1b28: 0x0040, 0x1b29: 0x0040, - 0x1b2a: 0x0040, 0x1b2b: 0x0040, 0x1b2c: 0x0040, 0x1b2d: 0x0040, 0x1b2e: 0x0040, 0x1b2f: 0x0080, - 0x1b30: 0x0080, 0x1b31: 0x0080, 0x1b32: 0x0080, 0x1b33: 0x0080, 0x1b34: 0x0080, 0x1b35: 0x0080, - 0x1b36: 0x0080, 0x1b37: 0x0080, 0x1b38: 0x0080, 0x1b39: 0x0080, 0x1b3a: 0x0080, 0x1b3b: 0x0080, - 0x1b3c: 0x0080, 0x1b3d: 0x0080, 0x1b3e: 0x0080, 0x1b3f: 0x0080, - // Block 0x6d, offset 0x1b40 - 0x1b40: 0x0080, 0x1b41: 0x0080, 0x1b42: 0x0080, 0x1b43: 0x0080, 0x1b44: 0x0080, 0x1b45: 0x0080, - 0x1b46: 0x0080, 0x1b47: 0x0080, 0x1b48: 0x0080, 0x1b49: 0x0080, 0x1b4a: 0x0080, 0x1b4b: 0x0080, - 0x1b4c: 0x0080, 0x1b4d: 0x0080, 0x1b4e: 0x0080, 0x1b4f: 0x0080, 0x1b50: 0x0080, 0x1b51: 0x0080, - 0x1b52: 0x0080, 0x1b53: 0x0080, 0x1b54: 0x0080, 0x1b55: 0x0080, 0x1b56: 0x0080, 0x1b57: 0x0080, - 0x1b58: 0x0080, 0x1b59: 0x0080, 0x1b5a: 0x0080, 0x1b5b: 0x0080, 0x1b5c: 0x0080, 0x1b5d: 0x0080, - 0x1b5e: 0x0080, 0x1b5f: 0x0080, 0x1b60: 0x0040, 0x1b61: 0x0040, 0x1b62: 0x0040, 0x1b63: 0x0040, - 0x1b64: 0x0040, 0x1b66: 0x0040, 0x1b67: 0x0040, 0x1b68: 0x0040, 0x1b69: 0x0040, - 0x1b6a: 0x0040, 0x1b6b: 0x0040, 0x1b6c: 0x0040, 0x1b6d: 0x0040, 0x1b6e: 0x0040, 0x1b6f: 0x0040, - 0x1b70: 0x0080, 0x1b71: 0x0080, 0x1b74: 0x0080, 0x1b75: 0x0080, - 0x1b76: 0x0080, 0x1b77: 0x0080, 0x1b78: 0x0080, 0x1b79: 0x0080, 0x1b7a: 0x0080, 0x1b7b: 0x0080, - 0x1b7c: 0x0080, 0x1b7d: 0x0080, 0x1b7e: 0x0080, 0x1b7f: 0x0080, - // Block 0x6e, offset 0x1b80 - 0x1b80: 0x0080, 0x1b81: 0x0080, 0x1b82: 0x0080, 0x1b83: 0x0080, 0x1b84: 0x0080, 0x1b85: 0x0080, - 0x1b86: 0x0080, 0x1b87: 0x0080, 0x1b88: 0x0080, 0x1b89: 0x0080, 0x1b8a: 0x0080, 0x1b8b: 0x0080, - 0x1b8c: 0x0080, 0x1b8d: 0x0080, 0x1b8e: 0x0080, 0x1b90: 0x0080, 0x1b91: 0x0080, - 0x1b92: 0x0080, 0x1b93: 0x0080, 0x1b94: 0x0080, 0x1b95: 0x0080, 0x1b96: 0x0080, 0x1b97: 0x0080, - 0x1b98: 0x0080, 0x1b99: 0x0080, 0x1b9a: 0x0080, 0x1b9b: 0x0080, 0x1b9c: 0x0080, - 0x1ba0: 0x0080, 0x1ba1: 0x0080, 0x1ba2: 0x0080, 0x1ba3: 0x0080, - 0x1ba4: 0x0080, 0x1ba5: 0x0080, 0x1ba6: 0x0080, 0x1ba7: 0x0080, 0x1ba8: 0x0080, 0x1ba9: 0x0080, - 0x1baa: 0x0080, 0x1bab: 0x0080, 0x1bac: 0x0080, 0x1bad: 0x0080, 0x1bae: 0x0080, 0x1baf: 0x0080, - 0x1bb0: 0x0080, 0x1bb1: 0x0080, 0x1bb2: 0x0080, 0x1bb3: 0x0080, 0x1bb4: 0x0080, 0x1bb5: 0x0080, - 0x1bb6: 0x0080, 0x1bb7: 0x0080, 0x1bb8: 0x0080, 0x1bb9: 0x0080, 0x1bba: 0x0080, 0x1bbb: 0x0080, - 0x1bbc: 0x0080, 0x1bbd: 0x0080, 0x1bbe: 0x0080, - // Block 0x6f, offset 0x1bc0 - 0x1bd0: 0x00c3, 0x1bd1: 0x00c3, - 0x1bd2: 0x00c3, 0x1bd3: 0x00c3, 0x1bd4: 0x00c3, 0x1bd5: 
0x00c3, 0x1bd6: 0x00c3, 0x1bd7: 0x00c3, - 0x1bd8: 0x00c3, 0x1bd9: 0x00c3, 0x1bda: 0x00c3, 0x1bdb: 0x00c3, 0x1bdc: 0x00c3, 0x1bdd: 0x0083, - 0x1bde: 0x0083, 0x1bdf: 0x0083, 0x1be0: 0x0083, 0x1be1: 0x00c3, 0x1be2: 0x0083, 0x1be3: 0x0083, - 0x1be4: 0x0083, 0x1be5: 0x00c3, 0x1be6: 0x00c3, 0x1be7: 0x00c3, 0x1be8: 0x00c3, 0x1be9: 0x00c3, - 0x1bea: 0x00c3, 0x1beb: 0x00c3, 0x1bec: 0x00c3, 0x1bed: 0x00c3, 0x1bee: 0x00c3, 0x1bef: 0x00c3, - 0x1bf0: 0x00c3, - // Block 0x70, offset 0x1c00 - 0x1c00: 0x0080, 0x1c01: 0x0080, 0x1c02: 0x0080, 0x1c03: 0x0080, 0x1c04: 0x0080, 0x1c05: 0x0080, - 0x1c06: 0x0080, 0x1c07: 0x0080, 0x1c08: 0x0080, 0x1c09: 0x0080, 0x1c0a: 0x0080, 0x1c0b: 0x0080, - 0x1c0c: 0x0080, 0x1c0d: 0x0080, 0x1c0e: 0x0080, 0x1c0f: 0x0080, 0x1c10: 0x0080, 0x1c11: 0x0080, - 0x1c12: 0x0080, 0x1c13: 0x0080, 0x1c14: 0x0080, 0x1c15: 0x0080, 0x1c16: 0x0080, 0x1c17: 0x0080, - 0x1c18: 0x0080, 0x1c19: 0x0080, 0x1c1a: 0x0080, 0x1c1b: 0x0080, 0x1c1c: 0x0080, 0x1c1d: 0x0080, - 0x1c1e: 0x0080, 0x1c1f: 0x0080, 0x1c20: 0x0080, 0x1c21: 0x0080, 0x1c22: 0x0080, 0x1c23: 0x0080, - 0x1c24: 0x0080, 0x1c25: 0x0080, 0x1c26: 0x0088, 0x1c27: 0x0080, 0x1c28: 0x0080, 0x1c29: 0x0080, - 0x1c2a: 0x0080, 0x1c2b: 0x0080, 0x1c2c: 0x0080, 0x1c2d: 0x0080, 0x1c2e: 0x0080, 0x1c2f: 0x0080, - 0x1c30: 0x0080, 0x1c31: 0x0080, 0x1c32: 0x00c0, 0x1c33: 0x0080, 0x1c34: 0x0080, 0x1c35: 0x0080, - 0x1c36: 0x0080, 0x1c37: 0x0080, 0x1c38: 0x0080, 0x1c39: 0x0080, 0x1c3a: 0x0080, 0x1c3b: 0x0080, - 0x1c3c: 0x0080, 0x1c3d: 0x0080, 0x1c3e: 0x0080, 0x1c3f: 0x0080, - // Block 0x71, offset 0x1c40 - 0x1c40: 0x0080, 0x1c41: 0x0080, 0x1c42: 0x0080, 0x1c43: 0x0080, 0x1c44: 0x0080, 0x1c45: 0x0080, - 0x1c46: 0x0080, 0x1c47: 0x0080, 0x1c48: 0x0080, 0x1c49: 0x0080, 0x1c4a: 0x0080, 0x1c4b: 0x0080, - 0x1c4c: 0x0080, 0x1c4d: 0x0080, 0x1c4e: 0x00c0, 0x1c4f: 0x0080, 0x1c50: 0x0080, 0x1c51: 0x0080, - 0x1c52: 0x0080, 0x1c53: 0x0080, 0x1c54: 0x0080, 0x1c55: 0x0080, 0x1c56: 0x0080, 0x1c57: 0x0080, - 0x1c58: 0x0080, 0x1c59: 0x0080, 0x1c5a: 0x0080, 0x1c5b: 0x0080, 0x1c5c: 0x0080, 0x1c5d: 0x0080, - 0x1c5e: 0x0080, 0x1c5f: 0x0080, 0x1c60: 0x0080, 0x1c61: 0x0080, 0x1c62: 0x0080, 0x1c63: 0x0080, - 0x1c64: 0x0080, 0x1c65: 0x0080, 0x1c66: 0x0080, 0x1c67: 0x0080, 0x1c68: 0x0080, 0x1c69: 0x0080, - 0x1c6a: 0x0080, 0x1c6b: 0x0080, 0x1c6c: 0x0080, 0x1c6d: 0x0080, 0x1c6e: 0x0080, 0x1c6f: 0x0080, - 0x1c70: 0x0080, 0x1c71: 0x0080, 0x1c72: 0x0080, 0x1c73: 0x0080, 0x1c74: 0x0080, 0x1c75: 0x0080, - 0x1c76: 0x0080, 0x1c77: 0x0080, 0x1c78: 0x0080, 0x1c79: 0x0080, 0x1c7a: 0x0080, 0x1c7b: 0x0080, - 0x1c7c: 0x0080, 0x1c7d: 0x0080, 0x1c7e: 0x0080, 0x1c7f: 0x0080, - // Block 0x72, offset 0x1c80 - 0x1c80: 0x0080, 0x1c81: 0x0080, 0x1c82: 0x0080, 0x1c83: 0x00c0, 0x1c84: 0x00c0, 0x1c85: 0x0080, - 0x1c86: 0x0080, 0x1c87: 0x0080, 0x1c88: 0x0080, 0x1c89: 0x0080, 0x1c8a: 0x0080, 0x1c8b: 0x0080, - 0x1c90: 0x0080, 0x1c91: 0x0080, - 0x1c92: 0x0080, 0x1c93: 0x0080, 0x1c94: 0x0080, 0x1c95: 0x0080, 0x1c96: 0x0080, 0x1c97: 0x0080, - 0x1c98: 0x0080, 0x1c99: 0x0080, 0x1c9a: 0x0080, 0x1c9b: 0x0080, 0x1c9c: 0x0080, 0x1c9d: 0x0080, - 0x1c9e: 0x0080, 0x1c9f: 0x0080, 0x1ca0: 0x0080, 0x1ca1: 0x0080, 0x1ca2: 0x0080, 0x1ca3: 0x0080, - 0x1ca4: 0x0080, 0x1ca5: 0x0080, 0x1ca6: 0x0080, 0x1ca7: 0x0080, 0x1ca8: 0x0080, 0x1ca9: 0x0080, - 0x1caa: 0x0080, 0x1cab: 0x0080, 0x1cac: 0x0080, 0x1cad: 0x0080, 0x1cae: 0x0080, 0x1caf: 0x0080, - 0x1cb0: 0x0080, 0x1cb1: 0x0080, 0x1cb2: 0x0080, 0x1cb3: 0x0080, 0x1cb4: 0x0080, 0x1cb5: 0x0080, - 0x1cb6: 0x0080, 0x1cb7: 0x0080, 0x1cb8: 0x0080, 0x1cb9: 0x0080, 0x1cba: 0x0080, 0x1cbb: 0x0080, - 
0x1cbc: 0x0080, 0x1cbd: 0x0080, 0x1cbe: 0x0080, 0x1cbf: 0x0080, - // Block 0x73, offset 0x1cc0 - 0x1cc0: 0x0080, 0x1cc1: 0x0080, 0x1cc2: 0x0080, 0x1cc3: 0x0080, 0x1cc4: 0x0080, 0x1cc5: 0x0080, - 0x1cc6: 0x0080, 0x1cc7: 0x0080, 0x1cc8: 0x0080, 0x1cc9: 0x0080, 0x1cca: 0x0080, 0x1ccb: 0x0080, - 0x1ccc: 0x0080, 0x1ccd: 0x0080, 0x1cce: 0x0080, 0x1ccf: 0x0080, 0x1cd0: 0x0080, 0x1cd1: 0x0080, - 0x1cd2: 0x0080, 0x1cd3: 0x0080, 0x1cd4: 0x0080, 0x1cd5: 0x0080, 0x1cd6: 0x0080, 0x1cd7: 0x0080, - 0x1cd8: 0x0080, 0x1cd9: 0x0080, 0x1cda: 0x0080, 0x1cdb: 0x0080, 0x1cdc: 0x0080, 0x1cdd: 0x0080, - 0x1cde: 0x0080, 0x1cdf: 0x0080, 0x1ce0: 0x0080, 0x1ce1: 0x0080, 0x1ce2: 0x0080, 0x1ce3: 0x0080, - 0x1ce4: 0x0080, 0x1ce5: 0x0080, 0x1ce6: 0x0080, 0x1ce7: 0x0080, 0x1ce8: 0x0080, 0x1ce9: 0x0080, - 0x1cea: 0x0080, 0x1ceb: 0x0080, 0x1cec: 0x0080, 0x1ced: 0x0080, 0x1cee: 0x0080, 0x1cef: 0x0080, - 0x1cf0: 0x0080, 0x1cf1: 0x0080, 0x1cf2: 0x0080, 0x1cf3: 0x0080, 0x1cf4: 0x0080, 0x1cf5: 0x0080, - 0x1cf6: 0x0080, 0x1cf7: 0x0080, 0x1cf8: 0x0080, 0x1cf9: 0x0080, 0x1cfa: 0x0080, 0x1cfb: 0x0080, - 0x1cfc: 0x0080, 0x1cfd: 0x0080, 0x1cfe: 0x0080, 0x1cff: 0x0080, - // Block 0x74, offset 0x1d00 - 0x1d00: 0x0080, 0x1d01: 0x0080, 0x1d02: 0x0080, 0x1d03: 0x0080, 0x1d04: 0x0080, 0x1d05: 0x0080, - 0x1d06: 0x0080, 0x1d07: 0x0080, 0x1d08: 0x0080, 0x1d09: 0x0080, 0x1d0a: 0x0080, 0x1d0b: 0x0080, - 0x1d0c: 0x0080, 0x1d0d: 0x0080, 0x1d0e: 0x0080, 0x1d0f: 0x0080, 0x1d10: 0x0080, 0x1d11: 0x0080, - 0x1d12: 0x0080, 0x1d13: 0x0080, 0x1d14: 0x0080, 0x1d15: 0x0080, 0x1d16: 0x0080, 0x1d17: 0x0080, - 0x1d18: 0x0080, 0x1d19: 0x0080, 0x1d1a: 0x0080, 0x1d1b: 0x0080, 0x1d1c: 0x0080, 0x1d1d: 0x0080, - 0x1d1e: 0x0080, 0x1d1f: 0x0080, 0x1d20: 0x0080, 0x1d21: 0x0080, 0x1d22: 0x0080, 0x1d23: 0x0080, - 0x1d24: 0x0080, 0x1d25: 0x0080, 0x1d26: 0x0080, 0x1d27: 0x0080, 0x1d28: 0x0080, 0x1d29: 0x0080, - 0x1d2a: 0x0080, 0x1d2b: 0x0080, 0x1d2c: 0x0080, 0x1d2d: 0x0080, 0x1d2e: 0x0080, 0x1d2f: 0x0080, - 0x1d30: 0x0080, 0x1d31: 0x0080, 0x1d32: 0x0080, 0x1d33: 0x0080, 0x1d34: 0x0080, 0x1d35: 0x0080, - 0x1d36: 0x0080, 0x1d37: 0x0080, 0x1d38: 0x0080, 0x1d39: 0x0080, 0x1d3a: 0x0080, 0x1d3b: 0x0080, - 0x1d3c: 0x0080, 0x1d3d: 0x0080, 0x1d3e: 0x0080, - // Block 0x75, offset 0x1d40 - 0x1d40: 0x0080, 0x1d41: 0x0080, 0x1d42: 0x0080, 0x1d43: 0x0080, 0x1d44: 0x0080, 0x1d45: 0x0080, - 0x1d46: 0x0080, 0x1d47: 0x0080, 0x1d48: 0x0080, 0x1d49: 0x0080, 0x1d4a: 0x0080, 0x1d4b: 0x0080, - 0x1d4c: 0x0080, 0x1d4d: 0x0080, 0x1d4e: 0x0080, 0x1d4f: 0x0080, 0x1d50: 0x0080, 0x1d51: 0x0080, - 0x1d52: 0x0080, 0x1d53: 0x0080, 0x1d54: 0x0080, 0x1d55: 0x0080, 0x1d56: 0x0080, 0x1d57: 0x0080, - 0x1d58: 0x0080, 0x1d59: 0x0080, 0x1d5a: 0x0080, 0x1d5b: 0x0080, 0x1d5c: 0x0080, 0x1d5d: 0x0080, - 0x1d5e: 0x0080, 0x1d5f: 0x0080, 0x1d60: 0x0080, 0x1d61: 0x0080, 0x1d62: 0x0080, 0x1d63: 0x0080, - 0x1d64: 0x0080, 0x1d65: 0x0080, 0x1d66: 0x0080, - // Block 0x76, offset 0x1d80 - 0x1d80: 0x0080, 0x1d81: 0x0080, 0x1d82: 0x0080, 0x1d83: 0x0080, 0x1d84: 0x0080, 0x1d85: 0x0080, - 0x1d86: 0x0080, 0x1d87: 0x0080, 0x1d88: 0x0080, 0x1d89: 0x0080, 0x1d8a: 0x0080, - 0x1da0: 0x0080, 0x1da1: 0x0080, 0x1da2: 0x0080, 0x1da3: 0x0080, - 0x1da4: 0x0080, 0x1da5: 0x0080, 0x1da6: 0x0080, 0x1da7: 0x0080, 0x1da8: 0x0080, 0x1da9: 0x0080, - 0x1daa: 0x0080, 0x1dab: 0x0080, 0x1dac: 0x0080, 0x1dad: 0x0080, 0x1dae: 0x0080, 0x1daf: 0x0080, - 0x1db0: 0x0080, 0x1db1: 0x0080, 0x1db2: 0x0080, 0x1db3: 0x0080, 0x1db4: 0x0080, 0x1db5: 0x0080, - 0x1db6: 0x0080, 0x1db7: 0x0080, 0x1db8: 0x0080, 0x1db9: 0x0080, 0x1dba: 0x0080, 0x1dbb: 0x0080, - 0x1dbc: 
0x0080, 0x1dbd: 0x0080, 0x1dbe: 0x0080, 0x1dbf: 0x0080, - // Block 0x77, offset 0x1dc0 - 0x1dc0: 0x0080, 0x1dc1: 0x0080, 0x1dc2: 0x0080, 0x1dc3: 0x0080, 0x1dc4: 0x0080, 0x1dc5: 0x0080, - 0x1dc6: 0x0080, 0x1dc7: 0x0080, 0x1dc8: 0x0080, 0x1dc9: 0x0080, 0x1dca: 0x0080, 0x1dcb: 0x0080, - 0x1dcc: 0x0080, 0x1dcd: 0x0080, 0x1dce: 0x0080, 0x1dcf: 0x0080, 0x1dd0: 0x0080, 0x1dd1: 0x0080, - 0x1dd2: 0x0080, 0x1dd3: 0x0080, 0x1dd4: 0x0080, 0x1dd5: 0x0080, 0x1dd6: 0x0080, 0x1dd7: 0x0080, - 0x1dd8: 0x0080, 0x1dd9: 0x0080, 0x1dda: 0x0080, 0x1ddb: 0x0080, 0x1ddc: 0x0080, 0x1ddd: 0x0080, - 0x1dde: 0x0080, 0x1ddf: 0x0080, 0x1de0: 0x0080, 0x1de1: 0x0080, 0x1de2: 0x0080, 0x1de3: 0x0080, - 0x1de4: 0x0080, 0x1de5: 0x0080, 0x1de6: 0x0080, 0x1de7: 0x0080, 0x1de8: 0x0080, 0x1de9: 0x0080, - 0x1dea: 0x0080, 0x1deb: 0x0080, 0x1dec: 0x0080, 0x1ded: 0x0080, 0x1dee: 0x0080, 0x1def: 0x0080, - 0x1df0: 0x0080, 0x1df1: 0x0080, 0x1df2: 0x0080, 0x1df3: 0x0080, - 0x1df6: 0x0080, 0x1df7: 0x0080, 0x1df8: 0x0080, 0x1df9: 0x0080, 0x1dfa: 0x0080, 0x1dfb: 0x0080, - 0x1dfc: 0x0080, 0x1dfd: 0x0080, 0x1dfe: 0x0080, 0x1dff: 0x0080, - // Block 0x78, offset 0x1e00 - 0x1e00: 0x0080, 0x1e01: 0x0080, 0x1e02: 0x0080, 0x1e03: 0x0080, 0x1e04: 0x0080, 0x1e05: 0x0080, - 0x1e06: 0x0080, 0x1e07: 0x0080, 0x1e08: 0x0080, 0x1e09: 0x0080, 0x1e0a: 0x0080, 0x1e0b: 0x0080, - 0x1e0c: 0x0080, 0x1e0d: 0x0080, 0x1e0e: 0x0080, 0x1e0f: 0x0080, 0x1e10: 0x0080, 0x1e11: 0x0080, - 0x1e12: 0x0080, 0x1e13: 0x0080, 0x1e14: 0x0080, 0x1e15: 0x0080, - 0x1e18: 0x0080, 0x1e19: 0x0080, 0x1e1a: 0x0080, 0x1e1b: 0x0080, 0x1e1c: 0x0080, 0x1e1d: 0x0080, - 0x1e1e: 0x0080, 0x1e1f: 0x0080, 0x1e20: 0x0080, 0x1e21: 0x0080, 0x1e22: 0x0080, 0x1e23: 0x0080, - 0x1e24: 0x0080, 0x1e25: 0x0080, 0x1e26: 0x0080, 0x1e27: 0x0080, 0x1e28: 0x0080, 0x1e29: 0x0080, - 0x1e2a: 0x0080, 0x1e2b: 0x0080, 0x1e2c: 0x0080, 0x1e2d: 0x0080, 0x1e2e: 0x0080, 0x1e2f: 0x0080, - 0x1e30: 0x0080, 0x1e31: 0x0080, 0x1e32: 0x0080, 0x1e33: 0x0080, 0x1e34: 0x0080, 0x1e35: 0x0080, - 0x1e36: 0x0080, 0x1e37: 0x0080, 0x1e38: 0x0080, 0x1e39: 0x0080, - 0x1e3d: 0x0080, 0x1e3e: 0x0080, 0x1e3f: 0x0080, - // Block 0x79, offset 0x1e40 - 0x1e40: 0x0080, 0x1e41: 0x0080, 0x1e42: 0x0080, 0x1e43: 0x0080, 0x1e44: 0x0080, 0x1e45: 0x0080, - 0x1e46: 0x0080, 0x1e47: 0x0080, 0x1e48: 0x0080, 0x1e4a: 0x0080, 0x1e4b: 0x0080, - 0x1e4c: 0x0080, 0x1e4d: 0x0080, 0x1e4e: 0x0080, 0x1e4f: 0x0080, 0x1e50: 0x0080, 0x1e51: 0x0080, - 0x1e6c: 0x0080, 0x1e6d: 0x0080, 0x1e6e: 0x0080, 0x1e6f: 0x0080, - // Block 0x7a, offset 0x1e80 - 0x1e80: 0x00c0, 0x1e81: 0x00c0, 0x1e82: 0x00c0, 0x1e83: 0x00c0, 0x1e84: 0x00c0, 0x1e85: 0x00c0, - 0x1e86: 0x00c0, 0x1e87: 0x00c0, 0x1e88: 0x00c0, 0x1e89: 0x00c0, 0x1e8a: 0x00c0, 0x1e8b: 0x00c0, - 0x1e8c: 0x00c0, 0x1e8d: 0x00c0, 0x1e8e: 0x00c0, 0x1e8f: 0x00c0, 0x1e90: 0x00c0, 0x1e91: 0x00c0, - 0x1e92: 0x00c0, 0x1e93: 0x00c0, 0x1e94: 0x00c0, 0x1e95: 0x00c0, 0x1e96: 0x00c0, 0x1e97: 0x00c0, - 0x1e98: 0x00c0, 0x1e99: 0x00c0, 0x1e9a: 0x00c0, 0x1e9b: 0x00c0, 0x1e9c: 0x00c0, 0x1e9d: 0x00c0, - 0x1e9e: 0x00c0, 0x1e9f: 0x00c0, 0x1ea0: 0x00c0, 0x1ea1: 0x00c0, 0x1ea2: 0x00c0, 0x1ea3: 0x00c0, - 0x1ea4: 0x00c0, 0x1ea5: 0x00c0, 0x1ea6: 0x00c0, 0x1ea7: 0x00c0, 0x1ea8: 0x00c0, 0x1ea9: 0x00c0, - 0x1eaa: 0x00c0, 0x1eab: 0x00c0, 0x1eac: 0x00c0, 0x1ead: 0x00c0, 0x1eae: 0x00c0, - 0x1eb0: 0x00c0, 0x1eb1: 0x00c0, 0x1eb2: 0x00c0, 0x1eb3: 0x00c0, 0x1eb4: 0x00c0, 0x1eb5: 0x00c0, - 0x1eb6: 0x00c0, 0x1eb7: 0x00c0, 0x1eb8: 0x00c0, 0x1eb9: 0x00c0, 0x1eba: 0x00c0, 0x1ebb: 0x00c0, - 0x1ebc: 0x00c0, 0x1ebd: 0x00c0, 0x1ebe: 0x00c0, 0x1ebf: 0x00c0, - // Block 0x7b, 
offset 0x1ec0 - 0x1ec0: 0x00c0, 0x1ec1: 0x00c0, 0x1ec2: 0x00c0, 0x1ec3: 0x00c0, 0x1ec4: 0x00c0, 0x1ec5: 0x00c0, - 0x1ec6: 0x00c0, 0x1ec7: 0x00c0, 0x1ec8: 0x00c0, 0x1ec9: 0x00c0, 0x1eca: 0x00c0, 0x1ecb: 0x00c0, - 0x1ecc: 0x00c0, 0x1ecd: 0x00c0, 0x1ece: 0x00c0, 0x1ecf: 0x00c0, 0x1ed0: 0x00c0, 0x1ed1: 0x00c0, - 0x1ed2: 0x00c0, 0x1ed3: 0x00c0, 0x1ed4: 0x00c0, 0x1ed5: 0x00c0, 0x1ed6: 0x00c0, 0x1ed7: 0x00c0, - 0x1ed8: 0x00c0, 0x1ed9: 0x00c0, 0x1eda: 0x00c0, 0x1edb: 0x00c0, 0x1edc: 0x00c0, 0x1edd: 0x00c0, - 0x1ede: 0x00c0, 0x1ee0: 0x00c0, 0x1ee1: 0x00c0, 0x1ee2: 0x00c0, 0x1ee3: 0x00c0, - 0x1ee4: 0x00c0, 0x1ee5: 0x00c0, 0x1ee6: 0x00c0, 0x1ee7: 0x00c0, 0x1ee8: 0x00c0, 0x1ee9: 0x00c0, - 0x1eea: 0x00c0, 0x1eeb: 0x00c0, 0x1eec: 0x00c0, 0x1eed: 0x00c0, 0x1eee: 0x00c0, 0x1eef: 0x00c0, - 0x1ef0: 0x00c0, 0x1ef1: 0x00c0, 0x1ef2: 0x00c0, 0x1ef3: 0x00c0, 0x1ef4: 0x00c0, 0x1ef5: 0x00c0, - 0x1ef6: 0x00c0, 0x1ef7: 0x00c0, 0x1ef8: 0x00c0, 0x1ef9: 0x00c0, 0x1efa: 0x00c0, 0x1efb: 0x00c0, - 0x1efc: 0x0080, 0x1efd: 0x0080, 0x1efe: 0x00c0, 0x1eff: 0x00c0, - // Block 0x7c, offset 0x1f00 - 0x1f00: 0x00c0, 0x1f01: 0x00c0, 0x1f02: 0x00c0, 0x1f03: 0x00c0, 0x1f04: 0x00c0, 0x1f05: 0x00c0, - 0x1f06: 0x00c0, 0x1f07: 0x00c0, 0x1f08: 0x00c0, 0x1f09: 0x00c0, 0x1f0a: 0x00c0, 0x1f0b: 0x00c0, - 0x1f0c: 0x00c0, 0x1f0d: 0x00c0, 0x1f0e: 0x00c0, 0x1f0f: 0x00c0, 0x1f10: 0x00c0, 0x1f11: 0x00c0, - 0x1f12: 0x00c0, 0x1f13: 0x00c0, 0x1f14: 0x00c0, 0x1f15: 0x00c0, 0x1f16: 0x00c0, 0x1f17: 0x00c0, - 0x1f18: 0x00c0, 0x1f19: 0x00c0, 0x1f1a: 0x00c0, 0x1f1b: 0x00c0, 0x1f1c: 0x00c0, 0x1f1d: 0x00c0, - 0x1f1e: 0x00c0, 0x1f1f: 0x00c0, 0x1f20: 0x00c0, 0x1f21: 0x00c0, 0x1f22: 0x00c0, 0x1f23: 0x00c0, - 0x1f24: 0x00c0, 0x1f25: 0x0080, 0x1f26: 0x0080, 0x1f27: 0x0080, 0x1f28: 0x0080, 0x1f29: 0x0080, - 0x1f2a: 0x0080, 0x1f2b: 0x00c0, 0x1f2c: 0x00c0, 0x1f2d: 0x00c0, 0x1f2e: 0x00c0, 0x1f2f: 0x00c3, - 0x1f30: 0x00c3, 0x1f31: 0x00c3, 0x1f32: 0x00c0, 0x1f33: 0x00c0, - 0x1f39: 0x0080, 0x1f3a: 0x0080, 0x1f3b: 0x0080, - 0x1f3c: 0x0080, 0x1f3d: 0x0080, 0x1f3e: 0x0080, 0x1f3f: 0x0080, - // Block 0x7d, offset 0x1f40 - 0x1f40: 0x00c0, 0x1f41: 0x00c0, 0x1f42: 0x00c0, 0x1f43: 0x00c0, 0x1f44: 0x00c0, 0x1f45: 0x00c0, - 0x1f46: 0x00c0, 0x1f47: 0x00c0, 0x1f48: 0x00c0, 0x1f49: 0x00c0, 0x1f4a: 0x00c0, 0x1f4b: 0x00c0, - 0x1f4c: 0x00c0, 0x1f4d: 0x00c0, 0x1f4e: 0x00c0, 0x1f4f: 0x00c0, 0x1f50: 0x00c0, 0x1f51: 0x00c0, - 0x1f52: 0x00c0, 0x1f53: 0x00c0, 0x1f54: 0x00c0, 0x1f55: 0x00c0, 0x1f56: 0x00c0, 0x1f57: 0x00c0, - 0x1f58: 0x00c0, 0x1f59: 0x00c0, 0x1f5a: 0x00c0, 0x1f5b: 0x00c0, 0x1f5c: 0x00c0, 0x1f5d: 0x00c0, - 0x1f5e: 0x00c0, 0x1f5f: 0x00c0, 0x1f60: 0x00c0, 0x1f61: 0x00c0, 0x1f62: 0x00c0, 0x1f63: 0x00c0, - 0x1f64: 0x00c0, 0x1f65: 0x00c0, 0x1f67: 0x00c0, - 0x1f6d: 0x00c0, - 0x1f70: 0x00c0, 0x1f71: 0x00c0, 0x1f72: 0x00c0, 0x1f73: 0x00c0, 0x1f74: 0x00c0, 0x1f75: 0x00c0, - 0x1f76: 0x00c0, 0x1f77: 0x00c0, 0x1f78: 0x00c0, 0x1f79: 0x00c0, 0x1f7a: 0x00c0, 0x1f7b: 0x00c0, - 0x1f7c: 0x00c0, 0x1f7d: 0x00c0, 0x1f7e: 0x00c0, 0x1f7f: 0x00c0, - // Block 0x7e, offset 0x1f80 - 0x1f80: 0x00c0, 0x1f81: 0x00c0, 0x1f82: 0x00c0, 0x1f83: 0x00c0, 0x1f84: 0x00c0, 0x1f85: 0x00c0, - 0x1f86: 0x00c0, 0x1f87: 0x00c0, 0x1f88: 0x00c0, 0x1f89: 0x00c0, 0x1f8a: 0x00c0, 0x1f8b: 0x00c0, - 0x1f8c: 0x00c0, 0x1f8d: 0x00c0, 0x1f8e: 0x00c0, 0x1f8f: 0x00c0, 0x1f90: 0x00c0, 0x1f91: 0x00c0, - 0x1f92: 0x00c0, 0x1f93: 0x00c0, 0x1f94: 0x00c0, 0x1f95: 0x00c0, 0x1f96: 0x00c0, 0x1f97: 0x00c0, - 0x1f98: 0x00c0, 0x1f99: 0x00c0, 0x1f9a: 0x00c0, 0x1f9b: 0x00c0, 0x1f9c: 0x00c0, 0x1f9d: 0x00c0, - 0x1f9e: 0x00c0, 0x1f9f: 0x00c0, 0x1fa0: 
0x00c0, 0x1fa1: 0x00c0, 0x1fa2: 0x00c0, 0x1fa3: 0x00c0, - 0x1fa4: 0x00c0, 0x1fa5: 0x00c0, 0x1fa6: 0x00c0, 0x1fa7: 0x00c0, - 0x1faf: 0x0080, - 0x1fb0: 0x0080, - 0x1fbf: 0x00c6, - // Block 0x7f, offset 0x1fc0 - 0x1fc0: 0x00c0, 0x1fc1: 0x00c0, 0x1fc2: 0x00c0, 0x1fc3: 0x00c0, 0x1fc4: 0x00c0, 0x1fc5: 0x00c0, - 0x1fc6: 0x00c0, 0x1fc7: 0x00c0, 0x1fc8: 0x00c0, 0x1fc9: 0x00c0, 0x1fca: 0x00c0, 0x1fcb: 0x00c0, - 0x1fcc: 0x00c0, 0x1fcd: 0x00c0, 0x1fce: 0x00c0, 0x1fcf: 0x00c0, 0x1fd0: 0x00c0, 0x1fd1: 0x00c0, - 0x1fd2: 0x00c0, 0x1fd3: 0x00c0, 0x1fd4: 0x00c0, 0x1fd5: 0x00c0, 0x1fd6: 0x00c0, - 0x1fe0: 0x00c0, 0x1fe1: 0x00c0, 0x1fe2: 0x00c0, 0x1fe3: 0x00c0, - 0x1fe4: 0x00c0, 0x1fe5: 0x00c0, 0x1fe6: 0x00c0, 0x1fe8: 0x00c0, 0x1fe9: 0x00c0, - 0x1fea: 0x00c0, 0x1feb: 0x00c0, 0x1fec: 0x00c0, 0x1fed: 0x00c0, 0x1fee: 0x00c0, - 0x1ff0: 0x00c0, 0x1ff1: 0x00c0, 0x1ff2: 0x00c0, 0x1ff3: 0x00c0, 0x1ff4: 0x00c0, 0x1ff5: 0x00c0, - 0x1ff6: 0x00c0, 0x1ff8: 0x00c0, 0x1ff9: 0x00c0, 0x1ffa: 0x00c0, 0x1ffb: 0x00c0, - 0x1ffc: 0x00c0, 0x1ffd: 0x00c0, 0x1ffe: 0x00c0, - // Block 0x80, offset 0x2000 - 0x2000: 0x00c0, 0x2001: 0x00c0, 0x2002: 0x00c0, 0x2003: 0x00c0, 0x2004: 0x00c0, 0x2005: 0x00c0, - 0x2006: 0x00c0, 0x2008: 0x00c0, 0x2009: 0x00c0, 0x200a: 0x00c0, 0x200b: 0x00c0, - 0x200c: 0x00c0, 0x200d: 0x00c0, 0x200e: 0x00c0, 0x2010: 0x00c0, 0x2011: 0x00c0, - 0x2012: 0x00c0, 0x2013: 0x00c0, 0x2014: 0x00c0, 0x2015: 0x00c0, 0x2016: 0x00c0, - 0x2018: 0x00c0, 0x2019: 0x00c0, 0x201a: 0x00c0, 0x201b: 0x00c0, 0x201c: 0x00c0, 0x201d: 0x00c0, - 0x201e: 0x00c0, 0x2020: 0x00c3, 0x2021: 0x00c3, 0x2022: 0x00c3, 0x2023: 0x00c3, - 0x2024: 0x00c3, 0x2025: 0x00c3, 0x2026: 0x00c3, 0x2027: 0x00c3, 0x2028: 0x00c3, 0x2029: 0x00c3, - 0x202a: 0x00c3, 0x202b: 0x00c3, 0x202c: 0x00c3, 0x202d: 0x00c3, 0x202e: 0x00c3, 0x202f: 0x00c3, - 0x2030: 0x00c3, 0x2031: 0x00c3, 0x2032: 0x00c3, 0x2033: 0x00c3, 0x2034: 0x00c3, 0x2035: 0x00c3, - 0x2036: 0x00c3, 0x2037: 0x00c3, 0x2038: 0x00c3, 0x2039: 0x00c3, 0x203a: 0x00c3, 0x203b: 0x00c3, - 0x203c: 0x00c3, 0x203d: 0x00c3, 0x203e: 0x00c3, 0x203f: 0x00c3, - // Block 0x81, offset 0x2040 - 0x2040: 0x0080, 0x2041: 0x0080, 0x2042: 0x0080, 0x2043: 0x0080, 0x2044: 0x0080, 0x2045: 0x0080, - 0x2046: 0x0080, 0x2047: 0x0080, 0x2048: 0x0080, 0x2049: 0x0080, 0x204a: 0x0080, 0x204b: 0x0080, - 0x204c: 0x0080, 0x204d: 0x0080, 0x204e: 0x0080, 0x204f: 0x0080, 0x2050: 0x0080, 0x2051: 0x0080, - 0x2052: 0x0080, 0x2053: 0x0080, 0x2054: 0x0080, 0x2055: 0x0080, 0x2056: 0x0080, 0x2057: 0x0080, - 0x2058: 0x0080, 0x2059: 0x0080, 0x205a: 0x0080, 0x205b: 0x0080, 0x205c: 0x0080, 0x205d: 0x0080, - 0x205e: 0x0080, 0x205f: 0x0080, 0x2060: 0x0080, 0x2061: 0x0080, 0x2062: 0x0080, 0x2063: 0x0080, - 0x2064: 0x0080, 0x2065: 0x0080, 0x2066: 0x0080, 0x2067: 0x0080, 0x2068: 0x0080, 0x2069: 0x0080, - 0x206a: 0x0080, 0x206b: 0x0080, 0x206c: 0x0080, 0x206d: 0x0080, 0x206e: 0x0080, 0x206f: 0x00c0, - 0x2070: 0x0080, 0x2071: 0x0080, 0x2072: 0x0080, 0x2073: 0x0080, 0x2074: 0x0080, 0x2075: 0x0080, - 0x2076: 0x0080, 0x2077: 0x0080, 0x2078: 0x0080, 0x2079: 0x0080, 0x207a: 0x0080, 0x207b: 0x0080, - 0x207c: 0x0080, 0x207d: 0x0080, 0x207e: 0x0080, 0x207f: 0x0080, - // Block 0x82, offset 0x2080 - 0x2080: 0x0080, 0x2081: 0x0080, 0x2082: 0x0080, 0x2083: 0x0080, 0x2084: 0x0080, - // Block 0x83, offset 0x20c0 - 0x20c0: 0x008c, 0x20c1: 0x008c, 0x20c2: 0x008c, 0x20c3: 0x008c, 0x20c4: 0x008c, 0x20c5: 0x008c, - 0x20c6: 0x008c, 0x20c7: 0x008c, 0x20c8: 0x008c, 0x20c9: 0x008c, 0x20ca: 0x008c, 0x20cb: 0x008c, - 0x20cc: 0x008c, 0x20cd: 0x008c, 0x20ce: 0x008c, 0x20cf: 0x008c, 0x20d0: 0x008c, 
0x20d1: 0x008c, - 0x20d2: 0x008c, 0x20d3: 0x008c, 0x20d4: 0x008c, 0x20d5: 0x008c, 0x20d6: 0x008c, 0x20d7: 0x008c, - 0x20d8: 0x008c, 0x20d9: 0x008c, 0x20db: 0x008c, 0x20dc: 0x008c, 0x20dd: 0x008c, - 0x20de: 0x008c, 0x20df: 0x008c, 0x20e0: 0x008c, 0x20e1: 0x008c, 0x20e2: 0x008c, 0x20e3: 0x008c, - 0x20e4: 0x008c, 0x20e5: 0x008c, 0x20e6: 0x008c, 0x20e7: 0x008c, 0x20e8: 0x008c, 0x20e9: 0x008c, - 0x20ea: 0x008c, 0x20eb: 0x008c, 0x20ec: 0x008c, 0x20ed: 0x008c, 0x20ee: 0x008c, 0x20ef: 0x008c, - 0x20f0: 0x008c, 0x20f1: 0x008c, 0x20f2: 0x008c, 0x20f3: 0x008c, 0x20f4: 0x008c, 0x20f5: 0x008c, - 0x20f6: 0x008c, 0x20f7: 0x008c, 0x20f8: 0x008c, 0x20f9: 0x008c, 0x20fa: 0x008c, 0x20fb: 0x008c, - 0x20fc: 0x008c, 0x20fd: 0x008c, 0x20fe: 0x008c, 0x20ff: 0x008c, - // Block 0x84, offset 0x2100 - 0x2100: 0x008c, 0x2101: 0x008c, 0x2102: 0x008c, 0x2103: 0x008c, 0x2104: 0x008c, 0x2105: 0x008c, - 0x2106: 0x008c, 0x2107: 0x008c, 0x2108: 0x008c, 0x2109: 0x008c, 0x210a: 0x008c, 0x210b: 0x008c, - 0x210c: 0x008c, 0x210d: 0x008c, 0x210e: 0x008c, 0x210f: 0x008c, 0x2110: 0x008c, 0x2111: 0x008c, - 0x2112: 0x008c, 0x2113: 0x008c, 0x2114: 0x008c, 0x2115: 0x008c, 0x2116: 0x008c, 0x2117: 0x008c, - 0x2118: 0x008c, 0x2119: 0x008c, 0x211a: 0x008c, 0x211b: 0x008c, 0x211c: 0x008c, 0x211d: 0x008c, - 0x211e: 0x008c, 0x211f: 0x008c, 0x2120: 0x008c, 0x2121: 0x008c, 0x2122: 0x008c, 0x2123: 0x008c, - 0x2124: 0x008c, 0x2125: 0x008c, 0x2126: 0x008c, 0x2127: 0x008c, 0x2128: 0x008c, 0x2129: 0x008c, - 0x212a: 0x008c, 0x212b: 0x008c, 0x212c: 0x008c, 0x212d: 0x008c, 0x212e: 0x008c, 0x212f: 0x008c, - 0x2130: 0x008c, 0x2131: 0x008c, 0x2132: 0x008c, 0x2133: 0x008c, - // Block 0x85, offset 0x2140 - 0x2140: 0x008c, 0x2141: 0x008c, 0x2142: 0x008c, 0x2143: 0x008c, 0x2144: 0x008c, 0x2145: 0x008c, - 0x2146: 0x008c, 0x2147: 0x008c, 0x2148: 0x008c, 0x2149: 0x008c, 0x214a: 0x008c, 0x214b: 0x008c, - 0x214c: 0x008c, 0x214d: 0x008c, 0x214e: 0x008c, 0x214f: 0x008c, 0x2150: 0x008c, 0x2151: 0x008c, - 0x2152: 0x008c, 0x2153: 0x008c, 0x2154: 0x008c, 0x2155: 0x008c, 0x2156: 0x008c, 0x2157: 0x008c, - 0x2158: 0x008c, 0x2159: 0x008c, 0x215a: 0x008c, 0x215b: 0x008c, 0x215c: 0x008c, 0x215d: 0x008c, - 0x215e: 0x008c, 0x215f: 0x008c, 0x2160: 0x008c, 0x2161: 0x008c, 0x2162: 0x008c, 0x2163: 0x008c, - 0x2164: 0x008c, 0x2165: 0x008c, 0x2166: 0x008c, 0x2167: 0x008c, 0x2168: 0x008c, 0x2169: 0x008c, - 0x216a: 0x008c, 0x216b: 0x008c, 0x216c: 0x008c, 0x216d: 0x008c, 0x216e: 0x008c, 0x216f: 0x008c, - 0x2170: 0x008c, 0x2171: 0x008c, 0x2172: 0x008c, 0x2173: 0x008c, 0x2174: 0x008c, 0x2175: 0x008c, - 0x2176: 0x008c, 0x2177: 0x008c, 0x2178: 0x008c, 0x2179: 0x008c, 0x217a: 0x008c, 0x217b: 0x008c, - 0x217c: 0x008c, 0x217d: 0x008c, 0x217e: 0x008c, 0x217f: 0x008c, - // Block 0x86, offset 0x2180 - 0x2180: 0x008c, 0x2181: 0x008c, 0x2182: 0x008c, 0x2183: 0x008c, 0x2184: 0x008c, 0x2185: 0x008c, - 0x2186: 0x008c, 0x2187: 0x008c, 0x2188: 0x008c, 0x2189: 0x008c, 0x218a: 0x008c, 0x218b: 0x008c, - 0x218c: 0x008c, 0x218d: 0x008c, 0x218e: 0x008c, 0x218f: 0x008c, 0x2190: 0x008c, 0x2191: 0x008c, - 0x2192: 0x008c, 0x2193: 0x008c, 0x2194: 0x008c, 0x2195: 0x008c, - 0x21b0: 0x0080, 0x21b1: 0x0080, 0x21b2: 0x0080, 0x21b3: 0x0080, 0x21b4: 0x0080, 0x21b5: 0x0080, - 0x21b6: 0x0080, 0x21b7: 0x0080, 0x21b8: 0x0080, 0x21b9: 0x0080, 0x21ba: 0x0080, 0x21bb: 0x0080, - // Block 0x87, offset 0x21c0 - 0x21c0: 0x0080, 0x21c1: 0x0080, 0x21c2: 0x0080, 0x21c3: 0x0080, 0x21c4: 0x0080, 0x21c5: 0x00cc, - 0x21c6: 0x00c0, 0x21c7: 0x00cc, 0x21c8: 0x0080, 0x21c9: 0x0080, 0x21ca: 0x0080, 0x21cb: 0x0080, - 0x21cc: 0x0080, 0x21cd: 
0x0080, 0x21ce: 0x0080, 0x21cf: 0x0080, 0x21d0: 0x0080, 0x21d1: 0x0080, - 0x21d2: 0x0080, 0x21d3: 0x0080, 0x21d4: 0x0080, 0x21d5: 0x0080, 0x21d6: 0x0080, 0x21d7: 0x0080, - 0x21d8: 0x0080, 0x21d9: 0x0080, 0x21da: 0x0080, 0x21db: 0x0080, 0x21dc: 0x0080, 0x21dd: 0x0080, - 0x21de: 0x0080, 0x21df: 0x0080, 0x21e0: 0x0080, 0x21e1: 0x008c, 0x21e2: 0x008c, 0x21e3: 0x008c, - 0x21e4: 0x008c, 0x21e5: 0x008c, 0x21e6: 0x008c, 0x21e7: 0x008c, 0x21e8: 0x008c, 0x21e9: 0x008c, - 0x21ea: 0x00c3, 0x21eb: 0x00c3, 0x21ec: 0x00c3, 0x21ed: 0x00c3, 0x21ee: 0x0040, 0x21ef: 0x0040, - 0x21f0: 0x0080, 0x21f1: 0x0040, 0x21f2: 0x0040, 0x21f3: 0x0040, 0x21f4: 0x0040, 0x21f5: 0x0040, - 0x21f6: 0x0080, 0x21f7: 0x0080, 0x21f8: 0x008c, 0x21f9: 0x008c, 0x21fa: 0x008c, 0x21fb: 0x0040, - 0x21fc: 0x00c0, 0x21fd: 0x0080, 0x21fe: 0x0080, 0x21ff: 0x0080, - // Block 0x88, offset 0x2200 - 0x2201: 0x00cc, 0x2202: 0x00cc, 0x2203: 0x00cc, 0x2204: 0x00cc, 0x2205: 0x00cc, - 0x2206: 0x00cc, 0x2207: 0x00cc, 0x2208: 0x00cc, 0x2209: 0x00cc, 0x220a: 0x00cc, 0x220b: 0x00cc, - 0x220c: 0x00cc, 0x220d: 0x00cc, 0x220e: 0x00cc, 0x220f: 0x00cc, 0x2210: 0x00cc, 0x2211: 0x00cc, - 0x2212: 0x00cc, 0x2213: 0x00cc, 0x2214: 0x00cc, 0x2215: 0x00cc, 0x2216: 0x00cc, 0x2217: 0x00cc, - 0x2218: 0x00cc, 0x2219: 0x00cc, 0x221a: 0x00cc, 0x221b: 0x00cc, 0x221c: 0x00cc, 0x221d: 0x00cc, - 0x221e: 0x00cc, 0x221f: 0x00cc, 0x2220: 0x00cc, 0x2221: 0x00cc, 0x2222: 0x00cc, 0x2223: 0x00cc, - 0x2224: 0x00cc, 0x2225: 0x00cc, 0x2226: 0x00cc, 0x2227: 0x00cc, 0x2228: 0x00cc, 0x2229: 0x00cc, - 0x222a: 0x00cc, 0x222b: 0x00cc, 0x222c: 0x00cc, 0x222d: 0x00cc, 0x222e: 0x00cc, 0x222f: 0x00cc, - 0x2230: 0x00cc, 0x2231: 0x00cc, 0x2232: 0x00cc, 0x2233: 0x00cc, 0x2234: 0x00cc, 0x2235: 0x00cc, - 0x2236: 0x00cc, 0x2237: 0x00cc, 0x2238: 0x00cc, 0x2239: 0x00cc, 0x223a: 0x00cc, 0x223b: 0x00cc, - 0x223c: 0x00cc, 0x223d: 0x00cc, 0x223e: 0x00cc, 0x223f: 0x00cc, - // Block 0x89, offset 0x2240 - 0x2240: 0x00cc, 0x2241: 0x00cc, 0x2242: 0x00cc, 0x2243: 0x00cc, 0x2244: 0x00cc, 0x2245: 0x00cc, - 0x2246: 0x00cc, 0x2247: 0x00cc, 0x2248: 0x00cc, 0x2249: 0x00cc, 0x224a: 0x00cc, 0x224b: 0x00cc, - 0x224c: 0x00cc, 0x224d: 0x00cc, 0x224e: 0x00cc, 0x224f: 0x00cc, 0x2250: 0x00cc, 0x2251: 0x00cc, - 0x2252: 0x00cc, 0x2253: 0x00cc, 0x2254: 0x00cc, 0x2255: 0x00cc, 0x2256: 0x00cc, - 0x2259: 0x00c3, 0x225a: 0x00c3, 0x225b: 0x0080, 0x225c: 0x0080, 0x225d: 0x00cc, - 0x225e: 0x00cc, 0x225f: 0x008c, 0x2260: 0x0080, 0x2261: 0x00cc, 0x2262: 0x00cc, 0x2263: 0x00cc, - 0x2264: 0x00cc, 0x2265: 0x00cc, 0x2266: 0x00cc, 0x2267: 0x00cc, 0x2268: 0x00cc, 0x2269: 0x00cc, - 0x226a: 0x00cc, 0x226b: 0x00cc, 0x226c: 0x00cc, 0x226d: 0x00cc, 0x226e: 0x00cc, 0x226f: 0x00cc, - 0x2270: 0x00cc, 0x2271: 0x00cc, 0x2272: 0x00cc, 0x2273: 0x00cc, 0x2274: 0x00cc, 0x2275: 0x00cc, - 0x2276: 0x00cc, 0x2277: 0x00cc, 0x2278: 0x00cc, 0x2279: 0x00cc, 0x227a: 0x00cc, 0x227b: 0x00cc, - 0x227c: 0x00cc, 0x227d: 0x00cc, 0x227e: 0x00cc, 0x227f: 0x00cc, - // Block 0x8a, offset 0x2280 - 0x2280: 0x00cc, 0x2281: 0x00cc, 0x2282: 0x00cc, 0x2283: 0x00cc, 0x2284: 0x00cc, 0x2285: 0x00cc, - 0x2286: 0x00cc, 0x2287: 0x00cc, 0x2288: 0x00cc, 0x2289: 0x00cc, 0x228a: 0x00cc, 0x228b: 0x00cc, - 0x228c: 0x00cc, 0x228d: 0x00cc, 0x228e: 0x00cc, 0x228f: 0x00cc, 0x2290: 0x00cc, 0x2291: 0x00cc, - 0x2292: 0x00cc, 0x2293: 0x00cc, 0x2294: 0x00cc, 0x2295: 0x00cc, 0x2296: 0x00cc, 0x2297: 0x00cc, - 0x2298: 0x00cc, 0x2299: 0x00cc, 0x229a: 0x00cc, 0x229b: 0x00cc, 0x229c: 0x00cc, 0x229d: 0x00cc, - 0x229e: 0x00cc, 0x229f: 0x00cc, 0x22a0: 0x00cc, 0x22a1: 0x00cc, 0x22a2: 0x00cc, 0x22a3: 0x00cc, - 
0x22a4: 0x00cc, 0x22a5: 0x00cc, 0x22a6: 0x00cc, 0x22a7: 0x00cc, 0x22a8: 0x00cc, 0x22a9: 0x00cc, - 0x22aa: 0x00cc, 0x22ab: 0x00cc, 0x22ac: 0x00cc, 0x22ad: 0x00cc, 0x22ae: 0x00cc, 0x22af: 0x00cc, - 0x22b0: 0x00cc, 0x22b1: 0x00cc, 0x22b2: 0x00cc, 0x22b3: 0x00cc, 0x22b4: 0x00cc, 0x22b5: 0x00cc, - 0x22b6: 0x00cc, 0x22b7: 0x00cc, 0x22b8: 0x00cc, 0x22b9: 0x00cc, 0x22ba: 0x00cc, 0x22bb: 0x00d2, - 0x22bc: 0x00c0, 0x22bd: 0x00cc, 0x22be: 0x00cc, 0x22bf: 0x008c, - // Block 0x8b, offset 0x22c0 - 0x22c5: 0x00c0, - 0x22c6: 0x00c0, 0x22c7: 0x00c0, 0x22c8: 0x00c0, 0x22c9: 0x00c0, 0x22ca: 0x00c0, 0x22cb: 0x00c0, - 0x22cc: 0x00c0, 0x22cd: 0x00c0, 0x22ce: 0x00c0, 0x22cf: 0x00c0, 0x22d0: 0x00c0, 0x22d1: 0x00c0, - 0x22d2: 0x00c0, 0x22d3: 0x00c0, 0x22d4: 0x00c0, 0x22d5: 0x00c0, 0x22d6: 0x00c0, 0x22d7: 0x00c0, - 0x22d8: 0x00c0, 0x22d9: 0x00c0, 0x22da: 0x00c0, 0x22db: 0x00c0, 0x22dc: 0x00c0, 0x22dd: 0x00c0, - 0x22de: 0x00c0, 0x22df: 0x00c0, 0x22e0: 0x00c0, 0x22e1: 0x00c0, 0x22e2: 0x00c0, 0x22e3: 0x00c0, - 0x22e4: 0x00c0, 0x22e5: 0x00c0, 0x22e6: 0x00c0, 0x22e7: 0x00c0, 0x22e8: 0x00c0, 0x22e9: 0x00c0, - 0x22ea: 0x00c0, 0x22eb: 0x00c0, 0x22ec: 0x00c0, 0x22ed: 0x00c0, - 0x22f1: 0x0080, 0x22f2: 0x0080, 0x22f3: 0x0080, 0x22f4: 0x0080, 0x22f5: 0x0080, - 0x22f6: 0x0080, 0x22f7: 0x0080, 0x22f8: 0x0080, 0x22f9: 0x0080, 0x22fa: 0x0080, 0x22fb: 0x0080, - 0x22fc: 0x0080, 0x22fd: 0x0080, 0x22fe: 0x0080, 0x22ff: 0x0080, - // Block 0x8c, offset 0x2300 - 0x2300: 0x0080, 0x2301: 0x0080, 0x2302: 0x0080, 0x2303: 0x0080, 0x2304: 0x0080, 0x2305: 0x0080, - 0x2306: 0x0080, 0x2307: 0x0080, 0x2308: 0x0080, 0x2309: 0x0080, 0x230a: 0x0080, 0x230b: 0x0080, - 0x230c: 0x0080, 0x230d: 0x0080, 0x230e: 0x0080, 0x230f: 0x0080, 0x2310: 0x0080, 0x2311: 0x0080, - 0x2312: 0x0080, 0x2313: 0x0080, 0x2314: 0x0080, 0x2315: 0x0080, 0x2316: 0x0080, 0x2317: 0x0080, - 0x2318: 0x0080, 0x2319: 0x0080, 0x231a: 0x0080, 0x231b: 0x0080, 0x231c: 0x0080, 0x231d: 0x0080, - 0x231e: 0x0080, 0x231f: 0x0080, 0x2320: 0x0080, 0x2321: 0x0080, 0x2322: 0x0080, 0x2323: 0x0080, - 0x2324: 0x0040, 0x2325: 0x0080, 0x2326: 0x0080, 0x2327: 0x0080, 0x2328: 0x0080, 0x2329: 0x0080, - 0x232a: 0x0080, 0x232b: 0x0080, 0x232c: 0x0080, 0x232d: 0x0080, 0x232e: 0x0080, 0x232f: 0x0080, - 0x2330: 0x0080, 0x2331: 0x0080, 0x2332: 0x0080, 0x2333: 0x0080, 0x2334: 0x0080, 0x2335: 0x0080, - 0x2336: 0x0080, 0x2337: 0x0080, 0x2338: 0x0080, 0x2339: 0x0080, 0x233a: 0x0080, 0x233b: 0x0080, - 0x233c: 0x0080, 0x233d: 0x0080, 0x233e: 0x0080, 0x233f: 0x0080, - // Block 0x8d, offset 0x2340 - 0x2340: 0x0080, 0x2341: 0x0080, 0x2342: 0x0080, 0x2343: 0x0080, 0x2344: 0x0080, 0x2345: 0x0080, - 0x2346: 0x0080, 0x2347: 0x0080, 0x2348: 0x0080, 0x2349: 0x0080, 0x234a: 0x0080, 0x234b: 0x0080, - 0x234c: 0x0080, 0x234d: 0x0080, 0x234e: 0x0080, 0x2350: 0x0080, 0x2351: 0x0080, - 0x2352: 0x0080, 0x2353: 0x0080, 0x2354: 0x0080, 0x2355: 0x0080, 0x2356: 0x0080, 0x2357: 0x0080, - 0x2358: 0x0080, 0x2359: 0x0080, 0x235a: 0x0080, 0x235b: 0x0080, 0x235c: 0x0080, 0x235d: 0x0080, - 0x235e: 0x0080, 0x235f: 0x0080, 0x2360: 0x00c0, 0x2361: 0x00c0, 0x2362: 0x00c0, 0x2363: 0x00c0, - 0x2364: 0x00c0, 0x2365: 0x00c0, 0x2366: 0x00c0, 0x2367: 0x00c0, 0x2368: 0x00c0, 0x2369: 0x00c0, - 0x236a: 0x00c0, 0x236b: 0x00c0, 0x236c: 0x00c0, 0x236d: 0x00c0, 0x236e: 0x00c0, 0x236f: 0x00c0, - 0x2370: 0x00c0, 0x2371: 0x00c0, 0x2372: 0x00c0, 0x2373: 0x00c0, 0x2374: 0x00c0, 0x2375: 0x00c0, - 0x2376: 0x00c0, 0x2377: 0x00c0, 0x2378: 0x00c0, 0x2379: 0x00c0, 0x237a: 0x00c0, - // Block 0x8e, offset 0x2380 - 0x2380: 0x0080, 0x2381: 0x0080, 0x2382: 0x0080, 0x2383: 
0x0080, 0x2384: 0x0080, 0x2385: 0x0080, - 0x2386: 0x0080, 0x2387: 0x0080, 0x2388: 0x0080, 0x2389: 0x0080, 0x238a: 0x0080, 0x238b: 0x0080, - 0x238c: 0x0080, 0x238d: 0x0080, 0x238e: 0x0080, 0x238f: 0x0080, 0x2390: 0x0080, 0x2391: 0x0080, - 0x2392: 0x0080, 0x2393: 0x0080, 0x2394: 0x0080, 0x2395: 0x0080, 0x2396: 0x0080, 0x2397: 0x0080, - 0x2398: 0x0080, 0x2399: 0x0080, 0x239a: 0x0080, 0x239b: 0x0080, 0x239c: 0x0080, 0x239d: 0x0080, - 0x239e: 0x0080, 0x239f: 0x0080, 0x23a0: 0x0080, 0x23a1: 0x0080, 0x23a2: 0x0080, 0x23a3: 0x0080, - 0x23b0: 0x00cc, 0x23b1: 0x00cc, 0x23b2: 0x00cc, 0x23b3: 0x00cc, 0x23b4: 0x00cc, 0x23b5: 0x00cc, - 0x23b6: 0x00cc, 0x23b7: 0x00cc, 0x23b8: 0x00cc, 0x23b9: 0x00cc, 0x23ba: 0x00cc, 0x23bb: 0x00cc, - 0x23bc: 0x00cc, 0x23bd: 0x00cc, 0x23be: 0x00cc, 0x23bf: 0x00cc, - // Block 0x8f, offset 0x23c0 - 0x23c0: 0x0080, 0x23c1: 0x0080, 0x23c2: 0x0080, 0x23c3: 0x0080, 0x23c4: 0x0080, 0x23c5: 0x0080, - 0x23c6: 0x0080, 0x23c7: 0x0080, 0x23c8: 0x0080, 0x23c9: 0x0080, 0x23ca: 0x0080, 0x23cb: 0x0080, - 0x23cc: 0x0080, 0x23cd: 0x0080, 0x23ce: 0x0080, 0x23cf: 0x0080, 0x23d0: 0x0080, 0x23d1: 0x0080, - 0x23d2: 0x0080, 0x23d3: 0x0080, 0x23d4: 0x0080, 0x23d5: 0x0080, 0x23d6: 0x0080, 0x23d7: 0x0080, - 0x23d8: 0x0080, 0x23d9: 0x0080, 0x23da: 0x0080, 0x23db: 0x0080, 0x23dc: 0x0080, 0x23dd: 0x0080, - 0x23de: 0x0080, 0x23e0: 0x0080, 0x23e1: 0x0080, 0x23e2: 0x0080, 0x23e3: 0x0080, - 0x23e4: 0x0080, 0x23e5: 0x0080, 0x23e6: 0x0080, 0x23e7: 0x0080, 0x23e8: 0x0080, 0x23e9: 0x0080, - 0x23ea: 0x0080, 0x23eb: 0x0080, 0x23ec: 0x0080, 0x23ed: 0x0080, 0x23ee: 0x0080, 0x23ef: 0x0080, - 0x23f0: 0x0080, 0x23f1: 0x0080, 0x23f2: 0x0080, 0x23f3: 0x0080, 0x23f4: 0x0080, 0x23f5: 0x0080, - 0x23f6: 0x0080, 0x23f7: 0x0080, 0x23f8: 0x0080, 0x23f9: 0x0080, 0x23fa: 0x0080, 0x23fb: 0x0080, - 0x23fc: 0x0080, 0x23fd: 0x0080, 0x23fe: 0x0080, 0x23ff: 0x0080, - // Block 0x90, offset 0x2400 - 0x2400: 0x0080, 0x2401: 0x0080, 0x2402: 0x0080, 0x2403: 0x0080, 0x2404: 0x0080, 0x2405: 0x0080, - 0x2406: 0x0080, 0x2407: 0x0080, 0x2408: 0x0080, 0x2409: 0x0080, 0x240a: 0x0080, 0x240b: 0x0080, - 0x240c: 0x0080, 0x240d: 0x0080, 0x240e: 0x0080, 0x240f: 0x0080, 0x2410: 0x008c, 0x2411: 0x008c, - 0x2412: 0x008c, 0x2413: 0x008c, 0x2414: 0x008c, 0x2415: 0x008c, 0x2416: 0x008c, 0x2417: 0x008c, - 0x2418: 0x008c, 0x2419: 0x008c, 0x241a: 0x008c, 0x241b: 0x008c, 0x241c: 0x008c, 0x241d: 0x008c, - 0x241e: 0x008c, 0x241f: 0x008c, 0x2420: 0x008c, 0x2421: 0x008c, 0x2422: 0x008c, 0x2423: 0x008c, - 0x2424: 0x008c, 0x2425: 0x008c, 0x2426: 0x008c, 0x2427: 0x008c, 0x2428: 0x008c, 0x2429: 0x008c, - 0x242a: 0x008c, 0x242b: 0x008c, 0x242c: 0x008c, 0x242d: 0x008c, 0x242e: 0x008c, 0x242f: 0x008c, - 0x2430: 0x008c, 0x2431: 0x008c, 0x2432: 0x008c, 0x2433: 0x008c, 0x2434: 0x008c, 0x2435: 0x008c, - 0x2436: 0x008c, 0x2437: 0x008c, 0x2438: 0x008c, 0x2439: 0x008c, 0x243a: 0x008c, 0x243b: 0x008c, - 0x243c: 0x008c, 0x243d: 0x008c, 0x243e: 0x008c, - // Block 0x91, offset 0x2440 - 0x2440: 0x008c, 0x2441: 0x008c, 0x2442: 0x008c, 0x2443: 0x008c, 0x2444: 0x008c, 0x2445: 0x008c, - 0x2446: 0x008c, 0x2447: 0x008c, 0x2448: 0x008c, 0x2449: 0x008c, 0x244a: 0x008c, 0x244b: 0x008c, - 0x244c: 0x008c, 0x244d: 0x008c, 0x244e: 0x008c, 0x244f: 0x008c, 0x2450: 0x008c, 0x2451: 0x008c, - 0x2452: 0x008c, 0x2453: 0x008c, 0x2454: 0x008c, 0x2455: 0x008c, 0x2456: 0x008c, 0x2457: 0x008c, - 0x2458: 0x0080, 0x2459: 0x0080, 0x245a: 0x0080, 0x245b: 0x0080, 0x245c: 0x0080, 0x245d: 0x0080, - 0x245e: 0x0080, 0x245f: 0x0080, 0x2460: 0x0080, 0x2461: 0x0080, 0x2462: 0x0080, 0x2463: 0x0080, - 0x2464: 0x0080, 
0x2465: 0x0080, 0x2466: 0x0080, 0x2467: 0x0080, 0x2468: 0x0080, 0x2469: 0x0080, - 0x246a: 0x0080, 0x246b: 0x0080, 0x246c: 0x0080, 0x246d: 0x0080, 0x246e: 0x0080, 0x246f: 0x0080, - 0x2470: 0x0080, 0x2471: 0x0080, 0x2472: 0x0080, 0x2473: 0x0080, 0x2474: 0x0080, 0x2475: 0x0080, - 0x2476: 0x0080, 0x2477: 0x0080, 0x2478: 0x0080, 0x2479: 0x0080, 0x247a: 0x0080, 0x247b: 0x0080, - 0x247c: 0x0080, 0x247d: 0x0080, 0x247e: 0x0080, 0x247f: 0x0080, - // Block 0x92, offset 0x2480 - 0x2480: 0x00cc, 0x2481: 0x00cc, 0x2482: 0x00cc, 0x2483: 0x00cc, 0x2484: 0x00cc, 0x2485: 0x00cc, - 0x2486: 0x00cc, 0x2487: 0x00cc, 0x2488: 0x00cc, 0x2489: 0x00cc, 0x248a: 0x00cc, 0x248b: 0x00cc, - 0x248c: 0x00cc, 0x248d: 0x00cc, 0x248e: 0x00cc, 0x248f: 0x00cc, 0x2490: 0x00cc, 0x2491: 0x00cc, - 0x2492: 0x00cc, 0x2493: 0x00cc, 0x2494: 0x00cc, 0x2495: 0x00cc, 0x2496: 0x00cc, 0x2497: 0x00cc, - 0x2498: 0x00cc, 0x2499: 0x00cc, 0x249a: 0x00cc, 0x249b: 0x00cc, 0x249c: 0x00cc, 0x249d: 0x00cc, - 0x249e: 0x00cc, 0x249f: 0x00cc, 0x24a0: 0x00cc, 0x24a1: 0x00cc, 0x24a2: 0x00cc, 0x24a3: 0x00cc, - 0x24a4: 0x00cc, 0x24a5: 0x00cc, 0x24a6: 0x00cc, 0x24a7: 0x00cc, 0x24a8: 0x00cc, 0x24a9: 0x00cc, - 0x24aa: 0x00cc, 0x24ab: 0x00cc, 0x24ac: 0x00cc, 0x24ad: 0x00cc, 0x24ae: 0x00cc, 0x24af: 0x00cc, - 0x24b0: 0x00cc, 0x24b1: 0x00cc, 0x24b2: 0x00cc, 0x24b3: 0x00cc, 0x24b4: 0x00cc, 0x24b5: 0x00cc, - 0x24b6: 0x00cc, 0x24b7: 0x00cc, 0x24b8: 0x00cc, 0x24b9: 0x00cc, 0x24ba: 0x00cc, 0x24bb: 0x00cc, - 0x24bc: 0x00cc, 0x24bd: 0x00cc, 0x24be: 0x00cc, 0x24bf: 0x00cc, - // Block 0x93, offset 0x24c0 - 0x24c0: 0x00cc, 0x24c1: 0x00cc, 0x24c2: 0x00cc, 0x24c3: 0x00cc, 0x24c4: 0x00cc, 0x24c5: 0x00cc, - 0x24c6: 0x00cc, 0x24c7: 0x00cc, 0x24c8: 0x00cc, 0x24c9: 0x00cc, 0x24ca: 0x00cc, 0x24cb: 0x00cc, - 0x24cc: 0x00cc, 0x24cd: 0x00cc, 0x24ce: 0x00cc, 0x24cf: 0x00cc, 0x24d0: 0x00cc, 0x24d1: 0x00cc, - 0x24d2: 0x00cc, 0x24d3: 0x00cc, 0x24d4: 0x00cc, 0x24d5: 0x00cc, 0x24d6: 0x00cc, 0x24d7: 0x00cc, - 0x24d8: 0x00cc, 0x24d9: 0x00cc, 0x24da: 0x00cc, 0x24db: 0x00cc, 0x24dc: 0x00cc, 0x24dd: 0x00cc, - 0x24de: 0x00cc, 0x24df: 0x00cc, 0x24e0: 0x00cc, 0x24e1: 0x00cc, 0x24e2: 0x00cc, 0x24e3: 0x00cc, - 0x24e4: 0x00cc, 0x24e5: 0x00cc, 0x24e6: 0x00cc, 0x24e7: 0x00cc, 0x24e8: 0x00cc, 0x24e9: 0x00cc, - 0x24ea: 0x00cc, 0x24eb: 0x00cc, 0x24ec: 0x00cc, 0x24ed: 0x00cc, 0x24ee: 0x00cc, 0x24ef: 0x00cc, - 0x24f0: 0x00cc, 0x24f1: 0x00cc, 0x24f2: 0x00cc, 0x24f3: 0x00cc, 0x24f4: 0x00cc, 0x24f5: 0x00cc, - // Block 0x94, offset 0x2500 - 0x2500: 0x00cc, 0x2501: 0x00cc, 0x2502: 0x00cc, 0x2503: 0x00cc, 0x2504: 0x00cc, 0x2505: 0x00cc, - 0x2506: 0x00cc, 0x2507: 0x00cc, 0x2508: 0x00cc, 0x2509: 0x00cc, 0x250a: 0x00cc, 0x250b: 0x00cc, - 0x250c: 0x00cc, 0x250d: 0x00cc, 0x250e: 0x00cc, 0x250f: 0x00cc, 0x2510: 0x00cc, 0x2511: 0x00cc, - 0x2512: 0x00cc, 0x2513: 0x00cc, 0x2514: 0x00cc, 0x2515: 0x00cc, - // Block 0x95, offset 0x2540 - 0x2540: 0x00c0, 0x2541: 0x00c0, 0x2542: 0x00c0, 0x2543: 0x00c0, 0x2544: 0x00c0, 0x2545: 0x00c0, - 0x2546: 0x00c0, 0x2547: 0x00c0, 0x2548: 0x00c0, 0x2549: 0x00c0, 0x254a: 0x00c0, 0x254b: 0x00c0, - 0x254c: 0x00c0, 0x2550: 0x0080, 0x2551: 0x0080, - 0x2552: 0x0080, 0x2553: 0x0080, 0x2554: 0x0080, 0x2555: 0x0080, 0x2556: 0x0080, 0x2557: 0x0080, - 0x2558: 0x0080, 0x2559: 0x0080, 0x255a: 0x0080, 0x255b: 0x0080, 0x255c: 0x0080, 0x255d: 0x0080, - 0x255e: 0x0080, 0x255f: 0x0080, 0x2560: 0x0080, 0x2561: 0x0080, 0x2562: 0x0080, 0x2563: 0x0080, - 0x2564: 0x0080, 0x2565: 0x0080, 0x2566: 0x0080, 0x2567: 0x0080, 0x2568: 0x0080, 0x2569: 0x0080, - 0x256a: 0x0080, 0x256b: 0x0080, 0x256c: 0x0080, 0x256d: 
0x0080, 0x256e: 0x0080, 0x256f: 0x0080, - 0x2570: 0x0080, 0x2571: 0x0080, 0x2572: 0x0080, 0x2573: 0x0080, 0x2574: 0x0080, 0x2575: 0x0080, - 0x2576: 0x0080, 0x2577: 0x0080, 0x2578: 0x0080, 0x2579: 0x0080, 0x257a: 0x0080, 0x257b: 0x0080, - 0x257c: 0x0080, 0x257d: 0x0080, 0x257e: 0x0080, 0x257f: 0x0080, - // Block 0x96, offset 0x2580 - 0x2580: 0x0080, 0x2581: 0x0080, 0x2582: 0x0080, 0x2583: 0x0080, 0x2584: 0x0080, 0x2585: 0x0080, - 0x2586: 0x0080, - 0x2590: 0x00c0, 0x2591: 0x00c0, - 0x2592: 0x00c0, 0x2593: 0x00c0, 0x2594: 0x00c0, 0x2595: 0x00c0, 0x2596: 0x00c0, 0x2597: 0x00c0, - 0x2598: 0x00c0, 0x2599: 0x00c0, 0x259a: 0x00c0, 0x259b: 0x00c0, 0x259c: 0x00c0, 0x259d: 0x00c0, - 0x259e: 0x00c0, 0x259f: 0x00c0, 0x25a0: 0x00c0, 0x25a1: 0x00c0, 0x25a2: 0x00c0, 0x25a3: 0x00c0, - 0x25a4: 0x00c0, 0x25a5: 0x00c0, 0x25a6: 0x00c0, 0x25a7: 0x00c0, 0x25a8: 0x00c0, 0x25a9: 0x00c0, - 0x25aa: 0x00c0, 0x25ab: 0x00c0, 0x25ac: 0x00c0, 0x25ad: 0x00c0, 0x25ae: 0x00c0, 0x25af: 0x00c0, - 0x25b0: 0x00c0, 0x25b1: 0x00c0, 0x25b2: 0x00c0, 0x25b3: 0x00c0, 0x25b4: 0x00c0, 0x25b5: 0x00c0, - 0x25b6: 0x00c0, 0x25b7: 0x00c0, 0x25b8: 0x00c0, 0x25b9: 0x00c0, 0x25ba: 0x00c0, 0x25bb: 0x00c0, - 0x25bc: 0x00c0, 0x25bd: 0x00c0, 0x25be: 0x0080, 0x25bf: 0x0080, - // Block 0x97, offset 0x25c0 - 0x25c0: 0x00c0, 0x25c1: 0x00c0, 0x25c2: 0x00c0, 0x25c3: 0x00c0, 0x25c4: 0x00c0, 0x25c5: 0x00c0, - 0x25c6: 0x00c0, 0x25c7: 0x00c0, 0x25c8: 0x00c0, 0x25c9: 0x00c0, 0x25ca: 0x00c0, 0x25cb: 0x00c0, - 0x25cc: 0x00c0, 0x25cd: 0x0080, 0x25ce: 0x0080, 0x25cf: 0x0080, 0x25d0: 0x00c0, 0x25d1: 0x00c0, - 0x25d2: 0x00c0, 0x25d3: 0x00c0, 0x25d4: 0x00c0, 0x25d5: 0x00c0, 0x25d6: 0x00c0, 0x25d7: 0x00c0, - 0x25d8: 0x00c0, 0x25d9: 0x00c0, 0x25da: 0x00c0, 0x25db: 0x00c0, 0x25dc: 0x00c0, 0x25dd: 0x00c0, - 0x25de: 0x00c0, 0x25df: 0x00c0, 0x25e0: 0x00c0, 0x25e1: 0x00c0, 0x25e2: 0x00c0, 0x25e3: 0x00c0, - 0x25e4: 0x00c0, 0x25e5: 0x00c0, 0x25e6: 0x00c0, 0x25e7: 0x00c0, 0x25e8: 0x00c0, 0x25e9: 0x00c0, - 0x25ea: 0x00c0, 0x25eb: 0x00c0, - // Block 0x98, offset 0x2600 - 0x2600: 0x00c0, 0x2601: 0x00c0, 0x2602: 0x00c0, 0x2603: 0x00c0, 0x2604: 0x00c0, 0x2605: 0x00c0, - 0x2606: 0x00c0, 0x2607: 0x00c0, 0x2608: 0x00c0, 0x2609: 0x00c0, 0x260a: 0x00c0, 0x260b: 0x00c0, - 0x260c: 0x00c0, 0x260d: 0x00c0, 0x260e: 0x00c0, 0x260f: 0x00c0, 0x2610: 0x00c0, 0x2611: 0x00c0, - 0x2612: 0x00c0, 0x2613: 0x00c0, 0x2614: 0x00c0, 0x2615: 0x00c0, 0x2616: 0x00c0, 0x2617: 0x00c0, - 0x2618: 0x00c0, 0x2619: 0x00c0, 0x261a: 0x00c0, 0x261b: 0x00c0, 0x261c: 0x00c0, 0x261d: 0x00c0, - 0x261e: 0x00c0, 0x261f: 0x00c0, 0x2620: 0x00c0, 0x2621: 0x00c0, 0x2622: 0x00c0, 0x2623: 0x00c0, - 0x2624: 0x00c0, 0x2625: 0x00c0, 0x2626: 0x00c0, 0x2627: 0x00c0, 0x2628: 0x00c0, 0x2629: 0x00c0, - 0x262a: 0x00c0, 0x262b: 0x00c0, 0x262c: 0x00c0, 0x262d: 0x00c0, 0x262e: 0x00c0, 0x262f: 0x00c3, - 0x2630: 0x0083, 0x2631: 0x0083, 0x2632: 0x0083, 0x2633: 0x0080, 0x2634: 0x00c3, 0x2635: 0x00c3, - 0x2636: 0x00c3, 0x2637: 0x00c3, 0x2638: 0x00c3, 0x2639: 0x00c3, 0x263a: 0x00c3, 0x263b: 0x00c3, - 0x263c: 0x00c3, 0x263d: 0x00c3, 0x263e: 0x0080, 0x263f: 0x00c0, - // Block 0x99, offset 0x2640 - 0x2640: 0x00c0, 0x2641: 0x00c0, 0x2642: 0x00c0, 0x2643: 0x00c0, 0x2644: 0x00c0, 0x2645: 0x00c0, - 0x2646: 0x00c0, 0x2647: 0x00c0, 0x2648: 0x00c0, 0x2649: 0x00c0, 0x264a: 0x00c0, 0x264b: 0x00c0, - 0x264c: 0x00c0, 0x264d: 0x00c0, 0x264e: 0x00c0, 0x264f: 0x00c0, 0x2650: 0x00c0, 0x2651: 0x00c0, - 0x2652: 0x00c0, 0x2653: 0x00c0, 0x2654: 0x00c0, 0x2655: 0x00c0, 0x2656: 0x00c0, 0x2657: 0x00c0, - 0x2658: 0x00c0, 0x2659: 0x00c0, 0x265a: 0x00c0, 0x265b: 0x00c0, 
0x265c: 0x0080, 0x265d: 0x0080, - 0x265e: 0x00c3, 0x265f: 0x00c3, 0x2660: 0x00c0, 0x2661: 0x00c0, 0x2662: 0x00c0, 0x2663: 0x00c0, - 0x2664: 0x00c0, 0x2665: 0x00c0, 0x2666: 0x00c0, 0x2667: 0x00c0, 0x2668: 0x00c0, 0x2669: 0x00c0, - 0x266a: 0x00c0, 0x266b: 0x00c0, 0x266c: 0x00c0, 0x266d: 0x00c0, 0x266e: 0x00c0, 0x266f: 0x00c0, - 0x2670: 0x00c0, 0x2671: 0x00c0, 0x2672: 0x00c0, 0x2673: 0x00c0, 0x2674: 0x00c0, 0x2675: 0x00c0, - 0x2676: 0x00c0, 0x2677: 0x00c0, 0x2678: 0x00c0, 0x2679: 0x00c0, 0x267a: 0x00c0, 0x267b: 0x00c0, - 0x267c: 0x00c0, 0x267d: 0x00c0, 0x267e: 0x00c0, 0x267f: 0x00c0, - // Block 0x9a, offset 0x2680 - 0x2680: 0x00c0, 0x2681: 0x00c0, 0x2682: 0x00c0, 0x2683: 0x00c0, 0x2684: 0x00c0, 0x2685: 0x00c0, - 0x2686: 0x00c0, 0x2687: 0x00c0, 0x2688: 0x00c0, 0x2689: 0x00c0, 0x268a: 0x00c0, 0x268b: 0x00c0, - 0x268c: 0x00c0, 0x268d: 0x00c0, 0x268e: 0x00c0, 0x268f: 0x00c0, 0x2690: 0x00c0, 0x2691: 0x00c0, - 0x2692: 0x00c0, 0x2693: 0x00c0, 0x2694: 0x00c0, 0x2695: 0x00c0, 0x2696: 0x00c0, 0x2697: 0x00c0, - 0x2698: 0x00c0, 0x2699: 0x00c0, 0x269a: 0x00c0, 0x269b: 0x00c0, 0x269c: 0x00c0, 0x269d: 0x00c0, - 0x269e: 0x00c0, 0x269f: 0x00c0, 0x26a0: 0x00c0, 0x26a1: 0x00c0, 0x26a2: 0x00c0, 0x26a3: 0x00c0, - 0x26a4: 0x00c0, 0x26a5: 0x00c0, 0x26a6: 0x0080, 0x26a7: 0x0080, 0x26a8: 0x0080, 0x26a9: 0x0080, - 0x26aa: 0x0080, 0x26ab: 0x0080, 0x26ac: 0x0080, 0x26ad: 0x0080, 0x26ae: 0x0080, 0x26af: 0x0080, - 0x26b0: 0x00c3, 0x26b1: 0x00c3, 0x26b2: 0x0080, 0x26b3: 0x0080, 0x26b4: 0x0080, 0x26b5: 0x0080, - 0x26b6: 0x0080, 0x26b7: 0x0080, - // Block 0x9b, offset 0x26c0 - 0x26c0: 0x0080, 0x26c1: 0x0080, 0x26c2: 0x0080, 0x26c3: 0x0080, 0x26c4: 0x0080, 0x26c5: 0x0080, - 0x26c6: 0x0080, 0x26c7: 0x0080, 0x26c8: 0x0080, 0x26c9: 0x0080, 0x26ca: 0x0080, 0x26cb: 0x0080, - 0x26cc: 0x0080, 0x26cd: 0x0080, 0x26ce: 0x0080, 0x26cf: 0x0080, 0x26d0: 0x0080, 0x26d1: 0x0080, - 0x26d2: 0x0080, 0x26d3: 0x0080, 0x26d4: 0x0080, 0x26d5: 0x0080, 0x26d6: 0x0080, 0x26d7: 0x00c0, - 0x26d8: 0x00c0, 0x26d9: 0x00c0, 0x26da: 0x00c0, 0x26db: 0x00c0, 0x26dc: 0x00c0, 0x26dd: 0x00c0, - 0x26de: 0x00c0, 0x26df: 0x00c0, 0x26e0: 0x0080, 0x26e1: 0x0080, 0x26e2: 0x00c0, 0x26e3: 0x00c0, - 0x26e4: 0x00c0, 0x26e5: 0x00c0, 0x26e6: 0x00c0, 0x26e7: 0x00c0, 0x26e8: 0x00c0, 0x26e9: 0x00c0, - 0x26ea: 0x00c0, 0x26eb: 0x00c0, 0x26ec: 0x00c0, 0x26ed: 0x00c0, 0x26ee: 0x00c0, 0x26ef: 0x00c0, - 0x26f0: 0x00c0, 0x26f1: 0x00c0, 0x26f2: 0x00c0, 0x26f3: 0x00c0, 0x26f4: 0x00c0, 0x26f5: 0x00c0, - 0x26f6: 0x00c0, 0x26f7: 0x00c0, 0x26f8: 0x00c0, 0x26f9: 0x00c0, 0x26fa: 0x00c0, 0x26fb: 0x00c0, - 0x26fc: 0x00c0, 0x26fd: 0x00c0, 0x26fe: 0x00c0, 0x26ff: 0x00c0, - // Block 0x9c, offset 0x2700 - 0x2700: 0x00c0, 0x2701: 0x00c0, 0x2702: 0x00c0, 0x2703: 0x00c0, 0x2704: 0x00c0, 0x2705: 0x00c0, - 0x2706: 0x00c0, 0x2707: 0x00c0, 0x2708: 0x00c0, 0x2709: 0x00c0, 0x270a: 0x00c0, 0x270b: 0x00c0, - 0x270c: 0x00c0, 0x270d: 0x00c0, 0x270e: 0x00c0, 0x270f: 0x00c0, 0x2710: 0x00c0, 0x2711: 0x00c0, - 0x2712: 0x00c0, 0x2713: 0x00c0, 0x2714: 0x00c0, 0x2715: 0x00c0, 0x2716: 0x00c0, 0x2717: 0x00c0, - 0x2718: 0x00c0, 0x2719: 0x00c0, 0x271a: 0x00c0, 0x271b: 0x00c0, 0x271c: 0x00c0, 0x271d: 0x00c0, - 0x271e: 0x00c0, 0x271f: 0x00c0, 0x2720: 0x00c0, 0x2721: 0x00c0, 0x2722: 0x00c0, 0x2723: 0x00c0, - 0x2724: 0x00c0, 0x2725: 0x00c0, 0x2726: 0x00c0, 0x2727: 0x00c0, 0x2728: 0x00c0, 0x2729: 0x00c0, - 0x272a: 0x00c0, 0x272b: 0x00c0, 0x272c: 0x00c0, 0x272d: 0x00c0, 0x272e: 0x00c0, 0x272f: 0x00c0, - 0x2730: 0x0080, 0x2731: 0x00c0, 0x2732: 0x00c0, 0x2733: 0x00c0, 0x2734: 0x00c0, 0x2735: 0x00c0, - 0x2736: 0x00c0, 0x2737: 
0x00c0, 0x2738: 0x00c0, 0x2739: 0x00c0, 0x273a: 0x00c0, 0x273b: 0x00c0, - 0x273c: 0x00c0, 0x273d: 0x00c0, 0x273e: 0x00c0, 0x273f: 0x00c0, - // Block 0x9d, offset 0x2740 - 0x2740: 0x00c0, 0x2741: 0x00c0, 0x2742: 0x00c0, 0x2743: 0x00c0, 0x2744: 0x00c0, 0x2745: 0x00c0, - 0x2746: 0x00c0, 0x2747: 0x00c0, 0x2748: 0x00c0, 0x2749: 0x0080, 0x274a: 0x0080, 0x274b: 0x00c0, - 0x274c: 0x00c0, 0x274d: 0x00c0, 0x274e: 0x00c0, 0x274f: 0x00c0, 0x2750: 0x00c0, 0x2751: 0x00c0, - 0x2752: 0x00c0, 0x2753: 0x00c0, 0x2754: 0x00c0, 0x2755: 0x00c0, 0x2756: 0x00c0, 0x2757: 0x00c0, - 0x2758: 0x00c0, 0x2759: 0x00c0, 0x275a: 0x00c0, 0x275b: 0x00c0, 0x275c: 0x00c0, 0x275d: 0x00c0, - 0x275e: 0x00c0, 0x275f: 0x00c0, 0x2760: 0x00c0, 0x2761: 0x00c0, 0x2762: 0x00c0, 0x2763: 0x00c0, - 0x2764: 0x00c0, 0x2765: 0x00c0, 0x2766: 0x00c0, 0x2767: 0x00c0, 0x2768: 0x00c0, 0x2769: 0x00c0, - 0x276a: 0x00c0, 0x276b: 0x00c0, 0x276c: 0x00c0, 0x276d: 0x00c0, 0x276e: 0x00c0, - 0x2770: 0x00c0, 0x2771: 0x00c0, 0x2772: 0x00c0, 0x2773: 0x00c0, 0x2774: 0x00c0, 0x2775: 0x00c0, - 0x2776: 0x00c0, 0x2777: 0x00c0, - // Block 0x9e, offset 0x2780 - 0x27b7: 0x00c0, 0x27b8: 0x0080, 0x27b9: 0x0080, 0x27ba: 0x00c0, 0x27bb: 0x00c0, - 0x27bc: 0x00c0, 0x27bd: 0x00c0, 0x27be: 0x00c0, 0x27bf: 0x00c0, - // Block 0x9f, offset 0x27c0 - 0x27c0: 0x00c0, 0x27c1: 0x00c0, 0x27c2: 0x00c3, 0x27c3: 0x00c0, 0x27c4: 0x00c0, 0x27c5: 0x00c0, - 0x27c6: 0x00c6, 0x27c7: 0x00c0, 0x27c8: 0x00c0, 0x27c9: 0x00c0, 0x27ca: 0x00c0, 0x27cb: 0x00c3, - 0x27cc: 0x00c0, 0x27cd: 0x00c0, 0x27ce: 0x00c0, 0x27cf: 0x00c0, 0x27d0: 0x00c0, 0x27d1: 0x00c0, - 0x27d2: 0x00c0, 0x27d3: 0x00c0, 0x27d4: 0x00c0, 0x27d5: 0x00c0, 0x27d6: 0x00c0, 0x27d7: 0x00c0, - 0x27d8: 0x00c0, 0x27d9: 0x00c0, 0x27da: 0x00c0, 0x27db: 0x00c0, 0x27dc: 0x00c0, 0x27dd: 0x00c0, - 0x27de: 0x00c0, 0x27df: 0x00c0, 0x27e0: 0x00c0, 0x27e1: 0x00c0, 0x27e2: 0x00c0, 0x27e3: 0x00c0, - 0x27e4: 0x00c0, 0x27e5: 0x00c3, 0x27e6: 0x00c3, 0x27e7: 0x00c0, 0x27e8: 0x0080, 0x27e9: 0x0080, - 0x27ea: 0x0080, 0x27eb: 0x0080, - 0x27f0: 0x0080, 0x27f1: 0x0080, 0x27f2: 0x0080, 0x27f3: 0x0080, 0x27f4: 0x0080, 0x27f5: 0x0080, - 0x27f6: 0x0080, 0x27f7: 0x0080, 0x27f8: 0x0080, 0x27f9: 0x0080, - // Block 0xa0, offset 0x2800 - 0x2800: 0x00c2, 0x2801: 0x00c2, 0x2802: 0x00c2, 0x2803: 0x00c2, 0x2804: 0x00c2, 0x2805: 0x00c2, - 0x2806: 0x00c2, 0x2807: 0x00c2, 0x2808: 0x00c2, 0x2809: 0x00c2, 0x280a: 0x00c2, 0x280b: 0x00c2, - 0x280c: 0x00c2, 0x280d: 0x00c2, 0x280e: 0x00c2, 0x280f: 0x00c2, 0x2810: 0x00c2, 0x2811: 0x00c2, - 0x2812: 0x00c2, 0x2813: 0x00c2, 0x2814: 0x00c2, 0x2815: 0x00c2, 0x2816: 0x00c2, 0x2817: 0x00c2, - 0x2818: 0x00c2, 0x2819: 0x00c2, 0x281a: 0x00c2, 0x281b: 0x00c2, 0x281c: 0x00c2, 0x281d: 0x00c2, - 0x281e: 0x00c2, 0x281f: 0x00c2, 0x2820: 0x00c2, 0x2821: 0x00c2, 0x2822: 0x00c2, 0x2823: 0x00c2, - 0x2824: 0x00c2, 0x2825: 0x00c2, 0x2826: 0x00c2, 0x2827: 0x00c2, 0x2828: 0x00c2, 0x2829: 0x00c2, - 0x282a: 0x00c2, 0x282b: 0x00c2, 0x282c: 0x00c2, 0x282d: 0x00c2, 0x282e: 0x00c2, 0x282f: 0x00c2, - 0x2830: 0x00c2, 0x2831: 0x00c2, 0x2832: 0x00c1, 0x2833: 0x00c0, 0x2834: 0x0080, 0x2835: 0x0080, - 0x2836: 0x0080, 0x2837: 0x0080, - // Block 0xa1, offset 0x2840 - 0x2840: 0x00c0, 0x2841: 0x00c0, 0x2842: 0x00c0, 0x2843: 0x00c0, 0x2844: 0x00c6, 0x2845: 0x00c3, - 0x284e: 0x0080, 0x284f: 0x0080, 0x2850: 0x00c0, 0x2851: 0x00c0, - 0x2852: 0x00c0, 0x2853: 0x00c0, 0x2854: 0x00c0, 0x2855: 0x00c0, 0x2856: 0x00c0, 0x2857: 0x00c0, - 0x2858: 0x00c0, 0x2859: 0x00c0, - 0x2860: 0x00c3, 0x2861: 0x00c3, 0x2862: 0x00c3, 0x2863: 0x00c3, - 0x2864: 0x00c3, 0x2865: 0x00c3, 0x2866: 0x00c3, 
0x2867: 0x00c3, 0x2868: 0x00c3, 0x2869: 0x00c3, - 0x286a: 0x00c3, 0x286b: 0x00c3, 0x286c: 0x00c3, 0x286d: 0x00c3, 0x286e: 0x00c3, 0x286f: 0x00c3, - 0x2870: 0x00c3, 0x2871: 0x00c3, 0x2872: 0x00c0, 0x2873: 0x00c0, 0x2874: 0x00c0, 0x2875: 0x00c0, - 0x2876: 0x00c0, 0x2877: 0x00c0, 0x2878: 0x0080, 0x2879: 0x0080, 0x287a: 0x0080, 0x287b: 0x00c0, - 0x287c: 0x0080, 0x287d: 0x00c0, - // Block 0xa2, offset 0x2880 - 0x2880: 0x00c0, 0x2881: 0x00c0, 0x2882: 0x00c0, 0x2883: 0x00c0, 0x2884: 0x00c0, 0x2885: 0x00c0, - 0x2886: 0x00c0, 0x2887: 0x00c0, 0x2888: 0x00c0, 0x2889: 0x00c0, 0x288a: 0x00c0, 0x288b: 0x00c0, - 0x288c: 0x00c0, 0x288d: 0x00c0, 0x288e: 0x00c0, 0x288f: 0x00c0, 0x2890: 0x00c0, 0x2891: 0x00c0, - 0x2892: 0x00c0, 0x2893: 0x00c0, 0x2894: 0x00c0, 0x2895: 0x00c0, 0x2896: 0x00c0, 0x2897: 0x00c0, - 0x2898: 0x00c0, 0x2899: 0x00c0, 0x289a: 0x00c0, 0x289b: 0x00c0, 0x289c: 0x00c0, 0x289d: 0x00c0, - 0x289e: 0x00c0, 0x289f: 0x00c0, 0x28a0: 0x00c0, 0x28a1: 0x00c0, 0x28a2: 0x00c0, 0x28a3: 0x00c0, - 0x28a4: 0x00c0, 0x28a5: 0x00c0, 0x28a6: 0x00c3, 0x28a7: 0x00c3, 0x28a8: 0x00c3, 0x28a9: 0x00c3, - 0x28aa: 0x00c3, 0x28ab: 0x00c3, 0x28ac: 0x00c3, 0x28ad: 0x00c3, 0x28ae: 0x0080, 0x28af: 0x0080, - 0x28b0: 0x00c0, 0x28b1: 0x00c0, 0x28b2: 0x00c0, 0x28b3: 0x00c0, 0x28b4: 0x00c0, 0x28b5: 0x00c0, - 0x28b6: 0x00c0, 0x28b7: 0x00c0, 0x28b8: 0x00c0, 0x28b9: 0x00c0, 0x28ba: 0x00c0, 0x28bb: 0x00c0, - 0x28bc: 0x00c0, 0x28bd: 0x00c0, 0x28be: 0x00c0, 0x28bf: 0x00c0, - // Block 0xa3, offset 0x28c0 - 0x28c0: 0x00c0, 0x28c1: 0x00c0, 0x28c2: 0x00c0, 0x28c3: 0x00c0, 0x28c4: 0x00c0, 0x28c5: 0x00c0, - 0x28c6: 0x00c0, 0x28c7: 0x00c3, 0x28c8: 0x00c3, 0x28c9: 0x00c3, 0x28ca: 0x00c3, 0x28cb: 0x00c3, - 0x28cc: 0x00c3, 0x28cd: 0x00c3, 0x28ce: 0x00c3, 0x28cf: 0x00c3, 0x28d0: 0x00c3, 0x28d1: 0x00c3, - 0x28d2: 0x00c0, 0x28d3: 0x00c5, - 0x28df: 0x0080, 0x28e0: 0x0040, 0x28e1: 0x0040, 0x28e2: 0x0040, 0x28e3: 0x0040, - 0x28e4: 0x0040, 0x28e5: 0x0040, 0x28e6: 0x0040, 0x28e7: 0x0040, 0x28e8: 0x0040, 0x28e9: 0x0040, - 0x28ea: 0x0040, 0x28eb: 0x0040, 0x28ec: 0x0040, 0x28ed: 0x0040, 0x28ee: 0x0040, 0x28ef: 0x0040, - 0x28f0: 0x0040, 0x28f1: 0x0040, 0x28f2: 0x0040, 0x28f3: 0x0040, 0x28f4: 0x0040, 0x28f5: 0x0040, - 0x28f6: 0x0040, 0x28f7: 0x0040, 0x28f8: 0x0040, 0x28f9: 0x0040, 0x28fa: 0x0040, 0x28fb: 0x0040, - 0x28fc: 0x0040, - // Block 0xa4, offset 0x2900 - 0x2900: 0x00c3, 0x2901: 0x00c3, 0x2902: 0x00c3, 0x2903: 0x00c0, 0x2904: 0x00c0, 0x2905: 0x00c0, - 0x2906: 0x00c0, 0x2907: 0x00c0, 0x2908: 0x00c0, 0x2909: 0x00c0, 0x290a: 0x00c0, 0x290b: 0x00c0, - 0x290c: 0x00c0, 0x290d: 0x00c0, 0x290e: 0x00c0, 0x290f: 0x00c0, 0x2910: 0x00c0, 0x2911: 0x00c0, - 0x2912: 0x00c0, 0x2913: 0x00c0, 0x2914: 0x00c0, 0x2915: 0x00c0, 0x2916: 0x00c0, 0x2917: 0x00c0, - 0x2918: 0x00c0, 0x2919: 0x00c0, 0x291a: 0x00c0, 0x291b: 0x00c0, 0x291c: 0x00c0, 0x291d: 0x00c0, - 0x291e: 0x00c0, 0x291f: 0x00c0, 0x2920: 0x00c0, 0x2921: 0x00c0, 0x2922: 0x00c0, 0x2923: 0x00c0, - 0x2924: 0x00c0, 0x2925: 0x00c0, 0x2926: 0x00c0, 0x2927: 0x00c0, 0x2928: 0x00c0, 0x2929: 0x00c0, - 0x292a: 0x00c0, 0x292b: 0x00c0, 0x292c: 0x00c0, 0x292d: 0x00c0, 0x292e: 0x00c0, 0x292f: 0x00c0, - 0x2930: 0x00c0, 0x2931: 0x00c0, 0x2932: 0x00c0, 0x2933: 0x00c3, 0x2934: 0x00c0, 0x2935: 0x00c0, - 0x2936: 0x00c3, 0x2937: 0x00c3, 0x2938: 0x00c3, 0x2939: 0x00c3, 0x293a: 0x00c0, 0x293b: 0x00c0, - 0x293c: 0x00c3, 0x293d: 0x00c0, 0x293e: 0x00c0, 0x293f: 0x00c0, - // Block 0xa5, offset 0x2940 - 0x2940: 0x00c5, 0x2941: 0x0080, 0x2942: 0x0080, 0x2943: 0x0080, 0x2944: 0x0080, 0x2945: 0x0080, - 0x2946: 0x0080, 0x2947: 0x0080, 0x2948: 
0x0080, 0x2949: 0x0080, 0x294a: 0x0080, 0x294b: 0x0080, - 0x294c: 0x0080, 0x294d: 0x0080, 0x294f: 0x00c0, 0x2950: 0x00c0, 0x2951: 0x00c0, - 0x2952: 0x00c0, 0x2953: 0x00c0, 0x2954: 0x00c0, 0x2955: 0x00c0, 0x2956: 0x00c0, 0x2957: 0x00c0, - 0x2958: 0x00c0, 0x2959: 0x00c0, - 0x295e: 0x0080, 0x295f: 0x0080, 0x2960: 0x00c0, 0x2961: 0x00c0, 0x2962: 0x00c0, 0x2963: 0x00c0, - 0x2964: 0x00c0, 0x2965: 0x00c3, 0x2966: 0x00c0, 0x2967: 0x00c0, 0x2968: 0x00c0, 0x2969: 0x00c0, - 0x296a: 0x00c0, 0x296b: 0x00c0, 0x296c: 0x00c0, 0x296d: 0x00c0, 0x296e: 0x00c0, 0x296f: 0x00c0, - 0x2970: 0x00c0, 0x2971: 0x00c0, 0x2972: 0x00c0, 0x2973: 0x00c0, 0x2974: 0x00c0, 0x2975: 0x00c0, - 0x2976: 0x00c0, 0x2977: 0x00c0, 0x2978: 0x00c0, 0x2979: 0x00c0, 0x297a: 0x00c0, 0x297b: 0x00c0, - 0x297c: 0x00c0, 0x297d: 0x00c0, 0x297e: 0x00c0, - // Block 0xa6, offset 0x2980 - 0x2980: 0x00c0, 0x2981: 0x00c0, 0x2982: 0x00c0, 0x2983: 0x00c0, 0x2984: 0x00c0, 0x2985: 0x00c0, - 0x2986: 0x00c0, 0x2987: 0x00c0, 0x2988: 0x00c0, 0x2989: 0x00c0, 0x298a: 0x00c0, 0x298b: 0x00c0, - 0x298c: 0x00c0, 0x298d: 0x00c0, 0x298e: 0x00c0, 0x298f: 0x00c0, 0x2990: 0x00c0, 0x2991: 0x00c0, - 0x2992: 0x00c0, 0x2993: 0x00c0, 0x2994: 0x00c0, 0x2995: 0x00c0, 0x2996: 0x00c0, 0x2997: 0x00c0, - 0x2998: 0x00c0, 0x2999: 0x00c0, 0x299a: 0x00c0, 0x299b: 0x00c0, 0x299c: 0x00c0, 0x299d: 0x00c0, - 0x299e: 0x00c0, 0x299f: 0x00c0, 0x29a0: 0x00c0, 0x29a1: 0x00c0, 0x29a2: 0x00c0, 0x29a3: 0x00c0, - 0x29a4: 0x00c0, 0x29a5: 0x00c0, 0x29a6: 0x00c0, 0x29a7: 0x00c0, 0x29a8: 0x00c0, 0x29a9: 0x00c3, - 0x29aa: 0x00c3, 0x29ab: 0x00c3, 0x29ac: 0x00c3, 0x29ad: 0x00c3, 0x29ae: 0x00c3, 0x29af: 0x00c0, - 0x29b0: 0x00c0, 0x29b1: 0x00c3, 0x29b2: 0x00c3, 0x29b3: 0x00c0, 0x29b4: 0x00c0, 0x29b5: 0x00c3, - 0x29b6: 0x00c3, - // Block 0xa7, offset 0x29c0 - 0x29c0: 0x00c0, 0x29c1: 0x00c0, 0x29c2: 0x00c0, 0x29c3: 0x00c3, 0x29c4: 0x00c0, 0x29c5: 0x00c0, - 0x29c6: 0x00c0, 0x29c7: 0x00c0, 0x29c8: 0x00c0, 0x29c9: 0x00c0, 0x29ca: 0x00c0, 0x29cb: 0x00c0, - 0x29cc: 0x00c3, 0x29cd: 0x00c0, 0x29d0: 0x00c0, 0x29d1: 0x00c0, - 0x29d2: 0x00c0, 0x29d3: 0x00c0, 0x29d4: 0x00c0, 0x29d5: 0x00c0, 0x29d6: 0x00c0, 0x29d7: 0x00c0, - 0x29d8: 0x00c0, 0x29d9: 0x00c0, 0x29dc: 0x0080, 0x29dd: 0x0080, - 0x29de: 0x0080, 0x29df: 0x0080, 0x29e0: 0x00c0, 0x29e1: 0x00c0, 0x29e2: 0x00c0, 0x29e3: 0x00c0, - 0x29e4: 0x00c0, 0x29e5: 0x00c0, 0x29e6: 0x00c0, 0x29e7: 0x00c0, 0x29e8: 0x00c0, 0x29e9: 0x00c0, - 0x29ea: 0x00c0, 0x29eb: 0x00c0, 0x29ec: 0x00c0, 0x29ed: 0x00c0, 0x29ee: 0x00c0, 0x29ef: 0x00c0, - 0x29f0: 0x00c0, 0x29f1: 0x00c0, 0x29f2: 0x00c0, 0x29f3: 0x00c0, 0x29f4: 0x00c0, 0x29f5: 0x00c0, - 0x29f6: 0x00c0, 0x29f7: 0x0080, 0x29f8: 0x0080, 0x29f9: 0x0080, 0x29fa: 0x00c0, 0x29fb: 0x00c0, - 0x29fc: 0x00c3, 0x29fd: 0x00c0, 0x29fe: 0x00c0, 0x29ff: 0x00c0, - // Block 0xa8, offset 0x2a00 - 0x2a00: 0x00c0, 0x2a01: 0x00c0, 0x2a02: 0x00c0, 0x2a03: 0x00c0, 0x2a04: 0x00c0, 0x2a05: 0x00c0, - 0x2a06: 0x00c0, 0x2a07: 0x00c0, 0x2a08: 0x00c0, 0x2a09: 0x00c0, 0x2a0a: 0x00c0, 0x2a0b: 0x00c0, - 0x2a0c: 0x00c0, 0x2a0d: 0x00c0, 0x2a0e: 0x00c0, 0x2a0f: 0x00c0, 0x2a10: 0x00c0, 0x2a11: 0x00c0, - 0x2a12: 0x00c0, 0x2a13: 0x00c0, 0x2a14: 0x00c0, 0x2a15: 0x00c0, 0x2a16: 0x00c0, 0x2a17: 0x00c0, - 0x2a18: 0x00c0, 0x2a19: 0x00c0, 0x2a1a: 0x00c0, 0x2a1b: 0x00c0, 0x2a1c: 0x00c0, 0x2a1d: 0x00c0, - 0x2a1e: 0x00c0, 0x2a1f: 0x00c0, 0x2a20: 0x00c0, 0x2a21: 0x00c0, 0x2a22: 0x00c0, 0x2a23: 0x00c0, - 0x2a24: 0x00c0, 0x2a25: 0x00c0, 0x2a26: 0x00c0, 0x2a27: 0x00c0, 0x2a28: 0x00c0, 0x2a29: 0x00c0, - 0x2a2a: 0x00c0, 0x2a2b: 0x00c0, 0x2a2c: 0x00c0, 0x2a2d: 0x00c0, 0x2a2e: 0x00c0, 
0x2a2f: 0x00c0, - 0x2a30: 0x00c3, 0x2a31: 0x00c0, 0x2a32: 0x00c3, 0x2a33: 0x00c3, 0x2a34: 0x00c3, 0x2a35: 0x00c0, - 0x2a36: 0x00c0, 0x2a37: 0x00c3, 0x2a38: 0x00c3, 0x2a39: 0x00c0, 0x2a3a: 0x00c0, 0x2a3b: 0x00c0, - 0x2a3c: 0x00c0, 0x2a3d: 0x00c0, 0x2a3e: 0x00c3, 0x2a3f: 0x00c3, - // Block 0xa9, offset 0x2a40 - 0x2a40: 0x00c0, 0x2a41: 0x00c3, 0x2a42: 0x00c0, - 0x2a5b: 0x00c0, 0x2a5c: 0x00c0, 0x2a5d: 0x00c0, - 0x2a5e: 0x0080, 0x2a5f: 0x0080, 0x2a60: 0x00c0, 0x2a61: 0x00c0, 0x2a62: 0x00c0, 0x2a63: 0x00c0, - 0x2a64: 0x00c0, 0x2a65: 0x00c0, 0x2a66: 0x00c0, 0x2a67: 0x00c0, 0x2a68: 0x00c0, 0x2a69: 0x00c0, - 0x2a6a: 0x00c0, 0x2a6b: 0x00c0, 0x2a6c: 0x00c3, 0x2a6d: 0x00c3, 0x2a6e: 0x00c0, 0x2a6f: 0x00c0, - 0x2a70: 0x0080, 0x2a71: 0x0080, 0x2a72: 0x00c0, 0x2a73: 0x00c0, 0x2a74: 0x00c0, 0x2a75: 0x00c0, - 0x2a76: 0x00c6, - // Block 0xaa, offset 0x2a80 - 0x2a81: 0x00c0, 0x2a82: 0x00c0, 0x2a83: 0x00c0, 0x2a84: 0x00c0, 0x2a85: 0x00c0, - 0x2a86: 0x00c0, 0x2a89: 0x00c0, 0x2a8a: 0x00c0, 0x2a8b: 0x00c0, - 0x2a8c: 0x00c0, 0x2a8d: 0x00c0, 0x2a8e: 0x00c0, 0x2a91: 0x00c0, - 0x2a92: 0x00c0, 0x2a93: 0x00c0, 0x2a94: 0x00c0, 0x2a95: 0x00c0, 0x2a96: 0x00c0, - 0x2aa0: 0x00c0, 0x2aa1: 0x00c0, 0x2aa2: 0x00c0, 0x2aa3: 0x00c0, - 0x2aa4: 0x00c0, 0x2aa5: 0x00c0, 0x2aa6: 0x00c0, 0x2aa8: 0x00c0, 0x2aa9: 0x00c0, - 0x2aaa: 0x00c0, 0x2aab: 0x00c0, 0x2aac: 0x00c0, 0x2aad: 0x00c0, 0x2aae: 0x00c0, - 0x2ab0: 0x00c0, 0x2ab1: 0x00c0, 0x2ab2: 0x00c0, 0x2ab3: 0x00c0, 0x2ab4: 0x00c0, 0x2ab5: 0x00c0, - 0x2ab6: 0x00c0, 0x2ab7: 0x00c0, 0x2ab8: 0x00c0, 0x2ab9: 0x00c0, 0x2aba: 0x00c0, 0x2abb: 0x00c0, - 0x2abc: 0x00c0, 0x2abd: 0x00c0, 0x2abe: 0x00c0, 0x2abf: 0x00c0, - // Block 0xab, offset 0x2ac0 - 0x2ac0: 0x00c0, 0x2ac1: 0x00c0, 0x2ac2: 0x00c0, 0x2ac3: 0x00c0, 0x2ac4: 0x00c0, 0x2ac5: 0x00c0, - 0x2ac6: 0x00c0, 0x2ac7: 0x00c0, 0x2ac8: 0x00c0, 0x2ac9: 0x00c0, 0x2aca: 0x00c0, 0x2acb: 0x00c0, - 0x2acc: 0x00c0, 0x2acd: 0x00c0, 0x2ace: 0x00c0, 0x2acf: 0x00c0, 0x2ad0: 0x00c0, 0x2ad1: 0x00c0, - 0x2ad2: 0x00c0, 0x2ad3: 0x00c0, 0x2ad4: 0x00c0, 0x2ad5: 0x00c0, 0x2ad6: 0x00c0, 0x2ad7: 0x00c0, - 0x2ad8: 0x00c0, 0x2ad9: 0x00c0, 0x2ada: 0x00c0, 0x2adb: 0x0080, 0x2adc: 0x0080, 0x2add: 0x0080, - 0x2ade: 0x0080, 0x2adf: 0x0080, 0x2ae0: 0x00c0, 0x2ae1: 0x00c0, 0x2ae2: 0x00c0, 0x2ae3: 0x00c0, - 0x2ae4: 0x00c0, 0x2ae5: 0x00c8, - 0x2af0: 0x00c0, 0x2af1: 0x00c0, 0x2af2: 0x00c0, 0x2af3: 0x00c0, 0x2af4: 0x00c0, 0x2af5: 0x00c0, - 0x2af6: 0x00c0, 0x2af7: 0x00c0, 0x2af8: 0x00c0, 0x2af9: 0x00c0, 0x2afa: 0x00c0, 0x2afb: 0x00c0, - 0x2afc: 0x00c0, 0x2afd: 0x00c0, 0x2afe: 0x00c0, 0x2aff: 0x00c0, - // Block 0xac, offset 0x2b00 - 0x2b00: 0x00c0, 0x2b01: 0x00c0, 0x2b02: 0x00c0, 0x2b03: 0x00c0, 0x2b04: 0x00c0, 0x2b05: 0x00c0, - 0x2b06: 0x00c0, 0x2b07: 0x00c0, 0x2b08: 0x00c0, 0x2b09: 0x00c0, 0x2b0a: 0x00c0, 0x2b0b: 0x00c0, - 0x2b0c: 0x00c0, 0x2b0d: 0x00c0, 0x2b0e: 0x00c0, 0x2b0f: 0x00c0, 0x2b10: 0x00c0, 0x2b11: 0x00c0, - 0x2b12: 0x00c0, 0x2b13: 0x00c0, 0x2b14: 0x00c0, 0x2b15: 0x00c0, 0x2b16: 0x00c0, 0x2b17: 0x00c0, - 0x2b18: 0x00c0, 0x2b19: 0x00c0, 0x2b1a: 0x00c0, 0x2b1b: 0x00c0, 0x2b1c: 0x00c0, 0x2b1d: 0x00c0, - 0x2b1e: 0x00c0, 0x2b1f: 0x00c0, 0x2b20: 0x00c0, 0x2b21: 0x00c0, 0x2b22: 0x00c0, 0x2b23: 0x00c0, - 0x2b24: 0x00c0, 0x2b25: 0x00c3, 0x2b26: 0x00c0, 0x2b27: 0x00c0, 0x2b28: 0x00c3, 0x2b29: 0x00c0, - 0x2b2a: 0x00c0, 0x2b2b: 0x0080, 0x2b2c: 0x00c0, 0x2b2d: 0x00c6, - 0x2b30: 0x00c0, 0x2b31: 0x00c0, 0x2b32: 0x00c0, 0x2b33: 0x00c0, 0x2b34: 0x00c0, 0x2b35: 0x00c0, - 0x2b36: 0x00c0, 0x2b37: 0x00c0, 0x2b38: 0x00c0, 0x2b39: 0x00c0, - // Block 0xad, offset 0x2b40 - 0x2b40: 0x00c0, 
0x2b41: 0x00c0, 0x2b42: 0x00c0, 0x2b43: 0x00c0, 0x2b44: 0x00c0, 0x2b45: 0x00c0, - 0x2b46: 0x00c0, 0x2b47: 0x00c0, 0x2b48: 0x00c0, 0x2b49: 0x00c0, 0x2b4a: 0x00c0, 0x2b4b: 0x00c0, - 0x2b4c: 0x00c0, 0x2b4d: 0x00c0, 0x2b4e: 0x00c0, 0x2b4f: 0x00c0, 0x2b50: 0x00c0, 0x2b51: 0x00c0, - 0x2b52: 0x00c0, 0x2b53: 0x00c0, 0x2b54: 0x00c0, 0x2b55: 0x00c0, 0x2b56: 0x00c0, 0x2b57: 0x00c0, - 0x2b58: 0x00c0, 0x2b59: 0x00c0, 0x2b5a: 0x00c0, 0x2b5b: 0x00c0, 0x2b5c: 0x00c0, 0x2b5d: 0x00c0, - 0x2b5e: 0x00c0, 0x2b5f: 0x00c0, 0x2b60: 0x00c0, 0x2b61: 0x00c0, 0x2b62: 0x00c0, 0x2b63: 0x00c0, - 0x2b70: 0x0040, 0x2b71: 0x0040, 0x2b72: 0x0040, 0x2b73: 0x0040, 0x2b74: 0x0040, 0x2b75: 0x0040, - 0x2b76: 0x0040, 0x2b77: 0x0040, 0x2b78: 0x0040, 0x2b79: 0x0040, 0x2b7a: 0x0040, 0x2b7b: 0x0040, - 0x2b7c: 0x0040, 0x2b7d: 0x0040, 0x2b7e: 0x0040, 0x2b7f: 0x0040, - // Block 0xae, offset 0x2b80 - 0x2b80: 0x0040, 0x2b81: 0x0040, 0x2b82: 0x0040, 0x2b83: 0x0040, 0x2b84: 0x0040, 0x2b85: 0x0040, - 0x2b86: 0x0040, 0x2b8b: 0x0040, - 0x2b8c: 0x0040, 0x2b8d: 0x0040, 0x2b8e: 0x0040, 0x2b8f: 0x0040, 0x2b90: 0x0040, 0x2b91: 0x0040, - 0x2b92: 0x0040, 0x2b93: 0x0040, 0x2b94: 0x0040, 0x2b95: 0x0040, 0x2b96: 0x0040, 0x2b97: 0x0040, - 0x2b98: 0x0040, 0x2b99: 0x0040, 0x2b9a: 0x0040, 0x2b9b: 0x0040, 0x2b9c: 0x0040, 0x2b9d: 0x0040, - 0x2b9e: 0x0040, 0x2b9f: 0x0040, 0x2ba0: 0x0040, 0x2ba1: 0x0040, 0x2ba2: 0x0040, 0x2ba3: 0x0040, - 0x2ba4: 0x0040, 0x2ba5: 0x0040, 0x2ba6: 0x0040, 0x2ba7: 0x0040, 0x2ba8: 0x0040, 0x2ba9: 0x0040, - 0x2baa: 0x0040, 0x2bab: 0x0040, 0x2bac: 0x0040, 0x2bad: 0x0040, 0x2bae: 0x0040, 0x2baf: 0x0040, - 0x2bb0: 0x0040, 0x2bb1: 0x0040, 0x2bb2: 0x0040, 0x2bb3: 0x0040, 0x2bb4: 0x0040, 0x2bb5: 0x0040, - 0x2bb6: 0x0040, 0x2bb7: 0x0040, 0x2bb8: 0x0040, 0x2bb9: 0x0040, 0x2bba: 0x0040, 0x2bbb: 0x0040, - // Block 0xaf, offset 0x2bc0 - 0x2bc0: 0x008c, 0x2bc1: 0x008c, 0x2bc2: 0x008c, 0x2bc3: 0x008c, 0x2bc4: 0x008c, 0x2bc5: 0x008c, - 0x2bc6: 0x008c, 0x2bc7: 0x008c, 0x2bc8: 0x008c, 0x2bc9: 0x008c, 0x2bca: 0x008c, 0x2bcb: 0x008c, - 0x2bcc: 0x008c, 0x2bcd: 0x008c, 0x2bce: 0x00cc, 0x2bcf: 0x00cc, 0x2bd0: 0x008c, 0x2bd1: 0x00cc, - 0x2bd2: 0x008c, 0x2bd3: 0x00cc, 0x2bd4: 0x00cc, 0x2bd5: 0x008c, 0x2bd6: 0x008c, 0x2bd7: 0x008c, - 0x2bd8: 0x008c, 0x2bd9: 0x008c, 0x2bda: 0x008c, 0x2bdb: 0x008c, 0x2bdc: 0x008c, 0x2bdd: 0x008c, - 0x2bde: 0x008c, 0x2bdf: 0x00cc, 0x2be0: 0x008c, 0x2be1: 0x00cc, 0x2be2: 0x008c, 0x2be3: 0x00cc, - 0x2be4: 0x00cc, 0x2be5: 0x008c, 0x2be6: 0x008c, 0x2be7: 0x00cc, 0x2be8: 0x00cc, 0x2be9: 0x00cc, - 0x2bea: 0x008c, 0x2beb: 0x008c, 0x2bec: 0x008c, 0x2bed: 0x008c, 0x2bee: 0x008c, 0x2bef: 0x008c, - 0x2bf0: 0x008c, 0x2bf1: 0x008c, 0x2bf2: 0x008c, 0x2bf3: 0x008c, 0x2bf4: 0x008c, 0x2bf5: 0x008c, - 0x2bf6: 0x008c, 0x2bf7: 0x008c, 0x2bf8: 0x008c, 0x2bf9: 0x008c, 0x2bfa: 0x008c, 0x2bfb: 0x008c, - 0x2bfc: 0x008c, 0x2bfd: 0x008c, 0x2bfe: 0x008c, 0x2bff: 0x008c, - // Block 0xb0, offset 0x2c00 - 0x2c00: 0x008c, 0x2c01: 0x008c, 0x2c02: 0x008c, 0x2c03: 0x008c, 0x2c04: 0x008c, 0x2c05: 0x008c, - 0x2c06: 0x008c, 0x2c07: 0x008c, 0x2c08: 0x008c, 0x2c09: 0x008c, 0x2c0a: 0x008c, 0x2c0b: 0x008c, - 0x2c0c: 0x008c, 0x2c0d: 0x008c, 0x2c0e: 0x008c, 0x2c0f: 0x008c, 0x2c10: 0x008c, 0x2c11: 0x008c, - 0x2c12: 0x008c, 0x2c13: 0x008c, 0x2c14: 0x008c, 0x2c15: 0x008c, 0x2c16: 0x008c, 0x2c17: 0x008c, - 0x2c18: 0x008c, 0x2c19: 0x008c, 0x2c1a: 0x008c, 0x2c1b: 0x008c, 0x2c1c: 0x008c, 0x2c1d: 0x008c, - 0x2c1e: 0x008c, 0x2c1f: 0x008c, 0x2c20: 0x008c, 0x2c21: 0x008c, 0x2c22: 0x008c, 0x2c23: 0x008c, - 0x2c24: 0x008c, 0x2c25: 0x008c, 0x2c26: 0x008c, 0x2c27: 0x008c, 0x2c28: 
0x008c, 0x2c29: 0x008c, - 0x2c2a: 0x008c, 0x2c2b: 0x008c, 0x2c2c: 0x008c, 0x2c2d: 0x008c, - 0x2c30: 0x008c, 0x2c31: 0x008c, 0x2c32: 0x008c, 0x2c33: 0x008c, 0x2c34: 0x008c, 0x2c35: 0x008c, - 0x2c36: 0x008c, 0x2c37: 0x008c, 0x2c38: 0x008c, 0x2c39: 0x008c, 0x2c3a: 0x008c, 0x2c3b: 0x008c, - 0x2c3c: 0x008c, 0x2c3d: 0x008c, 0x2c3e: 0x008c, 0x2c3f: 0x008c, - // Block 0xb1, offset 0x2c40 - 0x2c40: 0x008c, 0x2c41: 0x008c, 0x2c42: 0x008c, 0x2c43: 0x008c, 0x2c44: 0x008c, 0x2c45: 0x008c, - 0x2c46: 0x008c, 0x2c47: 0x008c, 0x2c48: 0x008c, 0x2c49: 0x008c, 0x2c4a: 0x008c, 0x2c4b: 0x008c, - 0x2c4c: 0x008c, 0x2c4d: 0x008c, 0x2c4e: 0x008c, 0x2c4f: 0x008c, 0x2c50: 0x008c, 0x2c51: 0x008c, - 0x2c52: 0x008c, 0x2c53: 0x008c, 0x2c54: 0x008c, 0x2c55: 0x008c, 0x2c56: 0x008c, 0x2c57: 0x008c, - 0x2c58: 0x008c, 0x2c59: 0x008c, - // Block 0xb2, offset 0x2c80 - 0x2c80: 0x0080, 0x2c81: 0x0080, 0x2c82: 0x0080, 0x2c83: 0x0080, 0x2c84: 0x0080, 0x2c85: 0x0080, - 0x2c86: 0x0080, - 0x2c93: 0x0080, 0x2c94: 0x0080, 0x2c95: 0x0080, 0x2c96: 0x0080, 0x2c97: 0x0080, - 0x2c9d: 0x008a, - 0x2c9e: 0x00cb, 0x2c9f: 0x008a, 0x2ca0: 0x008a, 0x2ca1: 0x008a, 0x2ca2: 0x008a, 0x2ca3: 0x008a, - 0x2ca4: 0x008a, 0x2ca5: 0x008a, 0x2ca6: 0x008a, 0x2ca7: 0x008a, 0x2ca8: 0x008a, 0x2ca9: 0x008a, - 0x2caa: 0x008a, 0x2cab: 0x008a, 0x2cac: 0x008a, 0x2cad: 0x008a, 0x2cae: 0x008a, 0x2caf: 0x008a, - 0x2cb0: 0x008a, 0x2cb1: 0x008a, 0x2cb2: 0x008a, 0x2cb3: 0x008a, 0x2cb4: 0x008a, 0x2cb5: 0x008a, - 0x2cb6: 0x008a, 0x2cb8: 0x008a, 0x2cb9: 0x008a, 0x2cba: 0x008a, 0x2cbb: 0x008a, - 0x2cbc: 0x008a, 0x2cbe: 0x008a, - // Block 0xb3, offset 0x2cc0 - 0x2cc0: 0x008a, 0x2cc1: 0x008a, 0x2cc3: 0x008a, 0x2cc4: 0x008a, - 0x2cc6: 0x008a, 0x2cc7: 0x008a, 0x2cc8: 0x008a, 0x2cc9: 0x008a, 0x2cca: 0x008a, 0x2ccb: 0x008a, - 0x2ccc: 0x008a, 0x2ccd: 0x008a, 0x2cce: 0x008a, 0x2ccf: 0x008a, 0x2cd0: 0x0080, 0x2cd1: 0x0080, - 0x2cd2: 0x0080, 0x2cd3: 0x0080, 0x2cd4: 0x0080, 0x2cd5: 0x0080, 0x2cd6: 0x0080, 0x2cd7: 0x0080, - 0x2cd8: 0x0080, 0x2cd9: 0x0080, 0x2cda: 0x0080, 0x2cdb: 0x0080, 0x2cdc: 0x0080, 0x2cdd: 0x0080, - 0x2cde: 0x0080, 0x2cdf: 0x0080, 0x2ce0: 0x0080, 0x2ce1: 0x0080, 0x2ce2: 0x0080, 0x2ce3: 0x0080, - 0x2ce4: 0x0080, 0x2ce5: 0x0080, 0x2ce6: 0x0080, 0x2ce7: 0x0080, 0x2ce8: 0x0080, 0x2ce9: 0x0080, - 0x2cea: 0x0080, 0x2ceb: 0x0080, 0x2cec: 0x0080, 0x2ced: 0x0080, 0x2cee: 0x0080, 0x2cef: 0x0080, - 0x2cf0: 0x0080, 0x2cf1: 0x0080, 0x2cf2: 0x0080, 0x2cf3: 0x0080, 0x2cf4: 0x0080, 0x2cf5: 0x0080, - 0x2cf6: 0x0080, 0x2cf7: 0x0080, 0x2cf8: 0x0080, 0x2cf9: 0x0080, 0x2cfa: 0x0080, 0x2cfb: 0x0080, - 0x2cfc: 0x0080, 0x2cfd: 0x0080, 0x2cfe: 0x0080, 0x2cff: 0x0080, - // Block 0xb4, offset 0x2d00 - 0x2d00: 0x0080, 0x2d01: 0x0080, - 0x2d13: 0x0080, 0x2d14: 0x0080, 0x2d15: 0x0080, 0x2d16: 0x0080, 0x2d17: 0x0080, - 0x2d18: 0x0080, 0x2d19: 0x0080, 0x2d1a: 0x0080, 0x2d1b: 0x0080, 0x2d1c: 0x0080, 0x2d1d: 0x0080, - 0x2d1e: 0x0080, 0x2d1f: 0x0080, 0x2d20: 0x0080, 0x2d21: 0x0080, 0x2d22: 0x0080, 0x2d23: 0x0080, - 0x2d24: 0x0080, 0x2d25: 0x0080, 0x2d26: 0x0080, 0x2d27: 0x0080, 0x2d28: 0x0080, 0x2d29: 0x0080, - 0x2d2a: 0x0080, 0x2d2b: 0x0080, 0x2d2c: 0x0080, 0x2d2d: 0x0080, 0x2d2e: 0x0080, 0x2d2f: 0x0080, - 0x2d30: 0x0080, 0x2d31: 0x0080, 0x2d32: 0x0080, 0x2d33: 0x0080, 0x2d34: 0x0080, 0x2d35: 0x0080, - 0x2d36: 0x0080, 0x2d37: 0x0080, 0x2d38: 0x0080, 0x2d39: 0x0080, 0x2d3a: 0x0080, 0x2d3b: 0x0080, - 0x2d3c: 0x0080, 0x2d3d: 0x0080, 0x2d3e: 0x0080, 0x2d3f: 0x0080, - // Block 0xb5, offset 0x2d40 - 0x2d50: 0x0080, 0x2d51: 0x0080, - 0x2d52: 0x0080, 0x2d53: 0x0080, 0x2d54: 0x0080, 0x2d55: 0x0080, 0x2d56: 
0x0080, 0x2d57: 0x0080, - 0x2d58: 0x0080, 0x2d59: 0x0080, 0x2d5a: 0x0080, 0x2d5b: 0x0080, 0x2d5c: 0x0080, 0x2d5d: 0x0080, - 0x2d5e: 0x0080, 0x2d5f: 0x0080, 0x2d60: 0x0080, 0x2d61: 0x0080, 0x2d62: 0x0080, 0x2d63: 0x0080, - 0x2d64: 0x0080, 0x2d65: 0x0080, 0x2d66: 0x0080, 0x2d67: 0x0080, 0x2d68: 0x0080, 0x2d69: 0x0080, - 0x2d6a: 0x0080, 0x2d6b: 0x0080, 0x2d6c: 0x0080, 0x2d6d: 0x0080, 0x2d6e: 0x0080, 0x2d6f: 0x0080, - 0x2d70: 0x0080, 0x2d71: 0x0080, 0x2d72: 0x0080, 0x2d73: 0x0080, 0x2d74: 0x0080, 0x2d75: 0x0080, - 0x2d76: 0x0080, 0x2d77: 0x0080, 0x2d78: 0x0080, 0x2d79: 0x0080, 0x2d7a: 0x0080, 0x2d7b: 0x0080, - 0x2d7c: 0x0080, 0x2d7d: 0x0080, 0x2d7e: 0x0080, 0x2d7f: 0x0080, - // Block 0xb6, offset 0x2d80 - 0x2d80: 0x0080, 0x2d81: 0x0080, 0x2d82: 0x0080, 0x2d83: 0x0080, 0x2d84: 0x0080, 0x2d85: 0x0080, - 0x2d86: 0x0080, 0x2d87: 0x0080, 0x2d88: 0x0080, 0x2d89: 0x0080, 0x2d8a: 0x0080, 0x2d8b: 0x0080, - 0x2d8c: 0x0080, 0x2d8d: 0x0080, 0x2d8e: 0x0080, 0x2d8f: 0x0080, - 0x2d92: 0x0080, 0x2d93: 0x0080, 0x2d94: 0x0080, 0x2d95: 0x0080, 0x2d96: 0x0080, 0x2d97: 0x0080, - 0x2d98: 0x0080, 0x2d99: 0x0080, 0x2d9a: 0x0080, 0x2d9b: 0x0080, 0x2d9c: 0x0080, 0x2d9d: 0x0080, - 0x2d9e: 0x0080, 0x2d9f: 0x0080, 0x2da0: 0x0080, 0x2da1: 0x0080, 0x2da2: 0x0080, 0x2da3: 0x0080, - 0x2da4: 0x0080, 0x2da5: 0x0080, 0x2da6: 0x0080, 0x2da7: 0x0080, 0x2da8: 0x0080, 0x2da9: 0x0080, - 0x2daa: 0x0080, 0x2dab: 0x0080, 0x2dac: 0x0080, 0x2dad: 0x0080, 0x2dae: 0x0080, 0x2daf: 0x0080, - 0x2db0: 0x0080, 0x2db1: 0x0080, 0x2db2: 0x0080, 0x2db3: 0x0080, 0x2db4: 0x0080, 0x2db5: 0x0080, - 0x2db6: 0x0080, 0x2db7: 0x0080, 0x2db8: 0x0080, 0x2db9: 0x0080, 0x2dba: 0x0080, 0x2dbb: 0x0080, - 0x2dbc: 0x0080, 0x2dbd: 0x0080, 0x2dbe: 0x0080, 0x2dbf: 0x0080, - // Block 0xb7, offset 0x2dc0 - 0x2dc0: 0x0080, 0x2dc1: 0x0080, 0x2dc2: 0x0080, 0x2dc3: 0x0080, 0x2dc4: 0x0080, 0x2dc5: 0x0080, - 0x2dc6: 0x0080, 0x2dc7: 0x0080, - 0x2df0: 0x0080, 0x2df1: 0x0080, 0x2df2: 0x0080, 0x2df3: 0x0080, 0x2df4: 0x0080, 0x2df5: 0x0080, - 0x2df6: 0x0080, 0x2df7: 0x0080, 0x2df8: 0x0080, 0x2df9: 0x0080, 0x2dfa: 0x0080, 0x2dfb: 0x0080, - 0x2dfc: 0x0080, 0x2dfd: 0x0080, - // Block 0xb8, offset 0x2e00 - 0x2e00: 0x0040, 0x2e01: 0x0040, 0x2e02: 0x0040, 0x2e03: 0x0040, 0x2e04: 0x0040, 0x2e05: 0x0040, - 0x2e06: 0x0040, 0x2e07: 0x0040, 0x2e08: 0x0040, 0x2e09: 0x0040, 0x2e0a: 0x0040, 0x2e0b: 0x0040, - 0x2e0c: 0x0040, 0x2e0d: 0x0040, 0x2e0e: 0x0040, 0x2e0f: 0x0040, 0x2e10: 0x0080, 0x2e11: 0x0080, - 0x2e12: 0x0080, 0x2e13: 0x0080, 0x2e14: 0x0080, 0x2e15: 0x0080, 0x2e16: 0x0080, 0x2e17: 0x0080, - 0x2e18: 0x0080, 0x2e19: 0x0080, - 0x2e20: 0x00c3, 0x2e21: 0x00c3, 0x2e22: 0x00c3, 0x2e23: 0x00c3, - 0x2e24: 0x00c3, 0x2e25: 0x00c3, 0x2e26: 0x00c3, 0x2e27: 0x00c3, 0x2e28: 0x00c3, 0x2e29: 0x00c3, - 0x2e2a: 0x00c3, 0x2e2b: 0x00c3, 0x2e2c: 0x00c3, 0x2e2d: 0x00c3, 0x2e2e: 0x00c3, 0x2e2f: 0x00c3, - 0x2e30: 0x0080, 0x2e31: 0x0080, 0x2e32: 0x0080, 0x2e33: 0x0080, 0x2e34: 0x0080, 0x2e35: 0x0080, - 0x2e36: 0x0080, 0x2e37: 0x0080, 0x2e38: 0x0080, 0x2e39: 0x0080, 0x2e3a: 0x0080, 0x2e3b: 0x0080, - 0x2e3c: 0x0080, 0x2e3d: 0x0080, 0x2e3e: 0x0080, 0x2e3f: 0x0080, - // Block 0xb9, offset 0x2e40 - 0x2e40: 0x0080, 0x2e41: 0x0080, 0x2e42: 0x0080, 0x2e43: 0x0080, 0x2e44: 0x0080, 0x2e45: 0x0080, - 0x2e46: 0x0080, 0x2e47: 0x0080, 0x2e48: 0x0080, 0x2e49: 0x0080, 0x2e4a: 0x0080, 0x2e4b: 0x0080, - 0x2e4c: 0x0080, 0x2e4d: 0x0080, 0x2e4e: 0x0080, 0x2e4f: 0x0080, 0x2e50: 0x0080, 0x2e51: 0x0080, - 0x2e52: 0x0080, 0x2e54: 0x0080, 0x2e55: 0x0080, 0x2e56: 0x0080, 0x2e57: 0x0080, - 0x2e58: 0x0080, 0x2e59: 0x0080, 0x2e5a: 0x0080, 
0x2e5b: 0x0080, 0x2e5c: 0x0080, 0x2e5d: 0x0080, - 0x2e5e: 0x0080, 0x2e5f: 0x0080, 0x2e60: 0x0080, 0x2e61: 0x0080, 0x2e62: 0x0080, 0x2e63: 0x0080, - 0x2e64: 0x0080, 0x2e65: 0x0080, 0x2e66: 0x0080, 0x2e68: 0x0080, 0x2e69: 0x0080, - 0x2e6a: 0x0080, 0x2e6b: 0x0080, - 0x2e70: 0x0080, 0x2e71: 0x0080, 0x2e72: 0x0080, 0x2e73: 0x00c0, 0x2e74: 0x0080, - 0x2e76: 0x0080, 0x2e77: 0x0080, 0x2e78: 0x0080, 0x2e79: 0x0080, 0x2e7a: 0x0080, 0x2e7b: 0x0080, - 0x2e7c: 0x0080, 0x2e7d: 0x0080, 0x2e7e: 0x0080, 0x2e7f: 0x0080, - // Block 0xba, offset 0x2e80 - 0x2e80: 0x0080, 0x2e81: 0x0080, 0x2e82: 0x0080, 0x2e83: 0x0080, 0x2e84: 0x0080, 0x2e85: 0x0080, - 0x2e86: 0x0080, 0x2e87: 0x0080, 0x2e88: 0x0080, 0x2e89: 0x0080, 0x2e8a: 0x0080, 0x2e8b: 0x0080, - 0x2e8c: 0x0080, 0x2e8d: 0x0080, 0x2e8e: 0x0080, 0x2e8f: 0x0080, 0x2e90: 0x0080, 0x2e91: 0x0080, - 0x2e92: 0x0080, 0x2e93: 0x0080, 0x2e94: 0x0080, 0x2e95: 0x0080, 0x2e96: 0x0080, 0x2e97: 0x0080, - 0x2e98: 0x0080, 0x2e99: 0x0080, 0x2e9a: 0x0080, 0x2e9b: 0x0080, 0x2e9c: 0x0080, 0x2e9d: 0x0080, - 0x2e9e: 0x0080, 0x2e9f: 0x0080, 0x2ea0: 0x0080, 0x2ea1: 0x0080, 0x2ea2: 0x0080, 0x2ea3: 0x0080, - 0x2ea4: 0x0080, 0x2ea5: 0x0080, 0x2ea6: 0x0080, 0x2ea7: 0x0080, 0x2ea8: 0x0080, 0x2ea9: 0x0080, - 0x2eaa: 0x0080, 0x2eab: 0x0080, 0x2eac: 0x0080, 0x2ead: 0x0080, 0x2eae: 0x0080, 0x2eaf: 0x0080, - 0x2eb0: 0x0080, 0x2eb1: 0x0080, 0x2eb2: 0x0080, 0x2eb3: 0x0080, 0x2eb4: 0x0080, 0x2eb5: 0x0080, - 0x2eb6: 0x0080, 0x2eb7: 0x0080, 0x2eb8: 0x0080, 0x2eb9: 0x0080, 0x2eba: 0x0080, 0x2ebb: 0x0080, - 0x2ebc: 0x0080, 0x2ebf: 0x0040, - // Block 0xbb, offset 0x2ec0 - 0x2ec1: 0x0080, 0x2ec2: 0x0080, 0x2ec3: 0x0080, 0x2ec4: 0x0080, 0x2ec5: 0x0080, - 0x2ec6: 0x0080, 0x2ec7: 0x0080, 0x2ec8: 0x0080, 0x2ec9: 0x0080, 0x2eca: 0x0080, 0x2ecb: 0x0080, - 0x2ecc: 0x0080, 0x2ecd: 0x0080, 0x2ece: 0x0080, 0x2ecf: 0x0080, 0x2ed0: 0x0080, 0x2ed1: 0x0080, - 0x2ed2: 0x0080, 0x2ed3: 0x0080, 0x2ed4: 0x0080, 0x2ed5: 0x0080, 0x2ed6: 0x0080, 0x2ed7: 0x0080, - 0x2ed8: 0x0080, 0x2ed9: 0x0080, 0x2eda: 0x0080, 0x2edb: 0x0080, 0x2edc: 0x0080, 0x2edd: 0x0080, - 0x2ede: 0x0080, 0x2edf: 0x0080, 0x2ee0: 0x0080, 0x2ee1: 0x0080, 0x2ee2: 0x0080, 0x2ee3: 0x0080, - 0x2ee4: 0x0080, 0x2ee5: 0x0080, 0x2ee6: 0x0080, 0x2ee7: 0x0080, 0x2ee8: 0x0080, 0x2ee9: 0x0080, - 0x2eea: 0x0080, 0x2eeb: 0x0080, 0x2eec: 0x0080, 0x2eed: 0x0080, 0x2eee: 0x0080, 0x2eef: 0x0080, - 0x2ef0: 0x0080, 0x2ef1: 0x0080, 0x2ef2: 0x0080, 0x2ef3: 0x0080, 0x2ef4: 0x0080, 0x2ef5: 0x0080, - 0x2ef6: 0x0080, 0x2ef7: 0x0080, 0x2ef8: 0x0080, 0x2ef9: 0x0080, 0x2efa: 0x0080, 0x2efb: 0x0080, - 0x2efc: 0x0080, 0x2efd: 0x0080, 0x2efe: 0x0080, 0x2eff: 0x0080, - // Block 0xbc, offset 0x2f00 - 0x2f00: 0x0080, 0x2f01: 0x0080, 0x2f02: 0x0080, 0x2f03: 0x0080, 0x2f04: 0x0080, 0x2f05: 0x0080, - 0x2f06: 0x0080, 0x2f07: 0x0080, 0x2f08: 0x0080, 0x2f09: 0x0080, 0x2f0a: 0x0080, 0x2f0b: 0x0080, - 0x2f0c: 0x0080, 0x2f0d: 0x0080, 0x2f0e: 0x0080, 0x2f0f: 0x0080, 0x2f10: 0x0080, 0x2f11: 0x0080, - 0x2f12: 0x0080, 0x2f13: 0x0080, 0x2f14: 0x0080, 0x2f15: 0x0080, 0x2f16: 0x0080, 0x2f17: 0x0080, - 0x2f18: 0x0080, 0x2f19: 0x0080, 0x2f1a: 0x0080, 0x2f1b: 0x0080, 0x2f1c: 0x0080, 0x2f1d: 0x0080, - 0x2f1e: 0x0080, 0x2f1f: 0x0080, 0x2f20: 0x0080, 0x2f21: 0x0080, 0x2f22: 0x0080, 0x2f23: 0x0080, - 0x2f24: 0x0080, 0x2f25: 0x0080, 0x2f26: 0x008c, 0x2f27: 0x008c, 0x2f28: 0x008c, 0x2f29: 0x008c, - 0x2f2a: 0x008c, 0x2f2b: 0x008c, 0x2f2c: 0x008c, 0x2f2d: 0x008c, 0x2f2e: 0x008c, 0x2f2f: 0x008c, - 0x2f30: 0x0080, 0x2f31: 0x008c, 0x2f32: 0x008c, 0x2f33: 0x008c, 0x2f34: 0x008c, 0x2f35: 0x008c, - 0x2f36: 0x008c, 0x2f37: 
0x008c, 0x2f38: 0x008c, 0x2f39: 0x008c, 0x2f3a: 0x008c, 0x2f3b: 0x008c, - 0x2f3c: 0x008c, 0x2f3d: 0x008c, 0x2f3e: 0x008c, 0x2f3f: 0x008c, - // Block 0xbd, offset 0x2f40 - 0x2f40: 0x008c, 0x2f41: 0x008c, 0x2f42: 0x008c, 0x2f43: 0x008c, 0x2f44: 0x008c, 0x2f45: 0x008c, - 0x2f46: 0x008c, 0x2f47: 0x008c, 0x2f48: 0x008c, 0x2f49: 0x008c, 0x2f4a: 0x008c, 0x2f4b: 0x008c, - 0x2f4c: 0x008c, 0x2f4d: 0x008c, 0x2f4e: 0x008c, 0x2f4f: 0x008c, 0x2f50: 0x008c, 0x2f51: 0x008c, - 0x2f52: 0x008c, 0x2f53: 0x008c, 0x2f54: 0x008c, 0x2f55: 0x008c, 0x2f56: 0x008c, 0x2f57: 0x008c, - 0x2f58: 0x008c, 0x2f59: 0x008c, 0x2f5a: 0x008c, 0x2f5b: 0x008c, 0x2f5c: 0x008c, 0x2f5d: 0x008c, - 0x2f5e: 0x0080, 0x2f5f: 0x0080, 0x2f60: 0x0040, 0x2f61: 0x0080, 0x2f62: 0x0080, 0x2f63: 0x0080, - 0x2f64: 0x0080, 0x2f65: 0x0080, 0x2f66: 0x0080, 0x2f67: 0x0080, 0x2f68: 0x0080, 0x2f69: 0x0080, - 0x2f6a: 0x0080, 0x2f6b: 0x0080, 0x2f6c: 0x0080, 0x2f6d: 0x0080, 0x2f6e: 0x0080, 0x2f6f: 0x0080, - 0x2f70: 0x0080, 0x2f71: 0x0080, 0x2f72: 0x0080, 0x2f73: 0x0080, 0x2f74: 0x0080, 0x2f75: 0x0080, - 0x2f76: 0x0080, 0x2f77: 0x0080, 0x2f78: 0x0080, 0x2f79: 0x0080, 0x2f7a: 0x0080, 0x2f7b: 0x0080, - 0x2f7c: 0x0080, 0x2f7d: 0x0080, 0x2f7e: 0x0080, - // Block 0xbe, offset 0x2f80 - 0x2f82: 0x0080, 0x2f83: 0x0080, 0x2f84: 0x0080, 0x2f85: 0x0080, - 0x2f86: 0x0080, 0x2f87: 0x0080, 0x2f8a: 0x0080, 0x2f8b: 0x0080, - 0x2f8c: 0x0080, 0x2f8d: 0x0080, 0x2f8e: 0x0080, 0x2f8f: 0x0080, - 0x2f92: 0x0080, 0x2f93: 0x0080, 0x2f94: 0x0080, 0x2f95: 0x0080, 0x2f96: 0x0080, 0x2f97: 0x0080, - 0x2f9a: 0x0080, 0x2f9b: 0x0080, 0x2f9c: 0x0080, - 0x2fa0: 0x0080, 0x2fa1: 0x0080, 0x2fa2: 0x0080, 0x2fa3: 0x0080, - 0x2fa4: 0x0080, 0x2fa5: 0x0080, 0x2fa6: 0x0080, 0x2fa8: 0x0080, 0x2fa9: 0x0080, - 0x2faa: 0x0080, 0x2fab: 0x0080, 0x2fac: 0x0080, 0x2fad: 0x0080, 0x2fae: 0x0080, - 0x2fb9: 0x0040, 0x2fba: 0x0040, 0x2fbb: 0x0040, - 0x2fbc: 0x0080, 0x2fbd: 0x0080, - // Block 0xbf, offset 0x2fc0 - 0x2fc0: 0x00c0, 0x2fc1: 0x00c0, 0x2fc2: 0x00c0, 0x2fc3: 0x00c0, 0x2fc4: 0x00c0, 0x2fc5: 0x00c0, - 0x2fc6: 0x00c0, 0x2fc7: 0x00c0, 0x2fc8: 0x00c0, 0x2fc9: 0x00c0, 0x2fca: 0x00c0, 0x2fcb: 0x00c0, - 0x2fcd: 0x00c0, 0x2fce: 0x00c0, 0x2fcf: 0x00c0, 0x2fd0: 0x00c0, 0x2fd1: 0x00c0, - 0x2fd2: 0x00c0, 0x2fd3: 0x00c0, 0x2fd4: 0x00c0, 0x2fd5: 0x00c0, 0x2fd6: 0x00c0, 0x2fd7: 0x00c0, - 0x2fd8: 0x00c0, 0x2fd9: 0x00c0, 0x2fda: 0x00c0, 0x2fdb: 0x00c0, 0x2fdc: 0x00c0, 0x2fdd: 0x00c0, - 0x2fde: 0x00c0, 0x2fdf: 0x00c0, 0x2fe0: 0x00c0, 0x2fe1: 0x00c0, 0x2fe2: 0x00c0, 0x2fe3: 0x00c0, - 0x2fe4: 0x00c0, 0x2fe5: 0x00c0, 0x2fe6: 0x00c0, 0x2fe8: 0x00c0, 0x2fe9: 0x00c0, - 0x2fea: 0x00c0, 0x2feb: 0x00c0, 0x2fec: 0x00c0, 0x2fed: 0x00c0, 0x2fee: 0x00c0, 0x2fef: 0x00c0, - 0x2ff0: 0x00c0, 0x2ff1: 0x00c0, 0x2ff2: 0x00c0, 0x2ff3: 0x00c0, 0x2ff4: 0x00c0, 0x2ff5: 0x00c0, - 0x2ff6: 0x00c0, 0x2ff7: 0x00c0, 0x2ff8: 0x00c0, 0x2ff9: 0x00c0, 0x2ffa: 0x00c0, - 0x2ffc: 0x00c0, 0x2ffd: 0x00c0, 0x2fff: 0x00c0, - // Block 0xc0, offset 0x3000 - 0x3000: 0x00c0, 0x3001: 0x00c0, 0x3002: 0x00c0, 0x3003: 0x00c0, 0x3004: 0x00c0, 0x3005: 0x00c0, - 0x3006: 0x00c0, 0x3007: 0x00c0, 0x3008: 0x00c0, 0x3009: 0x00c0, 0x300a: 0x00c0, 0x300b: 0x00c0, - 0x300c: 0x00c0, 0x300d: 0x00c0, 0x3010: 0x00c0, 0x3011: 0x00c0, - 0x3012: 0x00c0, 0x3013: 0x00c0, 0x3014: 0x00c0, 0x3015: 0x00c0, 0x3016: 0x00c0, 0x3017: 0x00c0, - 0x3018: 0x00c0, 0x3019: 0x00c0, 0x301a: 0x00c0, 0x301b: 0x00c0, 0x301c: 0x00c0, 0x301d: 0x00c0, - // Block 0xc1, offset 0x3040 - 0x3040: 0x00c0, 0x3041: 0x00c0, 0x3042: 0x00c0, 0x3043: 0x00c0, 0x3044: 0x00c0, 0x3045: 0x00c0, - 0x3046: 0x00c0, 0x3047: 0x00c0, 
0x3048: 0x00c0, 0x3049: 0x00c0, 0x304a: 0x00c0, 0x304b: 0x00c0, - 0x304c: 0x00c0, 0x304d: 0x00c0, 0x304e: 0x00c0, 0x304f: 0x00c0, 0x3050: 0x00c0, 0x3051: 0x00c0, - 0x3052: 0x00c0, 0x3053: 0x00c0, 0x3054: 0x00c0, 0x3055: 0x00c0, 0x3056: 0x00c0, 0x3057: 0x00c0, - 0x3058: 0x00c0, 0x3059: 0x00c0, 0x305a: 0x00c0, 0x305b: 0x00c0, 0x305c: 0x00c0, 0x305d: 0x00c0, - 0x305e: 0x00c0, 0x305f: 0x00c0, 0x3060: 0x00c0, 0x3061: 0x00c0, 0x3062: 0x00c0, 0x3063: 0x00c0, - 0x3064: 0x00c0, 0x3065: 0x00c0, 0x3066: 0x00c0, 0x3067: 0x00c0, 0x3068: 0x00c0, 0x3069: 0x00c0, - 0x306a: 0x00c0, 0x306b: 0x00c0, 0x306c: 0x00c0, 0x306d: 0x00c0, 0x306e: 0x00c0, 0x306f: 0x00c0, - 0x3070: 0x00c0, 0x3071: 0x00c0, 0x3072: 0x00c0, 0x3073: 0x00c0, 0x3074: 0x00c0, 0x3075: 0x00c0, - 0x3076: 0x00c0, 0x3077: 0x00c0, 0x3078: 0x00c0, 0x3079: 0x00c0, 0x307a: 0x00c0, - // Block 0xc2, offset 0x3080 - 0x3080: 0x0080, 0x3081: 0x0080, 0x3082: 0x0080, - 0x3087: 0x0080, 0x3088: 0x0080, 0x3089: 0x0080, 0x308a: 0x0080, 0x308b: 0x0080, - 0x308c: 0x0080, 0x308d: 0x0080, 0x308e: 0x0080, 0x308f: 0x0080, 0x3090: 0x0080, 0x3091: 0x0080, - 0x3092: 0x0080, 0x3093: 0x0080, 0x3094: 0x0080, 0x3095: 0x0080, 0x3096: 0x0080, 0x3097: 0x0080, - 0x3098: 0x0080, 0x3099: 0x0080, 0x309a: 0x0080, 0x309b: 0x0080, 0x309c: 0x0080, 0x309d: 0x0080, - 0x309e: 0x0080, 0x309f: 0x0080, 0x30a0: 0x0080, 0x30a1: 0x0080, 0x30a2: 0x0080, 0x30a3: 0x0080, - 0x30a4: 0x0080, 0x30a5: 0x0080, 0x30a6: 0x0080, 0x30a7: 0x0080, 0x30a8: 0x0080, 0x30a9: 0x0080, - 0x30aa: 0x0080, 0x30ab: 0x0080, 0x30ac: 0x0080, 0x30ad: 0x0080, 0x30ae: 0x0080, 0x30af: 0x0080, - 0x30b0: 0x0080, 0x30b1: 0x0080, 0x30b2: 0x0080, 0x30b3: 0x0080, - 0x30b7: 0x0080, 0x30b8: 0x0080, 0x30b9: 0x0080, 0x30ba: 0x0080, 0x30bb: 0x0080, - 0x30bc: 0x0080, 0x30bd: 0x0080, 0x30be: 0x0080, 0x30bf: 0x0080, - // Block 0xc3, offset 0x30c0 - 0x30c0: 0x0088, 0x30c1: 0x0088, 0x30c2: 0x0088, 0x30c3: 0x0088, 0x30c4: 0x0088, 0x30c5: 0x0088, - 0x30c6: 0x0088, 0x30c7: 0x0088, 0x30c8: 0x0088, 0x30c9: 0x0088, 0x30ca: 0x0088, 0x30cb: 0x0088, - 0x30cc: 0x0088, 0x30cd: 0x0088, 0x30ce: 0x0088, 0x30cf: 0x0088, 0x30d0: 0x0088, 0x30d1: 0x0088, - 0x30d2: 0x0088, 0x30d3: 0x0088, 0x30d4: 0x0088, 0x30d5: 0x0088, 0x30d6: 0x0088, 0x30d7: 0x0088, - 0x30d8: 0x0088, 0x30d9: 0x0088, 0x30da: 0x0088, 0x30db: 0x0088, 0x30dc: 0x0088, 0x30dd: 0x0088, - 0x30de: 0x0088, 0x30df: 0x0088, 0x30e0: 0x0088, 0x30e1: 0x0088, 0x30e2: 0x0088, 0x30e3: 0x0088, - 0x30e4: 0x0088, 0x30e5: 0x0088, 0x30e6: 0x0088, 0x30e7: 0x0088, 0x30e8: 0x0088, 0x30e9: 0x0088, - 0x30ea: 0x0088, 0x30eb: 0x0088, 0x30ec: 0x0088, 0x30ed: 0x0088, 0x30ee: 0x0088, 0x30ef: 0x0088, - 0x30f0: 0x0088, 0x30f1: 0x0088, 0x30f2: 0x0088, 0x30f3: 0x0088, 0x30f4: 0x0088, 0x30f5: 0x0088, - 0x30f6: 0x0088, 0x30f7: 0x0088, 0x30f8: 0x0088, 0x30f9: 0x0088, 0x30fa: 0x0088, 0x30fb: 0x0088, - 0x30fc: 0x0088, 0x30fd: 0x0088, 0x30fe: 0x0088, 0x30ff: 0x0088, - // Block 0xc4, offset 0x3100 - 0x3100: 0x0088, 0x3101: 0x0088, 0x3102: 0x0088, 0x3103: 0x0088, 0x3104: 0x0088, 0x3105: 0x0088, - 0x3106: 0x0088, 0x3107: 0x0088, 0x3108: 0x0088, 0x3109: 0x0088, 0x310a: 0x0088, 0x310b: 0x0088, - 0x310c: 0x0088, 0x310d: 0x0088, 0x310e: 0x0088, 0x3110: 0x0080, 0x3111: 0x0080, - 0x3112: 0x0080, 0x3113: 0x0080, 0x3114: 0x0080, 0x3115: 0x0080, 0x3116: 0x0080, 0x3117: 0x0080, - 0x3118: 0x0080, 0x3119: 0x0080, 0x311a: 0x0080, 0x311b: 0x0080, - 0x3120: 0x0088, - // Block 0xc5, offset 0x3140 - 0x3150: 0x0080, 0x3151: 0x0080, - 0x3152: 0x0080, 0x3153: 0x0080, 0x3154: 0x0080, 0x3155: 0x0080, 0x3156: 0x0080, 0x3157: 0x0080, - 0x3158: 0x0080, 0x3159: 
0x0080, 0x315a: 0x0080, 0x315b: 0x0080, 0x315c: 0x0080, 0x315d: 0x0080, - 0x315e: 0x0080, 0x315f: 0x0080, 0x3160: 0x0080, 0x3161: 0x0080, 0x3162: 0x0080, 0x3163: 0x0080, - 0x3164: 0x0080, 0x3165: 0x0080, 0x3166: 0x0080, 0x3167: 0x0080, 0x3168: 0x0080, 0x3169: 0x0080, - 0x316a: 0x0080, 0x316b: 0x0080, 0x316c: 0x0080, 0x316d: 0x0080, 0x316e: 0x0080, 0x316f: 0x0080, - 0x3170: 0x0080, 0x3171: 0x0080, 0x3172: 0x0080, 0x3173: 0x0080, 0x3174: 0x0080, 0x3175: 0x0080, - 0x3176: 0x0080, 0x3177: 0x0080, 0x3178: 0x0080, 0x3179: 0x0080, 0x317a: 0x0080, 0x317b: 0x0080, - 0x317c: 0x0080, 0x317d: 0x00c3, - // Block 0xc6, offset 0x3180 - 0x3180: 0x00c0, 0x3181: 0x00c0, 0x3182: 0x00c0, 0x3183: 0x00c0, 0x3184: 0x00c0, 0x3185: 0x00c0, - 0x3186: 0x00c0, 0x3187: 0x00c0, 0x3188: 0x00c0, 0x3189: 0x00c0, 0x318a: 0x00c0, 0x318b: 0x00c0, - 0x318c: 0x00c0, 0x318d: 0x00c0, 0x318e: 0x00c0, 0x318f: 0x00c0, 0x3190: 0x00c0, 0x3191: 0x00c0, - 0x3192: 0x00c0, 0x3193: 0x00c0, 0x3194: 0x00c0, 0x3195: 0x00c0, 0x3196: 0x00c0, 0x3197: 0x00c0, - 0x3198: 0x00c0, 0x3199: 0x00c0, 0x319a: 0x00c0, 0x319b: 0x00c0, 0x319c: 0x00c0, - 0x31a0: 0x00c0, 0x31a1: 0x00c0, 0x31a2: 0x00c0, 0x31a3: 0x00c0, - 0x31a4: 0x00c0, 0x31a5: 0x00c0, 0x31a6: 0x00c0, 0x31a7: 0x00c0, 0x31a8: 0x00c0, 0x31a9: 0x00c0, - 0x31aa: 0x00c0, 0x31ab: 0x00c0, 0x31ac: 0x00c0, 0x31ad: 0x00c0, 0x31ae: 0x00c0, 0x31af: 0x00c0, - 0x31b0: 0x00c0, 0x31b1: 0x00c0, 0x31b2: 0x00c0, 0x31b3: 0x00c0, 0x31b4: 0x00c0, 0x31b5: 0x00c0, - 0x31b6: 0x00c0, 0x31b7: 0x00c0, 0x31b8: 0x00c0, 0x31b9: 0x00c0, 0x31ba: 0x00c0, 0x31bb: 0x00c0, - 0x31bc: 0x00c0, 0x31bd: 0x00c0, 0x31be: 0x00c0, 0x31bf: 0x00c0, - // Block 0xc7, offset 0x31c0 - 0x31c0: 0x00c0, 0x31c1: 0x00c0, 0x31c2: 0x00c0, 0x31c3: 0x00c0, 0x31c4: 0x00c0, 0x31c5: 0x00c0, - 0x31c6: 0x00c0, 0x31c7: 0x00c0, 0x31c8: 0x00c0, 0x31c9: 0x00c0, 0x31ca: 0x00c0, 0x31cb: 0x00c0, - 0x31cc: 0x00c0, 0x31cd: 0x00c0, 0x31ce: 0x00c0, 0x31cf: 0x00c0, 0x31d0: 0x00c0, - 0x31e0: 0x00c3, 0x31e1: 0x0080, 0x31e2: 0x0080, 0x31e3: 0x0080, - 0x31e4: 0x0080, 0x31e5: 0x0080, 0x31e6: 0x0080, 0x31e7: 0x0080, 0x31e8: 0x0080, 0x31e9: 0x0080, - 0x31ea: 0x0080, 0x31eb: 0x0080, 0x31ec: 0x0080, 0x31ed: 0x0080, 0x31ee: 0x0080, 0x31ef: 0x0080, - 0x31f0: 0x0080, 0x31f1: 0x0080, 0x31f2: 0x0080, 0x31f3: 0x0080, 0x31f4: 0x0080, 0x31f5: 0x0080, - 0x31f6: 0x0080, 0x31f7: 0x0080, 0x31f8: 0x0080, 0x31f9: 0x0080, 0x31fa: 0x0080, 0x31fb: 0x0080, - // Block 0xc8, offset 0x3200 - 0x3200: 0x00c0, 0x3201: 0x00c0, 0x3202: 0x00c0, 0x3203: 0x00c0, 0x3204: 0x00c0, 0x3205: 0x00c0, - 0x3206: 0x00c0, 0x3207: 0x00c0, 0x3208: 0x00c0, 0x3209: 0x00c0, 0x320a: 0x00c0, 0x320b: 0x00c0, - 0x320c: 0x00c0, 0x320d: 0x00c0, 0x320e: 0x00c0, 0x320f: 0x00c0, 0x3210: 0x00c0, 0x3211: 0x00c0, - 0x3212: 0x00c0, 0x3213: 0x00c0, 0x3214: 0x00c0, 0x3215: 0x00c0, 0x3216: 0x00c0, 0x3217: 0x00c0, - 0x3218: 0x00c0, 0x3219: 0x00c0, 0x321a: 0x00c0, 0x321b: 0x00c0, 0x321c: 0x00c0, 0x321d: 0x00c0, - 0x321e: 0x00c0, 0x321f: 0x00c0, 0x3220: 0x0080, 0x3221: 0x0080, 0x3222: 0x0080, 0x3223: 0x0080, - 0x3230: 0x00c0, 0x3231: 0x00c0, 0x3232: 0x00c0, 0x3233: 0x00c0, 0x3234: 0x00c0, 0x3235: 0x00c0, - 0x3236: 0x00c0, 0x3237: 0x00c0, 0x3238: 0x00c0, 0x3239: 0x00c0, 0x323a: 0x00c0, 0x323b: 0x00c0, - 0x323c: 0x00c0, 0x323d: 0x00c0, 0x323e: 0x00c0, 0x323f: 0x00c0, - // Block 0xc9, offset 0x3240 - 0x3240: 0x00c0, 0x3241: 0x0080, 0x3242: 0x00c0, 0x3243: 0x00c0, 0x3244: 0x00c0, 0x3245: 0x00c0, - 0x3246: 0x00c0, 0x3247: 0x00c0, 0x3248: 0x00c0, 0x3249: 0x00c0, 0x324a: 0x0080, - 0x3250: 0x00c0, 0x3251: 0x00c0, - 0x3252: 0x00c0, 0x3253: 0x00c0, 
0x3254: 0x00c0, 0x3255: 0x00c0, 0x3256: 0x00c0, 0x3257: 0x00c0, - 0x3258: 0x00c0, 0x3259: 0x00c0, 0x325a: 0x00c0, 0x325b: 0x00c0, 0x325c: 0x00c0, 0x325d: 0x00c0, - 0x325e: 0x00c0, 0x325f: 0x00c0, 0x3260: 0x00c0, 0x3261: 0x00c0, 0x3262: 0x00c0, 0x3263: 0x00c0, - 0x3264: 0x00c0, 0x3265: 0x00c0, 0x3266: 0x00c0, 0x3267: 0x00c0, 0x3268: 0x00c0, 0x3269: 0x00c0, - 0x326a: 0x00c0, 0x326b: 0x00c0, 0x326c: 0x00c0, 0x326d: 0x00c0, 0x326e: 0x00c0, 0x326f: 0x00c0, - 0x3270: 0x00c0, 0x3271: 0x00c0, 0x3272: 0x00c0, 0x3273: 0x00c0, 0x3274: 0x00c0, 0x3275: 0x00c0, - 0x3276: 0x00c3, 0x3277: 0x00c3, 0x3278: 0x00c3, 0x3279: 0x00c3, 0x327a: 0x00c3, - // Block 0xca, offset 0x3280 - 0x3280: 0x00c0, 0x3281: 0x00c0, 0x3282: 0x00c0, 0x3283: 0x00c0, 0x3284: 0x00c0, 0x3285: 0x00c0, - 0x3286: 0x00c0, 0x3287: 0x00c0, 0x3288: 0x00c0, 0x3289: 0x00c0, 0x328a: 0x00c0, 0x328b: 0x00c0, - 0x328c: 0x00c0, 0x328d: 0x00c0, 0x328e: 0x00c0, 0x328f: 0x00c0, 0x3290: 0x00c0, 0x3291: 0x00c0, - 0x3292: 0x00c0, 0x3293: 0x00c0, 0x3294: 0x00c0, 0x3295: 0x00c0, 0x3296: 0x00c0, 0x3297: 0x00c0, - 0x3298: 0x00c0, 0x3299: 0x00c0, 0x329a: 0x00c0, 0x329b: 0x00c0, 0x329c: 0x00c0, 0x329d: 0x00c0, - 0x329f: 0x0080, 0x32a0: 0x00c0, 0x32a1: 0x00c0, 0x32a2: 0x00c0, 0x32a3: 0x00c0, - 0x32a4: 0x00c0, 0x32a5: 0x00c0, 0x32a6: 0x00c0, 0x32a7: 0x00c0, 0x32a8: 0x00c0, 0x32a9: 0x00c0, - 0x32aa: 0x00c0, 0x32ab: 0x00c0, 0x32ac: 0x00c0, 0x32ad: 0x00c0, 0x32ae: 0x00c0, 0x32af: 0x00c0, - 0x32b0: 0x00c0, 0x32b1: 0x00c0, 0x32b2: 0x00c0, 0x32b3: 0x00c0, 0x32b4: 0x00c0, 0x32b5: 0x00c0, - 0x32b6: 0x00c0, 0x32b7: 0x00c0, 0x32b8: 0x00c0, 0x32b9: 0x00c0, 0x32ba: 0x00c0, 0x32bb: 0x00c0, - 0x32bc: 0x00c0, 0x32bd: 0x00c0, 0x32be: 0x00c0, 0x32bf: 0x00c0, - // Block 0xcb, offset 0x32c0 - 0x32c0: 0x00c0, 0x32c1: 0x00c0, 0x32c2: 0x00c0, 0x32c3: 0x00c0, - 0x32c8: 0x00c0, 0x32c9: 0x00c0, 0x32ca: 0x00c0, 0x32cb: 0x00c0, - 0x32cc: 0x00c0, 0x32cd: 0x00c0, 0x32ce: 0x00c0, 0x32cf: 0x00c0, 0x32d0: 0x0080, 0x32d1: 0x0080, - 0x32d2: 0x0080, 0x32d3: 0x0080, 0x32d4: 0x0080, 0x32d5: 0x0080, - // Block 0xcc, offset 0x3300 - 0x3300: 0x00c0, 0x3301: 0x00c0, 0x3302: 0x00c0, 0x3303: 0x00c0, 0x3304: 0x00c0, 0x3305: 0x00c0, - 0x3306: 0x00c0, 0x3307: 0x00c0, 0x3308: 0x00c0, 0x3309: 0x00c0, 0x330a: 0x00c0, 0x330b: 0x00c0, - 0x330c: 0x00c0, 0x330d: 0x00c0, 0x330e: 0x00c0, 0x330f: 0x00c0, 0x3310: 0x00c0, 0x3311: 0x00c0, - 0x3312: 0x00c0, 0x3313: 0x00c0, 0x3314: 0x00c0, 0x3315: 0x00c0, 0x3316: 0x00c0, 0x3317: 0x00c0, - 0x3318: 0x00c0, 0x3319: 0x00c0, 0x331a: 0x00c0, 0x331b: 0x00c0, 0x331c: 0x00c0, 0x331d: 0x00c0, - 0x3320: 0x00c0, 0x3321: 0x00c0, 0x3322: 0x00c0, 0x3323: 0x00c0, - 0x3324: 0x00c0, 0x3325: 0x00c0, 0x3326: 0x00c0, 0x3327: 0x00c0, 0x3328: 0x00c0, 0x3329: 0x00c0, - 0x3330: 0x00c0, 0x3331: 0x00c0, 0x3332: 0x00c0, 0x3333: 0x00c0, 0x3334: 0x00c0, 0x3335: 0x00c0, - 0x3336: 0x00c0, 0x3337: 0x00c0, 0x3338: 0x00c0, 0x3339: 0x00c0, 0x333a: 0x00c0, 0x333b: 0x00c0, - 0x333c: 0x00c0, 0x333d: 0x00c0, 0x333e: 0x00c0, 0x333f: 0x00c0, - // Block 0xcd, offset 0x3340 - 0x3340: 0x00c0, 0x3341: 0x00c0, 0x3342: 0x00c0, 0x3343: 0x00c0, 0x3344: 0x00c0, 0x3345: 0x00c0, - 0x3346: 0x00c0, 0x3347: 0x00c0, 0x3348: 0x00c0, 0x3349: 0x00c0, 0x334a: 0x00c0, 0x334b: 0x00c0, - 0x334c: 0x00c0, 0x334d: 0x00c0, 0x334e: 0x00c0, 0x334f: 0x00c0, 0x3350: 0x00c0, 0x3351: 0x00c0, - 0x3352: 0x00c0, 0x3353: 0x00c0, - 0x3358: 0x00c0, 0x3359: 0x00c0, 0x335a: 0x00c0, 0x335b: 0x00c0, 0x335c: 0x00c0, 0x335d: 0x00c0, - 0x335e: 0x00c0, 0x335f: 0x00c0, 0x3360: 0x00c0, 0x3361: 0x00c0, 0x3362: 0x00c0, 0x3363: 0x00c0, - 0x3364: 0x00c0, 0x3365: 
0x00c0, 0x3366: 0x00c0, 0x3367: 0x00c0, 0x3368: 0x00c0, 0x3369: 0x00c0, - 0x336a: 0x00c0, 0x336b: 0x00c0, 0x336c: 0x00c0, 0x336d: 0x00c0, 0x336e: 0x00c0, 0x336f: 0x00c0, - 0x3370: 0x00c0, 0x3371: 0x00c0, 0x3372: 0x00c0, 0x3373: 0x00c0, 0x3374: 0x00c0, 0x3375: 0x00c0, - 0x3376: 0x00c0, 0x3377: 0x00c0, 0x3378: 0x00c0, 0x3379: 0x00c0, 0x337a: 0x00c0, 0x337b: 0x00c0, - // Block 0xce, offset 0x3380 - 0x3380: 0x00c0, 0x3381: 0x00c0, 0x3382: 0x00c0, 0x3383: 0x00c0, 0x3384: 0x00c0, 0x3385: 0x00c0, - 0x3386: 0x00c0, 0x3387: 0x00c0, 0x3388: 0x00c0, 0x3389: 0x00c0, 0x338a: 0x00c0, 0x338b: 0x00c0, - 0x338c: 0x00c0, 0x338d: 0x00c0, 0x338e: 0x00c0, 0x338f: 0x00c0, 0x3390: 0x00c0, 0x3391: 0x00c0, - 0x3392: 0x00c0, 0x3393: 0x00c0, 0x3394: 0x00c0, 0x3395: 0x00c0, 0x3396: 0x00c0, 0x3397: 0x00c0, - 0x3398: 0x00c0, 0x3399: 0x00c0, 0x339a: 0x00c0, 0x339b: 0x00c0, 0x339c: 0x00c0, 0x339d: 0x00c0, - 0x339e: 0x00c0, 0x339f: 0x00c0, 0x33a0: 0x00c0, 0x33a1: 0x00c0, 0x33a2: 0x00c0, 0x33a3: 0x00c0, - 0x33a4: 0x00c0, 0x33a5: 0x00c0, 0x33a6: 0x00c0, 0x33a7: 0x00c0, - 0x33b0: 0x00c0, 0x33b1: 0x00c0, 0x33b2: 0x00c0, 0x33b3: 0x00c0, 0x33b4: 0x00c0, 0x33b5: 0x00c0, - 0x33b6: 0x00c0, 0x33b7: 0x00c0, 0x33b8: 0x00c0, 0x33b9: 0x00c0, 0x33ba: 0x00c0, 0x33bb: 0x00c0, - 0x33bc: 0x00c0, 0x33bd: 0x00c0, 0x33be: 0x00c0, 0x33bf: 0x00c0, - // Block 0xcf, offset 0x33c0 - 0x33c0: 0x00c0, 0x33c1: 0x00c0, 0x33c2: 0x00c0, 0x33c3: 0x00c0, 0x33c4: 0x00c0, 0x33c5: 0x00c0, - 0x33c6: 0x00c0, 0x33c7: 0x00c0, 0x33c8: 0x00c0, 0x33c9: 0x00c0, 0x33ca: 0x00c0, 0x33cb: 0x00c0, - 0x33cc: 0x00c0, 0x33cd: 0x00c0, 0x33ce: 0x00c0, 0x33cf: 0x00c0, 0x33d0: 0x00c0, 0x33d1: 0x00c0, - 0x33d2: 0x00c0, 0x33d3: 0x00c0, 0x33d4: 0x00c0, 0x33d5: 0x00c0, 0x33d6: 0x00c0, 0x33d7: 0x00c0, - 0x33d8: 0x00c0, 0x33d9: 0x00c0, 0x33da: 0x00c0, 0x33db: 0x00c0, 0x33dc: 0x00c0, 0x33dd: 0x00c0, - 0x33de: 0x00c0, 0x33df: 0x00c0, 0x33e0: 0x00c0, 0x33e1: 0x00c0, 0x33e2: 0x00c0, 0x33e3: 0x00c0, - 0x33ef: 0x0080, - // Block 0xd0, offset 0x3400 - 0x3400: 0x00c0, 0x3401: 0x00c0, 0x3402: 0x00c0, 0x3403: 0x00c0, 0x3404: 0x00c0, 0x3405: 0x00c0, - 0x3406: 0x00c0, 0x3407: 0x00c0, 0x3408: 0x00c0, 0x3409: 0x00c0, 0x340a: 0x00c0, 0x340b: 0x00c0, - 0x340c: 0x00c0, 0x340d: 0x00c0, 0x340e: 0x00c0, 0x340f: 0x00c0, 0x3410: 0x00c0, 0x3411: 0x00c0, - 0x3412: 0x00c0, 0x3413: 0x00c0, 0x3414: 0x00c0, 0x3415: 0x00c0, 0x3416: 0x00c0, 0x3417: 0x00c0, - 0x3418: 0x00c0, 0x3419: 0x00c0, 0x341a: 0x00c0, 0x341b: 0x00c0, 0x341c: 0x00c0, 0x341d: 0x00c0, - 0x341e: 0x00c0, 0x341f: 0x00c0, 0x3420: 0x00c0, 0x3421: 0x00c0, 0x3422: 0x00c0, 0x3423: 0x00c0, - 0x3424: 0x00c0, 0x3425: 0x00c0, 0x3426: 0x00c0, 0x3427: 0x00c0, 0x3428: 0x00c0, 0x3429: 0x00c0, - 0x342a: 0x00c0, 0x342b: 0x00c0, 0x342c: 0x00c0, 0x342d: 0x00c0, 0x342e: 0x00c0, 0x342f: 0x00c0, - 0x3430: 0x00c0, 0x3431: 0x00c0, 0x3432: 0x00c0, 0x3433: 0x00c0, 0x3434: 0x00c0, 0x3435: 0x00c0, - 0x3436: 0x00c0, - // Block 0xd1, offset 0x3440 - 0x3440: 0x00c0, 0x3441: 0x00c0, 0x3442: 0x00c0, 0x3443: 0x00c0, 0x3444: 0x00c0, 0x3445: 0x00c0, - 0x3446: 0x00c0, 0x3447: 0x00c0, 0x3448: 0x00c0, 0x3449: 0x00c0, 0x344a: 0x00c0, 0x344b: 0x00c0, - 0x344c: 0x00c0, 0x344d: 0x00c0, 0x344e: 0x00c0, 0x344f: 0x00c0, 0x3450: 0x00c0, 0x3451: 0x00c0, - 0x3452: 0x00c0, 0x3453: 0x00c0, 0x3454: 0x00c0, 0x3455: 0x00c0, - 0x3460: 0x00c0, 0x3461: 0x00c0, 0x3462: 0x00c0, 0x3463: 0x00c0, - 0x3464: 0x00c0, 0x3465: 0x00c0, 0x3466: 0x00c0, 0x3467: 0x00c0, - // Block 0xd2, offset 0x3480 - 0x3480: 0x00c0, 0x3481: 0x00c0, 0x3482: 0x00c0, 0x3483: 0x00c0, 0x3484: 0x00c0, 0x3485: 0x00c0, - 0x3488: 0x00c0, 
0x348a: 0x00c0, 0x348b: 0x00c0, - 0x348c: 0x00c0, 0x348d: 0x00c0, 0x348e: 0x00c0, 0x348f: 0x00c0, 0x3490: 0x00c0, 0x3491: 0x00c0, - 0x3492: 0x00c0, 0x3493: 0x00c0, 0x3494: 0x00c0, 0x3495: 0x00c0, 0x3496: 0x00c0, 0x3497: 0x00c0, - 0x3498: 0x00c0, 0x3499: 0x00c0, 0x349a: 0x00c0, 0x349b: 0x00c0, 0x349c: 0x00c0, 0x349d: 0x00c0, - 0x349e: 0x00c0, 0x349f: 0x00c0, 0x34a0: 0x00c0, 0x34a1: 0x00c0, 0x34a2: 0x00c0, 0x34a3: 0x00c0, - 0x34a4: 0x00c0, 0x34a5: 0x00c0, 0x34a6: 0x00c0, 0x34a7: 0x00c0, 0x34a8: 0x00c0, 0x34a9: 0x00c0, - 0x34aa: 0x00c0, 0x34ab: 0x00c0, 0x34ac: 0x00c0, 0x34ad: 0x00c0, 0x34ae: 0x00c0, 0x34af: 0x00c0, - 0x34b0: 0x00c0, 0x34b1: 0x00c0, 0x34b2: 0x00c0, 0x34b3: 0x00c0, 0x34b4: 0x00c0, 0x34b5: 0x00c0, - 0x34b7: 0x00c0, 0x34b8: 0x00c0, - 0x34bc: 0x00c0, 0x34bf: 0x00c0, - // Block 0xd3, offset 0x34c0 - 0x34c0: 0x00c0, 0x34c1: 0x00c0, 0x34c2: 0x00c0, 0x34c3: 0x00c0, 0x34c4: 0x00c0, 0x34c5: 0x00c0, - 0x34c6: 0x00c0, 0x34c7: 0x00c0, 0x34c8: 0x00c0, 0x34c9: 0x00c0, 0x34ca: 0x00c0, 0x34cb: 0x00c0, - 0x34cc: 0x00c0, 0x34cd: 0x00c0, 0x34ce: 0x00c0, 0x34cf: 0x00c0, 0x34d0: 0x00c0, 0x34d1: 0x00c0, - 0x34d2: 0x00c0, 0x34d3: 0x00c0, 0x34d4: 0x00c0, 0x34d5: 0x00c0, 0x34d7: 0x0080, - 0x34d8: 0x0080, 0x34d9: 0x0080, 0x34da: 0x0080, 0x34db: 0x0080, 0x34dc: 0x0080, 0x34dd: 0x0080, - 0x34de: 0x0080, 0x34df: 0x0080, 0x34e0: 0x00c0, 0x34e1: 0x00c0, 0x34e2: 0x00c0, 0x34e3: 0x00c0, - 0x34e4: 0x00c0, 0x34e5: 0x00c0, 0x34e6: 0x00c0, 0x34e7: 0x00c0, 0x34e8: 0x00c0, 0x34e9: 0x00c0, - 0x34ea: 0x00c0, 0x34eb: 0x00c0, 0x34ec: 0x00c0, 0x34ed: 0x00c0, 0x34ee: 0x00c0, 0x34ef: 0x00c0, - 0x34f0: 0x00c0, 0x34f1: 0x00c0, 0x34f2: 0x00c0, 0x34f3: 0x00c0, 0x34f4: 0x00c0, 0x34f5: 0x00c0, - 0x34f6: 0x00c0, 0x34f7: 0x0080, 0x34f8: 0x0080, 0x34f9: 0x0080, 0x34fa: 0x0080, 0x34fb: 0x0080, - 0x34fc: 0x0080, 0x34fd: 0x0080, 0x34fe: 0x0080, 0x34ff: 0x0080, - // Block 0xd4, offset 0x3500 - 0x3500: 0x00c0, 0x3501: 0x00c0, 0x3502: 0x00c0, 0x3503: 0x00c0, 0x3504: 0x00c0, 0x3505: 0x00c0, - 0x3506: 0x00c0, 0x3507: 0x00c0, 0x3508: 0x00c0, 0x3509: 0x00c0, 0x350a: 0x00c0, 0x350b: 0x00c0, - 0x350c: 0x00c0, 0x350d: 0x00c0, 0x350e: 0x00c0, 0x350f: 0x00c0, 0x3510: 0x00c0, 0x3511: 0x00c0, - 0x3512: 0x00c0, 0x3513: 0x00c0, 0x3514: 0x00c0, 0x3515: 0x00c0, 0x3516: 0x00c0, 0x3517: 0x00c0, - 0x3518: 0x00c0, 0x3519: 0x00c0, 0x351a: 0x00c0, 0x351b: 0x00c0, 0x351c: 0x00c0, 0x351d: 0x00c0, - 0x351e: 0x00c0, - 0x3527: 0x0080, 0x3528: 0x0080, 0x3529: 0x0080, - 0x352a: 0x0080, 0x352b: 0x0080, 0x352c: 0x0080, 0x352d: 0x0080, 0x352e: 0x0080, 0x352f: 0x0080, - // Block 0xd5, offset 0x3540 - 0x3560: 0x00c0, 0x3561: 0x00c0, 0x3562: 0x00c0, 0x3563: 0x00c0, - 0x3564: 0x00c0, 0x3565: 0x00c0, 0x3566: 0x00c0, 0x3567: 0x00c0, 0x3568: 0x00c0, 0x3569: 0x00c0, - 0x356a: 0x00c0, 0x356b: 0x00c0, 0x356c: 0x00c0, 0x356d: 0x00c0, 0x356e: 0x00c0, 0x356f: 0x00c0, - 0x3570: 0x00c0, 0x3571: 0x00c0, 0x3572: 0x00c0, 0x3574: 0x00c0, 0x3575: 0x00c0, - 0x357b: 0x0080, - 0x357c: 0x0080, 0x357d: 0x0080, 0x357e: 0x0080, 0x357f: 0x0080, - // Block 0xd6, offset 0x3580 - 0x3580: 0x00c0, 0x3581: 0x00c0, 0x3582: 0x00c0, 0x3583: 0x00c0, 0x3584: 0x00c0, 0x3585: 0x00c0, - 0x3586: 0x00c0, 0x3587: 0x00c0, 0x3588: 0x00c0, 0x3589: 0x00c0, 0x358a: 0x00c0, 0x358b: 0x00c0, - 0x358c: 0x00c0, 0x358d: 0x00c0, 0x358e: 0x00c0, 0x358f: 0x00c0, 0x3590: 0x00c0, 0x3591: 0x00c0, - 0x3592: 0x00c0, 0x3593: 0x00c0, 0x3594: 0x00c0, 0x3595: 0x00c0, 0x3596: 0x0080, 0x3597: 0x0080, - 0x3598: 0x0080, 0x3599: 0x0080, 0x359a: 0x0080, 0x359b: 0x0080, - 0x359f: 0x0080, 0x35a0: 0x00c0, 0x35a1: 0x00c0, 0x35a2: 0x00c0, 
0x35a3: 0x00c0, - 0x35a4: 0x00c0, 0x35a5: 0x00c0, 0x35a6: 0x00c0, 0x35a7: 0x00c0, 0x35a8: 0x00c0, 0x35a9: 0x00c0, - 0x35aa: 0x00c0, 0x35ab: 0x00c0, 0x35ac: 0x00c0, 0x35ad: 0x00c0, 0x35ae: 0x00c0, 0x35af: 0x00c0, - 0x35b0: 0x00c0, 0x35b1: 0x00c0, 0x35b2: 0x00c0, 0x35b3: 0x00c0, 0x35b4: 0x00c0, 0x35b5: 0x00c0, - 0x35b6: 0x00c0, 0x35b7: 0x00c0, 0x35b8: 0x00c0, 0x35b9: 0x00c0, - 0x35bf: 0x0080, - // Block 0xd7, offset 0x35c0 - 0x35c0: 0x00c0, 0x35c1: 0x00c0, 0x35c2: 0x00c0, 0x35c3: 0x00c0, 0x35c4: 0x00c0, 0x35c5: 0x00c0, - 0x35c6: 0x00c0, 0x35c7: 0x00c0, 0x35c8: 0x00c0, 0x35c9: 0x00c0, 0x35ca: 0x00c0, 0x35cb: 0x00c0, - 0x35cc: 0x00c0, 0x35cd: 0x00c0, 0x35ce: 0x00c0, 0x35cf: 0x00c0, 0x35d0: 0x00c0, 0x35d1: 0x00c0, - 0x35d2: 0x00c0, 0x35d3: 0x00c0, 0x35d4: 0x00c0, 0x35d5: 0x00c0, 0x35d6: 0x00c0, 0x35d7: 0x00c0, - 0x35d8: 0x00c0, 0x35d9: 0x00c0, 0x35da: 0x00c0, 0x35db: 0x00c0, 0x35dc: 0x00c0, 0x35dd: 0x00c0, - 0x35de: 0x00c0, 0x35df: 0x00c0, 0x35e0: 0x00c0, 0x35e1: 0x00c0, 0x35e2: 0x00c0, 0x35e3: 0x00c0, - 0x35e4: 0x00c0, 0x35e5: 0x00c0, 0x35e6: 0x00c0, 0x35e7: 0x00c0, 0x35e8: 0x00c0, 0x35e9: 0x00c0, - 0x35ea: 0x00c0, 0x35eb: 0x00c0, 0x35ec: 0x00c0, 0x35ed: 0x00c0, 0x35ee: 0x00c0, 0x35ef: 0x00c0, - 0x35f0: 0x00c0, 0x35f1: 0x00c0, 0x35f2: 0x00c0, 0x35f3: 0x00c0, 0x35f4: 0x00c0, 0x35f5: 0x00c0, - 0x35f6: 0x00c0, 0x35f7: 0x00c0, - 0x35fc: 0x0080, 0x35fd: 0x0080, 0x35fe: 0x00c0, 0x35ff: 0x00c0, - // Block 0xd8, offset 0x3600 - 0x3600: 0x00c0, 0x3601: 0x00c3, 0x3602: 0x00c3, 0x3603: 0x00c3, 0x3605: 0x00c3, - 0x3606: 0x00c3, - 0x360c: 0x00c3, 0x360d: 0x00c3, 0x360e: 0x00c3, 0x360f: 0x00c3, 0x3610: 0x00c0, 0x3611: 0x00c0, - 0x3612: 0x00c0, 0x3613: 0x00c0, 0x3615: 0x00c0, 0x3616: 0x00c0, 0x3617: 0x00c0, - 0x3619: 0x00c0, 0x361a: 0x00c0, 0x361b: 0x00c0, 0x361c: 0x00c0, 0x361d: 0x00c0, - 0x361e: 0x00c0, 0x361f: 0x00c0, 0x3620: 0x00c0, 0x3621: 0x00c0, 0x3622: 0x00c0, 0x3623: 0x00c0, - 0x3624: 0x00c0, 0x3625: 0x00c0, 0x3626: 0x00c0, 0x3627: 0x00c0, 0x3628: 0x00c0, 0x3629: 0x00c0, - 0x362a: 0x00c0, 0x362b: 0x00c0, 0x362c: 0x00c0, 0x362d: 0x00c0, 0x362e: 0x00c0, 0x362f: 0x00c0, - 0x3630: 0x00c0, 0x3631: 0x00c0, 0x3632: 0x00c0, 0x3633: 0x00c0, - 0x3638: 0x00c3, 0x3639: 0x00c3, 0x363a: 0x00c3, - 0x363f: 0x00c6, - // Block 0xd9, offset 0x3640 - 0x3640: 0x0080, 0x3641: 0x0080, 0x3642: 0x0080, 0x3643: 0x0080, 0x3644: 0x0080, 0x3645: 0x0080, - 0x3646: 0x0080, 0x3647: 0x0080, - 0x3650: 0x0080, 0x3651: 0x0080, - 0x3652: 0x0080, 0x3653: 0x0080, 0x3654: 0x0080, 0x3655: 0x0080, 0x3656: 0x0080, 0x3657: 0x0080, - 0x3658: 0x0080, - 0x3660: 0x00c0, 0x3661: 0x00c0, 0x3662: 0x00c0, 0x3663: 0x00c0, - 0x3664: 0x00c0, 0x3665: 0x00c0, 0x3666: 0x00c0, 0x3667: 0x00c0, 0x3668: 0x00c0, 0x3669: 0x00c0, - 0x366a: 0x00c0, 0x366b: 0x00c0, 0x366c: 0x00c0, 0x366d: 0x00c0, 0x366e: 0x00c0, 0x366f: 0x00c0, - 0x3670: 0x00c0, 0x3671: 0x00c0, 0x3672: 0x00c0, 0x3673: 0x00c0, 0x3674: 0x00c0, 0x3675: 0x00c0, - 0x3676: 0x00c0, 0x3677: 0x00c0, 0x3678: 0x00c0, 0x3679: 0x00c0, 0x367a: 0x00c0, 0x367b: 0x00c0, - 0x367c: 0x00c0, 0x367d: 0x0080, 0x367e: 0x0080, 0x367f: 0x0080, - // Block 0xda, offset 0x3680 - 0x3680: 0x00c0, 0x3681: 0x00c0, 0x3682: 0x00c0, 0x3683: 0x00c0, 0x3684: 0x00c0, 0x3685: 0x00c0, - 0x3686: 0x00c0, 0x3687: 0x00c0, 0x3688: 0x00c0, 0x3689: 0x00c0, 0x368a: 0x00c0, 0x368b: 0x00c0, - 0x368c: 0x00c0, 0x368d: 0x00c0, 0x368e: 0x00c0, 0x368f: 0x00c0, 0x3690: 0x00c0, 0x3691: 0x00c0, - 0x3692: 0x00c0, 0x3693: 0x00c0, 0x3694: 0x00c0, 0x3695: 0x00c0, 0x3696: 0x00c0, 0x3697: 0x00c0, - 0x3698: 0x00c0, 0x3699: 0x00c0, 0x369a: 0x00c0, 0x369b: 0x00c0, 
0x369c: 0x00c0, 0x369d: 0x0080, - 0x369e: 0x0080, 0x369f: 0x0080, - // Block 0xdb, offset 0x36c0 - 0x36c0: 0x00c2, 0x36c1: 0x00c2, 0x36c2: 0x00c2, 0x36c3: 0x00c2, 0x36c4: 0x00c2, 0x36c5: 0x00c4, - 0x36c6: 0x00c0, 0x36c7: 0x00c4, 0x36c8: 0x0080, 0x36c9: 0x00c4, 0x36ca: 0x00c4, 0x36cb: 0x00c0, - 0x36cc: 0x00c0, 0x36cd: 0x00c1, 0x36ce: 0x00c4, 0x36cf: 0x00c4, 0x36d0: 0x00c4, 0x36d1: 0x00c4, - 0x36d2: 0x00c4, 0x36d3: 0x00c2, 0x36d4: 0x00c2, 0x36d5: 0x00c2, 0x36d6: 0x00c2, 0x36d7: 0x00c1, - 0x36d8: 0x00c2, 0x36d9: 0x00c2, 0x36da: 0x00c2, 0x36db: 0x00c2, 0x36dc: 0x00c2, 0x36dd: 0x00c4, - 0x36de: 0x00c2, 0x36df: 0x00c2, 0x36e0: 0x00c2, 0x36e1: 0x00c4, 0x36e2: 0x00c0, 0x36e3: 0x00c0, - 0x36e4: 0x00c4, 0x36e5: 0x00c3, 0x36e6: 0x00c3, - 0x36eb: 0x0082, 0x36ec: 0x0082, 0x36ed: 0x0082, 0x36ee: 0x0082, 0x36ef: 0x0084, - 0x36f0: 0x0080, 0x36f1: 0x0080, 0x36f2: 0x0080, 0x36f3: 0x0080, 0x36f4: 0x0080, 0x36f5: 0x0080, - 0x36f6: 0x0080, - // Block 0xdc, offset 0x3700 - 0x3700: 0x00c0, 0x3701: 0x00c0, 0x3702: 0x00c0, 0x3703: 0x00c0, 0x3704: 0x00c0, 0x3705: 0x00c0, - 0x3706: 0x00c0, 0x3707: 0x00c0, 0x3708: 0x00c0, 0x3709: 0x00c0, 0x370a: 0x00c0, 0x370b: 0x00c0, - 0x370c: 0x00c0, 0x370d: 0x00c0, 0x370e: 0x00c0, 0x370f: 0x00c0, 0x3710: 0x00c0, 0x3711: 0x00c0, - 0x3712: 0x00c0, 0x3713: 0x00c0, 0x3714: 0x00c0, 0x3715: 0x00c0, 0x3716: 0x00c0, 0x3717: 0x00c0, - 0x3718: 0x00c0, 0x3719: 0x00c0, 0x371a: 0x00c0, 0x371b: 0x00c0, 0x371c: 0x00c0, 0x371d: 0x00c0, - 0x371e: 0x00c0, 0x371f: 0x00c0, 0x3720: 0x00c0, 0x3721: 0x00c0, 0x3722: 0x00c0, 0x3723: 0x00c0, - 0x3724: 0x00c0, 0x3725: 0x00c0, 0x3726: 0x00c0, 0x3727: 0x00c0, 0x3728: 0x00c0, 0x3729: 0x00c0, - 0x372a: 0x00c0, 0x372b: 0x00c0, 0x372c: 0x00c0, 0x372d: 0x00c0, 0x372e: 0x00c0, 0x372f: 0x00c0, - 0x3730: 0x00c0, 0x3731: 0x00c0, 0x3732: 0x00c0, 0x3733: 0x00c0, 0x3734: 0x00c0, 0x3735: 0x00c0, - 0x3739: 0x0080, 0x373a: 0x0080, 0x373b: 0x0080, - 0x373c: 0x0080, 0x373d: 0x0080, 0x373e: 0x0080, 0x373f: 0x0080, - // Block 0xdd, offset 0x3740 - 0x3740: 0x00c0, 0x3741: 0x00c0, 0x3742: 0x00c0, 0x3743: 0x00c0, 0x3744: 0x00c0, 0x3745: 0x00c0, - 0x3746: 0x00c0, 0x3747: 0x00c0, 0x3748: 0x00c0, 0x3749: 0x00c0, 0x374a: 0x00c0, 0x374b: 0x00c0, - 0x374c: 0x00c0, 0x374d: 0x00c0, 0x374e: 0x00c0, 0x374f: 0x00c0, 0x3750: 0x00c0, 0x3751: 0x00c0, - 0x3752: 0x00c0, 0x3753: 0x00c0, 0x3754: 0x00c0, 0x3755: 0x00c0, - 0x3758: 0x0080, 0x3759: 0x0080, 0x375a: 0x0080, 0x375b: 0x0080, 0x375c: 0x0080, 0x375d: 0x0080, - 0x375e: 0x0080, 0x375f: 0x0080, 0x3760: 0x00c0, 0x3761: 0x00c0, 0x3762: 0x00c0, 0x3763: 0x00c0, - 0x3764: 0x00c0, 0x3765: 0x00c0, 0x3766: 0x00c0, 0x3767: 0x00c0, 0x3768: 0x00c0, 0x3769: 0x00c0, - 0x376a: 0x00c0, 0x376b: 0x00c0, 0x376c: 0x00c0, 0x376d: 0x00c0, 0x376e: 0x00c0, 0x376f: 0x00c0, - 0x3770: 0x00c0, 0x3771: 0x00c0, 0x3772: 0x00c0, - 0x3778: 0x0080, 0x3779: 0x0080, 0x377a: 0x0080, 0x377b: 0x0080, - 0x377c: 0x0080, 0x377d: 0x0080, 0x377e: 0x0080, 0x377f: 0x0080, - // Block 0xde, offset 0x3780 - 0x3780: 0x00c2, 0x3781: 0x00c4, 0x3782: 0x00c2, 0x3783: 0x00c4, 0x3784: 0x00c4, 0x3785: 0x00c4, - 0x3786: 0x00c2, 0x3787: 0x00c2, 0x3788: 0x00c2, 0x3789: 0x00c4, 0x378a: 0x00c2, 0x378b: 0x00c2, - 0x378c: 0x00c4, 0x378d: 0x00c2, 0x378e: 0x00c4, 0x378f: 0x00c4, 0x3790: 0x00c2, 0x3791: 0x00c4, - 0x3799: 0x0080, 0x379a: 0x0080, 0x379b: 0x0080, 0x379c: 0x0080, - 0x37a9: 0x0084, - 0x37aa: 0x0084, 0x37ab: 0x0084, 0x37ac: 0x0084, 0x37ad: 0x0082, 0x37ae: 0x0082, 0x37af: 0x0080, - // Block 0xdf, offset 0x37c0 - 0x37c0: 0x00c0, 0x37c1: 0x00c0, 0x37c2: 0x00c0, 0x37c3: 0x00c0, 0x37c4: 0x00c0, 0x37c5: 
0x00c0, - 0x37c6: 0x00c0, 0x37c7: 0x00c0, 0x37c8: 0x00c0, 0x37c9: 0x00c0, 0x37ca: 0x00c0, 0x37cb: 0x00c0, - 0x37cc: 0x00c0, 0x37cd: 0x00c0, 0x37ce: 0x00c0, 0x37cf: 0x00c0, 0x37d0: 0x00c0, 0x37d1: 0x00c0, - 0x37d2: 0x00c0, 0x37d3: 0x00c0, 0x37d4: 0x00c0, 0x37d5: 0x00c0, 0x37d6: 0x00c0, 0x37d7: 0x00c0, - 0x37d8: 0x00c0, 0x37d9: 0x00c0, 0x37da: 0x00c0, 0x37db: 0x00c0, 0x37dc: 0x00c0, 0x37dd: 0x00c0, - 0x37de: 0x00c0, 0x37df: 0x00c0, 0x37e0: 0x00c0, 0x37e1: 0x00c0, 0x37e2: 0x00c0, 0x37e3: 0x00c0, - 0x37e4: 0x00c0, 0x37e5: 0x00c0, 0x37e6: 0x00c0, 0x37e7: 0x00c0, 0x37e8: 0x00c0, 0x37e9: 0x00c0, - 0x37ea: 0x00c0, 0x37eb: 0x00c0, 0x37ec: 0x00c0, 0x37ed: 0x00c0, 0x37ee: 0x00c0, 0x37ef: 0x00c0, - 0x37f0: 0x00c0, 0x37f1: 0x00c0, 0x37f2: 0x00c0, - // Block 0xe0, offset 0x3800 - 0x3800: 0x00c0, 0x3801: 0x00c0, 0x3802: 0x00c0, 0x3803: 0x00c0, 0x3804: 0x00c0, 0x3805: 0x00c0, - 0x3806: 0x00c0, 0x3807: 0x00c0, 0x3808: 0x00c0, 0x3809: 0x00c0, 0x380a: 0x00c0, 0x380b: 0x00c0, - 0x380c: 0x00c0, 0x380d: 0x00c0, 0x380e: 0x00c0, 0x380f: 0x00c0, 0x3810: 0x00c0, 0x3811: 0x00c0, - 0x3812: 0x00c0, 0x3813: 0x00c0, 0x3814: 0x00c0, 0x3815: 0x00c0, 0x3816: 0x00c0, 0x3817: 0x00c0, - 0x3818: 0x00c0, 0x3819: 0x00c0, 0x381a: 0x00c0, 0x381b: 0x00c0, 0x381c: 0x00c0, 0x381d: 0x00c0, - 0x381e: 0x00c0, 0x381f: 0x00c0, 0x3820: 0x00c0, 0x3821: 0x00c0, 0x3822: 0x00c0, 0x3823: 0x00c0, - 0x3824: 0x00c0, 0x3825: 0x00c0, 0x3826: 0x00c0, 0x3827: 0x00c0, 0x3828: 0x00c0, 0x3829: 0x00c0, - 0x382a: 0x00c0, 0x382b: 0x00c0, 0x382c: 0x00c0, 0x382d: 0x00c0, 0x382e: 0x00c0, 0x382f: 0x00c0, - 0x3830: 0x00c0, 0x3831: 0x00c0, 0x3832: 0x00c0, - 0x383a: 0x0080, 0x383b: 0x0080, - 0x383c: 0x0080, 0x383d: 0x0080, 0x383e: 0x0080, 0x383f: 0x0080, - // Block 0xe1, offset 0x3840 - 0x3860: 0x0080, 0x3861: 0x0080, 0x3862: 0x0080, 0x3863: 0x0080, - 0x3864: 0x0080, 0x3865: 0x0080, 0x3866: 0x0080, 0x3867: 0x0080, 0x3868: 0x0080, 0x3869: 0x0080, - 0x386a: 0x0080, 0x386b: 0x0080, 0x386c: 0x0080, 0x386d: 0x0080, 0x386e: 0x0080, 0x386f: 0x0080, - 0x3870: 0x0080, 0x3871: 0x0080, 0x3872: 0x0080, 0x3873: 0x0080, 0x3874: 0x0080, 0x3875: 0x0080, - 0x3876: 0x0080, 0x3877: 0x0080, 0x3878: 0x0080, 0x3879: 0x0080, 0x387a: 0x0080, 0x387b: 0x0080, - 0x387c: 0x0080, 0x387d: 0x0080, 0x387e: 0x0080, - // Block 0xe2, offset 0x3880 - 0x3880: 0x00c0, 0x3881: 0x00c3, 0x3882: 0x00c0, 0x3883: 0x00c0, 0x3884: 0x00c0, 0x3885: 0x00c0, - 0x3886: 0x00c0, 0x3887: 0x00c0, 0x3888: 0x00c0, 0x3889: 0x00c0, 0x388a: 0x00c0, 0x388b: 0x00c0, - 0x388c: 0x00c0, 0x388d: 0x00c0, 0x388e: 0x00c0, 0x388f: 0x00c0, 0x3890: 0x00c0, 0x3891: 0x00c0, - 0x3892: 0x00c0, 0x3893: 0x00c0, 0x3894: 0x00c0, 0x3895: 0x00c0, 0x3896: 0x00c0, 0x3897: 0x00c0, - 0x3898: 0x00c0, 0x3899: 0x00c0, 0x389a: 0x00c0, 0x389b: 0x00c0, 0x389c: 0x00c0, 0x389d: 0x00c0, - 0x389e: 0x00c0, 0x389f: 0x00c0, 0x38a0: 0x00c0, 0x38a1: 0x00c0, 0x38a2: 0x00c0, 0x38a3: 0x00c0, - 0x38a4: 0x00c0, 0x38a5: 0x00c0, 0x38a6: 0x00c0, 0x38a7: 0x00c0, 0x38a8: 0x00c0, 0x38a9: 0x00c0, - 0x38aa: 0x00c0, 0x38ab: 0x00c0, 0x38ac: 0x00c0, 0x38ad: 0x00c0, 0x38ae: 0x00c0, 0x38af: 0x00c0, - 0x38b0: 0x00c0, 0x38b1: 0x00c0, 0x38b2: 0x00c0, 0x38b3: 0x00c0, 0x38b4: 0x00c0, 0x38b5: 0x00c0, - 0x38b6: 0x00c0, 0x38b7: 0x00c0, 0x38b8: 0x00c3, 0x38b9: 0x00c3, 0x38ba: 0x00c3, 0x38bb: 0x00c3, - 0x38bc: 0x00c3, 0x38bd: 0x00c3, 0x38be: 0x00c3, 0x38bf: 0x00c3, - // Block 0xe3, offset 0x38c0 - 0x38c0: 0x00c3, 0x38c1: 0x00c3, 0x38c2: 0x00c3, 0x38c3: 0x00c3, 0x38c4: 0x00c3, 0x38c5: 0x00c3, - 0x38c6: 0x00c6, 0x38c7: 0x0080, 0x38c8: 0x0080, 0x38c9: 0x0080, 0x38ca: 0x0080, 0x38cb: 0x0080, - 
0x38cc: 0x0080, 0x38cd: 0x0080, - 0x38d2: 0x0080, 0x38d3: 0x0080, 0x38d4: 0x0080, 0x38d5: 0x0080, 0x38d6: 0x0080, 0x38d7: 0x0080, - 0x38d8: 0x0080, 0x38d9: 0x0080, 0x38da: 0x0080, 0x38db: 0x0080, 0x38dc: 0x0080, 0x38dd: 0x0080, - 0x38de: 0x0080, 0x38df: 0x0080, 0x38e0: 0x0080, 0x38e1: 0x0080, 0x38e2: 0x0080, 0x38e3: 0x0080, - 0x38e4: 0x0080, 0x38e5: 0x0080, 0x38e6: 0x00c0, 0x38e7: 0x00c0, 0x38e8: 0x00c0, 0x38e9: 0x00c0, - 0x38ea: 0x00c0, 0x38eb: 0x00c0, 0x38ec: 0x00c0, 0x38ed: 0x00c0, 0x38ee: 0x00c0, 0x38ef: 0x00c0, - 0x38ff: 0x00c6, - // Block 0xe4, offset 0x3900 - 0x3900: 0x00c3, 0x3901: 0x00c3, 0x3902: 0x00c0, 0x3903: 0x00c0, 0x3904: 0x00c0, 0x3905: 0x00c0, - 0x3906: 0x00c0, 0x3907: 0x00c0, 0x3908: 0x00c0, 0x3909: 0x00c0, 0x390a: 0x00c0, 0x390b: 0x00c0, - 0x390c: 0x00c0, 0x390d: 0x00c0, 0x390e: 0x00c0, 0x390f: 0x00c0, 0x3910: 0x00c0, 0x3911: 0x00c0, - 0x3912: 0x00c0, 0x3913: 0x00c0, 0x3914: 0x00c0, 0x3915: 0x00c0, 0x3916: 0x00c0, 0x3917: 0x00c0, - 0x3918: 0x00c0, 0x3919: 0x00c0, 0x391a: 0x00c0, 0x391b: 0x00c0, 0x391c: 0x00c0, 0x391d: 0x00c0, - 0x391e: 0x00c0, 0x391f: 0x00c0, 0x3920: 0x00c0, 0x3921: 0x00c0, 0x3922: 0x00c0, 0x3923: 0x00c0, - 0x3924: 0x00c0, 0x3925: 0x00c0, 0x3926: 0x00c0, 0x3927: 0x00c0, 0x3928: 0x00c0, 0x3929: 0x00c0, - 0x392a: 0x00c0, 0x392b: 0x00c0, 0x392c: 0x00c0, 0x392d: 0x00c0, 0x392e: 0x00c0, 0x392f: 0x00c0, - 0x3930: 0x00c0, 0x3931: 0x00c0, 0x3932: 0x00c0, 0x3933: 0x00c3, 0x3934: 0x00c3, 0x3935: 0x00c3, - 0x3936: 0x00c3, 0x3937: 0x00c0, 0x3938: 0x00c0, 0x3939: 0x00c6, 0x393a: 0x00c3, 0x393b: 0x0080, - 0x393c: 0x0080, 0x393d: 0x0040, 0x393e: 0x0080, 0x393f: 0x0080, - // Block 0xe5, offset 0x3940 - 0x3940: 0x0080, 0x3941: 0x0080, - 0x3950: 0x00c0, 0x3951: 0x00c0, - 0x3952: 0x00c0, 0x3953: 0x00c0, 0x3954: 0x00c0, 0x3955: 0x00c0, 0x3956: 0x00c0, 0x3957: 0x00c0, - 0x3958: 0x00c0, 0x3959: 0x00c0, 0x395a: 0x00c0, 0x395b: 0x00c0, 0x395c: 0x00c0, 0x395d: 0x00c0, - 0x395e: 0x00c0, 0x395f: 0x00c0, 0x3960: 0x00c0, 0x3961: 0x00c0, 0x3962: 0x00c0, 0x3963: 0x00c0, - 0x3964: 0x00c0, 0x3965: 0x00c0, 0x3966: 0x00c0, 0x3967: 0x00c0, 0x3968: 0x00c0, - 0x3970: 0x00c0, 0x3971: 0x00c0, 0x3972: 0x00c0, 0x3973: 0x00c0, 0x3974: 0x00c0, 0x3975: 0x00c0, - 0x3976: 0x00c0, 0x3977: 0x00c0, 0x3978: 0x00c0, 0x3979: 0x00c0, - // Block 0xe6, offset 0x3980 - 0x3980: 0x00c3, 0x3981: 0x00c3, 0x3982: 0x00c3, 0x3983: 0x00c0, 0x3984: 0x00c0, 0x3985: 0x00c0, - 0x3986: 0x00c0, 0x3987: 0x00c0, 0x3988: 0x00c0, 0x3989: 0x00c0, 0x398a: 0x00c0, 0x398b: 0x00c0, - 0x398c: 0x00c0, 0x398d: 0x00c0, 0x398e: 0x00c0, 0x398f: 0x00c0, 0x3990: 0x00c0, 0x3991: 0x00c0, - 0x3992: 0x00c0, 0x3993: 0x00c0, 0x3994: 0x00c0, 0x3995: 0x00c0, 0x3996: 0x00c0, 0x3997: 0x00c0, - 0x3998: 0x00c0, 0x3999: 0x00c0, 0x399a: 0x00c0, 0x399b: 0x00c0, 0x399c: 0x00c0, 0x399d: 0x00c0, - 0x399e: 0x00c0, 0x399f: 0x00c0, 0x39a0: 0x00c0, 0x39a1: 0x00c0, 0x39a2: 0x00c0, 0x39a3: 0x00c0, - 0x39a4: 0x00c0, 0x39a5: 0x00c0, 0x39a6: 0x00c0, 0x39a7: 0x00c3, 0x39a8: 0x00c3, 0x39a9: 0x00c3, - 0x39aa: 0x00c3, 0x39ab: 0x00c3, 0x39ac: 0x00c0, 0x39ad: 0x00c3, 0x39ae: 0x00c3, 0x39af: 0x00c3, - 0x39b0: 0x00c3, 0x39b1: 0x00c3, 0x39b2: 0x00c3, 0x39b3: 0x00c6, 0x39b4: 0x00c6, - 0x39b6: 0x00c0, 0x39b7: 0x00c0, 0x39b8: 0x00c0, 0x39b9: 0x00c0, 0x39ba: 0x00c0, 0x39bb: 0x00c0, - 0x39bc: 0x00c0, 0x39bd: 0x00c0, 0x39be: 0x00c0, 0x39bf: 0x00c0, - // Block 0xe7, offset 0x39c0 - 0x39c0: 0x0080, 0x39c1: 0x0080, 0x39c2: 0x0080, 0x39c3: 0x0080, - 0x39d0: 0x00c0, 0x39d1: 0x00c0, - 0x39d2: 0x00c0, 0x39d3: 0x00c0, 0x39d4: 0x00c0, 0x39d5: 0x00c0, 0x39d6: 0x00c0, 0x39d7: 0x00c0, - 
0x39d8: 0x00c0, 0x39d9: 0x00c0, 0x39da: 0x00c0, 0x39db: 0x00c0, 0x39dc: 0x00c0, 0x39dd: 0x00c0, - 0x39de: 0x00c0, 0x39df: 0x00c0, 0x39e0: 0x00c0, 0x39e1: 0x00c0, 0x39e2: 0x00c0, 0x39e3: 0x00c0, - 0x39e4: 0x00c0, 0x39e5: 0x00c0, 0x39e6: 0x00c0, 0x39e7: 0x00c0, 0x39e8: 0x00c0, 0x39e9: 0x00c0, - 0x39ea: 0x00c0, 0x39eb: 0x00c0, 0x39ec: 0x00c0, 0x39ed: 0x00c0, 0x39ee: 0x00c0, 0x39ef: 0x00c0, - 0x39f0: 0x00c0, 0x39f1: 0x00c0, 0x39f2: 0x00c0, 0x39f3: 0x00c3, 0x39f4: 0x0080, 0x39f5: 0x0080, - 0x39f6: 0x00c0, - // Block 0xe8, offset 0x3a00 - 0x3a00: 0x00c3, 0x3a01: 0x00c3, 0x3a02: 0x00c0, 0x3a03: 0x00c0, 0x3a04: 0x00c0, 0x3a05: 0x00c0, - 0x3a06: 0x00c0, 0x3a07: 0x00c0, 0x3a08: 0x00c0, 0x3a09: 0x00c0, 0x3a0a: 0x00c0, 0x3a0b: 0x00c0, - 0x3a0c: 0x00c0, 0x3a0d: 0x00c0, 0x3a0e: 0x00c0, 0x3a0f: 0x00c0, 0x3a10: 0x00c0, 0x3a11: 0x00c0, - 0x3a12: 0x00c0, 0x3a13: 0x00c0, 0x3a14: 0x00c0, 0x3a15: 0x00c0, 0x3a16: 0x00c0, 0x3a17: 0x00c0, - 0x3a18: 0x00c0, 0x3a19: 0x00c0, 0x3a1a: 0x00c0, 0x3a1b: 0x00c0, 0x3a1c: 0x00c0, 0x3a1d: 0x00c0, - 0x3a1e: 0x00c0, 0x3a1f: 0x00c0, 0x3a20: 0x00c0, 0x3a21: 0x00c0, 0x3a22: 0x00c0, 0x3a23: 0x00c0, - 0x3a24: 0x00c0, 0x3a25: 0x00c0, 0x3a26: 0x00c0, 0x3a27: 0x00c0, 0x3a28: 0x00c0, 0x3a29: 0x00c0, - 0x3a2a: 0x00c0, 0x3a2b: 0x00c0, 0x3a2c: 0x00c0, 0x3a2d: 0x00c0, 0x3a2e: 0x00c0, 0x3a2f: 0x00c0, - 0x3a30: 0x00c0, 0x3a31: 0x00c0, 0x3a32: 0x00c0, 0x3a33: 0x00c0, 0x3a34: 0x00c0, 0x3a35: 0x00c0, - 0x3a36: 0x00c3, 0x3a37: 0x00c3, 0x3a38: 0x00c3, 0x3a39: 0x00c3, 0x3a3a: 0x00c3, 0x3a3b: 0x00c3, - 0x3a3c: 0x00c3, 0x3a3d: 0x00c3, 0x3a3e: 0x00c3, 0x3a3f: 0x00c0, - // Block 0xe9, offset 0x3a40 - 0x3a40: 0x00c5, 0x3a41: 0x00c0, 0x3a42: 0x00c0, 0x3a43: 0x00c0, 0x3a44: 0x00c0, 0x3a45: 0x0080, - 0x3a46: 0x0080, 0x3a47: 0x0080, 0x3a48: 0x0080, 0x3a49: 0x0080, 0x3a4a: 0x00c3, 0x3a4b: 0x00c3, - 0x3a4c: 0x00c3, 0x3a4d: 0x0080, 0x3a50: 0x00c0, 0x3a51: 0x00c0, - 0x3a52: 0x00c0, 0x3a53: 0x00c0, 0x3a54: 0x00c0, 0x3a55: 0x00c0, 0x3a56: 0x00c0, 0x3a57: 0x00c0, - 0x3a58: 0x00c0, 0x3a59: 0x00c0, 0x3a5a: 0x00c0, 0x3a5b: 0x0080, 0x3a5c: 0x00c0, 0x3a5d: 0x0080, - 0x3a5e: 0x0080, 0x3a5f: 0x0080, 0x3a61: 0x0080, 0x3a62: 0x0080, 0x3a63: 0x0080, - 0x3a64: 0x0080, 0x3a65: 0x0080, 0x3a66: 0x0080, 0x3a67: 0x0080, 0x3a68: 0x0080, 0x3a69: 0x0080, - 0x3a6a: 0x0080, 0x3a6b: 0x0080, 0x3a6c: 0x0080, 0x3a6d: 0x0080, 0x3a6e: 0x0080, 0x3a6f: 0x0080, - 0x3a70: 0x0080, 0x3a71: 0x0080, 0x3a72: 0x0080, 0x3a73: 0x0080, 0x3a74: 0x0080, - // Block 0xea, offset 0x3a80 - 0x3a80: 0x00c0, 0x3a81: 0x00c0, 0x3a82: 0x00c0, 0x3a83: 0x00c0, 0x3a84: 0x00c0, 0x3a85: 0x00c0, - 0x3a86: 0x00c0, 0x3a87: 0x00c0, 0x3a88: 0x00c0, 0x3a89: 0x00c0, 0x3a8a: 0x00c0, 0x3a8b: 0x00c0, - 0x3a8c: 0x00c0, 0x3a8d: 0x00c0, 0x3a8e: 0x00c0, 0x3a8f: 0x00c0, 0x3a90: 0x00c0, 0x3a91: 0x00c0, - 0x3a93: 0x00c0, 0x3a94: 0x00c0, 0x3a95: 0x00c0, 0x3a96: 0x00c0, 0x3a97: 0x00c0, - 0x3a98: 0x00c0, 0x3a99: 0x00c0, 0x3a9a: 0x00c0, 0x3a9b: 0x00c0, 0x3a9c: 0x00c0, 0x3a9d: 0x00c0, - 0x3a9e: 0x00c0, 0x3a9f: 0x00c0, 0x3aa0: 0x00c0, 0x3aa1: 0x00c0, 0x3aa2: 0x00c0, 0x3aa3: 0x00c0, - 0x3aa4: 0x00c0, 0x3aa5: 0x00c0, 0x3aa6: 0x00c0, 0x3aa7: 0x00c0, 0x3aa8: 0x00c0, 0x3aa9: 0x00c0, - 0x3aaa: 0x00c0, 0x3aab: 0x00c0, 0x3aac: 0x00c0, 0x3aad: 0x00c0, 0x3aae: 0x00c0, 0x3aaf: 0x00c3, - 0x3ab0: 0x00c3, 0x3ab1: 0x00c3, 0x3ab2: 0x00c0, 0x3ab3: 0x00c0, 0x3ab4: 0x00c3, 0x3ab5: 0x00c5, - 0x3ab6: 0x00c3, 0x3ab7: 0x00c3, 0x3ab8: 0x0080, 0x3ab9: 0x0080, 0x3aba: 0x0080, 0x3abb: 0x0080, - 0x3abc: 0x0080, 0x3abd: 0x0080, 0x3abe: 0x00c3, - // Block 0xeb, offset 0x3ac0 - 0x3ac0: 0x00c0, 0x3ac1: 0x00c0, 0x3ac2: 
[removed hunk content: remainder of the auto-generated derivedProperties value table in the vendored tables file, blocks 0xec (offset 0x3b00) through 0x143 (offset 0x50c0); the entries are property flags such as 0x0040, 0x0080, 0x00c0, 0x00c3 and 0x00cc]
-}
-
-// derivedPropertiesIndex: 36 blocks, 2304 entries, 4608 bytes
-// Block 0 is the zero block.
-var derivedPropertiesIndex = [2304]uint16{
-	// Block 0x0, offset 0x0
-	// Block 0x1, offset 0x40
-	// Block 0x2, offset 0x80
-	// Block 0x3, offset 0xc0
[removed hunk content: auto-generated index data for blocks 0x3 through 0xb, mapping chunks of the code space to the 64-entry value blocks above; the deleted index listing continues below]
0x05, - 0x2c8: 0x05, 0x2c9: 0x05, 0x2ca: 0x05, 0x2cb: 0x05, 0x2cc: 0x05, 0x2cd: 0x05, 0x2ce: 0x05, 0x2cf: 0x05, - 0x2d0: 0x05, 0x2d1: 0x05, 0x2d2: 0x93, 0x2d3: 0x94, 0x2d4: 0x05, 0x2d5: 0x05, 0x2d6: 0x05, 0x2d7: 0x05, - 0x2d8: 0x95, 0x2d9: 0x96, 0x2da: 0x97, 0x2db: 0x98, 0x2dc: 0x99, 0x2dd: 0x9a, 0x2de: 0x9b, 0x2df: 0x9c, - 0x2e0: 0x9d, 0x2e1: 0x9e, 0x2e2: 0x05, 0x2e3: 0x9f, 0x2e4: 0xa0, 0x2e5: 0xa1, 0x2e6: 0xa2, 0x2e7: 0xa3, - 0x2e8: 0xa4, 0x2e9: 0xa5, 0x2ea: 0xa6, 0x2eb: 0xa7, 0x2ec: 0xa8, 0x2ed: 0xa9, 0x2ee: 0x05, 0x2ef: 0xaa, - 0x2f0: 0x05, 0x2f1: 0x05, 0x2f2: 0x05, 0x2f3: 0x05, 0x2f4: 0x05, 0x2f5: 0x05, 0x2f6: 0x05, 0x2f7: 0x05, - 0x2f8: 0x05, 0x2f9: 0x05, 0x2fa: 0x05, 0x2fb: 0x05, 0x2fc: 0x05, 0x2fd: 0x05, 0x2fe: 0x05, 0x2ff: 0x05, - // Block 0xc, offset 0x300 - 0x300: 0x05, 0x301: 0x05, 0x302: 0x05, 0x303: 0x05, 0x304: 0x05, 0x305: 0x05, 0x306: 0x05, 0x307: 0x05, - 0x308: 0x05, 0x309: 0x05, 0x30a: 0x05, 0x30b: 0x05, 0x30c: 0x05, 0x30d: 0x05, 0x30e: 0x05, 0x30f: 0x05, - 0x310: 0x05, 0x311: 0x05, 0x312: 0x05, 0x313: 0x05, 0x314: 0x05, 0x315: 0x05, 0x316: 0x05, 0x317: 0x05, - 0x318: 0x05, 0x319: 0x05, 0x31a: 0x05, 0x31b: 0x05, 0x31c: 0x05, 0x31d: 0x05, 0x31e: 0x05, 0x31f: 0x05, - 0x320: 0x05, 0x321: 0x05, 0x322: 0x05, 0x323: 0x05, 0x324: 0x05, 0x325: 0x05, 0x326: 0x05, 0x327: 0x05, - 0x328: 0x05, 0x329: 0x05, 0x32a: 0x05, 0x32b: 0x05, 0x32c: 0x05, 0x32d: 0x05, 0x32e: 0x05, 0x32f: 0x05, - 0x330: 0x05, 0x331: 0x05, 0x332: 0x05, 0x333: 0x05, 0x334: 0x05, 0x335: 0x05, 0x336: 0x05, 0x337: 0x05, - 0x338: 0x05, 0x339: 0x05, 0x33a: 0x05, 0x33b: 0x05, 0x33c: 0x05, 0x33d: 0x05, 0x33e: 0x05, 0x33f: 0x05, - // Block 0xd, offset 0x340 - 0x340: 0x05, 0x341: 0x05, 0x342: 0x05, 0x343: 0x05, 0x344: 0x05, 0x345: 0x05, 0x346: 0x05, 0x347: 0x05, - 0x348: 0x05, 0x349: 0x05, 0x34a: 0x05, 0x34b: 0x05, 0x34c: 0x05, 0x34d: 0x05, 0x34e: 0x05, 0x34f: 0x05, - 0x350: 0x05, 0x351: 0x05, 0x352: 0x05, 0x353: 0x05, 0x354: 0x05, 0x355: 0x05, 0x356: 0x05, 0x357: 0x05, - 0x358: 0x05, 0x359: 0x05, 0x35a: 0x05, 0x35b: 0x05, 0x35c: 0x05, 0x35d: 0x05, 0x35e: 0xab, 0x35f: 0xac, - // Block 0xe, offset 0x380 - 0x380: 0x3e, 0x381: 0x3e, 0x382: 0x3e, 0x383: 0x3e, 0x384: 0x3e, 0x385: 0x3e, 0x386: 0x3e, 0x387: 0x3e, - 0x388: 0x3e, 0x389: 0x3e, 0x38a: 0x3e, 0x38b: 0x3e, 0x38c: 0x3e, 0x38d: 0x3e, 0x38e: 0x3e, 0x38f: 0x3e, - 0x390: 0x3e, 0x391: 0x3e, 0x392: 0x3e, 0x393: 0x3e, 0x394: 0x3e, 0x395: 0x3e, 0x396: 0x3e, 0x397: 0x3e, - 0x398: 0x3e, 0x399: 0x3e, 0x39a: 0x3e, 0x39b: 0x3e, 0x39c: 0x3e, 0x39d: 0x3e, 0x39e: 0x3e, 0x39f: 0x3e, - 0x3a0: 0x3e, 0x3a1: 0x3e, 0x3a2: 0x3e, 0x3a3: 0x3e, 0x3a4: 0x3e, 0x3a5: 0x3e, 0x3a6: 0x3e, 0x3a7: 0x3e, - 0x3a8: 0x3e, 0x3a9: 0x3e, 0x3aa: 0x3e, 0x3ab: 0x3e, 0x3ac: 0x3e, 0x3ad: 0x3e, 0x3ae: 0x3e, 0x3af: 0x3e, - 0x3b0: 0x3e, 0x3b1: 0x3e, 0x3b2: 0x3e, 0x3b3: 0x3e, 0x3b4: 0x3e, 0x3b5: 0x3e, 0x3b6: 0x3e, 0x3b7: 0x3e, - 0x3b8: 0x3e, 0x3b9: 0x3e, 0x3ba: 0x3e, 0x3bb: 0x3e, 0x3bc: 0x3e, 0x3bd: 0x3e, 0x3be: 0x3e, 0x3bf: 0x3e, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x3e, 0x3c1: 0x3e, 0x3c2: 0x3e, 0x3c3: 0x3e, 0x3c4: 0x3e, 0x3c5: 0x3e, 0x3c6: 0x3e, 0x3c7: 0x3e, - 0x3c8: 0x3e, 0x3c9: 0x3e, 0x3ca: 0x3e, 0x3cb: 0x3e, 0x3cc: 0x3e, 0x3cd: 0x3e, 0x3ce: 0x3e, 0x3cf: 0x3e, - 0x3d0: 0x3e, 0x3d1: 0x3e, 0x3d2: 0x3e, 0x3d3: 0x3e, 0x3d4: 0x3e, 0x3d5: 0x3e, 0x3d6: 0x3e, 0x3d7: 0x3e, - 0x3d8: 0x3e, 0x3d9: 0x3e, 0x3da: 0x3e, 0x3db: 0x3e, 0x3dc: 0x3e, 0x3dd: 0x3e, 0x3de: 0x3e, 0x3df: 0x3e, - 0x3e0: 0x3e, 0x3e1: 0x3e, 0x3e2: 0x3e, 0x3e3: 0x3e, 0x3e4: 0x83, 0x3e5: 0x83, 0x3e6: 0x83, 0x3e7: 0x83, - 0x3e8: 0xad, 0x3e9: 0xae, 0x3ea: 0x83, 
0x3eb: 0xaf, 0x3ec: 0xb0, 0x3ed: 0xb1, 0x3ee: 0x71, 0x3ef: 0xb2, - 0x3f0: 0x71, 0x3f1: 0x71, 0x3f2: 0x71, 0x3f3: 0x71, 0x3f4: 0x71, 0x3f5: 0xb3, 0x3f6: 0xb4, 0x3f7: 0xb5, - 0x3f8: 0xb6, 0x3f9: 0xb7, 0x3fa: 0x71, 0x3fb: 0xb8, 0x3fc: 0xb9, 0x3fd: 0xba, 0x3fe: 0xbb, 0x3ff: 0xbc, - // Block 0x10, offset 0x400 - 0x400: 0xbd, 0x401: 0xbe, 0x402: 0x05, 0x403: 0xbf, 0x404: 0xc0, 0x405: 0xc1, 0x406: 0xc2, 0x407: 0xc3, - 0x40a: 0xc4, 0x40b: 0xc5, 0x40c: 0xc6, 0x40d: 0xc7, 0x40e: 0xc8, 0x40f: 0xc9, - 0x410: 0x05, 0x411: 0x05, 0x412: 0xca, 0x413: 0xcb, 0x414: 0xcc, 0x415: 0xcd, - 0x418: 0x05, 0x419: 0x05, 0x41a: 0x05, 0x41b: 0x05, 0x41c: 0xce, 0x41d: 0xcf, - 0x420: 0xd0, 0x421: 0xd1, 0x422: 0xd2, 0x423: 0xd3, 0x424: 0xd4, 0x426: 0xd5, 0x427: 0xb4, - 0x428: 0xd6, 0x429: 0xd7, 0x42a: 0xd8, 0x42b: 0xd9, 0x42c: 0xda, 0x42d: 0xdb, 0x42e: 0xdc, - 0x430: 0x05, 0x431: 0x5f, 0x432: 0xdd, 0x433: 0xde, - 0x439: 0xdf, - // Block 0x11, offset 0x440 - 0x440: 0xe0, 0x441: 0xe1, 0x442: 0xe2, 0x443: 0xe3, 0x444: 0xe4, 0x445: 0xe5, 0x446: 0xe6, 0x447: 0xe7, - 0x448: 0xe8, 0x44a: 0xe9, 0x44b: 0xea, 0x44c: 0xeb, 0x44d: 0xec, - 0x450: 0xed, 0x451: 0xee, 0x452: 0xef, 0x453: 0xf0, 0x456: 0xf1, 0x457: 0xf2, - 0x458: 0xf3, 0x459: 0xf4, 0x45a: 0xf5, 0x45b: 0xf6, 0x45c: 0xf7, - 0x462: 0xf8, 0x463: 0xf9, - 0x46b: 0xfa, - 0x470: 0xfb, 0x471: 0xfc, 0x472: 0xfd, - // Block 0x12, offset 0x480 - 0x480: 0x05, 0x481: 0x05, 0x482: 0x05, 0x483: 0x05, 0x484: 0x05, 0x485: 0x05, 0x486: 0x05, 0x487: 0x05, - 0x488: 0x05, 0x489: 0x05, 0x48a: 0x05, 0x48b: 0x05, 0x48c: 0x05, 0x48d: 0x05, 0x48e: 0xfe, - 0x490: 0x71, 0x491: 0xff, 0x492: 0x05, 0x493: 0x05, 0x494: 0x05, 0x495: 0x100, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x05, 0x4c1: 0x05, 0x4c2: 0x05, 0x4c3: 0x05, 0x4c4: 0x05, 0x4c5: 0x05, 0x4c6: 0x05, 0x4c7: 0x05, - 0x4c8: 0x05, 0x4c9: 0x05, 0x4ca: 0x05, 0x4cb: 0x05, 0x4cc: 0x05, 0x4cd: 0x05, 0x4ce: 0x05, 0x4cf: 0x05, - 0x4d0: 0x101, - // Block 0x14, offset 0x500 - 0x510: 0x05, 0x511: 0x05, 0x512: 0x05, 0x513: 0x05, 0x514: 0x05, 0x515: 0x05, 0x516: 0x05, 0x517: 0x05, - 0x518: 0x05, 0x519: 0x102, - // Block 0x15, offset 0x540 - 0x560: 0x05, 0x561: 0x05, 0x562: 0x05, 0x563: 0x05, 0x564: 0x05, 0x565: 0x05, 0x566: 0x05, 0x567: 0x05, - 0x568: 0xfa, 0x569: 0x103, 0x56b: 0x104, 0x56c: 0x105, 0x56d: 0x106, 0x56e: 0x107, - 0x57c: 0x05, 0x57d: 0x108, 0x57e: 0x109, 0x57f: 0x10a, - // Block 0x16, offset 0x580 - 0x580: 0x05, 0x581: 0x05, 0x582: 0x05, 0x583: 0x05, 0x584: 0x05, 0x585: 0x05, 0x586: 0x05, 0x587: 0x05, - 0x588: 0x05, 0x589: 0x05, 0x58a: 0x05, 0x58b: 0x05, 0x58c: 0x05, 0x58d: 0x05, 0x58e: 0x05, 0x58f: 0x05, - 0x590: 0x05, 0x591: 0x05, 0x592: 0x05, 0x593: 0x05, 0x594: 0x05, 0x595: 0x05, 0x596: 0x05, 0x597: 0x05, - 0x598: 0x05, 0x599: 0x05, 0x59a: 0x05, 0x59b: 0x05, 0x59c: 0x05, 0x59d: 0x05, 0x59e: 0x05, 0x59f: 0x10b, - 0x5a0: 0x05, 0x5a1: 0x05, 0x5a2: 0x05, 0x5a3: 0x05, 0x5a4: 0x05, 0x5a5: 0x05, 0x5a6: 0x05, 0x5a7: 0x05, - 0x5a8: 0x05, 0x5a9: 0x05, 0x5aa: 0x05, 0x5ab: 0xdd, - // Block 0x17, offset 0x5c0 - 0x5c0: 0x10c, - 0x5f0: 0x05, 0x5f1: 0x10d, 0x5f2: 0x10e, - // Block 0x18, offset 0x600 - 0x600: 0x71, 0x601: 0x71, 0x602: 0x71, 0x603: 0x10f, 0x604: 0x110, 0x605: 0x111, 0x606: 0x112, 0x607: 0x113, - 0x608: 0xc1, 0x609: 0x114, 0x60c: 0x71, 0x60d: 0x115, - 0x610: 0x71, 0x611: 0x116, 0x612: 0x117, 0x613: 0x118, 0x614: 0x119, 0x615: 0x11a, 0x616: 0x71, 0x617: 0x71, - 0x618: 0x71, 0x619: 0x71, 0x61a: 0x11b, 0x61b: 0x71, 0x61c: 0x71, 0x61d: 0x71, 0x61e: 0x71, 0x61f: 0x11c, - 0x620: 0x71, 0x621: 0x71, 0x622: 0x71, 0x623: 0x71, 0x624: 0x71, 0x625: 0x71, 
0x626: 0x71, 0x627: 0x71, - 0x628: 0x11d, 0x629: 0x11e, 0x62a: 0x11f, - // Block 0x19, offset 0x640 - 0x640: 0x120, - 0x660: 0x05, 0x661: 0x05, 0x662: 0x05, 0x663: 0x121, 0x664: 0x122, 0x665: 0x123, - 0x678: 0x124, 0x679: 0x125, 0x67a: 0x126, 0x67b: 0x127, - // Block 0x1a, offset 0x680 - 0x680: 0x128, 0x681: 0x71, 0x682: 0x129, 0x683: 0x12a, 0x684: 0x12b, 0x685: 0x128, 0x686: 0x12c, 0x687: 0x12d, - 0x688: 0x12e, 0x689: 0x12f, 0x68c: 0x71, 0x68d: 0x71, 0x68e: 0x71, 0x68f: 0x71, - 0x690: 0x71, 0x691: 0x71, 0x692: 0x71, 0x693: 0x71, 0x694: 0x71, 0x695: 0x71, 0x696: 0x71, 0x697: 0x71, - 0x698: 0x71, 0x699: 0x71, 0x69a: 0x71, 0x69b: 0x130, 0x69c: 0x71, 0x69d: 0x131, 0x69e: 0x71, 0x69f: 0x132, - 0x6a0: 0x133, 0x6a1: 0x134, 0x6a2: 0x135, 0x6a4: 0x136, 0x6a5: 0x137, 0x6a6: 0x138, 0x6a7: 0x139, - // Block 0x1b, offset 0x6c0 - 0x6c0: 0x90, 0x6c1: 0x90, 0x6c2: 0x90, 0x6c3: 0x90, 0x6c4: 0x90, 0x6c5: 0x90, 0x6c6: 0x90, 0x6c7: 0x90, - 0x6c8: 0x90, 0x6c9: 0x90, 0x6ca: 0x90, 0x6cb: 0x90, 0x6cc: 0x90, 0x6cd: 0x90, 0x6ce: 0x90, 0x6cf: 0x90, - 0x6d0: 0x90, 0x6d1: 0x90, 0x6d2: 0x90, 0x6d3: 0x90, 0x6d4: 0x90, 0x6d5: 0x90, 0x6d6: 0x90, 0x6d7: 0x90, - 0x6d8: 0x90, 0x6d9: 0x90, 0x6da: 0x90, 0x6db: 0x13a, 0x6dc: 0x90, 0x6dd: 0x90, 0x6de: 0x90, 0x6df: 0x90, - 0x6e0: 0x90, 0x6e1: 0x90, 0x6e2: 0x90, 0x6e3: 0x90, 0x6e4: 0x90, 0x6e5: 0x90, 0x6e6: 0x90, 0x6e7: 0x90, - 0x6e8: 0x90, 0x6e9: 0x90, 0x6ea: 0x90, 0x6eb: 0x90, 0x6ec: 0x90, 0x6ed: 0x90, 0x6ee: 0x90, 0x6ef: 0x90, - 0x6f0: 0x90, 0x6f1: 0x90, 0x6f2: 0x90, 0x6f3: 0x90, 0x6f4: 0x90, 0x6f5: 0x90, 0x6f6: 0x90, 0x6f7: 0x90, - 0x6f8: 0x90, 0x6f9: 0x90, 0x6fa: 0x90, 0x6fb: 0x90, 0x6fc: 0x90, 0x6fd: 0x90, 0x6fe: 0x90, 0x6ff: 0x90, - // Block 0x1c, offset 0x700 - 0x700: 0x90, 0x701: 0x90, 0x702: 0x90, 0x703: 0x90, 0x704: 0x90, 0x705: 0x90, 0x706: 0x90, 0x707: 0x90, - 0x708: 0x90, 0x709: 0x90, 0x70a: 0x90, 0x70b: 0x90, 0x70c: 0x90, 0x70d: 0x90, 0x70e: 0x90, 0x70f: 0x90, - 0x710: 0x90, 0x711: 0x90, 0x712: 0x90, 0x713: 0x90, 0x714: 0x90, 0x715: 0x90, 0x716: 0x90, 0x717: 0x90, - 0x718: 0x90, 0x719: 0x90, 0x71a: 0x90, 0x71b: 0x90, 0x71c: 0x13b, 0x71d: 0x90, 0x71e: 0x90, 0x71f: 0x90, - 0x720: 0x13c, 0x721: 0x90, 0x722: 0x90, 0x723: 0x90, 0x724: 0x90, 0x725: 0x90, 0x726: 0x90, 0x727: 0x90, - 0x728: 0x90, 0x729: 0x90, 0x72a: 0x90, 0x72b: 0x90, 0x72c: 0x90, 0x72d: 0x90, 0x72e: 0x90, 0x72f: 0x90, - 0x730: 0x90, 0x731: 0x90, 0x732: 0x90, 0x733: 0x90, 0x734: 0x90, 0x735: 0x90, 0x736: 0x90, 0x737: 0x90, - 0x738: 0x90, 0x739: 0x90, 0x73a: 0x90, 0x73b: 0x90, 0x73c: 0x90, 0x73d: 0x90, 0x73e: 0x90, 0x73f: 0x90, - // Block 0x1d, offset 0x740 - 0x740: 0x90, 0x741: 0x90, 0x742: 0x90, 0x743: 0x90, 0x744: 0x90, 0x745: 0x90, 0x746: 0x90, 0x747: 0x90, - 0x748: 0x90, 0x749: 0x90, 0x74a: 0x90, 0x74b: 0x90, 0x74c: 0x90, 0x74d: 0x90, 0x74e: 0x90, 0x74f: 0x90, - 0x750: 0x90, 0x751: 0x90, 0x752: 0x90, 0x753: 0x90, 0x754: 0x90, 0x755: 0x90, 0x756: 0x90, 0x757: 0x90, - 0x758: 0x90, 0x759: 0x90, 0x75a: 0x90, 0x75b: 0x90, 0x75c: 0x90, 0x75d: 0x90, 0x75e: 0x90, 0x75f: 0x90, - 0x760: 0x90, 0x761: 0x90, 0x762: 0x90, 0x763: 0x90, 0x764: 0x90, 0x765: 0x90, 0x766: 0x90, 0x767: 0x90, - 0x768: 0x90, 0x769: 0x90, 0x76a: 0x90, 0x76b: 0x90, 0x76c: 0x90, 0x76d: 0x90, 0x76e: 0x90, 0x76f: 0x90, - 0x770: 0x90, 0x771: 0x90, 0x772: 0x90, 0x773: 0x90, 0x774: 0x90, 0x775: 0x90, 0x776: 0x90, 0x777: 0x90, - 0x778: 0x90, 0x779: 0x90, 0x77a: 0x13d, - // Block 0x1e, offset 0x780 - 0x7a0: 0x83, 0x7a1: 0x83, 0x7a2: 0x83, 0x7a3: 0x83, 0x7a4: 0x83, 0x7a5: 0x83, 0x7a6: 0x83, 0x7a7: 0x83, - 0x7a8: 0x13e, - // Block 0x1f, offset 0x7c0 - 
0x7d0: 0x0e, 0x7d1: 0x0f, 0x7d2: 0x10, 0x7d3: 0x11, 0x7d4: 0x12, 0x7d6: 0x13, 0x7d7: 0x0a, - 0x7d8: 0x14, 0x7db: 0x15, 0x7dd: 0x16, 0x7de: 0x17, 0x7df: 0x18, - 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, - 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x19, 0x7eb: 0x1a, 0x7ec: 0x1b, 0x7ef: 0x1c, - // Block 0x20, offset 0x800 - 0x800: 0x13f, 0x801: 0x3e, 0x804: 0x3e, 0x805: 0x3e, 0x806: 0x3e, 0x807: 0x140, - // Block 0x21, offset 0x840 - 0x840: 0x3e, 0x841: 0x3e, 0x842: 0x3e, 0x843: 0x3e, 0x844: 0x3e, 0x845: 0x3e, 0x846: 0x3e, 0x847: 0x3e, - 0x848: 0x3e, 0x849: 0x3e, 0x84a: 0x3e, 0x84b: 0x3e, 0x84c: 0x3e, 0x84d: 0x3e, 0x84e: 0x3e, 0x84f: 0x3e, - 0x850: 0x3e, 0x851: 0x3e, 0x852: 0x3e, 0x853: 0x3e, 0x854: 0x3e, 0x855: 0x3e, 0x856: 0x3e, 0x857: 0x3e, - 0x858: 0x3e, 0x859: 0x3e, 0x85a: 0x3e, 0x85b: 0x3e, 0x85c: 0x3e, 0x85d: 0x3e, 0x85e: 0x3e, 0x85f: 0x3e, - 0x860: 0x3e, 0x861: 0x3e, 0x862: 0x3e, 0x863: 0x3e, 0x864: 0x3e, 0x865: 0x3e, 0x866: 0x3e, 0x867: 0x3e, - 0x868: 0x3e, 0x869: 0x3e, 0x86a: 0x3e, 0x86b: 0x3e, 0x86c: 0x3e, 0x86d: 0x3e, 0x86e: 0x3e, 0x86f: 0x3e, - 0x870: 0x3e, 0x871: 0x3e, 0x872: 0x3e, 0x873: 0x3e, 0x874: 0x3e, 0x875: 0x3e, 0x876: 0x3e, 0x877: 0x3e, - 0x878: 0x3e, 0x879: 0x3e, 0x87a: 0x3e, 0x87b: 0x3e, 0x87c: 0x3e, 0x87d: 0x3e, 0x87e: 0x3e, 0x87f: 0x141, - // Block 0x22, offset 0x880 - 0x8a0: 0x1e, - 0x8b0: 0x0c, 0x8b1: 0x0c, 0x8b2: 0x0c, 0x8b3: 0x0c, 0x8b4: 0x0c, 0x8b5: 0x0c, 0x8b6: 0x0c, 0x8b7: 0x0c, - 0x8b8: 0x0c, 0x8b9: 0x0c, 0x8ba: 0x0c, 0x8bb: 0x0c, 0x8bc: 0x0c, 0x8bd: 0x0c, 0x8be: 0x0c, 0x8bf: 0x1f, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x0c, 0x8c1: 0x0c, 0x8c2: 0x0c, 0x8c3: 0x0c, 0x8c4: 0x0c, 0x8c5: 0x0c, 0x8c6: 0x0c, 0x8c7: 0x0c, - 0x8c8: 0x0c, 0x8c9: 0x0c, 0x8ca: 0x0c, 0x8cb: 0x0c, 0x8cc: 0x0c, 0x8cd: 0x0c, 0x8ce: 0x0c, 0x8cf: 0x1f, -} - -// Total table size 25344 bytes (24KiB); checksum: 811C9DC5 diff --git a/vendor/golang.org/x/text/secure/precis/transformer.go b/vendor/golang.org/x/text/secure/precis/transformer.go deleted file mode 100644 index 97ce5e757..000000000 --- a/vendor/golang.org/x/text/secure/precis/transformer.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package precis - -import "golang.org/x/text/transform" - -// Transformer implements the transform.Transformer interface. -type Transformer struct { - t transform.Transformer -} - -// Reset implements the transform.Transformer interface. -func (t Transformer) Reset() { t.t.Reset() } - -// Transform implements the transform.Transformer interface. -func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - return t.t.Transform(dst, src, atEOF) -} - -// Bytes returns a new byte slice with the result of applying t to b. -func (t Transformer) Bytes(b []byte) []byte { - b, _, _ = transform.Bytes(t, b) - return b -} - -// String returns a string with the result of applying t to s. 
-func (t Transformer) String(s string) string { - s, _, _ = transform.String(t, s) - return s -} diff --git a/vendor/golang.org/x/text/secure/precis/trieval.go b/vendor/golang.org/x/text/secure/precis/trieval.go deleted file mode 100644 index fed7d7595..000000000 --- a/vendor/golang.org/x/text/secure/precis/trieval.go +++ /dev/null @@ -1,64 +0,0 @@ -// This file was generated by go generate; DO NOT EDIT - -package precis - -// entry is the entry of a trie table -// 7..6 property (unassigned, disallowed, maybe, valid) -// 5..0 category -type entry uint8 - -const ( - propShift = 6 - propMask = 0xc0 - catMask = 0x3f -) - -func (e entry) property() property { return property(e & propMask) } -func (e entry) category() category { return category(e & catMask) } - -type property uint8 - -// The order of these constants matter. A Profile may consider runes to be -// allowed either from pValid or idDisOrFreePVal. -const ( - unassigned property = iota << propShift - disallowed - idDisOrFreePVal // disallowed for Identifier, pValid for FreeForm - pValid -) - -// compute permutations of all properties and specialCategories. -type category uint8 - -const ( - other category = iota - - // Special rune types - joiningL - joiningD - joiningT - joiningR - viramaModifier - viramaJoinT // Virama + JoiningT - latinSmallL // U+006c - greek - greekJoinT // Greek + JoiningT - hebrew - hebrewJoinT // Hebrew + JoiningT - japanese // hirigana, katakana, han - - // Special rune types associated with contextual rules defined in - // https://tools.ietf.org/html/rfc5892#appendix-A. - // ContextO - zeroWidthNonJoiner // rule 1 - zeroWidthJoiner // rule 2 - // ContextJ - middleDot // rule 3 - greekLowerNumeralSign // rule 4 - hebrewPreceding // rule 5 and 6 - katakanaMiddleDot // rule 7 - arabicIndicDigit // rule 8 - extendedArabicIndicDigit // rule 9 - - numCategories -) diff --git a/vendor/golang.org/x/text/transform/transform.go b/vendor/golang.org/x/text/transform/transform.go index 51862b02b..fe47b9b35 100644 --- a/vendor/golang.org/x/text/transform/transform.go +++ b/vendor/golang.org/x/text/transform/transform.go @@ -6,7 +6,7 @@ // bytes passing through as well as various transformations. Example // transformations provided by other packages include normalization and // conversion between character sets. -package transform +package transform // import "golang.org/x/text/transform" import ( "bytes" @@ -24,6 +24,10 @@ var ( // complete the transformation. ErrShortSrc = errors.New("transform: short source buffer") + // ErrEndOfSpan means that the input and output (the transformed input) + // are not identical. + ErrEndOfSpan = errors.New("transform: input and output are not identical") + // errInconsistentByteCount means that Transform returned success (nil // error) but also returned nSrc inconsistent with the src argument. errInconsistentByteCount = errors.New("transform: inconsistent byte count returned") @@ -60,6 +64,41 @@ type Transformer interface { Reset() } +// SpanningTransformer extends the Transformer interface with a Span method +// that determines how much of the input already conforms to the Transformer. +type SpanningTransformer interface { + Transformer + + // Span returns a position in src such that transforming src[:n] results in + // identical output src[:n] for these bytes. It does not necessarily return + // the largest such n. The atEOF argument tells whether src represents the + // last bytes of the input. 
+ // + // Callers should always account for the n bytes consumed before + // considering the error err. + // + // A nil error means that all input bytes are known to be identical to the + // output produced by the Transformer. A nil error can be be returned + // regardless of whether atEOF is true. If err is nil, then then n must + // equal len(src); the converse is not necessarily true. + // + // ErrEndOfSpan means that the Transformer output may differ from the + // input after n bytes. Note that n may be len(src), meaning that the output + // would contain additional bytes after otherwise identical output. + // ErrShortSrc means that src had insufficient data to determine whether the + // remaining bytes would change. Other than the error conditions listed + // here, implementations are free to report other errors that arise. + // + // Calling Span can modify the Transformer state as a side effect. In + // effect, it does the transformation just as calling Transform would, only + // without copying to a destination buffer and only up to a point it can + // determine the input and output bytes are the same. This is obviously more + // limited than calling Transform, but can be more efficient in terms of + // copying and allocating buffers. Calls to Span and Transform may be + // interleaved. + Span(src []byte, atEOF bool) (n int, err error) +} + // NopResetter can be embedded by implementations of Transformer to add a nop // Reset method. type NopResetter struct{} @@ -278,6 +317,10 @@ func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { return n, n, err } +func (nop) Span(src []byte, atEOF bool) (n int, err error) { + return len(src), nil +} + type discard struct{ NopResetter } func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { @@ -289,8 +332,8 @@ var ( // by consuming all bytes and writing nothing. Discard Transformer = discard{} - // Nop is a Transformer that copies src to dst. - Nop Transformer = nop{} + // Nop is a SpanningTransformer that copies src to dst. + Nop SpanningTransformer = nop{} ) // chain is a sequence of links. A chain with N Transformers has N+1 links and @@ -358,6 +401,8 @@ func (c *chain) Reset() { } } +// TODO: make chain use Span (is going to be fun to implement!) + // Transform applies the transformers of c in sequence. func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { // Set up src and dst in the chain. @@ -448,8 +493,7 @@ func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err erro return dstL.n, srcL.p, err } -// RemoveFunc returns a Transformer that removes from the input all runes r for -// which f(r) is true. Illegal bytes in the input are replaced by RuneError. +// Deprecated: use runes.Remove instead. func RemoveFunc(f func(r rune) bool) Transformer { return removeF(f) } diff --git a/vendor/golang.org/x/text/unicode/bidi/bidi.go b/vendor/golang.org/x/text/unicode/bidi/bidi.go index 1254119b5..3fc4a6252 100644 --- a/vendor/golang.org/x/text/unicode/bidi/bidi.go +++ b/vendor/golang.org/x/text/unicode/bidi/bidi.go @@ -10,7 +10,7 @@ // // NOTE: UNDER CONSTRUCTION. This API may change in backwards incompatible ways // and without notice. 
-package bidi +package bidi // import "golang.org/x/text/unicode/bidi" // TODO: // The following functionality would not be hard to implement, but hinges on diff --git a/vendor/golang.org/x/text/unicode/bidi/bracket.go b/vendor/golang.org/x/text/unicode/bidi/bracket.go index e88119d1b..601e25920 100644 --- a/vendor/golang.org/x/text/unicode/bidi/bracket.go +++ b/vendor/golang.org/x/text/unicode/bidi/bracket.go @@ -84,7 +84,7 @@ func resolvePairedBrackets(s *isolatingRunSequence) { dirEmbed = R } p.locateBrackets(s.p.pairTypes, s.p.pairValues) - p.resolveBrackets(dirEmbed) + p.resolveBrackets(dirEmbed, s.p.initialTypes) } type bracketPairer struct { @@ -125,6 +125,8 @@ func (p *bracketPairer) matchOpener(pairValues []rune, opener, closer int) bool return pairValues[p.indexes[opener]] == pairValues[p.indexes[closer]] } +const maxPairingDepth = 63 + // locateBrackets locates matching bracket pairs according to BD16. // // This implementation uses a linked list instead of a stack, because, while @@ -136,11 +138,17 @@ func (p *bracketPairer) locateBrackets(pairTypes []bracketType, pairValues []run for i, index := range p.indexes { // look at the bracket type for each character - switch pairTypes[index] { - case bpNone: + if pairTypes[index] == bpNone || p.codesIsolatedRun[i] != ON { // continue scanning - + continue + } + switch pairTypes[index] { case bpOpen: + // check if maximum pairing depth reached + if p.openers.Len() == maxPairingDepth { + p.openers.Init() + return + } // remember opener location, most recent first p.openers.PushFront(i) @@ -270,7 +278,7 @@ func (p *bracketPairer) classBeforePair(loc bracketPair) Class { } // assignBracketType implements rule N0 for a single bracket pair. -func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class) { +func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class, initialTypes []Class) { // rule "N0, a", inspect contents of pair dirPair := p.classifyPairContent(loc, dirEmbed) @@ -295,13 +303,33 @@ func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class) { // direction // set the bracket types to the type found + p.setBracketsToType(loc, dirPair, initialTypes) +} + +func (p *bracketPairer) setBracketsToType(loc bracketPair, dirPair Class, initialTypes []Class) { p.codesIsolatedRun[loc.opener] = dirPair p.codesIsolatedRun[loc.closer] = dirPair + + for i := loc.opener + 1; i < loc.closer; i++ { + index := p.indexes[i] + if initialTypes[index] != NSM { + break + } + p.codesIsolatedRun[i] = dirPair + } + + for i := loc.closer + 1; i < len(p.indexes); i++ { + index := p.indexes[i] + if initialTypes[index] != NSM { + break + } + p.codesIsolatedRun[i] = dirPair + } } // resolveBrackets implements rule N0 for a list of pairs. 
-func (p *bracketPairer) resolveBrackets(dirEmbed Class) { +func (p *bracketPairer) resolveBrackets(dirEmbed Class, initialTypes []Class) { for _, loc := range p.pairPositions { - p.assignBracketType(loc, dirEmbed) + p.assignBracketType(loc, dirEmbed, initialTypes) } } diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index 381f30ad0..d4c1399f0 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -309,6 +309,9 @@ func (p *paragraph) determineExplicitEmbeddingLevels() { } if isIsolate { p.resultLevels[i] = stack.lastEmbeddingLevel() + if stack.lastDirectionalOverrideStatus() != ON { + p.resultTypes[i] = stack.lastDirectionalOverrideStatus() + } } var newLevel level @@ -723,7 +726,7 @@ loop: continue loop } } - log.Panicf("invalid bidi code %s present in assertOnly at position %d", t, s.indexes[i]) + log.Panicf("invalid bidi code %v present in assertOnly at position %d", t, s.indexes[i]) } } diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go deleted file mode 100644 index 040f3013d..000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "flag" - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -var outputFile = flag.String("out", "tables.go", "output file") - -func main() { - gen.Init() - gen.Repackage("gen_trieval.go", "trieval.go", "bidi") - gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") - - genTables() -} - -// bidiClass names and codes taken from class "bc" in -// http://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt -var bidiClass = map[string]Class{ - "AL": AL, // ArabicLetter - "AN": AN, // ArabicNumber - "B": B, // ParagraphSeparator - "BN": BN, // BoundaryNeutral - "CS": CS, // CommonSeparator - "EN": EN, // EuropeanNumber - "ES": ES, // EuropeanSeparator - "ET": ET, // EuropeanTerminator - "L": L, // LeftToRight - "NSM": NSM, // NonspacingMark - "ON": ON, // OtherNeutral - "R": R, // RightToLeft - "S": S, // SegmentSeparator - "WS": WS, // WhiteSpace - - "FSI": Control, - "PDF": Control, - "PDI": Control, - "LRE": Control, - "LRI": Control, - "LRO": Control, - "RLE": Control, - "RLI": Control, - "RLO": Control, -} - -func genTables() { - if numClass > 0x0F { - log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) - } - w := gen.NewCodeWriter() - defer w.WriteGoFile(*outputFile, "bidi") - - gen.WriteUnicodeVersion(w) - - t := triegen.NewTrie("bidi") - - // Build data about bracket mapping. These bits need to be or-ed with - // any other bits. - orMask := map[rune]uint64{} - - xorMap := map[rune]int{} - xorMasks := []rune{0} // First value is no-op. 
- - ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { - r1 := p.Rune(0) - r2 := p.Rune(1) - xor := r1 ^ r2 - if _, ok := xorMap[xor]; !ok { - xorMap[xor] = len(xorMasks) - xorMasks = append(xorMasks, xor) - } - entry := uint64(xorMap[xor]) << xorMaskShift - switch p.String(2) { - case "o": - entry |= openMask - case "c", "n": - default: - log.Fatalf("Unknown bracket class %q.", p.String(2)) - } - orMask[r1] = entry - }) - - w.WriteComment(` - xorMasks contains masks to be xor-ed with brackets to get the reverse - version.`) - w.WriteVar("xorMasks", xorMasks) - - done := map[rune]bool{} - - insert := func(r rune, c Class) { - if !done[r] { - t.Insert(r, orMask[r]|uint64(c)) - done[r] = true - } - } - - // Insert the derived BiDi properties. - ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { - r := p.Rune(0) - class, ok := bidiClass[p.String(1)] - if !ok { - log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) - } - insert(r, class) - }) - visitDefaults(insert) - - // TODO: use sparse blocks. This would reduce table size considerably - // from the looks of it. - - sz, err := t.Gen(w) - if err != nil { - log.Fatal(err) - } - w.Size += sz -} - -// dummy values to make methods in gen_common compile. The real versions -// will be generated by this file to tables.go. -var ( - xorMasks []rune -) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go deleted file mode 100644 index 51bd68fa7..000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "unicode" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/ucd" - "golang.org/x/text/unicode/rangetable" -) - -// These tables are hand-extracted from: -// http://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt -func visitDefaults(fn func(r rune, c Class)) { - // first write default values for ranges listed above. - visitRunes(fn, AL, []rune{ - 0x0600, 0x07BF, // Arabic - 0x08A0, 0x08FF, // Arabic Extended-A - 0xFB50, 0xFDCF, // Arabic Presentation Forms - 0xFDF0, 0xFDFF, - 0xFE70, 0xFEFF, - 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols - }) - visitRunes(fn, R, []rune{ - 0x0590, 0x05FF, // Hebrew - 0x07C0, 0x089F, // Nko et al. - 0xFB1D, 0xFB4F, - 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al. - 0x0001E800, 0x0001EDFF, - 0x0001EF00, 0x0001EFFF, - }) - visitRunes(fn, ET, []rune{ // European Terminator - 0x20A0, 0x20Cf, // Currency symbols - }) - rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { - fn(r, BN) // Boundary Neutral - }) - ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { - if p.String(1) == "Default_Ignorable_Code_Point" { - fn(p.Rune(0), BN) // Boundary Neutral - } - }) -} - -func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { - for i := 0; i < len(runes); i += 2 { - lo, hi := runes[i], runes[i+1] - for j := lo; j <= hi; j++ { - fn(j, c) - } - } -} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go deleted file mode 100644 index 9cb994289..000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// Class is the Unicode BiDi class. Each rune has a single class. -type Class uint - -const ( - L Class = iota // LeftToRight - R // RightToLeft - EN // EuropeanNumber - ES // EuropeanSeparator - ET // EuropeanTerminator - AN // ArabicNumber - CS // CommonSeparator - B // ParagraphSeparator - S // SegmentSeparator - WS // WhiteSpace - ON // OtherNeutral - BN // BoundaryNeutral - NSM // NonspacingMark - AL // ArabicLetter - Control // Control LRO - PDI - - numClass - - LRO // LeftToRightOverride - RLO // RightToLeftOverride - LRE // LeftToRightEmbedding - RLE // RightToLeftEmbedding - PDF // PopDirectionalFormat - LRI // LeftToRightIsolate - RLI // RightToLeftIsolate - FSI // FirstStrongIsolate - PDI // PopDirectionalIsolate - - unknownClass = ^Class(0) -) - -var controlToClass = map[rune]Class{ - 0x202D: LRO, // LeftToRightOverride, - 0x202E: RLO, // RightToLeftOverride, - 0x202A: LRE, // LeftToRightEmbedding, - 0x202B: RLE, // RightToLeftEmbedding, - 0x202C: PDF, // PopDirectionalFormat, - 0x2066: LRI, // LeftToRightIsolate, - 0x2067: RLI, // RightToLeftIsolate, - 0x2068: FSI, // FirstStrongIsolate, - 0x2069: PDI, // PopDirectionalIsolate, -} - -// A trie entry has the following bits: -// 7..5 XOR mask for brackets -// 4 1: Bracket open, 0: Bracket close -// 3..0 Class type - -const ( - openMask = 0x10 - xorMaskShift = 5 -) diff --git a/vendor/golang.org/x/text/unicode/bidi/tables.go b/vendor/golang.org/x/text/unicode/bidi/tables.go index 2d4dde07c..7212d5add 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables.go @@ -1,4 +1,4 @@ -// This file was generated by go generate; DO NOT EDIT +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/trieval.go b/vendor/golang.org/x/text/unicode/bidi/trieval.go index bebd855ef..4c459c4b7 100644 --- a/vendor/golang.org/x/text/unicode/bidi/trieval.go +++ b/vendor/golang.org/x/text/unicode/bidi/trieval.go @@ -1,4 +1,4 @@ -// This file was generated by go generate; DO NOT EDIT +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. package bidi diff --git a/vendor/golang.org/x/text/unicode/norm/composition.go b/vendor/golang.org/x/text/unicode/norm/composition.go index d17b278ad..bab4c5de0 100644 --- a/vendor/golang.org/x/text/unicode/norm/composition.go +++ b/vendor/golang.org/x/text/unicode/norm/composition.go @@ -33,17 +33,9 @@ const ( // streamSafe implements the policy of when a CGJ should be inserted. type streamSafe uint8 -// mkStreamSafe is a shorthand for declaring a streamSafe var and calling -// first on it. -func mkStreamSafe(p Properties) streamSafe { - return streamSafe(p.nTrailingNonStarters()) -} - -// first inserts the first rune of a segment. +// first inserts the first rune of a segment. It is a faster version of next if +// it is known p represents the first rune in a segment. func (ss *streamSafe) first(p Properties) { - if *ss != 0 { - panic("!= 0") - } *ss = streamSafe(p.nTrailingNonStarters()) } @@ -66,7 +58,7 @@ func (ss *streamSafe) next(p Properties) ssState { // be a non-starter. Note that it always hold that if nLead > 0 then // nLead == nTrail. 
if n == 0 { - *ss = 0 + *ss = streamSafe(p.nTrailingNonStarters()) return ssStarter } return ssSuccess @@ -142,7 +134,6 @@ func (rb *reorderBuffer) setFlusher(out []byte, f func(*reorderBuffer) bool) { func (rb *reorderBuffer) reset() { rb.nrune = 0 rb.nbyte = 0 - rb.ss = 0 } func (rb *reorderBuffer) doFlush() bool { @@ -257,6 +248,9 @@ func (rb *reorderBuffer) insertUnsafe(src input, i int, info Properties) { // It flushes the buffer on each new segment start. func (rb *reorderBuffer) insertDecomposed(dcomp []byte) insertErr { rb.tmpBytes.setBytes(dcomp) + // As the streamSafe accounting already handles the counting for modifiers, + // we don't have to call next. However, we do need to keep the accounting + // intact when flushing the buffer. for i := 0; i < len(dcomp); { info := rb.f.info(rb.tmpBytes, i) if info.BoundaryBefore() && rb.nrune > 0 && !rb.doFlush() { diff --git a/vendor/golang.org/x/text/unicode/norm/forminfo.go b/vendor/golang.org/x/text/unicode/norm/forminfo.go index 15a67c653..e67e7655c 100644 --- a/vendor/golang.org/x/text/unicode/norm/forminfo.go +++ b/vendor/golang.org/x/text/unicode/norm/forminfo.go @@ -10,7 +10,7 @@ package norm // and its corresponding decomposing form share the same trie. Each trie maps // a rune to a uint16. The values take two forms. For v >= 0x8000: // bits -// 15: 1 (inverse of NFD_QD bit of qcInfo) +// 15: 1 (inverse of NFD_QC bit of qcInfo) // 13..7: qcInfo (see below). isYesD is always true (no decompostion). // 6..0: ccc (compressed CCC value). // For v < 0x8000, the respective rune has a decomposition and v is an index @@ -56,28 +56,31 @@ type formInfo struct { nextMain iterFunc } -var formTable []*formInfo - -func init() { - formTable = make([]*formInfo, 4) - - for i := range formTable { - f := &formInfo{} - formTable[i] = f - f.form = Form(i) - if Form(i) == NFKD || Form(i) == NFKC { - f.compatibility = true - f.info = lookupInfoNFKC - } else { - f.info = lookupInfoNFC - } - f.nextMain = nextDecomposed - if Form(i) == NFC || Form(i) == NFKC { - f.nextMain = nextComposed - f.composing = true - } - } -} +var formTable = []*formInfo{{ + form: NFC, + composing: true, + compatibility: false, + info: lookupInfoNFC, + nextMain: nextComposed, +}, { + form: NFD, + composing: false, + compatibility: false, + info: lookupInfoNFC, + nextMain: nextDecomposed, +}, { + form: NFKC, + composing: true, + compatibility: true, + info: lookupInfoNFKC, + nextMain: nextComposed, +}, { + form: NFKD, + composing: false, + compatibility: true, + info: lookupInfoNFKC, + nextMain: nextDecomposed, +}} // We do not distinguish between boundaries for NFC, NFD, etc. to avoid // unexpected behavior for the user. 
For example, in NFD, there is a boundary diff --git a/vendor/golang.org/x/text/unicode/norm/input.go b/vendor/golang.org/x/text/unicode/norm/input.go index 045d4ccce..479e35bc2 100644 --- a/vendor/golang.org/x/text/unicode/norm/input.go +++ b/vendor/golang.org/x/text/unicode/norm/input.go @@ -90,16 +90,20 @@ func (in *input) charinfoNFKC(p int) (uint16, int) { } func (in *input) hangul(p int) (r rune) { + var size int if in.bytes == nil { if !isHangulString(in.str[p:]) { return 0 } - r, _ = utf8.DecodeRuneInString(in.str[p:]) + r, size = utf8.DecodeRuneInString(in.str[p:]) } else { if !isHangul(in.bytes[p:]) { return 0 } - r, _ = utf8.DecodeRune(in.bytes[p:]) + r, size = utf8.DecodeRune(in.bytes[p:]) + } + if size != hangulUTF8Size { + return 0 } return r } diff --git a/vendor/golang.org/x/text/unicode/norm/iter.go b/vendor/golang.org/x/text/unicode/norm/iter.go index 0a42a72de..ce17f96c2 100644 --- a/vendor/golang.org/x/text/unicode/norm/iter.go +++ b/vendor/golang.org/x/text/unicode/norm/iter.go @@ -41,6 +41,7 @@ func (i *Iter) Init(f Form, src []byte) { i.next = i.rb.f.nextMain i.asciiF = nextASCIIBytes i.info = i.rb.f.info(i.rb.src, i.p) + i.rb.ss.first(i.info) } // InitString initializes i to iterate over src after normalizing it to Form f. @@ -56,11 +57,12 @@ func (i *Iter) InitString(f Form, src string) { i.next = i.rb.f.nextMain i.asciiF = nextASCIIString i.info = i.rb.f.info(i.rb.src, i.p) + i.rb.ss.first(i.info) } // Seek sets the segment to be returned by the next call to Next to start // at position p. It is the responsibility of the caller to set p to the -// start of a UTF8 rune. +// start of a segment. func (i *Iter) Seek(offset int64, whence int) (int64, error) { var abs int64 switch whence { @@ -84,6 +86,7 @@ func (i *Iter) Seek(offset int64, whence int) (int64, error) { i.multiSeg = nil i.next = i.rb.f.nextMain i.info = i.rb.f.info(i.rb.src, i.p) + i.rb.ss.first(i.info) return abs, nil } @@ -161,6 +164,7 @@ func nextHangul(i *Iter) []byte { if next >= i.rb.nsrc { i.setDone() } else if i.rb.src.hangul(next) == 0 { + i.rb.ss.next(i.info) i.info = i.rb.f.info(i.rb.src, i.p) i.next = i.rb.f.nextMain return i.next(i) @@ -204,12 +208,10 @@ func nextMultiNorm(i *Iter) []byte { if info.BoundaryBefore() { i.rb.compose() seg := i.buf[:i.rb.flushCopy(i.buf[:])] - i.rb.ss.first(info) i.rb.insertUnsafe(input{bytes: d}, j, info) i.multiSeg = d[j+int(info.size):] return seg } - i.rb.ss.next(info) i.rb.insertUnsafe(input{bytes: d}, j, info) j += int(info.size) } @@ -222,9 +224,9 @@ func nextMultiNorm(i *Iter) []byte { func nextDecomposed(i *Iter) (next []byte) { outp := 0 inCopyStart, outCopyStart := i.p, 0 - ss := mkStreamSafe(i.info) for { if sz := int(i.info.size); sz <= 1 { + i.rb.ss = 0 p := i.p i.p++ // ASCII or illegal byte. Either way, advance by 1. if i.p >= i.rb.nsrc { @@ -243,6 +245,8 @@ func nextDecomposed(i *Iter) (next []byte) { p := outp + len(d) if outp > 0 { i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p) + // TODO: this condition should not be possible, but we leave it + // in for defensive purposes. 
if p > len(i.buf) { return i.buf[:outp] } @@ -266,7 +270,7 @@ func nextDecomposed(i *Iter) (next []byte) { } else { i.info = i.rb.f.info(i.rb.src, i.p) } - switch ss.next(i.info) { + switch i.rb.ss.next(i.info) { case ssOverflow: i.next = nextCGJDecompose fallthrough @@ -309,7 +313,7 @@ func nextDecomposed(i *Iter) (next []byte) { } prevCC := i.info.tccc i.info = i.rb.f.info(i.rb.src, i.p) - if v := ss.next(i.info); v == ssStarter { + if v := i.rb.ss.next(i.info); v == ssStarter { break } else if v == ssOverflow { i.next = nextCGJDecompose @@ -335,10 +339,6 @@ doNorm: func doNormDecomposed(i *Iter) []byte { for { - if s := i.rb.ss.next(i.info); s == ssOverflow { - i.next = nextCGJDecompose - break - } i.rb.insertUnsafe(i.rb.src, i.p, i.info) if i.p += int(i.info.size); i.p >= i.rb.nsrc { i.setDone() @@ -348,6 +348,10 @@ func doNormDecomposed(i *Iter) []byte { if i.info.ccc == 0 { break } + if s := i.rb.ss.next(i.info); s == ssOverflow { + i.next = nextCGJDecompose + break + } } // new segment or too many combining characters: exit normalization return i.buf[:i.rb.flushCopy(i.buf[:])] @@ -357,6 +361,7 @@ func nextCGJDecompose(i *Iter) []byte { i.rb.ss = 0 i.rb.insertCGJ() i.next = nextDecomposed + i.rb.ss.first(i.info) buf := doNormDecomposed(i) return buf } @@ -365,7 +370,6 @@ func nextCGJDecompose(i *Iter) []byte { func nextComposed(i *Iter) []byte { outp, startp := 0, i.p var prevCC uint8 - ss := mkStreamSafe(i.info) for { if !i.info.isYesC() { goto doNorm @@ -385,11 +389,12 @@ func nextComposed(i *Iter) []byte { i.setDone() break } else if i.rb.src._byte(i.p) < utf8.RuneSelf { + i.rb.ss = 0 i.next = i.asciiF break } i.info = i.rb.f.info(i.rb.src, i.p) - if v := ss.next(i.info); v == ssStarter { + if v := i.rb.ss.next(i.info); v == ssStarter { break } else if v == ssOverflow { i.next = nextCGJCompose @@ -401,8 +406,10 @@ func nextComposed(i *Iter) []byte { } return i.returnSlice(startp, i.p) doNorm: + // reset to start position i.p = startp i.info = i.rb.f.info(i.rb.src, i.p) + i.rb.ss.first(i.info) if i.info.multiSegment() { d := i.info.Decomposition() info := i.rb.f.info(input{bytes: d}, 0) diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go deleted file mode 100644 index 07bdff6bd..000000000 --- a/vendor/golang.org/x/text/unicode/norm/maketables.go +++ /dev/null @@ -1,978 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Normalization table generator. -// Data read from the web. -// See forminfo.go for a description of the trie values associated with each rune. 
- -package main - -import ( - "bytes" - "flag" - "fmt" - "io" - "log" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -func main() { - gen.Init() - loadUnicodeData() - compactCCC() - loadCompositionExclusions() - completeCharFields(FCanonical) - completeCharFields(FCompatibility) - computeNonStarterCounts() - verifyComputed() - printChars() - if *test { - testDerived() - printTestdata() - } else { - makeTables() - } -} - -var ( - tablelist = flag.String("tables", - "all", - "comma-separated list of which tables to generate; "+ - "can be 'decomp', 'recomp', 'info' and 'all'") - test = flag.Bool("test", - false, - "test existing tables against DerivedNormalizationProps and generate test data for regression testing") - verbose = flag.Bool("verbose", - false, - "write data to stdout as it is parsed") -) - -const MaxChar = 0x10FFFF // anything above this shouldn't exist - -// Quick Check properties of runes allow us to quickly -// determine whether a rune may occur in a normal form. -// For a given normal form, a rune may be guaranteed to occur -// verbatim (QC=Yes), may or may not combine with another -// rune (QC=Maybe), or may not occur (QC=No). -type QCResult int - -const ( - QCUnknown QCResult = iota - QCYes - QCNo - QCMaybe -) - -func (r QCResult) String() string { - switch r { - case QCYes: - return "Yes" - case QCNo: - return "No" - case QCMaybe: - return "Maybe" - } - return "***UNKNOWN***" -} - -const ( - FCanonical = iota // NFC or NFD - FCompatibility // NFKC or NFKD - FNumberOfFormTypes -) - -const ( - MComposed = iota // NFC or NFKC - MDecomposed // NFD or NFKD - MNumberOfModes -) - -// This contains only the properties we're interested in. -type Char struct { - name string - codePoint rune // if zero, this index is not a valid code point. - ccc uint8 // canonical combining class - origCCC uint8 - excludeInComp bool // from CompositionExclusions.txt - compatDecomp bool // it has a compatibility expansion - - nTrailingNonStarters uint8 - nLeadingNonStarters uint8 // must be equal to trailing if non-zero - - forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility - - state State -} - -var chars = make([]Char, MaxChar+1) -var cccMap = make(map[uint8]uint8) - -func (c Char) String() string { - buf := new(bytes.Buffer) - - fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name) - fmt.Fprintf(buf, " ccc: %v\n", c.ccc) - fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp) - fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp) - fmt.Fprintf(buf, " state: %v\n", c.state) - fmt.Fprintf(buf, " NFC:\n") - fmt.Fprint(buf, c.forms[FCanonical]) - fmt.Fprintf(buf, " NFKC:\n") - fmt.Fprint(buf, c.forms[FCompatibility]) - - return buf.String() -} - -// In UnicodeData.txt, some ranges are marked like this: -// 3400;<CJK Ideograph Extension A, First>;Lo;0;L;;;;;N;;;;; -// 4DB5;<CJK Ideograph Extension A, Last>;Lo;0;L;;;;;N;;;;; -// parseCharacter keeps a state variable indicating the weirdness. 
-type State int - -const ( - SNormal State = iota // known to be zero for the type - SFirst - SLast - SMissing -) - -var lastChar = rune('\u0000') - -func (c Char) isValid() bool { - return c.codePoint != 0 && c.state != SMissing -} - -type FormInfo struct { - quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed - verified [MNumberOfModes]bool // index: MComposed or MDecomposed - - combinesForward bool // May combine with rune on the right - combinesBackward bool // May combine with rune on the left - isOneWay bool // Never appears in result - inDecomp bool // Some decompositions result in this char. - decomp Decomposition - expandedDecomp Decomposition -} - -func (f FormInfo) String() string { - buf := bytes.NewBuffer(make([]byte, 0)) - - fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed]) - fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed]) - fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward) - fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward) - fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay) - fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp) - fmt.Fprintf(buf, " decomposition: %X\n", f.decomp) - fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp) - - return buf.String() -} - -type Decomposition []rune - -func parseDecomposition(s string, skipfirst bool) (a []rune, err error) { - decomp := strings.Split(s, " ") - if len(decomp) > 0 && skipfirst { - decomp = decomp[1:] - } - for _, d := range decomp { - point, err := strconv.ParseUint(d, 16, 64) - if err != nil { - return a, err - } - a = append(a, rune(point)) - } - return a, nil -} - -func loadUnicodeData() { - f := gen.OpenUCDFile("UnicodeData.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(ucd.CodePoint) - char := &chars[r] - - char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass)) - decmap := p.String(ucd.DecompMapping) - - exp, err := parseDecomposition(decmap, false) - isCompat := false - if err != nil { - if len(decmap) > 0 { - exp, err = parseDecomposition(decmap, true) - if err != nil { - log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err) - } - isCompat = true - } - } - - char.name = p.String(ucd.Name) - char.codePoint = r - char.forms[FCompatibility].decomp = exp - if !isCompat { - char.forms[FCanonical].decomp = exp - } else { - char.compatDecomp = true - } - if len(decmap) > 0 { - char.forms[FCompatibility].decomp = exp - } - } - if err := p.Err(); err != nil { - log.Fatal(err) - } -} - -// compactCCC converts the sparse set of CCC values to a continguous one, -// reducing the number of bits needed from 8 to 6. -func compactCCC() { - m := make(map[uint8]uint8) - for i := range chars { - c := &chars[i] - m[c.ccc] = 0 - } - cccs := []int{} - for v, _ := range m { - cccs = append(cccs, int(v)) - } - sort.Ints(cccs) - for i, c := range cccs { - cccMap[uint8(i)] = uint8(c) - m[uint8(c)] = uint8(i) - } - for i := range chars { - c := &chars[i] - c.origCCC = c.ccc - c.ccc = m[c.ccc] - } - if len(m) >= 1<<6 { - log.Fatalf("too many difference CCC values: %d >= 64", len(m)) - } -} - -// CompositionExclusions.txt has form: -// 0958 # ... 
-// See http://unicode.org/reports/tr44/ for full explanation -func loadCompositionExclusions() { - f := gen.OpenUCDFile("CompositionExclusions.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - c := &chars[p.Rune(0)] - if c.excludeInComp { - log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint) - } - c.excludeInComp = true - } - if e := p.Err(); e != nil { - log.Fatal(e) - } -} - -// hasCompatDecomp returns true if any of the recursive -// decompositions contains a compatibility expansion. -// In this case, the character may not occur in NFK*. -func hasCompatDecomp(r rune) bool { - c := &chars[r] - if c.compatDecomp { - return true - } - for _, d := range c.forms[FCompatibility].decomp { - if hasCompatDecomp(d) { - return true - } - } - return false -} - -// Hangul related constants. -const ( - HangulBase = 0xAC00 - HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28) - - JamoLBase = 0x1100 - JamoLEnd = 0x1113 - JamoVBase = 0x1161 - JamoVEnd = 0x1176 - JamoTBase = 0x11A8 - JamoTEnd = 0x11C3 - - JamoLVTCount = 19 * 21 * 28 - JamoTCount = 28 -) - -func isHangul(r rune) bool { - return HangulBase <= r && r < HangulEnd -} - -func isHangulWithoutJamoT(r rune) bool { - if !isHangul(r) { - return false - } - r -= HangulBase - return r < JamoLVTCount && r%JamoTCount == 0 -} - -func ccc(r rune) uint8 { - return chars[r].ccc -} - -// Insert a rune in a buffer, ordered by Canonical Combining Class. -func insertOrdered(b Decomposition, r rune) Decomposition { - n := len(b) - b = append(b, 0) - cc := ccc(r) - if cc > 0 { - // Use bubble sort. - for ; n > 0; n-- { - if ccc(b[n-1]) <= cc { - break - } - b[n] = b[n-1] - } - } - b[n] = r - return b -} - -// Recursively decompose. -func decomposeRecursive(form int, r rune, d Decomposition) Decomposition { - dcomp := chars[r].forms[form].decomp - if len(dcomp) == 0 { - return insertOrdered(d, r) - } - for _, c := range dcomp { - d = decomposeRecursive(form, c, d) - } - return d -} - -func completeCharFields(form int) { - // Phase 0: pre-expand decomposition. - for i := range chars { - f := &chars[i].forms[form] - if len(f.decomp) == 0 { - continue - } - exp := make(Decomposition, 0) - for _, c := range f.decomp { - exp = decomposeRecursive(form, c, exp) - } - f.expandedDecomp = exp - } - - // Phase 1: composition exclusion, mark decomposition. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - // Marks script-specific exclusions and version restricted. - f.isOneWay = c.excludeInComp - - // Singletons - f.isOneWay = f.isOneWay || len(f.decomp) == 1 - - // Non-starter decompositions - if len(f.decomp) > 1 { - chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0 - f.isOneWay = f.isOneWay || chk - } - - // Runes that decompose into more than two runes. - f.isOneWay = f.isOneWay || len(f.decomp) > 2 - - if form == FCompatibility { - f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint) - } - - for _, r := range f.decomp { - chars[r].forms[form].inDecomp = true - } - } - - // Phase 2: forward and backward combining. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - if !f.isOneWay && len(f.decomp) == 2 { - f0 := &chars[f.decomp[0]].forms[form] - f1 := &chars[f.decomp[1]].forms[form] - if !f0.isOneWay { - f0.combinesForward = true - } - if !f1.isOneWay { - f1.combinesBackward = true - } - } - if isHangulWithoutJamoT(rune(i)) { - f.combinesForward = true - } - } - - // Phase 3: quick check values. 
- for i := range chars { - c := &chars[i] - f := &c.forms[form] - - switch { - case len(f.decomp) > 0: - f.quickCheck[MDecomposed] = QCNo - case isHangul(rune(i)): - f.quickCheck[MDecomposed] = QCNo - default: - f.quickCheck[MDecomposed] = QCYes - } - switch { - case f.isOneWay: - f.quickCheck[MComposed] = QCNo - case (i & 0xffff00) == JamoLBase: - f.quickCheck[MComposed] = QCYes - if JamoLBase <= i && i < JamoLEnd { - f.combinesForward = true - } - if JamoVBase <= i && i < JamoVEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - f.combinesForward = true - } - if JamoTBase <= i && i < JamoTEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - } - case !f.combinesBackward: - f.quickCheck[MComposed] = QCYes - default: - f.quickCheck[MComposed] = QCMaybe - } - } -} - -func computeNonStarterCounts() { - // Phase 4: leading and trailing non-starter count - for i := range chars { - c := &chars[i] - - runes := []rune{rune(i)} - // We always use FCompatibility so that the CGJ insertion points do not - // change for repeated normalizations with different forms. - if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 { - runes = exp - } - // We consider runes that combine backwards to be non-starters for the - // purpose of Stream-Safe Text Processing. - for _, r := range runes { - if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nLeadingNonStarters++ - } - for i := len(runes) - 1; i >= 0; i-- { - if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nTrailingNonStarters++ - } - if c.nTrailingNonStarters > 3 { - log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes) - } - - if isHangul(rune(i)) { - c.nTrailingNonStarters = 2 - if isHangulWithoutJamoT(rune(i)) { - c.nTrailingNonStarters = 1 - } - } - - if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t { - log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t) - } - if t := c.nTrailingNonStarters; t > 3 { - log.Fatalf("%U: number of trailing non-starters is %d > 3", t) - } - } -} - -func printBytes(w io.Writer, b []byte, name string) { - fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b)) - fmt.Fprintf(w, "var %s = [...]byte {", name) - for i, c := range b { - switch { - case i%64 == 0: - fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63) - case i%8 == 0: - fmt.Fprintf(w, "\n") - } - fmt.Fprintf(w, "0x%.2X, ", c) - } - fmt.Fprint(w, "\n}\n\n") -} - -// See forminfo.go for format. -func makeEntry(f *FormInfo, c *Char) uint16 { - e := uint16(0) - if r := c.codePoint; HangulBase <= r && r < HangulEnd { - e |= 0x40 - } - if f.combinesForward { - e |= 0x20 - } - if f.quickCheck[MDecomposed] == QCNo { - e |= 0x4 - } - switch f.quickCheck[MComposed] { - case QCYes: - case QCNo: - e |= 0x10 - case QCMaybe: - e |= 0x18 - default: - log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed]) - } - e |= uint16(c.nTrailingNonStarters) - return e -} - -// decompSet keeps track of unique decompositions, grouped by whether -// the decomposition is followed by a trailing and/or leading CCC. 
-type decompSet [7]map[string]bool - -const ( - normalDecomp = iota - firstMulti - firstCCC - endMulti - firstLeadingCCC - firstCCCZeroExcept - firstStarterWithNLead - lastDecomp -) - -var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"} - -func makeDecompSet() decompSet { - m := decompSet{} - for i := range m { - m[i] = make(map[string]bool) - } - return m -} -func (m *decompSet) insert(key int, s string) { - m[key][s] = true -} - -func printCharInfoTables(w io.Writer) int { - mkstr := func(r rune, f *FormInfo) (int, string) { - d := f.expandedDecomp - s := string([]rune(d)) - if max := 1 << 6; len(s) >= max { - const msg = "%U: too many bytes in decomposition: %d >= %d" - log.Fatalf(msg, r, len(s), max) - } - head := uint8(len(s)) - if f.quickCheck[MComposed] != QCYes { - head |= 0x40 - } - if f.combinesForward { - head |= 0x80 - } - s = string([]byte{head}) + s - - lccc := ccc(d[0]) - tccc := ccc(d[len(d)-1]) - cc := ccc(r) - if cc != 0 && lccc == 0 && tccc == 0 { - log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc) - } - if tccc < lccc && lccc != 0 { - const msg = "%U: lccc (%d) must be <= tcc (%d)" - log.Fatalf(msg, r, lccc, tccc) - } - index := normalDecomp - nTrail := chars[r].nTrailingNonStarters - if tccc > 0 || lccc > 0 || nTrail > 0 { - tccc <<= 2 - tccc |= nTrail - s += string([]byte{tccc}) - index = endMulti - for _, r := range d[1:] { - if ccc(r) == 0 { - index = firstCCC - } - } - if lccc > 0 { - s += string([]byte{lccc}) - if index == firstCCC { - log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r) - } - index = firstLeadingCCC - } - if cc != lccc { - if cc != 0 { - log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc) - } - index = firstCCCZeroExcept - } - } else if len(d) > 1 { - index = firstMulti - } - return index, s - } - - decompSet := makeDecompSet() - const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail. - decompSet.insert(firstStarterWithNLead, nLeadStr) - - // Store the uniqued decompositions in a byte buffer, - // preceded by their byte length. - for _, c := range chars { - for _, f := range c.forms { - if len(f.expandedDecomp) == 0 { - continue - } - if f.combinesBackward { - log.Fatalf("%U: combinesBackward and decompose", c.codePoint) - } - index, s := mkstr(c.codePoint, &f) - decompSet.insert(index, s) - } - } - - decompositions := bytes.NewBuffer(make([]byte, 0, 10000)) - size := 0 - positionMap := make(map[string]uint16) - decompositions.WriteString("\000") - fmt.Fprintln(w, "const (") - for i, m := range decompSet { - sa := []string{} - for s := range m { - sa = append(sa, s) - } - sort.Strings(sa) - for _, s := range sa { - p := decompositions.Len() - decompositions.WriteString(s) - positionMap[s] = uint16(p) - } - if cname[i] != "" { - fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len()) - } - } - fmt.Fprintln(w, "maxDecomp = 0x8000") - fmt.Fprintln(w, ")") - b := decompositions.Bytes() - printBytes(w, b, "decomps") - size += len(b) - - varnames := []string{"nfc", "nfkc"} - for i := 0; i < FNumberOfFormTypes; i++ { - trie := triegen.NewTrie(varnames[i]) - - for r, c := range chars { - f := c.forms[i] - d := f.expandedDecomp - if len(d) != 0 { - _, key := mkstr(c.codePoint, &f) - trie.Insert(rune(r), uint64(positionMap[key])) - if c.ccc != ccc(d[0]) { - // We assume the lead ccc of a decomposition !=0 in this case. 
- if ccc(d[0]) == 0 { - log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc) - } - } - } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward { - // Handle cases where it can't be detected that the nLead should be equal - // to nTrail. - trie.Insert(c.codePoint, uint64(positionMap[nLeadStr])) - } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 { - trie.Insert(c.codePoint, uint64(0x8000|v)) - } - } - sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]})) - if err != nil { - log.Fatal(err) - } - size += sz - } - return size -} - -func contains(sa []string, s string) bool { - for _, a := range sa { - if a == s { - return true - } - } - return false -} - -func makeTables() { - w := &bytes.Buffer{} - - size := 0 - if *tablelist == "" { - return - } - list := strings.Split(*tablelist, ",") - if *tablelist == "all" { - list = []string{"recomp", "info"} - } - - // Compute maximum decomposition size. - max := 0 - for _, c := range chars { - if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max { - max = n - } - } - - fmt.Fprintln(w, "const (") - fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.") - fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion()) - fmt.Fprintln(w) - fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform") - fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at") - fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that") - fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.") - fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max) - fmt.Fprintln(w, ")\n") - - // Print the CCC remap table. - size += len(cccMap) - fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap)) - for i := 0; i < len(cccMap); i++ { - if i%8 == 0 { - fmt.Fprintln(w) - } - fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)]) - } - fmt.Fprintln(w, "\n}\n") - - if contains(list, "info") { - size += printCharInfoTables(w) - } - - if contains(list, "recomp") { - // Note that we use 32 bit keys, instead of 64 bit. - // This clips the bits of three entries, but we know - // this won't cause a collision. The compiler will catch - // any changes made to UnicodeData.txt that introduces - // a collision. - // Note that the recomposition map for NFC and NFKC - // are identical. - - // Recomposition map - nrentries := 0 - for _, c := range chars { - f := c.forms[FCanonical] - if !f.isOneWay && len(f.decomp) > 0 { - nrentries++ - } - } - sz := nrentries * 8 - size += sz - fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz) - fmt.Fprintln(w, "var recompMap = map[uint32]rune{") - for i, c := range chars { - f := c.forms[FCanonical] - d := f.decomp - if !f.isOneWay && len(d) > 0 { - key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1])) - fmt.Fprintf(w, "0x%.8X: 0x%.4X,\n", key, i) - } - } - fmt.Fprintf(w, "}\n\n") - } - - fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size) - gen.WriteGoFile("tables.go", "norm", w.Bytes()) -} - -func printChars() { - if *verbose { - for _, c := range chars { - if !c.isValid() || c.state == SMissing { - continue - } - fmt.Println(c) - } - } -} - -// verifyComputed does various consistency tests. 
-func verifyComputed() { - for i, c := range chars { - for _, f := range c.forms { - isNo := (f.quickCheck[MDecomposed] == QCNo) - if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) { - log.Fatalf("%U: NF*D QC must be No if rune decomposes", i) - } - - isMaybe := f.quickCheck[MComposed] == QCMaybe - if f.combinesBackward != isMaybe { - log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i) - } - if len(f.decomp) > 0 && f.combinesForward && isMaybe { - log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i) - } - - if len(f.expandedDecomp) != 0 { - continue - } - if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b { - // We accept these runes to be treated differently (it only affects - // segment breaking in iteration, most likely on improper use), but - // reconsider if more characters are added. - // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L;<narrow> 3099;;;;N;;;;; - // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L;<narrow> 309A;;;;N;;;;; - // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;; - // U+318E HANGUL LETTER ARAEAE;Lo;0;L;<compat> 11A1;;;;N;HANGUL LETTER ALAE AE;;;; - // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<narrow> 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;; - // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L;<narrow> 3163;;;;N;;;;; - if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) { - log.Fatalf("%U: nLead was %v; want %v", i, a, b) - } - } - } - nfc := c.forms[FCanonical] - nfkc := c.forms[FCompatibility] - if nfc.combinesBackward != nfkc.combinesBackward { - log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint) - } - } -} - -// Use values in DerivedNormalizationProps.txt to compare against the -// values we computed. -// DerivedNormalizationProps.txt has form: -// 00C0..00C5 ; NFD_QC; N # ... -// 0374 ; NFD_QC; N # ... -// See http://unicode.org/reports/tr44/ for full explanation -func testDerived() { - f := gen.OpenUCDFile("DerivedNormalizationProps.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(0) - c := &chars[r] - - var ftype, mode int - qt := p.String(1) - switch qt { - case "NFC_QC": - ftype, mode = FCanonical, MComposed - case "NFD_QC": - ftype, mode = FCanonical, MDecomposed - case "NFKC_QC": - ftype, mode = FCompatibility, MComposed - case "NFKD_QC": - ftype, mode = FCompatibility, MDecomposed - default: - continue - } - var qr QCResult - switch p.String(2) { - case "Y": - qr = QCYes - case "N": - qr = QCNo - case "M": - qr = QCMaybe - default: - log.Fatalf(`Unexpected quick check value "%s"`, p.String(2)) - } - if got := c.forms[ftype].quickCheck[mode]; got != qr { - log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr) - } - c.forms[ftype].verified[mode] = true - } - if err := p.Err(); err != nil { - log.Fatal(err) - } - // Any unspecified value must be QCYes. Verify this. 
- for i, c := range chars { - for j, fd := range c.forms { - for k, qr := range fd.quickCheck { - if !fd.verified[k] && qr != QCYes { - m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n" - log.Printf(m, i, j, k, qr, c.name) - } - } - } - } -} - -var testHeader = `const ( - Yes = iota - No - Maybe -) - -type formData struct { - qc uint8 - combinesForward bool - decomposition string -} - -type runeData struct { - r rune - ccc uint8 - nLead uint8 - nTrail uint8 - f [2]formData // 0: canonical; 1: compatibility -} - -func f(qc uint8, cf bool, dec string) [2]formData { - return [2]formData{{qc, cf, dec}, {qc, cf, dec}} -} - -func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData { - return [2]formData{{qc, cf, d}, {qck, cfk, dk}} -} - -var testData = []runeData{ -` - -func printTestdata() { - type lastInfo struct { - ccc uint8 - nLead uint8 - nTrail uint8 - f string - } - - last := lastInfo{} - w := &bytes.Buffer{} - fmt.Fprintf(w, testHeader) - for r, c := range chars { - f := c.forms[FCanonical] - qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - f = c.forms[FCompatibility] - qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - s := "" - if d == dk && qc == qck && cf == cfk { - s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d) - } else { - s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk) - } - current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s} - if last != current { - fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s) - last = current - } - } - fmt.Fprintln(w, "}") - gen.WriteGoFile("data_test.go", "norm", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/unicode/norm/normalize.go b/vendor/golang.org/x/text/unicode/norm/normalize.go index a70805c26..e28ac641a 100644 --- a/vendor/golang.org/x/text/unicode/norm/normalize.go +++ b/vendor/golang.org/x/text/unicode/norm/normalize.go @@ -2,13 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// Note: the file data_test.go that is generated should not be checked in. //go:generate go run maketables.go triegen.go -//go:generate go run maketables.go triegen.go -test +//go:generate go test -tags test // Package norm contains types and functions for normalizing Unicode strings. -package norm +package norm // import "golang.org/x/text/unicode/norm" -import "unicode/utf8" +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) // A Form denotes a canonical representation of Unicode code points. // The Unicode-defined normalization and equivalence forms are: @@ -263,6 +268,34 @@ func (f Form) QuickSpan(b []byte) int { return n } +// Span implements transform.SpanningTransformer. It returns a boundary n such +// that b[0:n] == f(b[0:n]). It is not guaranteed to return the largest such n. +func (f Form) Span(b []byte, atEOF bool) (n int, err error) { + n, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), atEOF) + if n < len(b) { + if !ok { + err = transform.ErrEndOfSpan + } else { + err = transform.ErrShortSrc + } + } + return n, err +} + +// SpanString returns a boundary n such that s[0:n] == f(s[0:n]). +// It is not guaranteed to return the largest such n. 
+func (f Form) SpanString(s string, atEOF bool) (n int, err error) { + n, ok := formTable[f].quickSpan(inputString(s), 0, len(s), atEOF) + if n < len(s) { + if !ok { + err = transform.ErrEndOfSpan + } else { + err = transform.ErrShortSrc + } + } + return n, err +} + // quickSpan returns a boundary n such that src[0:n] == f(src[0:n]) and // whether any non-normalized parts were found. If atEOF is false, n will // not point past the last segment if this segment might be become @@ -291,7 +324,6 @@ func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool) // have an overflow for runes that are starters (e.g. with U+FF9E). switch ss.next(info) { case ssStarter: - ss.first(info) lastSegStart = i case ssOverflow: return lastSegStart, false @@ -321,7 +353,7 @@ func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool) return lastSegStart, false } -// QuickSpanString returns a boundary n such that b[0:n] == f(s[0:n]). +// QuickSpanString returns a boundary n such that s[0:n] == f(s[0:n]). // It is not guaranteed to return the largest such n. func (f Form) QuickSpanString(s string) int { n, _ := formTable[f].quickSpan(inputString(s), 0, len(s), true) @@ -408,6 +440,8 @@ func (f Form) nextBoundary(src input, nsrc int, atEOF bool) int { } return -1 } + // TODO: Using streamSafe to determine the boundary isn't the same as + // using BoundaryBefore. Determine which should be used. if s := ss.next(info); s != ssSuccess { return i } @@ -472,15 +506,14 @@ func decomposeSegment(rb *reorderBuffer, sp int, atEOF bool) int { if info.size == 0 { return 0 } - if rb.nrune > 0 { - if s := rb.ss.next(info); s == ssStarter { - goto end - } else if s == ssOverflow { - rb.insertCGJ() + if s := rb.ss.next(info); s == ssStarter { + // TODO: this could be removed if we don't support merging. + if rb.nrune > 0 { goto end } - } else { - rb.ss.first(info) + } else if s == ssOverflow { + rb.insertCGJ() + goto end } if err := rb.insertFlush(rb.src, sp, info); err != iSuccess { return int(err) diff --git a/vendor/golang.org/x/text/unicode/norm/readwriter.go b/vendor/golang.org/x/text/unicode/norm/readwriter.go index 4fa0e04b2..d926ee903 100644 --- a/vendor/golang.org/x/text/unicode/norm/readwriter.go +++ b/vendor/golang.org/x/text/unicode/norm/readwriter.go @@ -112,7 +112,6 @@ func (r *normReader) Read(p []byte) (int, error) { } } } - panic("should not reach here") } // Reader returns a new reader that implements Read diff --git a/vendor/golang.org/x/text/unicode/norm/tables.go b/vendor/golang.org/x/text/unicode/norm/tables.go index a56697b57..bf9ff8038 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables.go +++ b/vendor/golang.org/x/text/unicode/norm/tables.go @@ -1,4 +1,4 @@ -// This file was generated by go generate; DO NOT EDIT +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
package norm @@ -27,14 +27,14 @@ const ( firstMulti = 0x186D firstCCC = 0x2C9E endMulti = 0x2F60 - firstLeadingCCC = 0x4A44 - firstCCCZeroExcept = 0x4A5A - firstStarterWithNLead = 0x4A81 - lastDecomp = 0x4A83 + firstLeadingCCC = 0x49AE + firstCCCZeroExcept = 0x4A78 + firstStarterWithNLead = 0x4A9F + lastDecomp = 0x4AA1 maxDecomp = 0x8000 ) -// decomps: 19075 bytes +// decomps: 19105 bytes var decomps = [...]byte{ // Bytes 0 - 3f 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, @@ -2443,283 +2443,287 @@ var decomps = [...]byte{ 0xD9, 0x8F, 0x69, 0x43, 0x20, 0xD9, 0x90, 0x6D, 0x43, 0x20, 0xD9, 0x91, 0x71, 0x43, 0x20, 0xD9, 0x92, 0x75, 0x43, 0x41, 0xCC, 0x8A, 0xC9, 0x43, - 0x73, 0xCC, 0x87, 0xC9, 0x43, 0xE1, 0x85, 0xA1, - 0x01, 0x43, 0xE1, 0x85, 0xA2, 0x01, 0x43, 0xE1, - 0x85, 0xA3, 0x01, 0x43, 0xE1, 0x85, 0xA4, 0x01, - 0x43, 0xE1, 0x85, 0xA5, 0x01, 0x43, 0xE1, 0x85, - 0xA6, 0x01, 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x43, + 0x73, 0xCC, 0x87, 0xC9, 0x44, 0x20, 0xE3, 0x82, + 0x99, 0x0D, 0x44, 0x20, 0xE3, 0x82, 0x9A, 0x0D, + 0x44, 0xC2, 0xA8, 0xCC, 0x81, 0xCA, 0x44, 0xCE, + 0x91, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x95, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0x97, 0xCC, 0x81, 0xC9, // Bytes 4300 - 433f - 0xE1, 0x85, 0xA8, 0x01, 0x43, 0xE1, 0x85, 0xA9, - 0x01, 0x43, 0xE1, 0x85, 0xAA, 0x01, 0x43, 0xE1, - 0x85, 0xAB, 0x01, 0x43, 0xE1, 0x85, 0xAC, 0x01, - 0x43, 0xE1, 0x85, 0xAD, 0x01, 0x43, 0xE1, 0x85, - 0xAE, 0x01, 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x43, - 0xE1, 0x85, 0xB0, 0x01, 0x43, 0xE1, 0x85, 0xB1, - 0x01, 0x43, 0xE1, 0x85, 0xB2, 0x01, 0x43, 0xE1, - 0x85, 0xB3, 0x01, 0x43, 0xE1, 0x85, 0xB4, 0x01, + 0x44, 0xCE, 0x99, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0x9F, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, 0x88, 0xC9, + 0x44, 0xCE, 0xA9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xB1, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, + 0x44, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xBF, 0xCC, 0x81, 0xC9, 0x44, 0xCF, 0x85, 0xCC, // Bytes 4340 - 437f - 0x43, 0xE1, 0x85, 0xB5, 0x01, 0x43, 0xE1, 0x86, - 0xAA, 0x01, 0x43, 0xE1, 0x86, 0xAC, 0x01, 0x43, - 0xE1, 0x86, 0xAD, 0x01, 0x43, 0xE1, 0x86, 0xB0, - 0x01, 0x43, 0xE1, 0x86, 0xB1, 0x01, 0x43, 0xE1, - 0x86, 0xB2, 0x01, 0x43, 0xE1, 0x86, 0xB3, 0x01, - 0x43, 0xE1, 0x86, 0xB4, 0x01, 0x43, 0xE1, 0x86, - 0xB5, 0x01, 0x44, 0x20, 0xE3, 0x82, 0x99, 0x0D, - 0x44, 0x20, 0xE3, 0x82, 0x9A, 0x0D, 0x44, 0xC2, + 0x81, 0xC9, 0x44, 0xCF, 0x89, 0xCC, 0x81, 0xC9, + 0x44, 0xD7, 0x90, 0xD6, 0xB7, 0x31, 0x44, 0xD7, + 0x90, 0xD6, 0xB8, 0x35, 0x44, 0xD7, 0x90, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x91, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x91, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x92, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x93, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x94, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x95, 0xD6, 0xB9, 0x39, 0x44, 0xD7, // Bytes 4380 - 43bf - 0xA8, 0xCC, 0x81, 0xCA, 0x44, 0xCE, 0x91, 0xCC, - 0x81, 0xC9, 0x44, 0xCE, 0x95, 0xCC, 0x81, 0xC9, - 0x44, 0xCE, 0x97, 0xCC, 0x81, 0xC9, 0x44, 0xCE, - 0x99, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x9F, 0xCC, - 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, 0x81, 0xC9, - 0x44, 0xCE, 0xA5, 0xCC, 0x88, 0xC9, 0x44, 0xCE, - 0xA9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB1, 0xCC, - 0x81, 0xC9, 0x44, 0xCE, 0xB5, 0xCC, 0x81, 0xC9, + 0x95, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x96, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x98, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x99, 0xD6, 0xB4, 0x25, 0x44, 0xD7, + 0x99, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9A, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x9B, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x9B, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x9C, 0xD6, 
0xBC, 0x41, 0x44, 0xD7, 0x9E, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA0, 0xD6, 0xBC, 0x41, // Bytes 43c0 - 43ff - 0x44, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, 0x44, 0xCE, - 0xB9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xBF, 0xCC, - 0x81, 0xC9, 0x44, 0xCF, 0x85, 0xCC, 0x81, 0xC9, - 0x44, 0xCF, 0x89, 0xCC, 0x81, 0xC9, 0x44, 0xD7, - 0x90, 0xD6, 0xB7, 0x31, 0x44, 0xD7, 0x90, 0xD6, - 0xB8, 0x35, 0x44, 0xD7, 0x90, 0xD6, 0xBC, 0x41, - 0x44, 0xD7, 0x91, 0xD6, 0xBC, 0x41, 0x44, 0xD7, - 0x91, 0xD6, 0xBF, 0x49, 0x44, 0xD7, 0x92, 0xD6, + 0x44, 0xD7, 0xA1, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA3, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, 0xBF, 0x49, + 0x44, 0xD7, 0xA6, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA7, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA8, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA9, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0xA9, 0xD7, 0x81, 0x4D, 0x44, 0xD7, + 0xA9, 0xD7, 0x82, 0x51, 0x44, 0xD7, 0xAA, 0xD6, // Bytes 4400 - 443f - 0xBC, 0x41, 0x44, 0xD7, 0x93, 0xD6, 0xBC, 0x41, - 0x44, 0xD7, 0x94, 0xD6, 0xBC, 0x41, 0x44, 0xD7, - 0x95, 0xD6, 0xB9, 0x39, 0x44, 0xD7, 0x95, 0xD6, - 0xBC, 0x41, 0x44, 0xD7, 0x96, 0xD6, 0xBC, 0x41, - 0x44, 0xD7, 0x98, 0xD6, 0xBC, 0x41, 0x44, 0xD7, - 0x99, 0xD6, 0xB4, 0x25, 0x44, 0xD7, 0x99, 0xD6, - 0xBC, 0x41, 0x44, 0xD7, 0x9A, 0xD6, 0xBC, 0x41, - 0x44, 0xD7, 0x9B, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xBC, 0x41, 0x44, 0xD7, 0xB2, 0xD6, 0xB7, 0x31, + 0x44, 0xD8, 0xA7, 0xD9, 0x8B, 0x59, 0x44, 0xD8, + 0xA7, 0xD9, 0x93, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, + 0x94, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, + 0x44, 0xD8, 0xB0, 0xD9, 0xB0, 0x79, 0x44, 0xD8, + 0xB1, 0xD9, 0xB0, 0x79, 0x44, 0xD9, 0x80, 0xD9, + 0x8B, 0x59, 0x44, 0xD9, 0x80, 0xD9, 0x8E, 0x65, + 0x44, 0xD9, 0x80, 0xD9, 0x8F, 0x69, 0x44, 0xD9, // Bytes 4440 - 447f - 0x9B, 0xD6, 0xBF, 0x49, 0x44, 0xD7, 0x9C, 0xD6, - 0xBC, 0x41, 0x44, 0xD7, 0x9E, 0xD6, 0xBC, 0x41, - 0x44, 0xD7, 0xA0, 0xD6, 0xBC, 0x41, 0x44, 0xD7, - 0xA1, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA3, 0xD6, - 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, 0xBC, 0x41, - 0x44, 0xD7, 0xA4, 0xD6, 0xBF, 0x49, 0x44, 0xD7, - 0xA6, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA7, 0xD6, - 0xBC, 0x41, 0x44, 0xD7, 0xA8, 0xD6, 0xBC, 0x41, + 0x80, 0xD9, 0x90, 0x6D, 0x44, 0xD9, 0x80, 0xD9, + 0x91, 0x71, 0x44, 0xD9, 0x80, 0xD9, 0x92, 0x75, + 0x44, 0xD9, 0x87, 0xD9, 0xB0, 0x79, 0x44, 0xD9, + 0x88, 0xD9, 0x94, 0xC9, 0x44, 0xD9, 0x89, 0xD9, + 0xB0, 0x79, 0x44, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, + 0x44, 0xDB, 0x92, 0xD9, 0x94, 0xC9, 0x44, 0xDB, + 0x95, 0xD9, 0x94, 0xC9, 0x45, 0x20, 0xCC, 0x88, + 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCC, // Bytes 4480 - 44bf - 0x44, 0xD7, 0xA9, 0xD6, 0xBC, 0x41, 0x44, 0xD7, - 0xA9, 0xD7, 0x81, 0x4D, 0x44, 0xD7, 0xA9, 0xD7, - 0x82, 0x51, 0x44, 0xD7, 0xAA, 0xD6, 0xBC, 0x41, - 0x44, 0xD7, 0xB2, 0xD6, 0xB7, 0x31, 0x44, 0xD8, - 0xA7, 0xD9, 0x8B, 0x59, 0x44, 0xD8, 0xA7, 0xD9, - 0x93, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, 0x94, 0xC9, - 0x44, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, 0x44, 0xD8, - 0xB0, 0xD9, 0xB0, 0x79, 0x44, 0xD8, 0xB1, 0xD9, + 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCD, 0x82, + 0xCA, 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x45, + 0x20, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x45, 0x20, + 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, + 0x94, 0xCC, 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x94, + 0xCD, 0x82, 0xCA, 0x45, 0x20, 0xD9, 0x8C, 0xD9, + 0x91, 0x72, 0x45, 0x20, 0xD9, 0x8D, 0xD9, 0x91, // Bytes 44c0 - 44ff - 0xB0, 0x79, 0x44, 0xD9, 0x80, 0xD9, 0x8B, 0x59, - 0x44, 0xD9, 0x80, 0xD9, 0x8E, 0x65, 0x44, 0xD9, - 0x80, 0xD9, 0x8F, 0x69, 0x44, 0xD9, 0x80, 0xD9, - 0x90, 0x6D, 
0x44, 0xD9, 0x80, 0xD9, 0x91, 0x71, - 0x44, 0xD9, 0x80, 0xD9, 0x92, 0x75, 0x44, 0xD9, - 0x87, 0xD9, 0xB0, 0x79, 0x44, 0xD9, 0x88, 0xD9, - 0x94, 0xC9, 0x44, 0xD9, 0x89, 0xD9, 0xB0, 0x79, - 0x44, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, 0x44, 0xDB, + 0x72, 0x45, 0x20, 0xD9, 0x8E, 0xD9, 0x91, 0x72, + 0x45, 0x20, 0xD9, 0x8F, 0xD9, 0x91, 0x72, 0x45, + 0x20, 0xD9, 0x90, 0xD9, 0x91, 0x72, 0x45, 0x20, + 0xD9, 0x91, 0xD9, 0xB0, 0x7A, 0x45, 0xE2, 0xAB, + 0x9D, 0xCC, 0xB8, 0x05, 0x46, 0xCE, 0xB9, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xCF, 0x85, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xD7, 0xA9, 0xD6, + 0xBC, 0xD7, 0x81, 0x4E, 0x46, 0xD7, 0xA9, 0xD6, // Bytes 4500 - 453f - 0x92, 0xD9, 0x94, 0xC9, 0x44, 0xDB, 0x95, 0xD9, - 0x94, 0xC9, 0x45, 0x20, 0xCC, 0x88, 0xCC, 0x80, - 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCC, 0x81, 0xCA, - 0x45, 0x20, 0xCC, 0x88, 0xCD, 0x82, 0xCA, 0x45, - 0x20, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x45, 0x20, - 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x45, 0x20, 0xCC, - 0x93, 0xCD, 0x82, 0xCA, 0x45, 0x20, 0xCC, 0x94, - 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, 0x94, 0xCC, + 0xBC, 0xD7, 0x82, 0x52, 0x46, 0xD9, 0x80, 0xD9, + 0x8E, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x8F, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x90, 0xD9, 0x91, 0x72, 0x46, 0xE0, 0xA4, 0x95, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x96, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x97, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x9C, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA1, // Bytes 4540 - 457f - 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x94, 0xCD, 0x82, - 0xCA, 0x45, 0x20, 0xD9, 0x8C, 0xD9, 0x91, 0x72, - 0x45, 0x20, 0xD9, 0x8D, 0xD9, 0x91, 0x72, 0x45, - 0x20, 0xD9, 0x8E, 0xD9, 0x91, 0x72, 0x45, 0x20, - 0xD9, 0x8F, 0xD9, 0x91, 0x72, 0x45, 0x20, 0xD9, - 0x90, 0xD9, 0x91, 0x72, 0x45, 0x20, 0xD9, 0x91, - 0xD9, 0xB0, 0x7A, 0x45, 0xE2, 0xAB, 0x9D, 0xCC, - 0xB8, 0x05, 0x46, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA2, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAB, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAF, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA1, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA2, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xAF, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x96, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x97, // Bytes 4580 - 45bf - 0x81, 0xCA, 0x46, 0xCF, 0x85, 0xCC, 0x88, 0xCC, - 0x81, 0xCA, 0x46, 0xD7, 0xA9, 0xD6, 0xBC, 0xD7, - 0x81, 0x4E, 0x46, 0xD7, 0xA9, 0xD6, 0xBC, 0xD7, - 0x82, 0x52, 0x46, 0xD9, 0x80, 0xD9, 0x8E, 0xD9, - 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, 0x8F, 0xD9, - 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, 0x90, 0xD9, - 0x91, 0x72, 0x46, 0xE0, 0xA4, 0x95, 0xE0, 0xA4, - 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x96, 0xE0, 0xA4, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x9C, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xAB, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB2, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB8, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA1, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA2, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE0, 0xBE, 0xB3, // Bytes 45c0 - 45ff - 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x97, 0xE0, 0xA4, - 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x9C, 0xE0, 0xA4, - 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA1, 0xE0, 0xA4, - 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA2, 0xE0, 0xA4, - 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAB, 0xE0, 0xA4, - 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAF, 0xE0, 0xA4, - 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA1, 0xE0, 0xA6, - 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA2, 0xE0, 0xA6, + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 
0xE3, 0x83, 0x86, + 0xE3, 0x82, 0x99, 0x0D, 0x48, 0xF0, 0x9D, 0x85, + 0x97, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x48, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, + 0x48, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xAD, 0x48, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + 0x9D, 0x85, 0xA5, 0xAD, 0x49, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x49, // Bytes 4600 - 463f - 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xAF, 0xE0, 0xA6, - 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x96, 0xE0, 0xA8, - 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x97, 0xE0, 0xA8, - 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x9C, 0xE0, 0xA8, - 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xAB, 0xE0, 0xA8, - 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB2, 0xE0, 0xA8, - 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB8, 0xE0, 0xA8, - 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA1, 0xE0, 0xAC, - // Bytes 4640 - 467f - 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA2, 0xE0, 0xAC, - 0xBC, 0x09, 0x46, 0xE0, 0xBE, 0xB2, 0xE0, 0xBE, - 0x80, 0x9D, 0x46, 0xE0, 0xBE, 0xB3, 0xE0, 0xBE, - 0x80, 0x9D, 0x46, 0xE3, 0x83, 0x86, 0xE3, 0x82, - 0x99, 0x0D, 0x48, 0xF0, 0x9D, 0x85, 0x97, 0xF0, - 0x9D, 0x85, 0xA5, 0xAD, 0x48, 0xF0, 0x9D, 0x85, - 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x48, 0xF0, - 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, - // Bytes 4680 - 46bf - 0x48, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, 0x9D, 0x85, - 0xA5, 0xAD, 0x49, 0xE0, 0xBE, 0xB2, 0xE0, 0xBD, - 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x49, 0xE0, 0xBE, - 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, - 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, - 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, 0x4C, 0xF0, - 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, - 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, - // Bytes 46c0 - 46ff - 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, - 0xB0, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, - 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB1, 0xAE, - 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, - 0xA5, 0xF0, 0x9D, 0x85, 0xB2, 0xAE, 0x4C, 0xF0, - 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, - 0x9D, 0x85, 0xAE, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, - 0xB9, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, - // Bytes 4700 - 473f - 0xAF, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + 0xE0, 0xBE, 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, + 0x80, 0x9E, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, - 0x4C, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, 0x9D, 0x85, - 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, 0x83, 0x41, - 0xCC, 0x82, 0xC9, 0x83, 0x41, 0xCC, 0x86, 0xC9, - 0x83, 0x41, 0xCC, 0x87, 0xC9, 0x83, 0x41, 0xCC, - 0x88, 0xC9, 0x83, 0x41, 0xCC, 0x8A, 0xC9, 0x83, - 0x41, 0xCC, 0xA3, 0xB5, 0x83, 0x43, 0xCC, 0xA7, + 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xB0, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, + 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + // Bytes 4640 - 467f + 0xB1, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB2, 0xAE, + 0x4C, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, 0x4C, 0xF0, + 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, + 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + 0xAE, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + // Bytes 4680 - 46bf + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, + 0x83, 0x41, 0xCC, 0x82, 0xC9, 0x83, 0x41, 0xCC, + 0x86, 0xC9, 0x83, 0x41, 0xCC, 0x87, 0xC9, 0x83, + 0x41, 0xCC, 0x88, 0xC9, 0x83, 0x41, 0xCC, 0x8A, + 0xC9, 0x83, 0x41, 0xCC, 0xA3, 0xB5, 0x83, 0x43, + 0xCC, 0xA7, 0xA5, 0x83, 
0x45, 0xCC, 0x82, 0xC9, + 0x83, 0x45, 0xCC, 0x84, 0xC9, 0x83, 0x45, 0xCC, + 0xA3, 0xB5, 0x83, 0x45, 0xCC, 0xA7, 0xA5, 0x83, + // Bytes 46c0 - 46ff + 0x49, 0xCC, 0x88, 0xC9, 0x83, 0x4C, 0xCC, 0xA3, + 0xB5, 0x83, 0x4F, 0xCC, 0x82, 0xC9, 0x83, 0x4F, + 0xCC, 0x83, 0xC9, 0x83, 0x4F, 0xCC, 0x84, 0xC9, + 0x83, 0x4F, 0xCC, 0x87, 0xC9, 0x83, 0x4F, 0xCC, + 0x88, 0xC9, 0x83, 0x4F, 0xCC, 0x9B, 0xAD, 0x83, + 0x4F, 0xCC, 0xA3, 0xB5, 0x83, 0x4F, 0xCC, 0xA8, + 0xA5, 0x83, 0x52, 0xCC, 0xA3, 0xB5, 0x83, 0x53, + 0xCC, 0x81, 0xC9, 0x83, 0x53, 0xCC, 0x8C, 0xC9, + // Bytes 4700 - 473f + 0x83, 0x53, 0xCC, 0xA3, 0xB5, 0x83, 0x55, 0xCC, + 0x83, 0xC9, 0x83, 0x55, 0xCC, 0x84, 0xC9, 0x83, + 0x55, 0xCC, 0x88, 0xC9, 0x83, 0x55, 0xCC, 0x9B, + 0xAD, 0x83, 0x61, 0xCC, 0x82, 0xC9, 0x83, 0x61, + 0xCC, 0x86, 0xC9, 0x83, 0x61, 0xCC, 0x87, 0xC9, + 0x83, 0x61, 0xCC, 0x88, 0xC9, 0x83, 0x61, 0xCC, + 0x8A, 0xC9, 0x83, 0x61, 0xCC, 0xA3, 0xB5, 0x83, + 0x63, 0xCC, 0xA7, 0xA5, 0x83, 0x65, 0xCC, 0x82, // Bytes 4740 - 477f - 0xA5, 0x83, 0x45, 0xCC, 0x82, 0xC9, 0x83, 0x45, - 0xCC, 0x84, 0xC9, 0x83, 0x45, 0xCC, 0xA3, 0xB5, - 0x83, 0x45, 0xCC, 0xA7, 0xA5, 0x83, 0x49, 0xCC, - 0x88, 0xC9, 0x83, 0x4C, 0xCC, 0xA3, 0xB5, 0x83, - 0x4F, 0xCC, 0x82, 0xC9, 0x83, 0x4F, 0xCC, 0x83, - 0xC9, 0x83, 0x4F, 0xCC, 0x84, 0xC9, 0x83, 0x4F, - 0xCC, 0x87, 0xC9, 0x83, 0x4F, 0xCC, 0x88, 0xC9, - 0x83, 0x4F, 0xCC, 0x9B, 0xAD, 0x83, 0x4F, 0xCC, + 0xC9, 0x83, 0x65, 0xCC, 0x84, 0xC9, 0x83, 0x65, + 0xCC, 0xA3, 0xB5, 0x83, 0x65, 0xCC, 0xA7, 0xA5, + 0x83, 0x69, 0xCC, 0x88, 0xC9, 0x83, 0x6C, 0xCC, + 0xA3, 0xB5, 0x83, 0x6F, 0xCC, 0x82, 0xC9, 0x83, + 0x6F, 0xCC, 0x83, 0xC9, 0x83, 0x6F, 0xCC, 0x84, + 0xC9, 0x83, 0x6F, 0xCC, 0x87, 0xC9, 0x83, 0x6F, + 0xCC, 0x88, 0xC9, 0x83, 0x6F, 0xCC, 0x9B, 0xAD, + 0x83, 0x6F, 0xCC, 0xA3, 0xB5, 0x83, 0x6F, 0xCC, // Bytes 4780 - 47bf - 0xA3, 0xB5, 0x83, 0x4F, 0xCC, 0xA8, 0xA5, 0x83, - 0x52, 0xCC, 0xA3, 0xB5, 0x83, 0x53, 0xCC, 0x81, - 0xC9, 0x83, 0x53, 0xCC, 0x8C, 0xC9, 0x83, 0x53, - 0xCC, 0xA3, 0xB5, 0x83, 0x55, 0xCC, 0x83, 0xC9, - 0x83, 0x55, 0xCC, 0x84, 0xC9, 0x83, 0x55, 0xCC, - 0x88, 0xC9, 0x83, 0x55, 0xCC, 0x9B, 0xAD, 0x83, - 0x61, 0xCC, 0x82, 0xC9, 0x83, 0x61, 0xCC, 0x86, - 0xC9, 0x83, 0x61, 0xCC, 0x87, 0xC9, 0x83, 0x61, + 0xA8, 0xA5, 0x83, 0x72, 0xCC, 0xA3, 0xB5, 0x83, + 0x73, 0xCC, 0x81, 0xC9, 0x83, 0x73, 0xCC, 0x8C, + 0xC9, 0x83, 0x73, 0xCC, 0xA3, 0xB5, 0x83, 0x75, + 0xCC, 0x83, 0xC9, 0x83, 0x75, 0xCC, 0x84, 0xC9, + 0x83, 0x75, 0xCC, 0x88, 0xC9, 0x83, 0x75, 0xCC, + 0x9B, 0xAD, 0x84, 0xCE, 0x91, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x91, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x95, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x95, 0xCC, // Bytes 47c0 - 47ff - 0xCC, 0x88, 0xC9, 0x83, 0x61, 0xCC, 0x8A, 0xC9, - 0x83, 0x61, 0xCC, 0xA3, 0xB5, 0x83, 0x63, 0xCC, - 0xA7, 0xA5, 0x83, 0x65, 0xCC, 0x82, 0xC9, 0x83, - 0x65, 0xCC, 0x84, 0xC9, 0x83, 0x65, 0xCC, 0xA3, - 0xB5, 0x83, 0x65, 0xCC, 0xA7, 0xA5, 0x83, 0x69, - 0xCC, 0x88, 0xC9, 0x83, 0x6C, 0xCC, 0xA3, 0xB5, - 0x83, 0x6F, 0xCC, 0x82, 0xC9, 0x83, 0x6F, 0xCC, - 0x83, 0xC9, 0x83, 0x6F, 0xCC, 0x84, 0xC9, 0x83, + 0x94, 0xC9, 0x84, 0xCE, 0x97, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x97, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x99, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x99, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0x9F, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x9F, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0xA5, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCC, 0x80, 0xC9, 0x84, 0xCE, // Bytes 4800 - 483f - 0x6F, 0xCC, 0x87, 0xC9, 0x83, 0x6F, 0xCC, 0x88, - 0xC9, 0x83, 0x6F, 0xCC, 0x9B, 0xAD, 0x83, 0x6F, 
- 0xCC, 0xA3, 0xB5, 0x83, 0x6F, 0xCC, 0xA8, 0xA5, - 0x83, 0x72, 0xCC, 0xA3, 0xB5, 0x83, 0x73, 0xCC, - 0x81, 0xC9, 0x83, 0x73, 0xCC, 0x8C, 0xC9, 0x83, - 0x73, 0xCC, 0xA3, 0xB5, 0x83, 0x75, 0xCC, 0x83, - 0xC9, 0x83, 0x75, 0xCC, 0x84, 0xC9, 0x83, 0x75, - 0xCC, 0x88, 0xC9, 0x83, 0x75, 0xCC, 0x9B, 0xAD, + 0xB1, 0xCC, 0x81, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCD, 0x82, 0xC9, 0x84, 0xCE, + 0xB5, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB5, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x80, 0xC9, + 0x84, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, 0x84, 0xCE, + 0xB7, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCD, 0x82, 0xC9, // Bytes 4840 - 487f - 0x84, 0xCE, 0x91, 0xCC, 0x93, 0xC9, 0x84, 0xCE, - 0x91, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0x95, 0xCC, - 0x93, 0xC9, 0x84, 0xCE, 0x95, 0xCC, 0x94, 0xC9, - 0x84, 0xCE, 0x97, 0xCC, 0x93, 0xC9, 0x84, 0xCE, - 0x97, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0x99, 0xCC, - 0x93, 0xC9, 0x84, 0xCE, 0x99, 0xCC, 0x94, 0xC9, - 0x84, 0xCE, 0x9F, 0xCC, 0x93, 0xC9, 0x84, 0xCE, - 0x9F, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xA5, 0xCC, + 0x84, 0xCE, 0xB9, 0xCC, 0x88, 0xC9, 0x84, 0xCE, + 0xB9, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB9, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xBF, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0xBF, 0xCC, 0x94, 0xC9, 0x84, 0xCF, + 0x85, 0xCC, 0x88, 0xC9, 0x84, 0xCF, 0x85, 0xCC, + 0x93, 0xC9, 0x84, 0xCF, 0x85, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCC, 0x80, 0xC9, 0x84, 0xCF, + 0x89, 0xCC, 0x81, 0xC9, 0x84, 0xCF, 0x89, 0xCC, // Bytes 4880 - 48bf - 0x94, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, 0x93, 0xC9, - 0x84, 0xCE, 0xA9, 0xCC, 0x94, 0xC9, 0x84, 0xCE, - 0xB1, 0xCC, 0x80, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, - 0x81, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x93, 0xC9, - 0x84, 0xCE, 0xB1, 0xCC, 0x94, 0xC9, 0x84, 0xCE, - 0xB1, 0xCD, 0x82, 0xC9, 0x84, 0xCE, 0xB5, 0xCC, - 0x93, 0xC9, 0x84, 0xCE, 0xB5, 0xCC, 0x94, 0xC9, - 0x84, 0xCE, 0xB7, 0xCC, 0x80, 0xC9, 0x84, 0xCE, + 0x93, 0xC9, 0x84, 0xCF, 0x89, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCD, 0x82, 0xC9, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, // Bytes 48c0 - 48ff - 0xB7, 0xCC, 0x81, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, - 0x93, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x94, 0xC9, - 0x84, 0xCE, 0xB7, 0xCD, 0x82, 0xC9, 0x84, 0xCE, - 0xB9, 0xCC, 0x88, 0xC9, 0x84, 0xCE, 0xB9, 0xCC, - 0x93, 0xC9, 0x84, 0xCE, 0xB9, 0xCC, 0x94, 0xC9, - 0x84, 0xCE, 0xBF, 0xCC, 0x93, 0xC9, 0x84, 0xCE, - 0xBF, 0xCC, 0x94, 0xC9, 0x84, 0xCF, 0x85, 0xCC, - 0x88, 0xC9, 0x84, 0xCF, 0x85, 0xCC, 0x93, 0xC9, + 0x97, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, // Bytes 4900 - 493f - 0x84, 0xCF, 0x85, 0xCC, 0x94, 0xC9, 0x84, 0xCF, - 0x89, 0xCC, 0x80, 0xC9, 0x84, 0xCF, 0x89, 0xCC, - 0x81, 0xC9, 0x84, 0xCF, 0x89, 0xCC, 0x93, 0xC9, - 0x84, 0xCF, 0x89, 0xCC, 0x94, 0xC9, 0x84, 0xCF, - 0x89, 0xCD, 0x82, 0xC9, 0x86, 0xCE, 0x91, 0xCC, - 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, 0x91, 0xCC, - 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, 0x91, 0xCC, - 0x93, 0xCD, 0x82, 
0xCA, 0x86, 0xCE, 0x91, 0xCC, + 0xA9, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, // Bytes 4940 - 497f - 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, 0x91, 0xCC, - 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, 0x91, 0xCC, - 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, 0x97, 0xCC, - 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, 0x97, 0xCC, - 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, 0x97, 0xCC, - 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, 0x97, 0xCC, - 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, 0x97, 0xCC, - 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, 0x97, 0xCC, + 0xB1, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCF, // Bytes 4980 - 49bf - 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, 0xA9, 0xCC, - 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, 0xA9, 0xCC, - 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, 0xA9, 0xCC, - 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, 0xA9, 0xCC, - 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, 0xA9, 0xCC, - 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, 0xA9, 0xCC, - 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, 0xB1, 0xCC, - 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, 0xB1, 0xCC, + 0x89, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x42, 0xCC, + 0x80, 0xC9, 0x32, 0x42, 0xCC, 0x81, 0xC9, 0x32, + 0x42, 0xCC, 0x93, 0xC9, 0x32, 0x43, 0xE1, 0x85, // Bytes 49c0 - 49ff - 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, 0xB1, 0xCC, - 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, 0xB1, 0xCC, - 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, 0xB1, 0xCC, - 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, 0xB1, 0xCC, - 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, 0xB7, 0xCC, - 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, 0xB7, 0xCC, - 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, 0xB7, 0xCC, - 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, 0xB7, 0xCC, + 0xA1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA5, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA6, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA8, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA9, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAA, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAB, 0x01, 0x00, 0x43, // Bytes 4a00 - 4a3f - 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, 0xB7, 0xCC, - 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, 0xB7, 0xCC, - 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCF, 0x89, 0xCC, - 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCF, 0x89, 0xCC, - 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCF, 0x89, 0xCC, - 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCF, 0x89, 0xCC, - 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCF, 0x89, 0xCC, - 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCF, 0x89, 0xCC, + 0xE1, 0x85, 0xAC, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAE, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB1, 0x01, 0x00, 0x43, 0xE1, 0x85, 
0xB2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xB3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB5, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAA, 0x01, // Bytes 4a40 - 4a7f - 0x94, 0xCD, 0x82, 0xCA, 0x42, 0xCC, 0x80, 0xC9, - 0x32, 0x42, 0xCC, 0x81, 0xC9, 0x32, 0x42, 0xCC, - 0x93, 0xC9, 0x32, 0x44, 0xCC, 0x88, 0xCC, 0x81, - 0xCA, 0x32, 0x43, 0xE3, 0x82, 0x99, 0x0D, 0x03, - 0x43, 0xE3, 0x82, 0x9A, 0x0D, 0x03, 0x46, 0xE0, - 0xBD, 0xB1, 0xE0, 0xBD, 0xB2, 0x9E, 0x26, 0x46, - 0xE0, 0xBD, 0xB1, 0xE0, 0xBD, 0xB4, 0xA2, 0x26, - 0x46, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, + 0x00, 0x43, 0xE1, 0x86, 0xAC, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB1, 0x01, + 0x00, 0x43, 0xE1, 0x86, 0xB2, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xB3, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB5, 0x01, + 0x00, 0x44, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x32, + 0x43, 0xE3, 0x82, 0x99, 0x0D, 0x03, 0x43, 0xE3, // Bytes 4a80 - 4abf - 0x26, 0x00, 0x01, + 0x82, 0x9A, 0x0D, 0x03, 0x46, 0xE0, 0xBD, 0xB1, + 0xE0, 0xBD, 0xB2, 0x9E, 0x26, 0x46, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBD, 0xB4, 0xA2, 0x26, 0x46, 0xE0, + 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x26, 0x00, + 0x01, } // lookup returns the trie value for the first UTF-8 encoding in s and @@ -2892,7 +2896,7 @@ func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { return 0 } -// nfcTrie. Total size: 10332 bytes (10.09 KiB). Checksum: ad355b768fddb1b6. +// nfcTrie. Total size: 10332 bytes (10.09 KiB). Checksum: 51cc525b297fc970. type nfcTrie struct{} func newNfcTrie(i int) *nfcTrie { @@ -2928,22 +2932,22 @@ var nfcValues = [2944]uint16{ 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, // Block 0x2, offset 0x80 // Block 0x3, offset 0xc0 - 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x471e, 0xc3: 0x2f79, 0xc4: 0x472d, 0xc5: 0x4732, - 0xc6: 0xa000, 0xc7: 0x473c, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x4741, 0xcb: 0x2ffb, - 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x4755, 0xd1: 0x3104, - 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x475f, 0xd5: 0x4764, 0xd6: 0x4773, - 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x47a5, 0xdd: 0x3235, - 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x47af, 0xe3: 0x3285, - 0xe4: 0x47be, 0xe5: 0x47c3, 0xe6: 0xa000, 0xe7: 0x47cd, 0xe8: 0x32ee, 0xe9: 0x32f3, - 0xea: 0x47d2, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x47e6, - 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x47f0, 0xf5: 0x47f5, - 0xf6: 0x4804, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, - 0xfc: 0x4836, 0xfd: 0x3550, 0xff: 0x3569, + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, // Block 0x4, offset 0x100 - 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x4723, 0x103: 0x47b4, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x100: 
0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, - 0x112: 0x4746, 0x113: 0x47d7, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339, 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, @@ -2954,12 +2958,12 @@ var nfcValues = [2944]uint16{ // Block 0x5, offset 0x140 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, - 0x14c: 0x4769, 0x14d: 0x47fa, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, - 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x478c, 0x15b: 0x481d, 0x15c: 0x317c, 0x15d: 0x348d, - 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x4791, 0x161: 0x4822, 0x162: 0x31a4, 0x163: 0x34ba, - 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x479b, 0x169: 0x482c, - 0x16a: 0x47a0, 0x16b: 0x4831, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0xa000, @@ -2971,7 +2975,7 @@ var nfcValues = [2944]uint16{ 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, - 0x1aa: 0x4782, 0x1ab: 0x4813, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, 0x1b0: 0x33c5, 0x1b4: 0x3028, 0x1b5: 0x3334, 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, @@ -2982,8 +2986,8 @@ var nfcValues = [2944]uint16{ 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, 0x1de: 0x305a, 0x1df: 0x3366, - 0x1e6: 0x4728, 0x1e7: 0x47b9, 0x1e8: 0x4750, 0x1e9: 0x47e1, - 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x476e, 0x1ef: 0x47ff, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, // Block 0x8, offset 0x200 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, @@ -2998,7 +3002,7 @@ var nfcValues = [2944]uint16{ 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, // Block 0x9, offset 
0x240 - 0x240: 0x4a44, 0x241: 0x4a49, 0x242: 0x9932, 0x243: 0x4a4e, 0x244: 0x4a53, 0x245: 0x9936, + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, @@ -3017,7 +3021,7 @@ var nfcValues = [2944]uint16{ 0x299: 0xa000, 0x29f: 0xa000, 0x2a1: 0xa000, 0x2a5: 0xa000, 0x2a9: 0xa000, - 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x4894, 0x2ad: 0x3697, 0x2ae: 0x48be, 0x2af: 0x36a9, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, 0x2b7: 0xa000, 0x2b9: 0xa000, 0x2bf: 0xa000, @@ -3078,15 +3082,15 @@ var nfcValues = [2944]uint16{ 0x424: 0x305f, 0x425: 0x336b, 0x426: 0x3055, 0x427: 0x3361, 0x428: 0x3064, 0x429: 0x3370, 0x42a: 0x3069, 0x42b: 0x3375, 0x42c: 0x30af, 0x42d: 0x33bb, 0x42e: 0x390b, 0x42f: 0x3a9a, 0x430: 0x30b9, 0x431: 0x33ca, 0x432: 0x30c3, 0x433: 0x33d4, 0x434: 0x30cd, 0x435: 0x33de, - 0x436: 0x475a, 0x437: 0x47eb, 0x438: 0x3912, 0x439: 0x3aa1, 0x43a: 0x30e6, 0x43b: 0x33f7, + 0x436: 0x46c4, 0x437: 0x4755, 0x438: 0x3912, 0x439: 0x3aa1, 0x43a: 0x30e6, 0x43b: 0x33f7, 0x43c: 0x30e1, 0x43d: 0x33f2, 0x43e: 0x30eb, 0x43f: 0x33fc, // Block 0x11, offset 0x440 0x440: 0x30f0, 0x441: 0x3401, 0x442: 0x30f5, 0x443: 0x3406, 0x444: 0x3109, 0x445: 0x341a, 0x446: 0x3113, 0x447: 0x3424, 0x448: 0x3122, 0x449: 0x3433, 0x44a: 0x311d, 0x44b: 0x342e, 0x44c: 0x3935, 0x44d: 0x3ac4, 0x44e: 0x3943, 0x44f: 0x3ad2, 0x450: 0x394a, 0x451: 0x3ad9, 0x452: 0x3951, 0x453: 0x3ae0, 0x454: 0x314f, 0x455: 0x3460, 0x456: 0x3154, 0x457: 0x3465, - 0x458: 0x315e, 0x459: 0x346f, 0x45a: 0x4787, 0x45b: 0x4818, 0x45c: 0x3997, 0x45d: 0x3b26, - 0x45e: 0x3177, 0x45f: 0x3488, 0x460: 0x3181, 0x461: 0x3492, 0x462: 0x4796, 0x463: 0x4827, + 0x458: 0x315e, 0x459: 0x346f, 0x45a: 0x46f1, 0x45b: 0x4782, 0x45c: 0x3997, 0x45d: 0x3b26, + 0x45e: 0x3177, 0x45f: 0x3488, 0x460: 0x3181, 0x461: 0x3492, 0x462: 0x4700, 0x463: 0x4791, 0x464: 0x399e, 0x465: 0x3b2d, 0x466: 0x39a5, 0x467: 0x3b34, 0x468: 0x39ac, 0x469: 0x3b3b, 0x46a: 0x3190, 0x46b: 0x34a1, 0x46c: 0x319a, 0x46d: 0x34b0, 0x46e: 0x31ae, 0x46f: 0x34c4, 0x470: 0x31a9, 0x471: 0x34bf, 0x472: 0x31ea, 0x473: 0x3500, 0x474: 0x31f9, 0x475: 0x350f, @@ -3098,16 +3102,16 @@ var nfcValues = [2944]uint16{ 0x48c: 0x322b, 0x48d: 0x3546, 0x48e: 0x3249, 0x48f: 0x3564, 0x490: 0x3262, 0x491: 0x3582, 0x492: 0x3271, 0x493: 0x3591, 0x494: 0x3276, 0x495: 0x3596, 0x496: 0x337a, 0x497: 0x34a6, 0x498: 0x3537, 0x499: 0x3573, 0x49b: 0x35d1, - 0x4a0: 0x4737, 0x4a1: 0x47c8, 0x4a2: 0x2f83, 0x4a3: 0x328f, + 0x4a0: 0x46a1, 0x4a1: 0x4732, 0x4a2: 0x2f83, 0x4a3: 0x328f, 0x4a4: 0x3878, 0x4a5: 0x3a07, 0x4a6: 0x3871, 0x4a7: 0x3a00, 0x4a8: 0x3886, 0x4a9: 0x3a15, 0x4aa: 0x387f, 0x4ab: 0x3a0e, 0x4ac: 0x38be, 0x4ad: 0x3a4d, 0x4ae: 0x3894, 0x4af: 0x3a23, 0x4b0: 0x388d, 0x4b1: 0x3a1c, 0x4b2: 0x38a2, 0x4b3: 0x3a31, 0x4b4: 0x389b, 0x4b5: 0x3a2a, - 0x4b6: 0x38c5, 0x4b7: 0x3a54, 0x4b8: 0x474b, 0x4b9: 0x47dc, 0x4ba: 0x3000, 0x4bb: 0x330c, + 0x4b6: 0x38c5, 0x4b7: 0x3a54, 0x4b8: 0x46b5, 0x4b9: 0x4746, 0x4ba: 0x3000, 0x4bb: 0x330c, 0x4bc: 0x2fec, 0x4bd: 0x32f8, 0x4be: 0x38da, 0x4bf: 0x3a69, // Block 0x13, offset 0x4c0 0x4c0: 0x38d3, 0x4c1: 0x3a62, 0x4c2: 0x38e8, 0x4c3: 0x3a77, 0x4c4: 0x38e1, 0x4c5: 0x3a70, 0x4c6: 0x38fd, 0x4c7: 0x3a8c, 0x4c8: 0x3091, 0x4c9: 0x339d, 0x4ca: 0x30a5, 
0x4cb: 0x33b1, - 0x4cc: 0x477d, 0x4cd: 0x480e, 0x4ce: 0x3136, 0x4cf: 0x3447, 0x4d0: 0x3920, 0x4d1: 0x3aaf, + 0x4cc: 0x46e7, 0x4cd: 0x4778, 0x4ce: 0x3136, 0x4cf: 0x3447, 0x4d0: 0x3920, 0x4d1: 0x3aaf, 0x4d2: 0x3919, 0x4d3: 0x3aa8, 0x4d4: 0x392e, 0x4d5: 0x3abd, 0x4d6: 0x3927, 0x4d7: 0x3ab6, 0x4d8: 0x3989, 0x4d9: 0x3b18, 0x4da: 0x396d, 0x4db: 0x3afc, 0x4dc: 0x3966, 0x4dd: 0x3af5, 0x4de: 0x397b, 0x4df: 0x3b0a, 0x4e0: 0x3974, 0x4e1: 0x3b03, 0x4e2: 0x3982, 0x4e3: 0x3b11, @@ -3116,29 +3120,29 @@ var nfcValues = [2944]uint16{ 0x4f0: 0x39f9, 0x4f1: 0x3b88, 0x4f2: 0x3230, 0x4f3: 0x354b, 0x4f4: 0x3258, 0x4f5: 0x3578, 0x4f6: 0x3253, 0x4f7: 0x356e, 0x4f8: 0x323f, 0x4f9: 0x355a, // Block 0x14, offset 0x500 - 0x500: 0x489a, 0x501: 0x48a0, 0x502: 0x49b4, 0x503: 0x49cc, 0x504: 0x49bc, 0x505: 0x49d4, - 0x506: 0x49c4, 0x507: 0x49dc, 0x508: 0x4840, 0x509: 0x4846, 0x50a: 0x4924, 0x50b: 0x493c, - 0x50c: 0x492c, 0x50d: 0x4944, 0x50e: 0x4934, 0x50f: 0x494c, 0x510: 0x48ac, 0x511: 0x48b2, + 0x500: 0x4804, 0x501: 0x480a, 0x502: 0x491e, 0x503: 0x4936, 0x504: 0x4926, 0x505: 0x493e, + 0x506: 0x492e, 0x507: 0x4946, 0x508: 0x47aa, 0x509: 0x47b0, 0x50a: 0x488e, 0x50b: 0x48a6, + 0x50c: 0x4896, 0x50d: 0x48ae, 0x50e: 0x489e, 0x50f: 0x48b6, 0x510: 0x4816, 0x511: 0x481c, 0x512: 0x3db8, 0x513: 0x3dc8, 0x514: 0x3dc0, 0x515: 0x3dd0, - 0x518: 0x484c, 0x519: 0x4852, 0x51a: 0x3ce8, 0x51b: 0x3cf8, 0x51c: 0x3cf0, 0x51d: 0x3d00, - 0x520: 0x48c4, 0x521: 0x48ca, 0x522: 0x49e4, 0x523: 0x49fc, - 0x524: 0x49ec, 0x525: 0x4a04, 0x526: 0x49f4, 0x527: 0x4a0c, 0x528: 0x4858, 0x529: 0x485e, - 0x52a: 0x4954, 0x52b: 0x496c, 0x52c: 0x495c, 0x52d: 0x4974, 0x52e: 0x4964, 0x52f: 0x497c, - 0x530: 0x48dc, 0x531: 0x48e2, 0x532: 0x3e18, 0x533: 0x3e30, 0x534: 0x3e20, 0x535: 0x3e38, - 0x536: 0x3e28, 0x537: 0x3e40, 0x538: 0x4864, 0x539: 0x486a, 0x53a: 0x3d18, 0x53b: 0x3d30, + 0x518: 0x47b6, 0x519: 0x47bc, 0x51a: 0x3ce8, 0x51b: 0x3cf8, 0x51c: 0x3cf0, 0x51d: 0x3d00, + 0x520: 0x482e, 0x521: 0x4834, 0x522: 0x494e, 0x523: 0x4966, + 0x524: 0x4956, 0x525: 0x496e, 0x526: 0x495e, 0x527: 0x4976, 0x528: 0x47c2, 0x529: 0x47c8, + 0x52a: 0x48be, 0x52b: 0x48d6, 0x52c: 0x48c6, 0x52d: 0x48de, 0x52e: 0x48ce, 0x52f: 0x48e6, + 0x530: 0x4846, 0x531: 0x484c, 0x532: 0x3e18, 0x533: 0x3e30, 0x534: 0x3e20, 0x535: 0x3e38, + 0x536: 0x3e28, 0x537: 0x3e40, 0x538: 0x47ce, 0x539: 0x47d4, 0x53a: 0x3d18, 0x53b: 0x3d30, 0x53c: 0x3d20, 0x53d: 0x3d38, 0x53e: 0x3d28, 0x53f: 0x3d40, // Block 0x15, offset 0x540 - 0x540: 0x48e8, 0x541: 0x48ee, 0x542: 0x3e48, 0x543: 0x3e58, 0x544: 0x3e50, 0x545: 0x3e60, - 0x548: 0x4870, 0x549: 0x4876, 0x54a: 0x3d48, 0x54b: 0x3d58, - 0x54c: 0x3d50, 0x54d: 0x3d60, 0x550: 0x48fa, 0x551: 0x4900, + 0x540: 0x4852, 0x541: 0x4858, 0x542: 0x3e48, 0x543: 0x3e58, 0x544: 0x3e50, 0x545: 0x3e60, + 0x548: 0x47da, 0x549: 0x47e0, 0x54a: 0x3d48, 0x54b: 0x3d58, + 0x54c: 0x3d50, 0x54d: 0x3d60, 0x550: 0x4864, 0x551: 0x486a, 0x552: 0x3e80, 0x553: 0x3e98, 0x554: 0x3e88, 0x555: 0x3ea0, 0x556: 0x3e90, 0x557: 0x3ea8, - 0x559: 0x487c, 0x55b: 0x3d68, 0x55d: 0x3d70, - 0x55f: 0x3d78, 0x560: 0x4912, 0x561: 0x4918, 0x562: 0x4a14, 0x563: 0x4a2c, - 0x564: 0x4a1c, 0x565: 0x4a34, 0x566: 0x4a24, 0x567: 0x4a3c, 0x568: 0x4882, 0x569: 0x4888, - 0x56a: 0x4984, 0x56b: 0x499c, 0x56c: 0x498c, 0x56d: 0x49a4, 0x56e: 0x4994, 0x56f: 0x49ac, - 0x570: 0x488e, 0x571: 0x43b4, 0x572: 0x3691, 0x573: 0x43ba, 0x574: 0x48b8, 0x575: 0x43c0, - 0x576: 0x36a3, 0x577: 0x43c6, 0x578: 0x36c1, 0x579: 0x43cc, 0x57a: 0x36d9, 0x57b: 0x43d2, - 0x57c: 0x4906, 0x57d: 0x43d8, + 0x559: 0x47e6, 0x55b: 0x3d68, 0x55d: 0x3d70, + 0x55f: 0x3d78, 
0x560: 0x487c, 0x561: 0x4882, 0x562: 0x497e, 0x563: 0x4996, + 0x564: 0x4986, 0x565: 0x499e, 0x566: 0x498e, 0x567: 0x49a6, 0x568: 0x47ec, 0x569: 0x47f2, + 0x56a: 0x48ee, 0x56b: 0x4906, 0x56c: 0x48f6, 0x56d: 0x490e, 0x56e: 0x48fe, 0x56f: 0x4916, + 0x570: 0x47f8, 0x571: 0x431e, 0x572: 0x3691, 0x573: 0x4324, 0x574: 0x4822, 0x575: 0x432a, + 0x576: 0x36a3, 0x577: 0x4330, 0x578: 0x36c1, 0x579: 0x4336, 0x57a: 0x36d9, 0x57b: 0x433c, + 0x57c: 0x4870, 0x57d: 0x4342, // Block 0x16, offset 0x580 0x580: 0x3da0, 0x581: 0x3da8, 0x582: 0x4184, 0x583: 0x41a2, 0x584: 0x418e, 0x585: 0x41ac, 0x586: 0x4198, 0x587: 0x41b6, 0x588: 0x3cd8, 0x589: 0x3ce0, 0x58a: 0x40d0, 0x58b: 0x40ee, @@ -3149,19 +3153,19 @@ var nfcValues = [2944]uint16{ 0x5a4: 0x4206, 0x5a5: 0x4224, 0x5a6: 0x4210, 0x5a7: 0x422e, 0x5a8: 0x3d80, 0x5a9: 0x3d88, 0x5aa: 0x4148, 0x5ab: 0x4166, 0x5ac: 0x4152, 0x5ad: 0x4170, 0x5ae: 0x415c, 0x5af: 0x417a, 0x5b0: 0x3685, 0x5b1: 0x367f, 0x5b2: 0x3d90, 0x5b3: 0x368b, 0x5b4: 0x3d98, - 0x5b6: 0x48a6, 0x5b7: 0x3db0, 0x5b8: 0x35f5, 0x5b9: 0x35ef, 0x5ba: 0x35e3, 0x5bb: 0x4384, + 0x5b6: 0x4810, 0x5b7: 0x3db0, 0x5b8: 0x35f5, 0x5b9: 0x35ef, 0x5ba: 0x35e3, 0x5bb: 0x42ee, 0x5bc: 0x35fb, 0x5bd: 0x8100, 0x5be: 0x01d3, 0x5bf: 0xa100, // Block 0x17, offset 0x5c0 0x5c0: 0x8100, 0x5c1: 0x35a7, 0x5c2: 0x3dd8, 0x5c3: 0x369d, 0x5c4: 0x3de0, - 0x5c6: 0x48d0, 0x5c7: 0x3df8, 0x5c8: 0x3601, 0x5c9: 0x438a, 0x5ca: 0x360d, 0x5cb: 0x4390, + 0x5c6: 0x483a, 0x5c7: 0x3df8, 0x5c8: 0x3601, 0x5c9: 0x42f4, 0x5ca: 0x360d, 0x5cb: 0x42fa, 0x5cc: 0x3619, 0x5cd: 0x3b8f, 0x5ce: 0x3b96, 0x5cf: 0x3b9d, 0x5d0: 0x36b5, 0x5d1: 0x36af, - 0x5d2: 0x3e00, 0x5d3: 0x457a, 0x5d6: 0x36bb, 0x5d7: 0x3e10, - 0x5d8: 0x3631, 0x5d9: 0x362b, 0x5da: 0x361f, 0x5db: 0x4396, 0x5dd: 0x3ba4, - 0x5de: 0x3bab, 0x5df: 0x3bb2, 0x5e0: 0x36eb, 0x5e1: 0x36e5, 0x5e2: 0x3e68, 0x5e3: 0x4582, + 0x5d2: 0x3e00, 0x5d3: 0x44e4, 0x5d6: 0x36bb, 0x5d7: 0x3e10, + 0x5d8: 0x3631, 0x5d9: 0x362b, 0x5da: 0x361f, 0x5db: 0x4300, 0x5dd: 0x3ba4, + 0x5de: 0x3bab, 0x5df: 0x3bb2, 0x5e0: 0x36eb, 0x5e1: 0x36e5, 0x5e2: 0x3e68, 0x5e3: 0x44ec, 0x5e4: 0x36cd, 0x5e5: 0x36d3, 0x5e6: 0x36f1, 0x5e7: 0x3e78, 0x5e8: 0x3661, 0x5e9: 0x365b, - 0x5ea: 0x364f, 0x5eb: 0x43a2, 0x5ec: 0x3649, 0x5ed: 0x359b, 0x5ee: 0x437e, 0x5ef: 0x0081, + 0x5ea: 0x364f, 0x5eb: 0x430c, 0x5ec: 0x3649, 0x5ed: 0x359b, 0x5ee: 0x42e8, 0x5ef: 0x0081, 0x5f2: 0x3eb0, 0x5f3: 0x36f7, 0x5f4: 0x3eb8, - 0x5f6: 0x491e, 0x5f7: 0x3ed0, 0x5f8: 0x363d, 0x5f9: 0x439c, 0x5fa: 0x366d, 0x5fb: 0x43ae, + 0x5f6: 0x4888, 0x5f7: 0x3ed0, 0x5f8: 0x363d, 0x5f9: 0x4306, 0x5fa: 0x366d, 0x5fb: 0x4318, 0x5fc: 0x3679, 0x5fd: 0x4256, 0x5fe: 0xa100, // Block 0x18, offset 0x600 0x601: 0x3c06, 0x603: 0xa000, 0x604: 0x3c0d, 0x605: 0xa000, @@ -3519,8 +3523,8 @@ var nfcSparseValues = [688]valueRange{ {value: 0x8100, lo: 0xb8, hi: 0xb8}, // Block 0x1, offset 0x5 {value: 0x0091, lo: 0x03}, - {value: 0x4778, lo: 0xa0, hi: 0xa1}, - {value: 0x47aa, lo: 0xaf, hi: 0xb0}, + {value: 0x46e2, lo: 0xa0, hi: 0xa1}, + {value: 0x4714, lo: 0xaf, hi: 0xb0}, {value: 0xa000, lo: 0xb7, hi: 0xb7}, // Block 0x2, offset 0x9 {value: 0x0000, lo: 0x01}, @@ -3533,11 +3537,11 @@ var nfcSparseValues = [688]valueRange{ {value: 0xa000, lo: 0x81, hi: 0x81}, {value: 0xa000, lo: 0x85, hi: 0x85}, {value: 0xa000, lo: 0x89, hi: 0x89}, - {value: 0x48d6, lo: 0x8a, hi: 0x8a}, - {value: 0x48f4, lo: 0x8b, hi: 0x8b}, + {value: 0x4840, lo: 0x8a, hi: 0x8a}, + {value: 0x485e, lo: 0x8b, hi: 0x8b}, {value: 0x36c7, lo: 0x8c, hi: 0x8c}, {value: 0x36df, lo: 0x8d, hi: 0x8d}, - {value: 0x490c, lo: 0x8e, hi: 0x8e}, + {value: 0x4876, 
lo: 0x8e, hi: 0x8e}, {value: 0xa000, lo: 0x92, hi: 0x92}, {value: 0x36fd, lo: 0x93, hi: 0x94}, // Block 0x5, offset 0x18 @@ -3665,7 +3669,7 @@ var nfcSparseValues = [688]valueRange{ {value: 0x812d, lo: 0x92, hi: 0x92}, {value: 0x8132, lo: 0x93, hi: 0x93}, {value: 0x8132, lo: 0x94, hi: 0x94}, - {value: 0x45b2, lo: 0x98, hi: 0x9f}, + {value: 0x451c, lo: 0x98, hi: 0x9f}, // Block 0x12, offset 0x89 {value: 0x0000, lo: 0x02}, {value: 0x8102, lo: 0xbc, hi: 0xbc}, @@ -3676,18 +3680,18 @@ var nfcSparseValues = [688]valueRange{ {value: 0x2c9e, lo: 0x8b, hi: 0x8c}, {value: 0x8104, lo: 0x8d, hi: 0x8d}, {value: 0x9900, lo: 0x97, hi: 0x97}, - {value: 0x45f2, lo: 0x9c, hi: 0x9d}, - {value: 0x4602, lo: 0x9f, hi: 0x9f}, + {value: 0x455c, lo: 0x9c, hi: 0x9d}, + {value: 0x456c, lo: 0x9f, hi: 0x9f}, // Block 0x14, offset 0x93 {value: 0x0000, lo: 0x03}, - {value: 0x462a, lo: 0xb3, hi: 0xb3}, - {value: 0x4632, lo: 0xb6, hi: 0xb6}, + {value: 0x4594, lo: 0xb3, hi: 0xb3}, + {value: 0x459c, lo: 0xb6, hi: 0xb6}, {value: 0x8102, lo: 0xbc, hi: 0xbc}, // Block 0x15, offset 0x97 {value: 0x0008, lo: 0x03}, {value: 0x8104, lo: 0x8d, hi: 0x8d}, - {value: 0x460a, lo: 0x99, hi: 0x9b}, - {value: 0x4622, lo: 0x9e, hi: 0x9e}, + {value: 0x4574, lo: 0x99, hi: 0x9b}, + {value: 0x458c, lo: 0x9e, hi: 0x9e}, // Block 0x16, offset 0x9b {value: 0x0000, lo: 0x01}, {value: 0x8102, lo: 0xbc, hi: 0xbc}, @@ -3702,8 +3706,8 @@ var nfcSparseValues = [688]valueRange{ {value: 0x2cbe, lo: 0x8c, hi: 0x8c}, {value: 0x8104, lo: 0x8d, hi: 0x8d}, {value: 0x9900, lo: 0x96, hi: 0x97}, - {value: 0x463a, lo: 0x9c, hi: 0x9c}, - {value: 0x4642, lo: 0x9d, hi: 0x9d}, + {value: 0x45a4, lo: 0x9c, hi: 0x9c}, + {value: 0x45ac, lo: 0x9d, hi: 0x9d}, // Block 0x19, offset 0xa8 {value: 0x0000, lo: 0x03}, {value: 0xa000, lo: 0x92, hi: 0x92}, @@ -3787,18 +3791,18 @@ var nfcSparseValues = [688]valueRange{ {value: 0x263d, lo: 0xa9, hi: 0xa9}, {value: 0x8126, lo: 0xb1, hi: 0xb1}, {value: 0x8127, lo: 0xb2, hi: 0xb2}, - {value: 0x4a66, lo: 0xb3, hi: 0xb3}, + {value: 0x4a84, lo: 0xb3, hi: 0xb3}, {value: 0x8128, lo: 0xb4, hi: 0xb4}, - {value: 0x4a6f, lo: 0xb5, hi: 0xb5}, - {value: 0x464a, lo: 0xb6, hi: 0xb6}, + {value: 0x4a8d, lo: 0xb5, hi: 0xb5}, + {value: 0x45b4, lo: 0xb6, hi: 0xb6}, {value: 0x8200, lo: 0xb7, hi: 0xb7}, - {value: 0x4652, lo: 0xb8, hi: 0xb8}, + {value: 0x45bc, lo: 0xb8, hi: 0xb8}, {value: 0x8200, lo: 0xb9, hi: 0xb9}, {value: 0x8127, lo: 0xba, hi: 0xbd}, // Block 0x27, offset 0xf5 {value: 0x0000, lo: 0x0b}, {value: 0x8127, lo: 0x80, hi: 0x80}, - {value: 0x4a78, lo: 0x81, hi: 0x81}, + {value: 0x4a96, lo: 0x81, hi: 0x81}, {value: 0x8132, lo: 0x82, hi: 0x83}, {value: 0x8104, lo: 0x84, hi: 0x84}, {value: 0x8132, lo: 0x86, hi: 0x87}, @@ -3975,7 +3979,7 @@ var nfcSparseValues = [688]valueRange{ {value: 0x048b, lo: 0xa9, hi: 0xaa}, // Block 0x45, offset 0x189 {value: 0x0000, lo: 0x01}, - {value: 0x4573, lo: 0x9c, hi: 0x9c}, + {value: 0x44dd, lo: 0x9c, hi: 0x9c}, // Block 0x46, offset 0x18b {value: 0x0000, lo: 0x01}, {value: 0x8132, lo: 0xaf, hi: 0xb1}, @@ -3994,12 +3998,12 @@ var nfcSparseValues = [688]valueRange{ {value: 0x812f, lo: 0xae, hi: 0xaf}, // Block 0x4a, offset 0x197 {value: 0x0000, lo: 0x03}, - {value: 0x4a81, lo: 0xb3, hi: 0xb3}, - {value: 0x4a81, lo: 0xb5, hi: 0xb6}, - {value: 0x4a81, lo: 0xba, hi: 0xbf}, + {value: 0x4a9f, lo: 0xb3, hi: 0xb3}, + {value: 0x4a9f, lo: 0xb5, hi: 0xb6}, + {value: 0x4a9f, lo: 0xba, hi: 0xbf}, // Block 0x4b, offset 0x19b {value: 0x0000, lo: 0x01}, - {value: 0x4a81, lo: 0x8f, hi: 0xa3}, + {value: 0x4a9f, lo: 0x8f, hi: 0xa3}, 
// Block 0x4c, offset 0x19d {value: 0x0000, lo: 0x01}, {value: 0x8100, lo: 0xae, hi: 0xbe}, @@ -4119,29 +4123,29 @@ var nfcSparseValues = [688]valueRange{ {value: 0xc600, lo: 0x89, hi: 0xa3}, // Block 0x63, offset 0x1fb {value: 0x0006, lo: 0x0d}, - {value: 0x4426, lo: 0x9d, hi: 0x9d}, + {value: 0x4390, lo: 0x9d, hi: 0x9d}, {value: 0x8115, lo: 0x9e, hi: 0x9e}, - {value: 0x4498, lo: 0x9f, hi: 0x9f}, - {value: 0x4486, lo: 0xaa, hi: 0xab}, - {value: 0x458a, lo: 0xac, hi: 0xac}, - {value: 0x4592, lo: 0xad, hi: 0xad}, - {value: 0x43de, lo: 0xae, hi: 0xb1}, - {value: 0x43fc, lo: 0xb2, hi: 0xb4}, - {value: 0x4414, lo: 0xb5, hi: 0xb6}, - {value: 0x4420, lo: 0xb8, hi: 0xb8}, - {value: 0x442c, lo: 0xb9, hi: 0xbb}, - {value: 0x4444, lo: 0xbc, hi: 0xbc}, - {value: 0x444a, lo: 0xbe, hi: 0xbe}, + {value: 0x4402, lo: 0x9f, hi: 0x9f}, + {value: 0x43f0, lo: 0xaa, hi: 0xab}, + {value: 0x44f4, lo: 0xac, hi: 0xac}, + {value: 0x44fc, lo: 0xad, hi: 0xad}, + {value: 0x4348, lo: 0xae, hi: 0xb1}, + {value: 0x4366, lo: 0xb2, hi: 0xb4}, + {value: 0x437e, lo: 0xb5, hi: 0xb6}, + {value: 0x438a, lo: 0xb8, hi: 0xb8}, + {value: 0x4396, lo: 0xb9, hi: 0xbb}, + {value: 0x43ae, lo: 0xbc, hi: 0xbc}, + {value: 0x43b4, lo: 0xbe, hi: 0xbe}, // Block 0x64, offset 0x209 {value: 0x0006, lo: 0x08}, - {value: 0x4450, lo: 0x80, hi: 0x81}, - {value: 0x445c, lo: 0x83, hi: 0x84}, - {value: 0x446e, lo: 0x86, hi: 0x89}, - {value: 0x4492, lo: 0x8a, hi: 0x8a}, - {value: 0x440e, lo: 0x8b, hi: 0x8b}, - {value: 0x43f6, lo: 0x8c, hi: 0x8c}, - {value: 0x443e, lo: 0x8d, hi: 0x8d}, - {value: 0x4468, lo: 0x8e, hi: 0x8e}, + {value: 0x43ba, lo: 0x80, hi: 0x81}, + {value: 0x43c6, lo: 0x83, hi: 0x84}, + {value: 0x43d8, lo: 0x86, hi: 0x89}, + {value: 0x43fc, lo: 0x8a, hi: 0x8a}, + {value: 0x4378, lo: 0x8b, hi: 0x8b}, + {value: 0x4360, lo: 0x8c, hi: 0x8c}, + {value: 0x43a8, lo: 0x8d, hi: 0x8d}, + {value: 0x43d2, lo: 0x8e, hi: 0x8e}, // Block 0x65, offset 0x212 {value: 0x0000, lo: 0x02}, {value: 0x8100, lo: 0xa4, hi: 0xa5}, @@ -4179,16 +4183,16 @@ var nfcSparseValues = [688]valueRange{ {value: 0x8100, lo: 0xb5, hi: 0xba}, // Block 0x6e, offset 0x22c {value: 0x0000, lo: 0x04}, - {value: 0x4a81, lo: 0x9e, hi: 0x9f}, - {value: 0x4a81, lo: 0xa3, hi: 0xa3}, - {value: 0x4a81, lo: 0xa5, hi: 0xa6}, - {value: 0x4a81, lo: 0xaa, hi: 0xaf}, + {value: 0x4a9f, lo: 0x9e, hi: 0x9f}, + {value: 0x4a9f, lo: 0xa3, hi: 0xa3}, + {value: 0x4a9f, lo: 0xa5, hi: 0xa6}, + {value: 0x4a9f, lo: 0xaa, hi: 0xaf}, // Block 0x6f, offset 0x231 {value: 0x0000, lo: 0x05}, - {value: 0x4a81, lo: 0x82, hi: 0x87}, - {value: 0x4a81, lo: 0x8a, hi: 0x8f}, - {value: 0x4a81, lo: 0x92, hi: 0x97}, - {value: 0x4a81, lo: 0x9a, hi: 0x9c}, + {value: 0x4a9f, lo: 0x82, hi: 0x87}, + {value: 0x4a9f, lo: 0x8a, hi: 0x8f}, + {value: 0x4a9f, lo: 0x92, hi: 0x97}, + {value: 0x4a9f, lo: 0x9a, hi: 0x9c}, {value: 0x8100, lo: 0xa3, hi: 0xa3}, // Block 0x70, offset 0x237 {value: 0x0000, lo: 0x01}, @@ -4295,13 +4299,13 @@ var nfcSparseValues = [688]valueRange{ {value: 0x8101, lo: 0x9e, hi: 0x9e}, // Block 0x86, offset 0x288 {value: 0x0000, lo: 0x0c}, - {value: 0x4662, lo: 0x9e, hi: 0x9e}, - {value: 0x466c, lo: 0x9f, hi: 0x9f}, - {value: 0x46a0, lo: 0xa0, hi: 0xa0}, - {value: 0x46ae, lo: 0xa1, hi: 0xa1}, - {value: 0x46bc, lo: 0xa2, hi: 0xa2}, - {value: 0x46ca, lo: 0xa3, hi: 0xa3}, - {value: 0x46d8, lo: 0xa4, hi: 0xa4}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + {value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 
0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, {value: 0x812b, lo: 0xa5, hi: 0xa6}, {value: 0x8101, lo: 0xa7, hi: 0xa9}, {value: 0x8130, lo: 0xad, hi: 0xad}, @@ -4313,14 +4317,14 @@ var nfcSparseValues = [688]valueRange{ {value: 0x8132, lo: 0x85, hi: 0x89}, {value: 0x812d, lo: 0x8a, hi: 0x8b}, {value: 0x8132, lo: 0xaa, hi: 0xad}, - {value: 0x4676, lo: 0xbb, hi: 0xbb}, - {value: 0x4680, lo: 0xbc, hi: 0xbc}, - {value: 0x46e6, lo: 0xbd, hi: 0xbd}, - {value: 0x4702, lo: 0xbe, hi: 0xbe}, - {value: 0x46f4, lo: 0xbf, hi: 0xbf}, + {value: 0x45e0, lo: 0xbb, hi: 0xbb}, + {value: 0x45ea, lo: 0xbc, hi: 0xbc}, + {value: 0x4650, lo: 0xbd, hi: 0xbd}, + {value: 0x466c, lo: 0xbe, hi: 0xbe}, + {value: 0x465e, lo: 0xbf, hi: 0xbf}, // Block 0x88, offset 0x29f {value: 0x0000, lo: 0x01}, - {value: 0x4710, lo: 0x80, hi: 0x80}, + {value: 0x467a, lo: 0x80, hi: 0x80}, // Block 0x89, offset 0x2a1 {value: 0x0000, lo: 0x01}, {value: 0x8132, lo: 0x82, hi: 0x84}, @@ -4513,7 +4517,7 @@ func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 { return 0 } -// nfkcTrie. Total size: 16994 bytes (16.60 KiB). Checksum: 146925fc21092b17. +// nfkcTrie. Total size: 16994 bytes (16.60 KiB). Checksum: c3ed54ee046f3c46. type nfkcTrie struct{} func newNfkcTrie(i int) *nfkcTrie { @@ -4549,22 +4553,22 @@ var nfkcValues = [5888]uint16{ 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, // Block 0x2, offset 0x80 // Block 0x3, offset 0xc0 - 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x471e, 0xc3: 0x2f79, 0xc4: 0x472d, 0xc5: 0x4732, - 0xc6: 0xa000, 0xc7: 0x473c, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x4741, 0xcb: 0x2ffb, - 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x4755, 0xd1: 0x3104, - 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x475f, 0xd5: 0x4764, 0xd6: 0x4773, - 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x47a5, 0xdd: 0x3235, - 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x47af, 0xe3: 0x3285, - 0xe4: 0x47be, 0xe5: 0x47c3, 0xe6: 0xa000, 0xe7: 0x47cd, 0xe8: 0x32ee, 0xe9: 0x32f3, - 0xea: 0x47d2, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x47e6, - 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x47f0, 0xf5: 0x47f5, - 0xf6: 0x4804, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, - 0xfc: 0x4836, 0xfd: 0x3550, 0xff: 0x3569, + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, // Block 0x4, offset 0x100 - 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x4723, 0x103: 0x47b4, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, - 0x112: 0x4746, 0x113: 0x47d7, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x112: 
0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339, 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, @@ -4575,12 +4579,12 @@ var nfkcValues = [5888]uint16{ // Block 0x5, offset 0x140 0x140: 0x1c34, 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, 0x149: 0x1c5c, - 0x14c: 0x4769, 0x14d: 0x47fa, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, - 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x478c, 0x15b: 0x481d, 0x15c: 0x317c, 0x15d: 0x348d, - 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x4791, 0x161: 0x4822, 0x162: 0x31a4, 0x163: 0x34ba, - 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x479b, 0x169: 0x482c, - 0x16a: 0x47a0, 0x16b: 0x4831, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0x00a7, @@ -4592,7 +4596,7 @@ var nfkcValues = [5888]uint16{ 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, - 0x1aa: 0x4782, 0x1ab: 0x4813, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, 0x1b0: 0x33c5, 0x1b1: 0x1942, 0x1b2: 0x1945, 0x1b3: 0x19cf, 0x1b4: 0x3028, 0x1b5: 0x3334, 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, @@ -4603,8 +4607,8 @@ var nfkcValues = [5888]uint16{ 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, 0x1de: 0x305a, 0x1df: 0x3366, - 0x1e6: 0x4728, 0x1e7: 0x47b9, 0x1e8: 0x4750, 0x1e9: 0x47e1, - 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x476e, 0x1ef: 0x47ff, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, // Block 0x8, offset 0x200 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, @@ -4619,7 +4623,7 @@ var nfkcValues = [5888]uint16{ 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, // Block 0x9, offset 0x240 - 0x240: 0x4a44, 0x241: 0x4a49, 0x242: 0x9932, 0x243: 0x4a4e, 0x244: 0x4a53, 0x245: 0x9936, + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 
0x24a: 0x8132, 0x24b: 0x8132, 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, @@ -4631,22 +4635,22 @@ var nfkcValues = [5888]uint16{ 0x27a: 0x42a5, 0x27e: 0x0037, // Block 0xa, offset 0x280 - 0x284: 0x425a, 0x285: 0x4511, + 0x284: 0x425a, 0x285: 0x447b, 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625, 0x28c: 0x3643, 0x28e: 0x3655, 0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000, 0x295: 0xa000, 0x297: 0xa000, 0x299: 0xa000, 0x29f: 0xa000, 0x2a1: 0xa000, 0x2a5: 0xa000, 0x2a9: 0xa000, - 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x4894, 0x2ad: 0x3697, 0x2ae: 0x48be, 0x2af: 0x36a9, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, 0x2b7: 0xa000, 0x2b9: 0xa000, 0x2bf: 0xa000, // Block 0xb, offset 0x2c0 0x2c1: 0xa000, 0x2c5: 0xa000, - 0x2c9: 0xa000, 0x2ca: 0x48d6, 0x2cb: 0x48f4, - 0x2cc: 0x36c7, 0x2cd: 0x36df, 0x2ce: 0x490c, 0x2d0: 0x01be, 0x2d1: 0x01d0, - 0x2d2: 0x01ac, 0x2d3: 0x43a2, 0x2d4: 0x43a8, 0x2d5: 0x01fa, 0x2d6: 0x01e8, + 0x2c9: 0xa000, 0x2ca: 0x4840, 0x2cb: 0x485e, + 0x2cc: 0x36c7, 0x2cd: 0x36df, 0x2ce: 0x4876, 0x2d0: 0x01be, 0x2d1: 0x01d0, + 0x2d2: 0x01ac, 0x2d3: 0x430c, 0x2d4: 0x4312, 0x2d5: 0x01fa, 0x2d6: 0x01e8, 0x2f0: 0x01d6, 0x2f1: 0x01eb, 0x2f2: 0x01ee, 0x2f4: 0x0188, 0x2f5: 0x01c7, 0x2f9: 0x01a6, // Block 0xc, offset 0x300 @@ -4726,15 +4730,15 @@ var nfkcValues = [5888]uint16{ 0x4e4: 0x305f, 0x4e5: 0x336b, 0x4e6: 0x3055, 0x4e7: 0x3361, 0x4e8: 0x3064, 0x4e9: 0x3370, 0x4ea: 0x3069, 0x4eb: 0x3375, 0x4ec: 0x30af, 0x4ed: 0x33bb, 0x4ee: 0x390b, 0x4ef: 0x3a9a, 0x4f0: 0x30b9, 0x4f1: 0x33ca, 0x4f2: 0x30c3, 0x4f3: 0x33d4, 0x4f4: 0x30cd, 0x4f5: 0x33de, - 0x4f6: 0x475a, 0x4f7: 0x47eb, 0x4f8: 0x3912, 0x4f9: 0x3aa1, 0x4fa: 0x30e6, 0x4fb: 0x33f7, + 0x4f6: 0x46c4, 0x4f7: 0x4755, 0x4f8: 0x3912, 0x4f9: 0x3aa1, 0x4fa: 0x30e6, 0x4fb: 0x33f7, 0x4fc: 0x30e1, 0x4fd: 0x33f2, 0x4fe: 0x30eb, 0x4ff: 0x33fc, // Block 0x14, offset 0x500 0x500: 0x30f0, 0x501: 0x3401, 0x502: 0x30f5, 0x503: 0x3406, 0x504: 0x3109, 0x505: 0x341a, 0x506: 0x3113, 0x507: 0x3424, 0x508: 0x3122, 0x509: 0x3433, 0x50a: 0x311d, 0x50b: 0x342e, 0x50c: 0x3935, 0x50d: 0x3ac4, 0x50e: 0x3943, 0x50f: 0x3ad2, 0x510: 0x394a, 0x511: 0x3ad9, 0x512: 0x3951, 0x513: 0x3ae0, 0x514: 0x314f, 0x515: 0x3460, 0x516: 0x3154, 0x517: 0x3465, - 0x518: 0x315e, 0x519: 0x346f, 0x51a: 0x4787, 0x51b: 0x4818, 0x51c: 0x3997, 0x51d: 0x3b26, - 0x51e: 0x3177, 0x51f: 0x3488, 0x520: 0x3181, 0x521: 0x3492, 0x522: 0x4796, 0x523: 0x4827, + 0x518: 0x315e, 0x519: 0x346f, 0x51a: 0x46f1, 0x51b: 0x4782, 0x51c: 0x3997, 0x51d: 0x3b26, + 0x51e: 0x3177, 0x51f: 0x3488, 0x520: 0x3181, 0x521: 0x3492, 0x522: 0x4700, 0x523: 0x4791, 0x524: 0x399e, 0x525: 0x3b2d, 0x526: 0x39a5, 0x527: 0x3b34, 0x528: 0x39ac, 0x529: 0x3b3b, 0x52a: 0x3190, 0x52b: 0x34a1, 0x52c: 0x319a, 0x52d: 0x34b0, 0x52e: 0x31ae, 0x52f: 0x34c4, 0x530: 0x31a9, 0x531: 0x34bf, 0x532: 0x31ea, 0x533: 0x3500, 0x534: 0x31f9, 0x535: 0x350f, @@ -4746,16 +4750,16 @@ var nfkcValues = [5888]uint16{ 0x54c: 0x322b, 0x54d: 0x3546, 0x54e: 0x3249, 0x54f: 0x3564, 0x550: 0x3262, 0x551: 0x3582, 0x552: 0x3271, 0x553: 0x3591, 0x554: 0x3276, 0x555: 0x3596, 0x556: 0x337a, 0x557: 0x34a6, 0x558: 0x3537, 0x559: 0x3573, 0x55a: 0x1be0, 0x55b: 0x42d7, - 0x560: 0x4737, 0x561: 0x47c8, 0x562: 0x2f83, 0x563: 0x328f, + 0x560: 0x46a1, 0x561: 0x4732, 0x562: 0x2f83, 0x563: 0x328f, 0x564: 0x3878, 0x565: 0x3a07, 0x566: 0x3871, 0x567: 0x3a00, 
0x568: 0x3886, 0x569: 0x3a15, 0x56a: 0x387f, 0x56b: 0x3a0e, 0x56c: 0x38be, 0x56d: 0x3a4d, 0x56e: 0x3894, 0x56f: 0x3a23, 0x570: 0x388d, 0x571: 0x3a1c, 0x572: 0x38a2, 0x573: 0x3a31, 0x574: 0x389b, 0x575: 0x3a2a, - 0x576: 0x38c5, 0x577: 0x3a54, 0x578: 0x474b, 0x579: 0x47dc, 0x57a: 0x3000, 0x57b: 0x330c, + 0x576: 0x38c5, 0x577: 0x3a54, 0x578: 0x46b5, 0x579: 0x4746, 0x57a: 0x3000, 0x57b: 0x330c, 0x57c: 0x2fec, 0x57d: 0x32f8, 0x57e: 0x38da, 0x57f: 0x3a69, // Block 0x16, offset 0x580 0x580: 0x38d3, 0x581: 0x3a62, 0x582: 0x38e8, 0x583: 0x3a77, 0x584: 0x38e1, 0x585: 0x3a70, 0x586: 0x38fd, 0x587: 0x3a8c, 0x588: 0x3091, 0x589: 0x339d, 0x58a: 0x30a5, 0x58b: 0x33b1, - 0x58c: 0x477d, 0x58d: 0x480e, 0x58e: 0x3136, 0x58f: 0x3447, 0x590: 0x3920, 0x591: 0x3aaf, + 0x58c: 0x46e7, 0x58d: 0x4778, 0x58e: 0x3136, 0x58f: 0x3447, 0x590: 0x3920, 0x591: 0x3aaf, 0x592: 0x3919, 0x593: 0x3aa8, 0x594: 0x392e, 0x595: 0x3abd, 0x596: 0x3927, 0x597: 0x3ab6, 0x598: 0x3989, 0x599: 0x3b18, 0x59a: 0x396d, 0x59b: 0x3afc, 0x59c: 0x3966, 0x59d: 0x3af5, 0x59e: 0x397b, 0x59f: 0x3b0a, 0x5a0: 0x3974, 0x5a1: 0x3b03, 0x5a2: 0x3982, 0x5a3: 0x3b11, @@ -4764,29 +4768,29 @@ var nfkcValues = [5888]uint16{ 0x5b0: 0x39f9, 0x5b1: 0x3b88, 0x5b2: 0x3230, 0x5b3: 0x354b, 0x5b4: 0x3258, 0x5b5: 0x3578, 0x5b6: 0x3253, 0x5b7: 0x356e, 0x5b8: 0x323f, 0x5b9: 0x355a, // Block 0x17, offset 0x5c0 - 0x5c0: 0x489a, 0x5c1: 0x48a0, 0x5c2: 0x49b4, 0x5c3: 0x49cc, 0x5c4: 0x49bc, 0x5c5: 0x49d4, - 0x5c6: 0x49c4, 0x5c7: 0x49dc, 0x5c8: 0x4840, 0x5c9: 0x4846, 0x5ca: 0x4924, 0x5cb: 0x493c, - 0x5cc: 0x492c, 0x5cd: 0x4944, 0x5ce: 0x4934, 0x5cf: 0x494c, 0x5d0: 0x48ac, 0x5d1: 0x48b2, + 0x5c0: 0x4804, 0x5c1: 0x480a, 0x5c2: 0x491e, 0x5c3: 0x4936, 0x5c4: 0x4926, 0x5c5: 0x493e, + 0x5c6: 0x492e, 0x5c7: 0x4946, 0x5c8: 0x47aa, 0x5c9: 0x47b0, 0x5ca: 0x488e, 0x5cb: 0x48a6, + 0x5cc: 0x4896, 0x5cd: 0x48ae, 0x5ce: 0x489e, 0x5cf: 0x48b6, 0x5d0: 0x4816, 0x5d1: 0x481c, 0x5d2: 0x3db8, 0x5d3: 0x3dc8, 0x5d4: 0x3dc0, 0x5d5: 0x3dd0, - 0x5d8: 0x484c, 0x5d9: 0x4852, 0x5da: 0x3ce8, 0x5db: 0x3cf8, 0x5dc: 0x3cf0, 0x5dd: 0x3d00, - 0x5e0: 0x48c4, 0x5e1: 0x48ca, 0x5e2: 0x49e4, 0x5e3: 0x49fc, - 0x5e4: 0x49ec, 0x5e5: 0x4a04, 0x5e6: 0x49f4, 0x5e7: 0x4a0c, 0x5e8: 0x4858, 0x5e9: 0x485e, - 0x5ea: 0x4954, 0x5eb: 0x496c, 0x5ec: 0x495c, 0x5ed: 0x4974, 0x5ee: 0x4964, 0x5ef: 0x497c, - 0x5f0: 0x48dc, 0x5f1: 0x48e2, 0x5f2: 0x3e18, 0x5f3: 0x3e30, 0x5f4: 0x3e20, 0x5f5: 0x3e38, - 0x5f6: 0x3e28, 0x5f7: 0x3e40, 0x5f8: 0x4864, 0x5f9: 0x486a, 0x5fa: 0x3d18, 0x5fb: 0x3d30, + 0x5d8: 0x47b6, 0x5d9: 0x47bc, 0x5da: 0x3ce8, 0x5db: 0x3cf8, 0x5dc: 0x3cf0, 0x5dd: 0x3d00, + 0x5e0: 0x482e, 0x5e1: 0x4834, 0x5e2: 0x494e, 0x5e3: 0x4966, + 0x5e4: 0x4956, 0x5e5: 0x496e, 0x5e6: 0x495e, 0x5e7: 0x4976, 0x5e8: 0x47c2, 0x5e9: 0x47c8, + 0x5ea: 0x48be, 0x5eb: 0x48d6, 0x5ec: 0x48c6, 0x5ed: 0x48de, 0x5ee: 0x48ce, 0x5ef: 0x48e6, + 0x5f0: 0x4846, 0x5f1: 0x484c, 0x5f2: 0x3e18, 0x5f3: 0x3e30, 0x5f4: 0x3e20, 0x5f5: 0x3e38, + 0x5f6: 0x3e28, 0x5f7: 0x3e40, 0x5f8: 0x47ce, 0x5f9: 0x47d4, 0x5fa: 0x3d18, 0x5fb: 0x3d30, 0x5fc: 0x3d20, 0x5fd: 0x3d38, 0x5fe: 0x3d28, 0x5ff: 0x3d40, // Block 0x18, offset 0x600 - 0x600: 0x48e8, 0x601: 0x48ee, 0x602: 0x3e48, 0x603: 0x3e58, 0x604: 0x3e50, 0x605: 0x3e60, - 0x608: 0x4870, 0x609: 0x4876, 0x60a: 0x3d48, 0x60b: 0x3d58, - 0x60c: 0x3d50, 0x60d: 0x3d60, 0x610: 0x48fa, 0x611: 0x4900, + 0x600: 0x4852, 0x601: 0x4858, 0x602: 0x3e48, 0x603: 0x3e58, 0x604: 0x3e50, 0x605: 0x3e60, + 0x608: 0x47da, 0x609: 0x47e0, 0x60a: 0x3d48, 0x60b: 0x3d58, + 0x60c: 0x3d50, 0x60d: 0x3d60, 0x610: 0x4864, 0x611: 0x486a, 0x612: 0x3e80, 0x613: 0x3e98, 
0x614: 0x3e88, 0x615: 0x3ea0, 0x616: 0x3e90, 0x617: 0x3ea8, - 0x619: 0x487c, 0x61b: 0x3d68, 0x61d: 0x3d70, - 0x61f: 0x3d78, 0x620: 0x4912, 0x621: 0x4918, 0x622: 0x4a14, 0x623: 0x4a2c, - 0x624: 0x4a1c, 0x625: 0x4a34, 0x626: 0x4a24, 0x627: 0x4a3c, 0x628: 0x4882, 0x629: 0x4888, - 0x62a: 0x4984, 0x62b: 0x499c, 0x62c: 0x498c, 0x62d: 0x49a4, 0x62e: 0x4994, 0x62f: 0x49ac, - 0x630: 0x488e, 0x631: 0x43b4, 0x632: 0x3691, 0x633: 0x43ba, 0x634: 0x48b8, 0x635: 0x43c0, - 0x636: 0x36a3, 0x637: 0x43c6, 0x638: 0x36c1, 0x639: 0x43cc, 0x63a: 0x36d9, 0x63b: 0x43d2, - 0x63c: 0x4906, 0x63d: 0x43d8, + 0x619: 0x47e6, 0x61b: 0x3d68, 0x61d: 0x3d70, + 0x61f: 0x3d78, 0x620: 0x487c, 0x621: 0x4882, 0x622: 0x497e, 0x623: 0x4996, + 0x624: 0x4986, 0x625: 0x499e, 0x626: 0x498e, 0x627: 0x49a6, 0x628: 0x47ec, 0x629: 0x47f2, + 0x62a: 0x48ee, 0x62b: 0x4906, 0x62c: 0x48f6, 0x62d: 0x490e, 0x62e: 0x48fe, 0x62f: 0x4916, + 0x630: 0x47f8, 0x631: 0x431e, 0x632: 0x3691, 0x633: 0x4324, 0x634: 0x4822, 0x635: 0x432a, + 0x636: 0x36a3, 0x637: 0x4330, 0x638: 0x36c1, 0x639: 0x4336, 0x63a: 0x36d9, 0x63b: 0x433c, + 0x63c: 0x4870, 0x63d: 0x4342, // Block 0x19, offset 0x640 0x640: 0x3da0, 0x641: 0x3da8, 0x642: 0x4184, 0x643: 0x41a2, 0x644: 0x418e, 0x645: 0x41ac, 0x646: 0x4198, 0x647: 0x41b6, 0x648: 0x3cd8, 0x649: 0x3ce0, 0x64a: 0x40d0, 0x64b: 0x40ee, @@ -4797,19 +4801,19 @@ var nfkcValues = [5888]uint16{ 0x664: 0x4206, 0x665: 0x4224, 0x666: 0x4210, 0x667: 0x422e, 0x668: 0x3d80, 0x669: 0x3d88, 0x66a: 0x4148, 0x66b: 0x4166, 0x66c: 0x4152, 0x66d: 0x4170, 0x66e: 0x415c, 0x66f: 0x417a, 0x670: 0x3685, 0x671: 0x367f, 0x672: 0x3d90, 0x673: 0x368b, 0x674: 0x3d98, - 0x676: 0x48a6, 0x677: 0x3db0, 0x678: 0x35f5, 0x679: 0x35ef, 0x67a: 0x35e3, 0x67b: 0x4384, + 0x676: 0x4810, 0x677: 0x3db0, 0x678: 0x35f5, 0x679: 0x35ef, 0x67a: 0x35e3, 0x67b: 0x42ee, 0x67c: 0x35fb, 0x67d: 0x4287, 0x67e: 0x01d3, 0x67f: 0x4287, // Block 0x1a, offset 0x680 - 0x680: 0x42a0, 0x681: 0x4518, 0x682: 0x3dd8, 0x683: 0x369d, 0x684: 0x3de0, - 0x686: 0x48d0, 0x687: 0x3df8, 0x688: 0x3601, 0x689: 0x438a, 0x68a: 0x360d, 0x68b: 0x4390, - 0x68c: 0x3619, 0x68d: 0x451f, 0x68e: 0x4526, 0x68f: 0x452d, 0x690: 0x36b5, 0x691: 0x36af, - 0x692: 0x3e00, 0x693: 0x457a, 0x696: 0x36bb, 0x697: 0x3e10, - 0x698: 0x3631, 0x699: 0x362b, 0x69a: 0x361f, 0x69b: 0x4396, 0x69d: 0x4534, - 0x69e: 0x453b, 0x69f: 0x4542, 0x6a0: 0x36eb, 0x6a1: 0x36e5, 0x6a2: 0x3e68, 0x6a3: 0x4582, + 0x680: 0x42a0, 0x681: 0x4482, 0x682: 0x3dd8, 0x683: 0x369d, 0x684: 0x3de0, + 0x686: 0x483a, 0x687: 0x3df8, 0x688: 0x3601, 0x689: 0x42f4, 0x68a: 0x360d, 0x68b: 0x42fa, + 0x68c: 0x3619, 0x68d: 0x4489, 0x68e: 0x4490, 0x68f: 0x4497, 0x690: 0x36b5, 0x691: 0x36af, + 0x692: 0x3e00, 0x693: 0x44e4, 0x696: 0x36bb, 0x697: 0x3e10, + 0x698: 0x3631, 0x699: 0x362b, 0x69a: 0x361f, 0x69b: 0x4300, 0x69d: 0x449e, + 0x69e: 0x44a5, 0x69f: 0x44ac, 0x6a0: 0x36eb, 0x6a1: 0x36e5, 0x6a2: 0x3e68, 0x6a3: 0x44ec, 0x6a4: 0x36cd, 0x6a5: 0x36d3, 0x6a6: 0x36f1, 0x6a7: 0x3e78, 0x6a8: 0x3661, 0x6a9: 0x365b, - 0x6aa: 0x364f, 0x6ab: 0x43a2, 0x6ac: 0x3649, 0x6ad: 0x450a, 0x6ae: 0x4511, 0x6af: 0x0081, + 0x6aa: 0x364f, 0x6ab: 0x430c, 0x6ac: 0x3649, 0x6ad: 0x4474, 0x6ae: 0x447b, 0x6af: 0x0081, 0x6b2: 0x3eb0, 0x6b3: 0x36f7, 0x6b4: 0x3eb8, - 0x6b6: 0x491e, 0x6b7: 0x3ed0, 0x6b8: 0x363d, 0x6b9: 0x439c, 0x6ba: 0x366d, 0x6bb: 0x43ae, + 0x6b6: 0x4888, 0x6b7: 0x3ed0, 0x6b8: 0x363d, 0x6b9: 0x4306, 0x6ba: 0x366d, 0x6bb: 0x4318, 0x6bc: 0x3679, 0x6bd: 0x425a, 0x6be: 0x428c, // Block 0x1b, offset 0x6c0 0x6c0: 0x1bd8, 0x6c1: 0x1bdc, 0x6c2: 0x0047, 0x6c3: 0x1c54, 0x6c5: 0x1be8, @@ -4922,7 +4926,7 @@ 
var nfkcValues = [5888]uint16{ 0x93c: 0x3fc0, 0x93d: 0x3fc8, // Block 0x25, offset 0x940 0x954: 0x3f00, - 0x959: 0x9903, 0x95a: 0x9903, 0x95b: 0x4372, 0x95c: 0x4378, 0x95d: 0xa000, + 0x959: 0x9903, 0x95a: 0x9903, 0x95b: 0x42dc, 0x95c: 0x42e2, 0x95d: 0xa000, 0x95e: 0x3fd0, 0x95f: 0x26b4, 0x966: 0xa000, 0x96b: 0xa000, 0x96c: 0x3fe0, 0x96d: 0xa000, 0x96e: 0x3fe8, 0x96f: 0xa000, @@ -4942,10 +4946,10 @@ var nfkcValues = [5888]uint16{ // Block 0x27, offset 0x9c0 0x9c0: 0x0367, 0x9c1: 0x032b, 0x9c2: 0x032f, 0x9c3: 0x0333, 0x9c4: 0x037b, 0x9c5: 0x0337, 0x9c6: 0x033b, 0x9c7: 0x033f, 0x9c8: 0x0343, 0x9c9: 0x0347, 0x9ca: 0x034b, 0x9cb: 0x034f, - 0x9cc: 0x0353, 0x9cd: 0x0357, 0x9ce: 0x035b, 0x9cf: 0x42dc, 0x9d0: 0x42e1, 0x9d1: 0x42e6, - 0x9d2: 0x42eb, 0x9d3: 0x42f0, 0x9d4: 0x42f5, 0x9d5: 0x42fa, 0x9d6: 0x42ff, 0x9d7: 0x4304, - 0x9d8: 0x4309, 0x9d9: 0x430e, 0x9da: 0x4313, 0x9db: 0x4318, 0x9dc: 0x431d, 0x9dd: 0x4322, - 0x9de: 0x4327, 0x9df: 0x432c, 0x9e0: 0x4331, 0x9e1: 0x4336, 0x9e2: 0x433b, 0x9e3: 0x4340, + 0x9cc: 0x0353, 0x9cd: 0x0357, 0x9ce: 0x035b, 0x9cf: 0x49bd, 0x9d0: 0x49c3, 0x9d1: 0x49c9, + 0x9d2: 0x49cf, 0x9d3: 0x49d5, 0x9d4: 0x49db, 0x9d5: 0x49e1, 0x9d6: 0x49e7, 0x9d7: 0x49ed, + 0x9d8: 0x49f3, 0x9d9: 0x49f9, 0x9da: 0x49ff, 0x9db: 0x4a05, 0x9dc: 0x4a0b, 0x9dd: 0x4a11, + 0x9de: 0x4a17, 0x9df: 0x4a1d, 0x9e0: 0x4a23, 0x9e1: 0x4a29, 0x9e2: 0x4a2f, 0x9e3: 0x4a35, 0x9e4: 0x03c3, 0x9e5: 0x035f, 0x9e6: 0x0363, 0x9e7: 0x03e7, 0x9e8: 0x03eb, 0x9e9: 0x03ef, 0x9ea: 0x03f3, 0x9eb: 0x03f7, 0x9ec: 0x03fb, 0x9ed: 0x03ff, 0x9ee: 0x036b, 0x9ef: 0x0403, 0x9f0: 0x0407, 0x9f1: 0x036f, 0x9f2: 0x0373, 0x9f3: 0x0377, 0x9f4: 0x037f, 0x9f5: 0x0383, @@ -5148,17 +5152,17 @@ var nfkcValues = [5888]uint16{ 0xe40: 0x19d5, 0xe41: 0x19d8, 0xe42: 0x19db, 0xe43: 0x1c08, 0xe44: 0x1c0c, 0xe45: 0x1a5f, 0xe46: 0x1a5f, 0xe53: 0x1d75, 0xe54: 0x1d66, 0xe55: 0x1d6b, 0xe56: 0x1d7a, 0xe57: 0x1d70, - 0xe5d: 0x4426, - 0xe5e: 0x8115, 0xe5f: 0x4498, 0xe60: 0x022d, 0xe61: 0x0215, 0xe62: 0x021e, 0xe63: 0x0221, + 0xe5d: 0x4390, + 0xe5e: 0x8115, 0xe5f: 0x4402, 0xe60: 0x022d, 0xe61: 0x0215, 0xe62: 0x021e, 0xe63: 0x0221, 0xe64: 0x0224, 0xe65: 0x0227, 0xe66: 0x022a, 0xe67: 0x0230, 0xe68: 0x0233, 0xe69: 0x0017, - 0xe6a: 0x4486, 0xe6b: 0x448c, 0xe6c: 0x458a, 0xe6d: 0x4592, 0xe6e: 0x43de, 0xe6f: 0x43e4, - 0xe70: 0x43ea, 0xe71: 0x43f0, 0xe72: 0x43fc, 0xe73: 0x4402, 0xe74: 0x4408, 0xe75: 0x4414, - 0xe76: 0x441a, 0xe78: 0x4420, 0xe79: 0x442c, 0xe7a: 0x4432, 0xe7b: 0x4438, - 0xe7c: 0x4444, 0xe7e: 0x444a, + 0xe6a: 0x43f0, 0xe6b: 0x43f6, 0xe6c: 0x44f4, 0xe6d: 0x44fc, 0xe6e: 0x4348, 0xe6f: 0x434e, + 0xe70: 0x4354, 0xe71: 0x435a, 0xe72: 0x4366, 0xe73: 0x436c, 0xe74: 0x4372, 0xe75: 0x437e, + 0xe76: 0x4384, 0xe78: 0x438a, 0xe79: 0x4396, 0xe7a: 0x439c, 0xe7b: 0x43a2, + 0xe7c: 0x43ae, 0xe7e: 0x43b4, // Block 0x3a, offset 0xe80 - 0xe80: 0x4450, 0xe81: 0x4456, 0xe83: 0x445c, 0xe84: 0x4462, - 0xe86: 0x446e, 0xe87: 0x4474, 0xe88: 0x447a, 0xe89: 0x4480, 0xe8a: 0x4492, 0xe8b: 0x440e, - 0xe8c: 0x43f6, 0xe8d: 0x443e, 0xe8e: 0x4468, 0xe8f: 0x1d7f, 0xe90: 0x0299, 0xe91: 0x0299, + 0xe80: 0x43ba, 0xe81: 0x43c0, 0xe83: 0x43c6, 0xe84: 0x43cc, + 0xe86: 0x43d8, 0xe87: 0x43de, 0xe88: 0x43e4, 0xe89: 0x43ea, 0xe8a: 0x43fc, 0xe8b: 0x4378, + 0xe8c: 0x4360, 0xe8d: 0x43a8, 0xe8e: 0x43d2, 0xe8f: 0x1d7f, 0xe90: 0x0299, 0xe91: 0x0299, 0xe92: 0x02a2, 0xe93: 0x02a2, 0xe94: 0x02a2, 0xe95: 0x02a2, 0xe96: 0x02a5, 0xe97: 0x02a5, 0xe98: 0x02a5, 0xe99: 0x02a5, 0xe9a: 0x02ab, 0xe9b: 0x02ab, 0xe9c: 0x02ab, 0xe9d: 0x02ab, 0xe9e: 0x029f, 0xe9f: 0x029f, 0xea0: 0x029f, 0xea1: 0x029f, 0xea2: 0x02a8, 
0xea3: 0x02a8, @@ -5174,9 +5178,9 @@ var nfkcValues = [5888]uint16{ 0xed2: 0x02db, 0xed3: 0x02db, 0xed4: 0x02db, 0xed5: 0x02db, 0xed6: 0x02e1, 0xed7: 0x02e1, 0xed8: 0x02e1, 0xed9: 0x02e1, 0xeda: 0x02de, 0xedb: 0x02de, 0xedc: 0x02de, 0xedd: 0x02de, 0xede: 0x02e4, 0xedf: 0x02e4, 0xee0: 0x02e7, 0xee1: 0x02e7, 0xee2: 0x02e7, 0xee3: 0x02e7, - 0xee4: 0x4504, 0xee5: 0x4504, 0xee6: 0x02ed, 0xee7: 0x02ed, 0xee8: 0x02ed, 0xee9: 0x02ed, + 0xee4: 0x446e, 0xee5: 0x446e, 0xee6: 0x02ed, 0xee7: 0x02ed, 0xee8: 0x02ed, 0xee9: 0x02ed, 0xeea: 0x02ea, 0xeeb: 0x02ea, 0xeec: 0x02ea, 0xeed: 0x02ea, 0xeee: 0x0308, 0xeef: 0x0308, - 0xef0: 0x44fe, 0xef1: 0x44fe, + 0xef0: 0x4468, 0xef1: 0x4468, // Block 0x3c, offset 0xf00 0xf13: 0x02d8, 0xf14: 0x02d8, 0xf15: 0x02d8, 0xf16: 0x02d8, 0xf17: 0x02f6, 0xf18: 0x02f6, 0xf19: 0x02f3, 0xf1a: 0x02f3, 0xf1b: 0x02f9, 0xf1c: 0x02f9, 0xf1d: 0x204f, @@ -5203,8 +5207,8 @@ var nfkcValues = [5888]uint16{ 0xf86: 0x1fb4, 0xf87: 0x1fb9, 0xf88: 0x1fbe, 0xf89: 0x1fc3, 0xf8a: 0x1fc8, 0xf8b: 0x1fcd, 0xf8c: 0x1fd2, 0xf8d: 0x1fd7, 0xf8e: 0x1fe6, 0xf8f: 0x1ff5, 0xf90: 0x1ffa, 0xf91: 0x1fff, 0xf92: 0x2004, 0xf93: 0x2009, 0xf94: 0x200e, 0xf95: 0x2018, 0xf96: 0x201d, 0xf97: 0x2022, - 0xf98: 0x2031, 0xf99: 0x2040, 0xf9a: 0x2045, 0xf9b: 0x44b6, 0xf9c: 0x44bc, 0xf9d: 0x44f2, - 0xf9e: 0x4549, 0xf9f: 0x4550, 0xfa0: 0x4557, 0xfa1: 0x455e, 0xfa2: 0x4565, 0xfa3: 0x456c, + 0xf98: 0x2031, 0xf99: 0x2040, 0xf9a: 0x2045, 0xf9b: 0x4420, 0xf9c: 0x4426, 0xf9d: 0x445c, + 0xf9e: 0x44b3, 0xf9f: 0x44ba, 0xfa0: 0x44c1, 0xfa1: 0x44c8, 0xfa2: 0x44cf, 0xfa3: 0x44d6, 0xfa4: 0x25c6, 0xfa5: 0x25cd, 0xfa6: 0x25d4, 0xfa7: 0x25db, 0xfa8: 0x25f0, 0xfa9: 0x25f7, 0xfaa: 0x1d98, 0xfab: 0x1d9d, 0xfac: 0x1da2, 0xfad: 0x1da7, 0xfae: 0x1db1, 0xfaf: 0x1db6, 0xfb0: 0x1dca, 0xfb1: 0x1dcf, 0xfb2: 0x1dd4, 0xfb3: 0x1dd9, 0xfb4: 0x1de3, 0xfb5: 0x1de8, @@ -5213,7 +5217,7 @@ var nfkcValues = [5888]uint16{ // Block 0x3f, offset 0xfc0 0xfc0: 0x1f5a, 0xfc1: 0x1f6e, 0xfc2: 0x1f73, 0xfc3: 0x1f78, 0xfc4: 0x1f7d, 0xfc5: 0x1f96, 0xfc6: 0x1fa0, 0xfc7: 0x1fa5, 0xfc8: 0x1faa, 0xfc9: 0x1fbe, 0xfca: 0x1fdc, 0xfcb: 0x1fe1, - 0xfcc: 0x1fe6, 0xfcd: 0x1feb, 0xfce: 0x1ff5, 0xfcf: 0x1ffa, 0xfd0: 0x44f2, 0xfd1: 0x2027, + 0xfcc: 0x1fe6, 0xfcd: 0x1feb, 0xfce: 0x1ff5, 0xfcf: 0x1ffa, 0xfd0: 0x445c, 0xfd1: 0x2027, 0xfd2: 0x202c, 0xfd3: 0x2031, 0xfd4: 0x2036, 0xfd5: 0x2040, 0xfd6: 0x2045, 0xfd7: 0x25b1, 0xfd8: 0x25b8, 0xfd9: 0x25bf, 0xfda: 0x25d4, 0xfdb: 0x25e2, 0xfdc: 0x1d89, 0xfdd: 0x1d8e, 0xfde: 0x1d93, 0xfdf: 0x1da2, 0xfe0: 0x1dac, 0xfe1: 0x1dbb, 0xfe2: 0x1dc0, 0xfe3: 0x1dc5, @@ -5227,11 +5231,11 @@ var nfkcValues = [5888]uint16{ 0x1006: 0x1f69, 0x1007: 0x1f6e, 0x1008: 0x1f73, 0x1009: 0x1f87, 0x100a: 0x1f8c, 0x100b: 0x1f91, 0x100c: 0x1f96, 0x100d: 0x1f9b, 0x100e: 0x1faf, 0x100f: 0x1fb4, 0x1010: 0x1fb9, 0x1011: 0x1fbe, 0x1012: 0x1fcd, 0x1013: 0x1fd2, 0x1014: 0x1fd7, 0x1015: 0x1fe6, 0x1016: 0x1ff0, 0x1017: 0x1fff, - 0x1018: 0x2004, 0x1019: 0x44e6, 0x101a: 0x2018, 0x101b: 0x201d, 0x101c: 0x2022, 0x101d: 0x2031, + 0x1018: 0x2004, 0x1019: 0x4450, 0x101a: 0x2018, 0x101b: 0x201d, 0x101c: 0x2022, 0x101d: 0x2031, 0x101e: 0x203b, 0x101f: 0x25d4, 0x1020: 0x25e2, 0x1021: 0x1da2, 0x1022: 0x1dac, 0x1023: 0x1dd4, 0x1024: 0x1dde, 0x1025: 0x1dfc, 0x1026: 0x1e06, 0x1027: 0x1e6a, 0x1028: 0x1e6f, 0x1029: 0x1e92, 0x102a: 0x1e97, 0x102b: 0x1f6e, 0x102c: 0x1f73, 0x102d: 0x1f96, 0x102e: 0x1fe6, 0x102f: 0x1ff0, - 0x1030: 0x2031, 0x1031: 0x203b, 0x1032: 0x459a, 0x1033: 0x45a2, 0x1034: 0x45aa, 0x1035: 0x1ef1, + 0x1030: 0x2031, 0x1031: 0x203b, 0x1032: 0x4504, 0x1033: 0x450c, 0x1034: 0x4514, 
0x1035: 0x1ef1, 0x1036: 0x1ef6, 0x1037: 0x1f0a, 0x1038: 0x1f0f, 0x1039: 0x1f1e, 0x103a: 0x1f23, 0x103b: 0x1e74, 0x103c: 0x1e79, 0x103d: 0x1e9c, 0x103e: 0x1ea1, 0x103f: 0x1e33, // Block 0x41, offset 0x1040 @@ -5245,7 +5249,7 @@ var nfkcValues = [5888]uint16{ 0x106a: 0x1e65, 0x106b: 0x1eb0, 0x106c: 0x1ed3, 0x106d: 0x1e7e, 0x106e: 0x1e83, 0x106f: 0x1e88, 0x1070: 0x1e92, 0x1071: 0x1e6f, 0x1072: 0x1e97, 0x1073: 0x1eec, 0x1074: 0x1e56, 0x1075: 0x1e5b, 0x1076: 0x1e60, 0x1077: 0x1e7e, 0x1078: 0x1e83, 0x1079: 0x1e88, 0x107a: 0x1eec, 0x107b: 0x1efb, - 0x107c: 0x449e, 0x107d: 0x449e, + 0x107c: 0x4408, 0x107d: 0x4408, // Block 0x42, offset 0x1080 0x1090: 0x2311, 0x1091: 0x2326, 0x1092: 0x2326, 0x1093: 0x232d, 0x1094: 0x2334, 0x1095: 0x2349, 0x1096: 0x2350, 0x1097: 0x2357, @@ -5293,13 +5297,13 @@ var nfkcValues = [5888]uint16{ 0x119e: 0x04bb, 0x119f: 0x0007, 0x11a0: 0x000d, 0x11a1: 0x0015, 0x11a2: 0x0017, 0x11a3: 0x001b, 0x11a4: 0x0039, 0x11a5: 0x003d, 0x11a6: 0x003b, 0x11a8: 0x0079, 0x11a9: 0x0009, 0x11aa: 0x000b, 0x11ab: 0x0041, - 0x11b0: 0x42aa, 0x11b1: 0x44c2, 0x11b2: 0x42af, 0x11b4: 0x42b4, - 0x11b6: 0x42b9, 0x11b7: 0x44c8, 0x11b8: 0x42be, 0x11b9: 0x44ce, 0x11ba: 0x42c3, 0x11bb: 0x44d4, - 0x11bc: 0x42c8, 0x11bd: 0x44da, 0x11be: 0x42cd, 0x11bf: 0x44e0, + 0x11b0: 0x42aa, 0x11b1: 0x442c, 0x11b2: 0x42af, 0x11b4: 0x42b4, + 0x11b6: 0x42b9, 0x11b7: 0x4432, 0x11b8: 0x42be, 0x11b9: 0x4438, 0x11ba: 0x42c3, 0x11bb: 0x443e, + 0x11bc: 0x42c8, 0x11bd: 0x4444, 0x11be: 0x42cd, 0x11bf: 0x444a, // Block 0x47, offset 0x11c0 - 0x11c0: 0x0236, 0x11c1: 0x44a4, 0x11c2: 0x44a4, 0x11c3: 0x44aa, 0x11c4: 0x44aa, 0x11c5: 0x44ec, - 0x11c6: 0x44ec, 0x11c7: 0x44b0, 0x11c8: 0x44b0, 0x11c9: 0x44f8, 0x11ca: 0x44f8, 0x11cb: 0x44f8, - 0x11cc: 0x44f8, 0x11cd: 0x0239, 0x11ce: 0x0239, 0x11cf: 0x023c, 0x11d0: 0x023c, 0x11d1: 0x023c, + 0x11c0: 0x0236, 0x11c1: 0x440e, 0x11c2: 0x440e, 0x11c3: 0x4414, 0x11c4: 0x4414, 0x11c5: 0x4456, + 0x11c6: 0x4456, 0x11c7: 0x441a, 0x11c8: 0x441a, 0x11c9: 0x4462, 0x11ca: 0x4462, 0x11cb: 0x4462, + 0x11cc: 0x4462, 0x11cd: 0x0239, 0x11ce: 0x0239, 0x11cf: 0x023c, 0x11d0: 0x023c, 0x11d1: 0x023c, 0x11d2: 0x023c, 0x11d3: 0x023f, 0x11d4: 0x023f, 0x11d5: 0x0242, 0x11d6: 0x0242, 0x11d7: 0x0242, 0x11d8: 0x0242, 0x11d9: 0x0245, 0x11da: 0x0245, 0x11db: 0x0245, 0x11dc: 0x0245, 0x11dd: 0x0248, 0x11de: 0x0248, 0x11df: 0x0248, 0x11e0: 0x0248, 0x11e1: 0x024b, 0x11e2: 0x024b, 0x11e3: 0x024b, @@ -5338,18 +5342,18 @@ var nfkcValues = [5888]uint16{ 0x128c: 0x054b, 0x128d: 0x054f, 0x128e: 0x0553, 0x128f: 0x0557, 0x1290: 0x055b, 0x1291: 0x055f, 0x1292: 0x0563, 0x1293: 0x0567, 0x1294: 0x056f, 0x1295: 0x0577, 0x1296: 0x057f, 0x1297: 0x0583, 0x1298: 0x0587, 0x1299: 0x058b, 0x129a: 0x058f, 0x129b: 0x0593, 0x129c: 0x0597, 0x129d: 0x05a7, - 0x129e: 0x4a5a, 0x129f: 0x4a60, 0x12a0: 0x03c3, 0x12a1: 0x0313, 0x12a2: 0x0317, 0x12a3: 0x4345, - 0x12a4: 0x031b, 0x12a5: 0x434a, 0x12a6: 0x434f, 0x12a7: 0x031f, 0x12a8: 0x0323, 0x12a9: 0x0327, - 0x12aa: 0x4354, 0x12ab: 0x4359, 0x12ac: 0x435e, 0x12ad: 0x4363, 0x12ae: 0x4368, 0x12af: 0x436d, + 0x129e: 0x4a78, 0x129f: 0x4a7e, 0x12a0: 0x03c3, 0x12a1: 0x0313, 0x12a2: 0x0317, 0x12a3: 0x4a3b, + 0x12a4: 0x031b, 0x12a5: 0x4a41, 0x12a6: 0x4a47, 0x12a7: 0x031f, 0x12a8: 0x0323, 0x12a9: 0x0327, + 0x12aa: 0x4a4d, 0x12ab: 0x4a53, 0x12ac: 0x4a59, 0x12ad: 0x4a5f, 0x12ae: 0x4a65, 0x12af: 0x4a6b, 0x12b0: 0x0367, 0x12b1: 0x032b, 0x12b2: 0x032f, 0x12b3: 0x0333, 0x12b4: 0x037b, 0x12b5: 0x0337, 0x12b6: 0x033b, 0x12b7: 0x033f, 0x12b8: 0x0343, 0x12b9: 0x0347, 0x12ba: 0x034b, 0x12bb: 0x034f, 0x12bc: 0x0353, 0x12bd: 0x0357, 
0x12be: 0x035b, // Block 0x4b, offset 0x12c0 - 0x12c2: 0x42dc, 0x12c3: 0x42e1, 0x12c4: 0x42e6, 0x12c5: 0x42eb, - 0x12c6: 0x42f0, 0x12c7: 0x42f5, 0x12ca: 0x42fa, 0x12cb: 0x42ff, - 0x12cc: 0x4304, 0x12cd: 0x4309, 0x12ce: 0x430e, 0x12cf: 0x4313, - 0x12d2: 0x4318, 0x12d3: 0x431d, 0x12d4: 0x4322, 0x12d5: 0x4327, 0x12d6: 0x432c, 0x12d7: 0x4331, - 0x12da: 0x4336, 0x12db: 0x433b, 0x12dc: 0x4340, + 0x12c2: 0x49bd, 0x12c3: 0x49c3, 0x12c4: 0x49c9, 0x12c5: 0x49cf, + 0x12c6: 0x49d5, 0x12c7: 0x49db, 0x12ca: 0x49e1, 0x12cb: 0x49e7, + 0x12cc: 0x49ed, 0x12cd: 0x49f3, 0x12ce: 0x49f9, 0x12cf: 0x49ff, + 0x12d2: 0x4a05, 0x12d3: 0x4a0b, 0x12d4: 0x4a11, 0x12d5: 0x4a17, 0x12d6: 0x4a1d, 0x12d7: 0x4a23, + 0x12da: 0x4a29, 0x12db: 0x4a2f, 0x12dc: 0x4a35, 0x12e0: 0x00bf, 0x12e1: 0x00c2, 0x12e2: 0x00cb, 0x12e3: 0x4264, 0x12e4: 0x00c8, 0x12e5: 0x00c5, 0x12e6: 0x0447, 0x12e8: 0x046b, 0x12e9: 0x044b, 0x12ea: 0x044f, 0x12eb: 0x0453, 0x12ec: 0x0457, 0x12ed: 0x046f, 0x12ee: 0x0473, @@ -5426,7 +5430,7 @@ var nfkcValues = [5888]uint16{ // Block 0x52, offset 0x1480 0x1480: 0x26ad, 0x1481: 0x26c2, 0x1482: 0x0503, 0x1490: 0x0c0f, 0x1491: 0x0a47, - 0x1492: 0x08d3, 0x1493: 0x465a, 0x1494: 0x071b, 0x1495: 0x09ef, 0x1496: 0x132f, 0x1497: 0x09ff, + 0x1492: 0x08d3, 0x1493: 0x45c4, 0x1494: 0x071b, 0x1495: 0x09ef, 0x1496: 0x132f, 0x1497: 0x09ff, 0x1498: 0x0727, 0x1499: 0x0cd7, 0x149a: 0x0eaf, 0x149b: 0x0caf, 0x149c: 0x0827, 0x149d: 0x0b6b, 0x149e: 0x07bf, 0x149f: 0x0cb7, 0x14a0: 0x0813, 0x14a1: 0x1117, 0x14a2: 0x0f83, 0x14a3: 0x138b, 0x14a4: 0x09d3, 0x14a5: 0x090b, 0x14a6: 0x0e63, 0x14a7: 0x0c1b, 0x14a8: 0x0c47, 0x14a9: 0x06bf, @@ -5665,8 +5669,8 @@ var nfkcSparseValues = [875]valueRange{ {value: 0x22b2, lo: 0xbe, hi: 0xbe}, // Block 0x1, offset 0xe {value: 0x0091, lo: 0x03}, - {value: 0x4778, lo: 0xa0, hi: 0xa1}, - {value: 0x47aa, lo: 0xaf, hi: 0xb0}, + {value: 0x46e2, lo: 0xa0, hi: 0xa1}, + {value: 0x4714, lo: 0xaf, hi: 0xb0}, {value: 0xa000, lo: 0xb7, hi: 0xb7}, // Block 0x2, offset 0x12 {value: 0x0003, lo: 0x08}, @@ -5814,7 +5818,7 @@ var nfkcSparseValues = [875]valueRange{ {value: 0x812d, lo: 0x92, hi: 0x92}, {value: 0x8132, lo: 0x93, hi: 0x93}, {value: 0x8132, lo: 0x94, hi: 0x94}, - {value: 0x45b2, lo: 0x98, hi: 0x9f}, + {value: 0x451c, lo: 0x98, hi: 0x9f}, // Block 0x11, offset 0x96 {value: 0x0000, lo: 0x02}, {value: 0x8102, lo: 0xbc, hi: 0xbc}, @@ -5825,18 +5829,18 @@ var nfkcSparseValues = [875]valueRange{ {value: 0x2c9e, lo: 0x8b, hi: 0x8c}, {value: 0x8104, lo: 0x8d, hi: 0x8d}, {value: 0x9900, lo: 0x97, hi: 0x97}, - {value: 0x45f2, lo: 0x9c, hi: 0x9d}, - {value: 0x4602, lo: 0x9f, hi: 0x9f}, + {value: 0x455c, lo: 0x9c, hi: 0x9d}, + {value: 0x456c, lo: 0x9f, hi: 0x9f}, // Block 0x13, offset 0xa0 {value: 0x0000, lo: 0x03}, - {value: 0x462a, lo: 0xb3, hi: 0xb3}, - {value: 0x4632, lo: 0xb6, hi: 0xb6}, + {value: 0x4594, lo: 0xb3, hi: 0xb3}, + {value: 0x459c, lo: 0xb6, hi: 0xb6}, {value: 0x8102, lo: 0xbc, hi: 0xbc}, // Block 0x14, offset 0xa4 {value: 0x0008, lo: 0x03}, {value: 0x8104, lo: 0x8d, hi: 0x8d}, - {value: 0x460a, lo: 0x99, hi: 0x9b}, - {value: 0x4622, lo: 0x9e, hi: 0x9e}, + {value: 0x4574, lo: 0x99, hi: 0x9b}, + {value: 0x458c, lo: 0x9e, hi: 0x9e}, // Block 0x15, offset 0xa8 {value: 0x0000, lo: 0x01}, {value: 0x8102, lo: 0xbc, hi: 0xbc}, @@ -5851,8 +5855,8 @@ var nfkcSparseValues = [875]valueRange{ {value: 0x2cbe, lo: 0x8c, hi: 0x8c}, {value: 0x8104, lo: 0x8d, hi: 0x8d}, {value: 0x9900, lo: 0x96, hi: 0x97}, - {value: 0x463a, lo: 0x9c, hi: 0x9c}, - {value: 0x4642, lo: 0x9d, hi: 0x9d}, + {value: 0x45a4, lo: 0x9c, hi: 0x9c}, + 
{value: 0x45ac, lo: 0x9d, hi: 0x9d}, // Block 0x18, offset 0xb5 {value: 0x0000, lo: 0x03}, {value: 0xa000, lo: 0x92, hi: 0x92}, @@ -5941,18 +5945,18 @@ var nfkcSparseValues = [875]valueRange{ {value: 0x263d, lo: 0xa9, hi: 0xa9}, {value: 0x8126, lo: 0xb1, hi: 0xb1}, {value: 0x8127, lo: 0xb2, hi: 0xb2}, - {value: 0x4a66, lo: 0xb3, hi: 0xb3}, + {value: 0x4a84, lo: 0xb3, hi: 0xb3}, {value: 0x8128, lo: 0xb4, hi: 0xb4}, - {value: 0x4a6f, lo: 0xb5, hi: 0xb5}, - {value: 0x464a, lo: 0xb6, hi: 0xb6}, - {value: 0x468a, lo: 0xb7, hi: 0xb7}, - {value: 0x4652, lo: 0xb8, hi: 0xb8}, - {value: 0x4695, lo: 0xb9, hi: 0xb9}, + {value: 0x4a8d, lo: 0xb5, hi: 0xb5}, + {value: 0x45b4, lo: 0xb6, hi: 0xb6}, + {value: 0x45f4, lo: 0xb7, hi: 0xb7}, + {value: 0x45bc, lo: 0xb8, hi: 0xb8}, + {value: 0x45ff, lo: 0xb9, hi: 0xb9}, {value: 0x8127, lo: 0xba, hi: 0xbd}, // Block 0x26, offset 0x107 {value: 0x0000, lo: 0x0b}, {value: 0x8127, lo: 0x80, hi: 0x80}, - {value: 0x4a78, lo: 0x81, hi: 0x81}, + {value: 0x4a96, lo: 0x81, hi: 0x81}, {value: 0x8132, lo: 0x82, hi: 0x83}, {value: 0x8104, lo: 0x84, hi: 0x84}, {value: 0x8132, lo: 0x86, hi: 0x87}, @@ -6199,7 +6203,7 @@ var nfkcSparseValues = [875]valueRange{ {value: 0x192d, lo: 0xb5, hi: 0xb6}, // Block 0x4a, offset 0x1db {value: 0x0000, lo: 0x01}, - {value: 0x4573, lo: 0x9c, hi: 0x9c}, + {value: 0x44dd, lo: 0x9c, hi: 0x9c}, // Block 0x4b, offset 0x1dd {value: 0x0000, lo: 0x02}, {value: 0x0095, lo: 0xbc, hi: 0xbc}, @@ -6245,16 +6249,16 @@ var nfkcSparseValues = [875]valueRange{ {value: 0x04b3, lo: 0xb6, hi: 0xb6}, {value: 0x0887, lo: 0xb8, hi: 0xba}, // Block 0x53, offset 0x201 - {value: 0x0005, lo: 0x09}, + {value: 0x0006, lo: 0x09}, {value: 0x0313, lo: 0xb1, hi: 0xb1}, {value: 0x0317, lo: 0xb2, hi: 0xb2}, - {value: 0x4345, lo: 0xb3, hi: 0xb3}, + {value: 0x4a3b, lo: 0xb3, hi: 0xb3}, {value: 0x031b, lo: 0xb4, hi: 0xb4}, - {value: 0x434a, lo: 0xb5, hi: 0xb6}, + {value: 0x4a41, lo: 0xb5, hi: 0xb6}, {value: 0x031f, lo: 0xb7, hi: 0xb7}, {value: 0x0323, lo: 0xb8, hi: 0xb8}, {value: 0x0327, lo: 0xb9, hi: 0xb9}, - {value: 0x4354, lo: 0xba, hi: 0xbf}, + {value: 0x4a4d, lo: 0xba, hi: 0xbf}, // Block 0x54, offset 0x20b {value: 0x0000, lo: 0x02}, {value: 0x8132, lo: 0xaf, hi: 0xaf}, @@ -6479,13 +6483,13 @@ var nfkcSparseValues = [875]valueRange{ {value: 0x8101, lo: 0x9e, hi: 0x9e}, // Block 0x83, offset 0x2ba {value: 0x0000, lo: 0x0c}, - {value: 0x4662, lo: 0x9e, hi: 0x9e}, - {value: 0x466c, lo: 0x9f, hi: 0x9f}, - {value: 0x46a0, lo: 0xa0, hi: 0xa0}, - {value: 0x46ae, lo: 0xa1, hi: 0xa1}, - {value: 0x46bc, lo: 0xa2, hi: 0xa2}, - {value: 0x46ca, lo: 0xa3, hi: 0xa3}, - {value: 0x46d8, lo: 0xa4, hi: 0xa4}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + {value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, {value: 0x812b, lo: 0xa5, hi: 0xa6}, {value: 0x8101, lo: 0xa7, hi: 0xa9}, {value: 0x8130, lo: 0xad, hi: 0xad}, @@ -6497,14 +6501,14 @@ var nfkcSparseValues = [875]valueRange{ {value: 0x8132, lo: 0x85, hi: 0x89}, {value: 0x812d, lo: 0x8a, hi: 0x8b}, {value: 0x8132, lo: 0xaa, hi: 0xad}, - {value: 0x4676, lo: 0xbb, hi: 0xbb}, - {value: 0x4680, lo: 0xbc, hi: 0xbc}, - {value: 0x46e6, lo: 0xbd, hi: 0xbd}, - {value: 0x4702, lo: 0xbe, hi: 0xbe}, - {value: 0x46f4, lo: 0xbf, hi: 0xbf}, + {value: 0x45e0, lo: 0xbb, hi: 0xbb}, + {value: 0x45ea, lo: 0xbc, hi: 0xbc}, + {value: 0x4650, lo: 0xbd, hi: 0xbd}, + {value: 0x466c, lo: 0xbe, hi: 
0xbe}, + {value: 0x465e, lo: 0xbf, hi: 0xbf}, // Block 0x85, offset 0x2d1 {value: 0x0000, lo: 0x01}, - {value: 0x4710, lo: 0x80, hi: 0x80}, + {value: 0x467a, lo: 0x80, hi: 0x80}, // Block 0x86, offset 0x2d3 {value: 0x0000, lo: 0x01}, {value: 0x8132, lo: 0x82, hi: 0x84}, @@ -7624,4 +7628,4 @@ var recompMap = map[uint32]rune{ 0x15B915AF: 0x115BB, } -// Total size of tables: 53KB (53976 bytes) +// Total size of tables: 53KB (54006 bytes) diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go deleted file mode 100644 index 45d711900..000000000 --- a/vendor/golang.org/x/text/unicode/norm/triegen.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Trie table generator. -// Used by make*tables tools to generate a go file with trie data structures -// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte -// sequence are used to lookup offsets in the index table to be used for the -// next byte. The last byte is used to index into a table with 16-bit values. - -package main - -import ( - "fmt" - "io" -) - -const maxSparseEntries = 16 - -type normCompacter struct { - sparseBlocks [][]uint64 - sparseOffset []uint16 - sparseCount int - name string -} - -func mostFrequentStride(a []uint64) int { - counts := make(map[int]int) - var v int - for _, x := range a { - if stride := int(x) - v; v != 0 && stride >= 0 { - counts[stride]++ - } - v = int(x) - } - var maxs, maxc int - for stride, cnt := range counts { - if cnt > maxc || (cnt == maxc && stride < maxs) { - maxs, maxc = stride, cnt - } - } - return maxs -} - -func countSparseEntries(a []uint64) int { - stride := mostFrequentStride(a) - var v, count int - for _, tv := range a { - if int(tv)-v != stride { - if tv != 0 { - count++ - } - } - v = int(tv) - } - return count -} - -func (c *normCompacter) Size(v []uint64) (sz int, ok bool) { - if n := countSparseEntries(v); n <= maxSparseEntries { - return (n+1)*4 + 2, true - } - return 0, false -} - -func (c *normCompacter) Store(v []uint64) uint32 { - h := uint32(len(c.sparseOffset)) - c.sparseBlocks = append(c.sparseBlocks, v) - c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount)) - c.sparseCount += countSparseEntries(v) + 1 - return h -} - -func (c *normCompacter) Handler() string { - return c.name + "Sparse.lookup" -} - -func (c *normCompacter) Print(w io.Writer) (retErr error) { - p := func(f string, x ...interface{}) { - if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil { - retErr = err - } - } - - ls := len(c.sparseBlocks) - p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2) - p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset) - - ns := c.sparseCount - p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4) - p("var %sSparseValues = [%d]valueRange {", c.name, ns) - for i, b := range c.sparseBlocks { - p("\n// Block %#x, offset %#x", i, c.sparseOffset[i]) - var v int - stride := mostFrequentStride(b) - n := countSparseEntries(b) - p("\n{value:%#04x,lo:%#02x},", stride, uint8(n)) - for i, nv := range b { - if int(nv)-v != stride { - if v != 0 { - p(",hi:%#02x},", 0x80+i-1) - } - if nv != 0 { - p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) - } - } - v = int(nv) - } - if v != 0 { - p(",hi:%#02x},", 0x80+len(b)-1) - } - } - p("\n}\n\n") - return -} diff --git a/vendor/golang.org/x/text/width/gen.go 
b/vendor/golang.org/x/text/width/gen.go deleted file mode 100644 index 03d9f99ad..000000000 --- a/vendor/golang.org/x/text/width/gen.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// This program generates the trie for width operations. The generated table -// includes width category information as well as the normalization mappings. -package main - -import ( - "bytes" - "fmt" - "io" - "log" - "math" - "unicode/utf8" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" -) - -// See gen_common.go for flags. - -func main() { - gen.Init() - genTables() - genTests() - gen.Repackage("gen_trieval.go", "trieval.go", "width") - gen.Repackage("gen_common.go", "common_test.go", "width") -} - -func genTables() { - t := triegen.NewTrie("width") - // fold and inverse mappings. See mapComment for a description of the format - // of each entry. Add dummy value to make an index of 0 mean no mapping. - inverse := [][4]byte{{}} - mapping := map[[4]byte]int{[4]byte{}: 0} - - getWidthData(func(r rune, tag elem, alt rune) { - idx := 0 - if alt != 0 { - var buf [4]byte - buf[0] = byte(utf8.EncodeRune(buf[1:], alt)) - s := string(r) - buf[buf[0]] ^= s[len(s)-1] - var ok bool - if idx, ok = mapping[buf]; !ok { - idx = len(mapping) - if idx > math.MaxUint8 { - log.Fatalf("Index %d does not fit in a byte.", idx) - } - mapping[buf] = idx - inverse = append(inverse, buf) - } - } - t.Insert(r, uint64(tag|elem(idx))) - }) - - w := &bytes.Buffer{} - gen.WriteUnicodeVersion(w) - - sz, err := t.Gen(w) - if err != nil { - log.Fatal(err) - } - - sz += writeMappings(w, inverse) - - fmt.Fprintf(w, "// Total table size %d bytes (%dKiB)\n", sz, sz/1024) - - gen.WriteGoFile(*outputFile, "width", w.Bytes()) -} - -const inverseDataComment = ` -// inverseData contains 4-byte entries of the following format: -// <length> <modified UTF-8-encoded rune> <0 padding> -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. 
-// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8.` - -func writeMappings(w io.Writer, data [][4]byte) int { - fmt.Fprintln(w, inverseDataComment) - fmt.Fprintf(w, "var inverseData = [%d][4]byte{\n", len(data)) - for _, x := range data { - fmt.Fprintf(w, "{ 0x%02x, 0x%02x, 0x%02x, 0x%02x },\n", x[0], x[1], x[2], x[3]) - } - fmt.Fprintln(w, "}") - return len(data) * 4 -} - -func genTests() { - w := &bytes.Buffer{} - fmt.Fprintf(w, "\nvar mapRunes = map[rune]struct{r rune; e elem}{\n") - getWidthData(func(r rune, tag elem, alt rune) { - if alt != 0 { - fmt.Fprintf(w, "\t0x%X: {0x%X, 0x%X},\n", r, alt, tag) - } - }) - fmt.Fprintln(w, "}") - gen.WriteGoFile("runes_test.go", "width", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/width/gen_common.go b/vendor/golang.org/x/text/width/gen_common.go deleted file mode 100644 index 601e75268..000000000 --- a/vendor/golang.org/x/text/width/gen_common.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This code is shared between the main code generator and the test code. - -import ( - "flag" - "log" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/ucd" -) - -var ( - outputFile = flag.String("out", "tables.go", "output file") -) - -var typeMap = map[string]elem{ - "A": tagAmbiguous, - "N": tagNeutral, - "Na": tagNarrow, - "W": tagWide, - "F": tagFullwidth, - "H": tagHalfwidth, -} - -// getWidthData calls f for every entry for which it is defined. -// -// f may be called multiple times for the same rune. The last call to f is the -// correct value. f is not called for all runes. The default tag type is -// Neutral. -func getWidthData(f func(r rune, tag elem, alt rune)) { - // Set the default values for Unified Ideographs. In line with Annex 11, - // we encode full ranges instead of the defined runes in Unified_Ideograph. - for _, b := range []struct{ lo, hi rune }{ - {0x4E00, 0x9FFF}, // the CJK Unified Ideographs block, - {0x3400, 0x4DBF}, // the CJK Unified Ideographs Externsion A block, - {0xF900, 0xFAFF}, // the CJK Compatibility Ideographs block, - {0x20000, 0x2FFFF}, // the Supplementary Ideographic Plane, - {0x30000, 0x3FFFF}, // the Tertiary Ideographic Plane, - } { - for r := b.lo; r <= b.hi; r++ { - f(r, tagWide, 0) - } - } - - inverse := map[rune]rune{} - maps := map[string]bool{ - "<wide>": true, - "<narrow>": true, - } - - // We cannot reuse package norm's decomposition, as we need an unexpanded - // decomposition. We make use of the opportunity to verify that the - // decomposition type is as expected. 
- ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) { - r := p.Rune(0) - s := strings.SplitN(p.String(ucd.DecompMapping), " ", 2) - if !maps[s[0]] { - return - } - x, err := strconv.ParseUint(s[1], 16, 32) - if err != nil { - log.Fatalf("Error parsing rune %q", s[1]) - } - if inverse[r] != 0 || inverse[rune(x)] != 0 { - log.Fatalf("Circular dependency in mapping between %U and %U", r, x) - } - inverse[r] = rune(x) - inverse[rune(x)] = r - }) - - // <rune range>;<type> - ucd.Parse(gen.OpenUCDFile("EastAsianWidth.txt"), func(p *ucd.Parser) { - tag, ok := typeMap[p.String(1)] - if !ok { - log.Fatalf("Unknown width type %q", p.String(1)) - } - r := p.Rune(0) - alt, ok := inverse[r] - if tag == tagFullwidth || tag == tagHalfwidth && r != wonSign { - tag |= tagNeedsFold - if !ok { - log.Fatalf("Narrow or wide rune %U has no decomposition", r) - } - } - f(r, tag, alt) - }) -} diff --git a/vendor/golang.org/x/text/width/gen_trieval.go b/vendor/golang.org/x/text/width/gen_trieval.go deleted file mode 100644 index c17334aa6..000000000 --- a/vendor/golang.org/x/text/width/gen_trieval.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// elem is an entry of the width trie. The high byte is used to encode the type -// of the rune. The low byte is used to store the index to a mapping entry in -// the inverseData array. -type elem uint16 - -const ( - tagNeutral elem = iota << typeShift - tagAmbiguous - tagWide - tagNarrow - tagFullwidth - tagHalfwidth -) - -const ( - numTypeBits = 3 - typeShift = 16 - numTypeBits - - // tagNeedsFold is true for all fullwidth and halfwidth runes except for - // the Won sign U+20A9. - tagNeedsFold = 0x1000 - - // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide - // variant. - wonSign rune = 0x20A9 -) diff --git a/vendor/golang.org/x/text/width/kind_string.go b/vendor/golang.org/x/text/width/kind_string.go deleted file mode 100644 index ab4fee542..000000000 --- a/vendor/golang.org/x/text/width/kind_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=Kind"; DO NOT EDIT - -package width - -import "fmt" - -const _Kind_name = "NeutralEastAsianAmbiguousEastAsianWideEastAsianNarrowEastAsianFullwidthEastAsianHalfwidth" - -var _Kind_index = [...]uint8{0, 7, 25, 38, 53, 71, 89} - -func (i Kind) String() string { - if i < 0 || i >= Kind(len(_Kind_index)-1) { - return fmt.Sprintf("Kind(%d)", i) - } - return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] -} diff --git a/vendor/golang.org/x/text/width/tables.go b/vendor/golang.org/x/text/width/tables.go deleted file mode 100644 index 242da0fdb..000000000 --- a/vendor/golang.org/x/text/width/tables.go +++ /dev/null @@ -1,1284 +0,0 @@ -// This file was generated by go generate; DO NOT EDIT - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "9.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
- case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. 
- } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14080 bytes (13.75 KiB). Checksum: 3b8aeb3dc03667a3. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 99 blocks, 6336 entries, 12672 bytes -// The third block is the zero block. -var widthValues = [6336]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 
0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 
0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 
0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 
0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 
0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 
0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 
0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, - 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 
0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, - 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 
0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, - 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, - 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, - 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, - 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, - 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, - 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, - 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, - 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, - 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, - // Block 0x3e, offset 0xf80 - 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, - 0xf86: 0x4000, - // Block 0x3f, offset 0xfc0 - 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, - 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, - 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, - 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, - 0xffc: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, - 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, - 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, - 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, - 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, - 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, - 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, - 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, - 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, - // Block 0x42, offset 0x1080 - 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, - 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, - 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, - 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, - 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, - 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 
0x10a2: 0x4000, 0x10a3: 0x4000, - 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, - 0x10aa: 0x4000, 0x10ab: 0x4000, - // Block 0x43, offset 0x10c0 - 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, - 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, - 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, - 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, - 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, - 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, - 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, - 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, - 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, - 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, - 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, - // Block 0x44, offset 0x1100 - 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, - 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, - 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, - 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, - 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, - 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, - 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, - 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, - 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, - 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, - 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, - // Block 0x45, offset 0x1140 - 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, - 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, - 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, - 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, - 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, - 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, - 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, - 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, - 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, - 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, - 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, - // Block 0x46, offset 0x1180 - 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, - 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, - 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 
0xb080, 0x118f: 0xb07f, - 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, - 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, - 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, - 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, - 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, - 0x11bd: 0x2000, - // Block 0x47, offset 0x11c0 - 0x11e0: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, - 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, - 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, - 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, - 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, - 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, - 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, - 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, - 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, - 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, - 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, - 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, - // Block 0x4a, offset 0x1280 - 0x1280: 0x4000, 0x1281: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12c4: 0x4000, - // Block 0x4c, offset 0x1300 - 0x130f: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1340: 0x2000, 0x1341: 0x2000, 0x1342: 0x2000, 0x1343: 0x2000, 0x1344: 0x2000, 0x1345: 0x2000, - 0x1346: 0x2000, 0x1347: 0x2000, 0x1348: 0x2000, 0x1349: 0x2000, 0x134a: 0x2000, - 0x1350: 0x2000, 0x1351: 0x2000, - 0x1352: 0x2000, 0x1353: 0x2000, 0x1354: 0x2000, 0x1355: 0x2000, 0x1356: 0x2000, 0x1357: 0x2000, - 0x1358: 0x2000, 0x1359: 0x2000, 0x135a: 0x2000, 0x135b: 0x2000, 0x135c: 0x2000, 0x135d: 0x2000, - 0x135e: 0x2000, 0x135f: 0x2000, 0x1360: 0x2000, 0x1361: 0x2000, 0x1362: 0x2000, 0x1363: 0x2000, - 0x1364: 0x2000, 0x1365: 0x2000, 0x1366: 0x2000, 0x1367: 0x2000, 0x1368: 0x2000, 0x1369: 0x2000, - 0x136a: 0x2000, 0x136b: 0x2000, 0x136c: 0x2000, 0x136d: 0x2000, - 0x1370: 0x2000, 0x1371: 0x2000, 0x1372: 0x2000, 0x1373: 0x2000, 0x1374: 0x2000, 0x1375: 0x2000, - 0x1376: 0x2000, 0x1377: 0x2000, 0x1378: 0x2000, 0x1379: 0x2000, 0x137a: 0x2000, 0x137b: 0x2000, - 0x137c: 0x2000, 0x137d: 0x2000, 0x137e: 0x2000, 0x137f: 0x2000, - // Block 0x4e, offset 0x1380 - 0x1380: 0x2000, 0x1381: 0x2000, 0x1382: 0x2000, 0x1383: 0x2000, 0x1384: 0x2000, 0x1385: 0x2000, - 0x1386: 0x2000, 0x1387: 0x2000, 0x1388: 0x2000, 0x1389: 0x2000, 0x138a: 0x2000, 0x138b: 0x2000, - 0x138c: 0x2000, 0x138d: 0x2000, 0x138e: 0x2000, 0x138f: 0x2000, 0x1390: 0x2000, 0x1391: 0x2000, 
- 0x1392: 0x2000, 0x1393: 0x2000, 0x1394: 0x2000, 0x1395: 0x2000, 0x1396: 0x2000, 0x1397: 0x2000, - 0x1398: 0x2000, 0x1399: 0x2000, 0x139a: 0x2000, 0x139b: 0x2000, 0x139c: 0x2000, 0x139d: 0x2000, - 0x139e: 0x2000, 0x139f: 0x2000, 0x13a0: 0x2000, 0x13a1: 0x2000, 0x13a2: 0x2000, 0x13a3: 0x2000, - 0x13a4: 0x2000, 0x13a5: 0x2000, 0x13a6: 0x2000, 0x13a7: 0x2000, 0x13a8: 0x2000, 0x13a9: 0x2000, - 0x13b0: 0x2000, 0x13b1: 0x2000, 0x13b2: 0x2000, 0x13b3: 0x2000, 0x13b4: 0x2000, 0x13b5: 0x2000, - 0x13b6: 0x2000, 0x13b7: 0x2000, 0x13b8: 0x2000, 0x13b9: 0x2000, 0x13ba: 0x2000, 0x13bb: 0x2000, - 0x13bc: 0x2000, 0x13bd: 0x2000, 0x13be: 0x2000, 0x13bf: 0x2000, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, 0x13cb: 0x2000, - 0x13cc: 0x2000, 0x13cd: 0x2000, 0x13ce: 0x4000, 0x13cf: 0x2000, 0x13d0: 0x2000, 0x13d1: 0x4000, - 0x13d2: 0x4000, 0x13d3: 0x4000, 0x13d4: 0x4000, 0x13d5: 0x4000, 0x13d6: 0x4000, 0x13d7: 0x4000, - 0x13d8: 0x4000, 0x13d9: 0x4000, 0x13da: 0x4000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, - 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, - 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, - // Block 0x50, offset 0x1400 - 0x1400: 0x4000, 0x1401: 0x4000, 0x1402: 0x4000, - 0x1410: 0x4000, 0x1411: 0x4000, - 0x1412: 0x4000, 0x1413: 0x4000, 0x1414: 0x4000, 0x1415: 0x4000, 0x1416: 0x4000, 0x1417: 0x4000, - 0x1418: 0x4000, 0x1419: 0x4000, 0x141a: 0x4000, 0x141b: 0x4000, 0x141c: 0x4000, 0x141d: 0x4000, - 0x141e: 0x4000, 0x141f: 0x4000, 0x1420: 0x4000, 0x1421: 0x4000, 0x1422: 0x4000, 0x1423: 0x4000, - 0x1424: 0x4000, 0x1425: 0x4000, 0x1426: 0x4000, 0x1427: 0x4000, 0x1428: 0x4000, 0x1429: 0x4000, - 0x142a: 0x4000, 0x142b: 0x4000, 0x142c: 0x4000, 0x142d: 0x4000, 0x142e: 0x4000, 0x142f: 0x4000, - 0x1430: 0x4000, 0x1431: 0x4000, 0x1432: 0x4000, 0x1433: 0x4000, 0x1434: 0x4000, 0x1435: 0x4000, - 0x1436: 0x4000, 0x1437: 0x4000, 0x1438: 0x4000, 0x1439: 0x4000, 0x143a: 0x4000, 0x143b: 0x4000, - // Block 0x51, offset 0x1440 - 0x1440: 0x4000, 0x1441: 0x4000, 0x1442: 0x4000, 0x1443: 0x4000, 0x1444: 0x4000, 0x1445: 0x4000, - 0x1446: 0x4000, 0x1447: 0x4000, 0x1448: 0x4000, - 0x1450: 0x4000, 0x1451: 0x4000, - // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, 0x1483: 0x4000, 0x1484: 0x4000, 0x1485: 0x4000, - 0x1486: 0x4000, 0x1487: 0x4000, 0x1488: 0x4000, 0x1489: 0x4000, 0x148a: 0x4000, 0x148b: 0x4000, - 0x148c: 0x4000, 0x148d: 0x4000, 0x148e: 0x4000, 0x148f: 0x4000, 0x1490: 0x4000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, - 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, - 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - 0x14bc: 0x4000, 0x14bd: 0x4000, 0x14be: 0x4000, 0x14bf: 0x4000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, 0x14c9: 0x4000, 0x14ca: 0x4000, 0x14cb: 0x4000, - 0x14cc: 0x4000, 0x14cd: 
0x4000, 0x14ce: 0x4000, 0x14cf: 0x4000, 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14d2: 0x4000, 0x14d3: 0x4000, 0x14d4: 0x4000, 0x14d5: 0x4000, 0x14d6: 0x4000, 0x14d7: 0x4000, - 0x14d8: 0x4000, 0x14d9: 0x4000, 0x14da: 0x4000, 0x14db: 0x4000, 0x14dc: 0x4000, 0x14dd: 0x4000, - 0x14de: 0x4000, 0x14df: 0x4000, 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, 0x14e6: 0x4000, 0x14e7: 0x4000, 0x14e8: 0x4000, 0x14e9: 0x4000, - 0x14ea: 0x4000, 0x14eb: 0x4000, 0x14ec: 0x4000, 0x14ed: 0x4000, 0x14ee: 0x4000, 0x14ef: 0x4000, - 0x14f0: 0x4000, 0x14f1: 0x4000, 0x14f2: 0x4000, 0x14f3: 0x4000, 0x14f4: 0x4000, 0x14f5: 0x4000, - 0x14f6: 0x4000, 0x14f7: 0x4000, 0x14f8: 0x4000, 0x14f9: 0x4000, 0x14fa: 0x4000, 0x14fb: 0x4000, - 0x14fc: 0x4000, 0x14fe: 0x4000, 0x14ff: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, - 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, - 0x1520: 0x4000, 0x1521: 0x4000, 0x1522: 0x4000, 0x1523: 0x4000, - 0x1524: 0x4000, 0x1525: 0x4000, 0x1526: 0x4000, 0x1527: 0x4000, 0x1528: 0x4000, 0x1529: 0x4000, - 0x152a: 0x4000, 0x152b: 0x4000, 0x152c: 0x4000, 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, - 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1536: 0x4000, 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, - 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, - 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, - 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, - 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, - 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1574: 0x4000, - 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157d: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, 0x1594: 0x4000, 0x1595: 0x4000, 0x1596: 0x4000, 0x1597: 0x4000, - 0x1598: 0x4000, 0x1599: 0x4000, 0x159a: 0x4000, 0x159b: 0x4000, 0x159c: 0x4000, 0x159d: 0x4000, - 0x159e: 0x4000, 0x159f: 0x4000, 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 
0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, 0x15cb: 0x4000, - 0x15cc: 0x4000, 0x15cd: 0x4000, 0x15ce: 0x4000, 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, 0x15d4: 0x4000, 0x15d5: 0x4000, 0x15d6: 0x4000, 0x15d7: 0x4000, - 0x15d8: 0x4000, 0x15d9: 0x4000, 0x15da: 0x4000, 0x15db: 0x4000, 0x15dc: 0x4000, 0x15dd: 0x4000, - 0x15de: 0x4000, 0x15df: 0x4000, 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f1: 0x4000, 0x15f2: 0x4000, 0x15f3: 0x4000, 0x15f4: 0x4000, 0x15f5: 0x4000, - 0x15f6: 0x4000, 0x15f7: 0x4000, 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, - 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, - 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, - 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, - 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, - // Block 0x59, offset 0x1640 - 0x164b: 0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, - 0x167a: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1695: 0x4000, 0x1696: 0x4000, - 0x16a4: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16fb: 0x4000, - 0x16fc: 0x4000, 0x16fd: 0x4000, 0x16fe: 0x4000, 0x16ff: 0x4000, - // Block 0x5c, offset 0x1700 - 0x1700: 0x4000, 0x1701: 0x4000, 0x1702: 0x4000, 0x1703: 0x4000, 0x1704: 0x4000, 0x1705: 0x4000, - 0x1706: 0x4000, 0x1707: 0x4000, 0x1708: 0x4000, 0x1709: 0x4000, 0x170a: 0x4000, 0x170b: 0x4000, - 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x170f: 0x4000, - // Block 0x5d, offset 0x1740 - 0x1740: 0x4000, 0x1741: 0x4000, 0x1742: 0x4000, 0x1743: 0x4000, 0x1744: 0x4000, 0x1745: 0x4000, - 0x174c: 0x4000, 0x1750: 0x4000, 0x1751: 0x4000, - 0x1752: 0x4000, - 0x176b: 0x4000, 0x176c: 0x4000, - 0x1774: 0x4000, 0x1775: 0x4000, - 0x1776: 0x4000, - // Block 0x5e, offset 0x1780 - 0x1790: 0x4000, 0x1791: 0x4000, - 0x1792: 0x4000, 0x1793: 0x4000, 0x1794: 0x4000, 0x1795: 0x4000, 0x1796: 0x4000, 0x1797: 0x4000, - 0x1798: 0x4000, 0x1799: 0x4000, 0x179a: 0x4000, 0x179b: 0x4000, 0x179c: 0x4000, 0x179d: 
0x4000, - 0x179e: 0x4000, 0x17a0: 0x4000, 0x17a1: 0x4000, 0x17a2: 0x4000, 0x17a3: 0x4000, - 0x17a4: 0x4000, 0x17a5: 0x4000, 0x17a6: 0x4000, 0x17a7: 0x4000, - 0x17b0: 0x4000, 0x17b3: 0x4000, 0x17b4: 0x4000, 0x17b5: 0x4000, - 0x17b6: 0x4000, 0x17b7: 0x4000, 0x17b8: 0x4000, 0x17b9: 0x4000, 0x17ba: 0x4000, 0x17bb: 0x4000, - 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, - 0x17d0: 0x4000, 0x17d1: 0x4000, - 0x17d2: 0x4000, 0x17d3: 0x4000, 0x17d4: 0x4000, 0x17d5: 0x4000, 0x17d6: 0x4000, 0x17d7: 0x4000, - 0x17d8: 0x4000, 0x17d9: 0x4000, 0x17da: 0x4000, 0x17db: 0x4000, 0x17dc: 0x4000, 0x17dd: 0x4000, - 0x17de: 0x4000, - // Block 0x60, offset 0x1800 - 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, - 0x1806: 0x4000, 0x1807: 0x4000, 0x1808: 0x4000, 0x1809: 0x4000, 0x180a: 0x4000, 0x180b: 0x4000, - 0x180c: 0x4000, 0x180d: 0x4000, 0x180e: 0x4000, 0x180f: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, - // Block 0x61, offset 0x1840 - 0x1840: 0x4000, - // Block 0x62, offset 0x1880 - 0x1880: 0x2000, 0x1881: 0x2000, 0x1882: 0x2000, 0x1883: 0x2000, 0x1884: 0x2000, 0x1885: 0x2000, - 0x1886: 0x2000, 0x1887: 0x2000, 0x1888: 0x2000, 0x1889: 0x2000, 0x188a: 0x2000, 0x188b: 0x2000, - 0x188c: 0x2000, 0x188d: 0x2000, 0x188e: 0x2000, 0x188f: 0x2000, 0x1890: 0x2000, 0x1891: 0x2000, - 0x1892: 0x2000, 0x1893: 0x2000, 0x1894: 0x2000, 0x1895: 0x2000, 0x1896: 0x2000, 0x1897: 0x2000, - 0x1898: 0x2000, 0x1899: 0x2000, 0x189a: 0x2000, 0x189b: 0x2000, 0x189c: 0x2000, 0x189d: 0x2000, - 0x189e: 0x2000, 0x189f: 0x2000, 0x18a0: 0x2000, 0x18a1: 0x2000, 0x18a2: 0x2000, 0x18a3: 0x2000, - 0x18a4: 0x2000, 0x18a5: 0x2000, 0x18a6: 0x2000, 0x18a7: 0x2000, 0x18a8: 0x2000, 0x18a9: 0x2000, - 0x18aa: 0x2000, 0x18ab: 0x2000, 0x18ac: 0x2000, 0x18ad: 0x2000, 0x18ae: 0x2000, 0x18af: 0x2000, - 0x18b0: 0x2000, 0x18b1: 0x2000, 0x18b2: 0x2000, 0x18b3: 0x2000, 0x18b4: 0x2000, 0x18b5: 0x2000, - 0x18b6: 0x2000, 0x18b7: 0x2000, 0x18b8: 0x2000, 0x18b9: 0x2000, 0x18ba: 0x2000, 0x18bb: 0x2000, - 0x18bc: 0x2000, 0x18bd: 0x2000, -} - -// widthIndex: 22 blocks, 1408 entries, 1408 bytes -// Block 0 is the zero block. 
-var widthIndex = [1408]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, - 0x265: 0x3d, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, - // Block 0xd, offset 0x340 - 0x37f: 0x45, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, - 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x48, - // Block 0x10, offset 0x400 - 0x400: 0x49, 0x403: 0x4a, 0x404: 0x4b, 0x405: 0x4c, 0x406: 0x4d, - 0x408: 0x4e, 0x409: 0x4f, 0x40c: 0x50, 0x40d: 0x51, 0x40e: 0x52, 
0x40f: 0x53, - 0x410: 0x3a, 0x411: 0x54, 0x412: 0x0e, 0x413: 0x55, 0x414: 0x56, 0x415: 0x57, 0x416: 0x58, 0x417: 0x59, - 0x418: 0x0e, 0x419: 0x5a, 0x41a: 0x0e, 0x41b: 0x5b, - 0x424: 0x5c, 0x425: 0x5d, 0x426: 0x5e, 0x427: 0x5f, - // Block 0x11, offset 0x440 - 0x456: 0x0b, 0x457: 0x06, - 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, - 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, - 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, - 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, - 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, - // Block 0x12, offset 0x480 - 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, - 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, - 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, - 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, - 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, - 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, - 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x60, - // Block 0x14, offset 0x500 - 0x520: 0x10, - 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, - 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, - // Block 0x15, offset 0x540 - 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, - 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, -} - -// inverseData contains 4-byte entries of the following format: -// <length> <modified UTF-8-encoded rune> <0 padding> -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. 
-var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - {0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 
0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 14680 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/transform.go b/vendor/golang.org/x/text/width/transform.go deleted file mode 100644 index 2ed25096a..000000000 --- a/vendor/golang.org/x/text/width/transform.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package width - -import ( - "unicode/utf8" - - "golang.org/x/text/transform" -) - -type foldTransform struct { - transform.NopResetter -} - -func (foldTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for nSrc < len(src) { - if src[nSrc] < utf8.RuneSelf { - // ASCII fast path. - start, end := nSrc, len(src) - if d := len(dst) - nDst; d < end-start { - end = nSrc + d - } - for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { - } - n := copy(dst[nDst:], src[start:nSrc]) - if nDst += n; nDst == len(dst) { - nSrc = start + n - if nSrc == len(src) { - return nDst, nSrc, nil - } - if src[nSrc] < utf8.RuneSelf { - return nDst, nSrc, transform.ErrShortDst - } - } - continue - } - v, size := trie.lookup(src[nSrc:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - return nDst, nSrc, transform.ErrShortSrc - } - size = 1 // gobble 1 byte - } - if elem(v)&tagNeedsFold == 0 { - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - return nDst, nSrc, transform.ErrShortDst - } - nDst += size - } else { - data := inverseData[byte(v)] - if len(dst)-nDst < int(data[0]) { - return nDst, nSrc, transform.ErrShortDst - } - i := 1 - for end := int(data[0]); i < end; i++ { - dst[nDst] = data[i] - nDst++ - } - dst[nDst] = data[i] ^ src[nSrc+size-1] - nDst++ - } - nSrc += size - } - return nDst, nSrc, nil -} - -type narrowTransform struct { - transform.NopResetter -} - -func (narrowTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for nSrc < len(src) { - if src[nSrc] < utf8.RuneSelf { - // ASCII fast path. 
- start, end := nSrc, len(src) - if d := len(dst) - nDst; d < end-start { - end = nSrc + d - } - for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { - } - n := copy(dst[nDst:], src[start:nSrc]) - if nDst += n; nDst == len(dst) { - nSrc = start + n - if nSrc == len(src) { - return nDst, nSrc, nil - } - if src[nSrc] < utf8.RuneSelf { - return nDst, nSrc, transform.ErrShortDst - } - } - continue - } - v, size := trie.lookup(src[nSrc:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - return nDst, nSrc, transform.ErrShortSrc - } - size = 1 // gobble 1 byte - } - if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - return nDst, nSrc, transform.ErrShortDst - } - nDst += size - } else { - data := inverseData[byte(v)] - if len(dst)-nDst < int(data[0]) { - return nDst, nSrc, transform.ErrShortDst - } - i := 1 - for end := int(data[0]); i < end; i++ { - dst[nDst] = data[i] - nDst++ - } - dst[nDst] = data[i] ^ src[nSrc+size-1] - nDst++ - } - nSrc += size - } - return nDst, nSrc, nil -} - -type wideTransform struct { - transform.NopResetter -} - -func (wideTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for nSrc < len(src) { - // TODO: Consider ASCII fast path. Special-casing ASCII handling can - // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably - // not enough to warrant the extra code and complexity. - v, size := trie.lookup(src[nSrc:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - return nDst, nSrc, transform.ErrShortSrc - } - size = 1 // gobble 1 byte - } - if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - return nDst, nSrc, transform.ErrShortDst - } - nDst += size - } else { - data := inverseData[byte(v)] - if len(dst)-nDst < int(data[0]) { - return nDst, nSrc, transform.ErrShortDst - } - i := 1 - for end := int(data[0]); i < end; i++ { - dst[nDst] = data[i] - nDst++ - } - dst[nDst] = data[i] ^ src[nSrc+size-1] - nDst++ - } - nSrc += size - } - return nDst, nSrc, nil -} diff --git a/vendor/golang.org/x/text/width/trieval.go b/vendor/golang.org/x/text/width/trieval.go deleted file mode 100644 index 0ecffb4c6..000000000 --- a/vendor/golang.org/x/text/width/trieval.go +++ /dev/null @@ -1,30 +0,0 @@ -// This file was generated by go generate; DO NOT EDIT - -package width - -// elem is an entry of the width trie. The high byte is used to encode the type -// of the rune. The low byte is used to store the index to a mapping entry in -// the inverseData array. -type elem uint16 - -const ( - tagNeutral elem = iota << typeShift - tagAmbiguous - tagWide - tagNarrow - tagFullwidth - tagHalfwidth -) - -const ( - numTypeBits = 3 - typeShift = 16 - numTypeBits - - // tagNeedsFold is true for all fullwidth and halfwidth runes except for - // the Won sign U+20A9. - tagNeedsFold = 0x1000 - - // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide - // variant. - wonSign rune = 0x20A9 -) diff --git a/vendor/golang.org/x/text/width/width.go b/vendor/golang.org/x/text/width/width.go deleted file mode 100644 index c32d772fc..000000000 --- a/vendor/golang.org/x/text/width/width.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:generate stringer -type=Kind -//go:generate go run gen.go gen_common.go gen_trieval.go - -// Package width provides functionality for handling different widths in text. -// -// Wide characters behave like ideographs; they tend to allow line breaks after -// each character and remain upright in vertical text layout. Narrow characters -// are kept together in words or runs that are rotated sideways in vertical text -// layout. -// -// For more information, see http://unicode.org/reports/tr11/. -package width - -import ( - "unicode/utf8" - - "golang.org/x/text/transform" -) - -// TODO -// 1) Reduce table size by compressing blocks. -// 2) API proposition for computing display length -// (approximation, fixed pitch only). -// 3) Implement display length. - -// Kind indicates the type of width property as defined in http://unicode.org/reports/tr11/. -type Kind int - -const ( - // Neutral characters do not occur in legacy East Asian character sets. - Neutral Kind = iota - - // EastAsianAmbiguous characters that can be sometimes wide and sometimes - // narrow and require additional information not contained in the character - // code to further resolve their width. - EastAsianAmbiguous - - // EastAsianWide characters are wide in its usual form. They occur only in - // the context of East Asian typography. These runes may have explicit - // halfwidth counterparts. - EastAsianWide - - // EastAsianNarrow characters are narrow in its usual form. They often have - // fullwidth counterparts. - EastAsianNarrow - - // Note: there exist Narrow runes that do not have fullwidth or wide - // counterparts, despite what the definition says (e.g. U+27E6). - - // EastAsianFullwidth characters have a compatibility decompositions of type - // wide that map to a narrow counterpart. - EastAsianFullwidth - - // EastAsianHalfwidth characters have a compatibility decomposition of type - // narrow that map to a wide or ambiguous counterpart, plus U+20A9 ₩ WON - // SIGN. - EastAsianHalfwidth - - // Note: there exist runes that have a halfwidth counterparts but that are - // classified as Ambiguous, rather than wide (e.g. U+2190). -) - -// TODO: the generated tries need to return size 1 for invalid runes for the -// width to be computed correctly (each byte should render width 1) - -var trie = newWidthTrie(0) - -// Lookup reports the Properties of the first rune in b and the number of bytes -// of its UTF-8 encoding. -func Lookup(b []byte) (p Properties, size int) { - v, sz := trie.lookup(b) - return Properties{elem(v), b[sz-1]}, sz -} - -// LookupString reports the Properties of the first rune in s and the number of -// bytes of its UTF-8 encoding. -func LookupString(s string) (p Properties, size int) { - v, sz := trie.lookupString(s) - return Properties{elem(v), s[sz-1]}, sz -} - -// LookupRune reports the Properties of rune r. -func LookupRune(r rune) Properties { - var buf [4]byte - n := utf8.EncodeRune(buf[:], r) - v, _ := trie.lookup(buf[:n]) - last := byte(r) - if r >= utf8.RuneSelf { - last = 0x80 + byte(r&0x3f) - } - return Properties{elem(v), last} -} - -// Properties provides access to width properties of a rune. -type Properties struct { - elem elem - last byte -} - -func (e elem) kind() Kind { - return Kind(e >> typeShift) -} - -// Kind returns the Kind of a rune as defined in Unicode TR #11. -// See http://unicode.org/reports/tr11/ for more details. -func (p Properties) Kind() Kind { - return p.elem.kind() -} - -// Folded returns the folded variant of a rune or 0 if the rune is canonical. 
-func (p Properties) Folded() rune { - if p.elem&tagNeedsFold != 0 { - buf := inverseData[byte(p.elem)] - buf[buf[0]] ^= p.last - r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) - return r - } - return 0 -} - -// Narrow returns the narrow variant of a rune or 0 if the rune is already -// narrow or doesn't have a narrow variant. -func (p Properties) Narrow() rune { - if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianFullwidth || k == EastAsianWide || k == EastAsianAmbiguous) { - buf := inverseData[byte(p.elem)] - buf[buf[0]] ^= p.last - r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) - return r - } - return 0 -} - -// Wide returns the wide variant of a rune or 0 if the rune is already -// wide or doesn't have a wide variant. -func (p Properties) Wide() rune { - if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianHalfwidth || k == EastAsianNarrow) { - buf := inverseData[byte(p.elem)] - buf[buf[0]] ^= p.last - r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) - return r - } - return 0 -} - -// TODO for Properties: -// - Add Fullwidth/Halfwidth or Inverted methods for computing variants -// mapping. -// - Add width information (including information on non-spacing runes). - -// Transformer implements the transform.Transformer interface. -type Transformer struct { - t transform.Transformer -} - -// Reset implements the transform.Transformer interface. -func (t Transformer) Reset() { t.t.Reset() } - -// Transform implements the Transformer interface. -func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - return t.t.Transform(dst, src, atEOF) -} - -// Bytes returns a new byte slice with the result of applying t to b. -func (t Transformer) Bytes(b []byte) []byte { - b, _, _ = transform.Bytes(t, b) - return b -} - -// String returns a string with the result of applying t to s. -func (t Transformer) String(s string) string { - s, _, _ = transform.String(t, s) - return s -} - -var ( - // Fold is a transform that maps all runes to their canonical width. - // - // Note that the NFKC and NFKD transforms in golang.org/x/text/unicode/norm - // provide a more generic folding mechanism. - Fold Transformer = Transformer{foldTransform{}} - - // Widen is a transform that maps runes to their wide variant, if - // available. - Widen Transformer = Transformer{wideTransform{}} - - // Narrow is a transform that maps runes to their narrow variant, if - // available. - Narrow Transformer = Transformer{narrowTransform{}} -) - -// TODO: Consider the following options: -// - Treat Ambiguous runes that have a halfwidth counterpart as wide, or some -// generalized variant of this. -// - Consider a wide Won character to be the default width (or some generalized -// variant of this). -// - Filter the set of characters that gets converted (the preferred approach is -// to allow applying filters to transforms). diff --git a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go deleted file mode 100644 index 9421edae8..000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package rand provides utilities related to randomization. -package rand - -import ( - "math/rand" - "sync" - "time" -) - -var rng = struct { - sync.Mutex - rand *rand.Rand -}{ - rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())), -} - -// Intn generates an integer in range [0,max). -// By design this should panic if input is invalid, <= 0. -func Intn(max int) int { - rng.Lock() - defer rng.Unlock() - return rng.rand.Intn(max) -} - -// IntnRange generates an integer in range [min,max). -// By design this should panic if input is invalid, <= 0. -func IntnRange(min, max int) int { - rng.Lock() - defer rng.Unlock() - return rng.rand.Intn(max-min) + min -} - -// IntnRange generates an int64 integer in range [min,max). -// By design this should panic if input is invalid, <= 0. -func Int63nRange(min, max int64) int64 { - rng.Lock() - defer rng.Unlock() - return rng.rand.Int63n(max-min) + min -} - -// Seed seeds the rng with the provided seed. -func Seed(seed int64) { - rng.Lock() - defer rng.Unlock() - - rng.rand = rand.New(rand.NewSource(seed)) -} - -// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n) -// from the default Source. -func Perm(n int) []int { - rng.Lock() - defer rng.Unlock() - return rng.rand.Perm(n) -} - -const ( - // We omit vowels from the set of available characters to reduce the chances - // of "bad words" being formed. - alphanums = "bcdfghjklmnpqrstvwxz2456789" - // No. of bits required to index into alphanums string. - alphanumsIdxBits = 5 - // Mask used to extract last alphanumsIdxBits of an int. - alphanumsIdxMask = 1<<alphanumsIdxBits - 1 - // No. of random letters we can extract from a single int63. - maxAlphanumsPerInt = 63 / alphanumsIdxBits -) - -// String generates a random alphanumeric string, without vowels, which is n -// characters long. This will panic if n is less than zero. -// How the random string is created: -// - we generate random int63's -// - from each int63, we are extracting multiple random letters by bit-shifting and masking -// - if some index is out of range of alphanums we neglect it (unlikely to happen multiple times in a row) -func String(n int) string { - b := make([]byte, n) - rng.Lock() - defer rng.Unlock() - - randomInt63 := rng.rand.Int63() - remaining := maxAlphanumsPerInt - for i := 0; i < n; { - if remaining == 0 { - randomInt63, remaining = rng.rand.Int63(), maxAlphanumsPerInt - } - if idx := int(randomInt63 & alphanumsIdxMask); idx < len(alphanums) { - b[i] = alphanums[idx] - i++ - } - randomInt63 >>= alphanumsIdxBits - remaining-- - } - return string(b) -} - -// SafeEncodeString encodes s using the same characters as rand.String. This reduces the chances of bad words and -// ensures that strings generated from hash functions appear consistent throughout the API. 
-func SafeEncodeString(s string) string { - r := make([]byte, len(s)) - for i, b := range []rune(s) { - r[i] = alphanums[(int(b) % len(alphanums))] - } - return string(r) -} diff --git a/vendor/vendor.json b/vendor/vendor.json index 1297e3a31..16d2d5978 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -50,20 +50,6 @@ "revision": "eaa7994b2278094c904d31993d26f56324db3052", "revisionTime": "2018-05-02T23:02:43Z" }, - { - "checksumSHA1": "IatnluZB5jTVUncMN134e4VOV34=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/PuerkitoBio/purell", - "path": "github.com/PuerkitoBio/purell", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, - { - "checksumSHA1": "E/Tz8z0B/gaR551g+XqPKAhcteM=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/PuerkitoBio/urlesc", - "path": "github.com/PuerkitoBio/urlesc", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, { "checksumSHA1": "KmjnydoAbofMieIWm+it5OWERaM=", "path": "github.com/alecthomas/template", @@ -311,41 +297,6 @@ "revision": "2ad8d707cc05b1815ce6ff2543bb5e8d8f9298ef", "revisionTime": "2016-06-01T07:36:36Z" }, - { - "checksumSHA1": "f1wARLDzsF/JoyN01yoxXEwFIp8=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/docker/distribution/digest", - "path": "github.com/docker/distribution/digest", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, - { - "checksumSHA1": "PzXRTLmmqWXxmDqdIXLcRYBma18=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/docker/distribution/reference", - "path": "github.com/docker/distribution/reference", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, - { - "checksumSHA1": "1vQR+ZyudsjKio6RNKmWhwzGTb0=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/emicklei/go-restful", - "path": "github.com/emicklei/go-restful", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, - { - "checksumSHA1": "3xWz4fZ9xW+CfADpYoPFcZCYJ4E=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/emicklei/go-restful/log", - "path": "github.com/emicklei/go-restful/log", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, - { - "checksumSHA1": "J7CtF9gIs2yH9A7lPQDDrhYxiRk=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/emicklei/go-restful/swagger", - "path": "github.com/emicklei/go-restful/swagger", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, { "checksumSHA1": "ww7LVo7jNJ1o6sfRcromEHKyY+o=", "origin": "k8s.io/client-go/1.5/vendor/github.com/ghodss/yaml", @@ -377,34 +328,6 @@ "revision": "390ab7935ee28ec6b286364bba9b4dd6410cb3d5", "revisionTime": "2016-11-15T14:25:13Z" }, - { - "checksumSHA1": "NaZnW0tKj/b0k5WzcMD0twrLbrE=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/go-openapi/jsonpointer", - "path": "github.com/go-openapi/jsonpointer", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, - { - "checksumSHA1": "3LJXjMDxPY+veIqzQtiAvK3hXnY=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/go-openapi/jsonreference", - "path": "github.com/go-openapi/jsonreference", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, - { - "checksumSHA1": "faeB3fny260hQ/gEfEXa1ZQTGtk=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/go-openapi/spec", - 
"path": "github.com/go-openapi/spec", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, - { - "checksumSHA1": "wGpZwJ5HZtReou8A3WEV1Gdxs6k=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/go-openapi/swag", - "path": "github.com/go-openapi/swag", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, { "checksumSHA1": "KZ3QD2QgUS4RcoKiA3mn5pSlJxQ=", "path": "github.com/go-stack/stack", @@ -721,13 +644,6 @@ "revision": "f2b4162afba35581b6d4a50d3b8f34e33c144682", "revisionTime": "2018-06-12T20:28:35Z" }, - { - "checksumSHA1": "gA95N2LM2hEJLoqrTPaFsSWDJ2Y=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/juju/ratelimit", - "path": "github.com/juju/ratelimit", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, { "checksumSHA1": "Farach1xcmsQYrhiUfkwF2rbIaE=", "path": "github.com/julienschmidt/httprouter", @@ -740,27 +656,6 @@ "revision": "b84e30acd515aadc4b783ad4ff83aff3299bdfe0", "revisionTime": "2014-02-26T03:06:59Z" }, - { - "checksumSHA1": "urY45++NYCue4nh4k8OjUFnIGfU=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/mailru/easyjson/buffer", - "path": "github.com/mailru/easyjson/buffer", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, - { - "checksumSHA1": "yTDKAM4KBgOvXRsZC50zg0OChvM=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/mailru/easyjson/jlexer", - "path": "github.com/mailru/easyjson/jlexer", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, - { - "checksumSHA1": "4+d+6rhM1pei6lBguhqSEW7LaXs=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/mailru/easyjson/jwriter", - "path": "github.com/mailru/easyjson/jwriter", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, { "checksumSHA1": "Q2vw4HZBbnU8BLFt8VrzStwqSJg=", "path": "github.com/matttproud/golang_protobuf_extensions/pbutil", @@ -994,12 +889,6 @@ "revision": "84bc9597164f671c0130543778228928d6865c5c", "revisionTime": "2017-06-08T03:40:07Z" }, - { - "checksumSHA1": "STxYqRb4gnlSr3mRpT+Igfdz/kM=", - "path": "github.com/spf13/pflag", - "revision": "e57e3eeb33f795204c1ca35f56c44f83227c6e66", - "revisionTime": "2017-05-08T18:43:26Z" - }, { "checksumSHA1": "iydUphwYqZRq3WhstEdGsbvBAKs=", "path": "github.com/stretchr/testify/assert", @@ -1012,13 +901,6 @@ "revision": "d77da356e56a7428ad25149ca77381849a6a5232", "revisionTime": "2016-06-15T09:26:46Z" }, - { - "checksumSHA1": "f6Aew+ZA+HBAXCw6/xTST3mB0Lw=", - "origin": "k8s.io/client-go/1.5/vendor/github.com/ugorji/go/codec", - "path": "github.com/ugorji/go/codec", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, { "checksumSHA1": "1hwn8cgg4EVXhCpJIqmMbzqnUo0=", "path": "golang.org/x/crypto/ed25519", @@ -1181,74 +1063,29 @@ "revision": "c200b10b5d5e122be351b67af224adc6128af5bf", "revisionTime": "2016-10-22T18:22:21Z" }, - { - "checksumSHA1": "QQpKbWuqvhmxVr/hfEYdWzzcXRM=", - "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/cases", - "path": "golang.org/x/text/cases", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, - { - "checksumSHA1": "iAsGo/kxvnwILbJVUCd0ZcqZO/Q=", - "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/internal/tag", - "path": "golang.org/x/text/internal/tag", - "revision": 
"c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, - { - "checksumSHA1": "mQ6PCGHY7K0oPjKbYD8wsTjm/P8=", - "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/language", - "path": "golang.org/x/text/language", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, - { - "checksumSHA1": "WpeH2TweiuiZAQVTJNO5vyZAQQA=", - "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/runes", - "path": "golang.org/x/text/runes", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, { "checksumSHA1": "faFDXp++cLjLBlvsr+izZ+go1WU=", "path": "golang.org/x/text/secure/bidirule", - "revision": "2bf8f2a19ec09c670e931282edfe6567f6be21c9", - "revisionTime": "2017-06-27T21:03:49Z" + "revision": "b19bf474d317b857955b12035d2c5acb57ce8b01", + "revisionTime": "2017-07-06T13:46:35Z" }, { - "checksumSHA1": "FcK7VslktIAWj5jnWVnU2SesBq0=", - "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/secure/precis", - "path": "golang.org/x/text/secure/precis", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, - { - "checksumSHA1": "nwlu7UTwYbCj9l5f3a7t2ROwNzM=", - "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/transform", + "checksumSHA1": "ziMb9+ANGRJSSIuxYdRbA+cDRBQ=", "path": "golang.org/x/text/transform", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" + "revision": "b19bf474d317b857955b12035d2c5acb57ce8b01", + "revisionTime": "2017-07-06T13:46:35Z" }, { - "checksumSHA1": "nWJ9R1+Xw41f/mM3b7BYtv77CfI=", - "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/unicode/bidi", + "checksumSHA1": "MRLtTu/vpd18le8/HPLxOdjO5RE=", "path": "golang.org/x/text/unicode/bidi", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" + "revision": "b19bf474d317b857955b12035d2c5acb57ce8b01", + "revisionTime": "2017-07-06T13:46:35Z" }, { - "checksumSHA1": "BAZ96wCGUj6HdY9sG60Yw09KWA4=", - "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/unicode/norm", + "checksumSHA1": "kKylzIrLEnH8NKyeVAL0dq5gjVQ=", "path": "golang.org/x/text/unicode/norm", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" - }, - { - "checksumSHA1": "AZMILKWqLP99UilLgbGZ+uzIVrM=", - "origin": "k8s.io/client-go/1.5/vendor/golang.org/x/text/width", - "path": "golang.org/x/text/width", - "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3", - "revisionTime": "2016-09-30T00:14:02Z" + "revision": "b19bf474d317b857955b12035d2c5acb57ce8b01", + "revisionTime": "2017-07-06T13:46:35Z" }, { "checksumSHA1": "vGfePfr0+weQUeTM/71mu+LCFuE=", @@ -1947,14 +1784,6 @@ "version": "kubernetes-1.11.0", "versionExact": "kubernetes-1.11.0" }, - { - "checksumSHA1": "/y9iJ+ZCp2LabYLPPyGroXmbufY=", - "path": "k8s.io/apimachinery/pkg/util/rand", - "revision": "103fd098999dc9c0c88536f5c9ad2e5da39373ae", - "revisionTime": "2018-06-21T03:03:51Z", - "version": "kubernetes-1.11.0", - "versionExact": "kubernetes-1.11.0" - }, { "checksumSHA1": "keWEyQHGKGkDo2SARgphbGgN608=", "path": "k8s.io/apimachinery/pkg/util/runtime", From f3735d0eaddbe659038a735ce0123598323cef9c Mon Sep 17 00:00:00 2001 From: jojohappy <sarahdj0917@gmail.com> Date: Tue, 3 Jul 2018 18:28:16 +0800 Subject: [PATCH 3/6] Remove useless testdata Signed-off-by: jojohappy <sarahdj0917@gmail.com> --- pkg/textparse/testdata.txt | 3 --- 1 file changed, 3 deletions(-) diff --git 
a/pkg/textparse/testdata.txt b/pkg/textparse/testdata.txt index 1617202d8..a1c43edb5 100644 --- a/pkg/textparse/testdata.txt +++ b/pkg/textparse/testdata.txt @@ -526,6 +526,3 @@ prometheus_treecache_watcher_goroutines 0 # HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures. # TYPE prometheus_treecache_zookeeper_failures_total counter prometheus_treecache_zookeeper_failures_total 0 -# HELP redis_up help text -# TYPE redis_up gauge -redis_up{addr="redis://localhost:6379",alias=""} 1 \ No newline at end of file From e81785d1a3778d620a2c41cdda1e942bb03eb14b Mon Sep 17 00:00:00 2001 From: jojohappy <sarahdj0917@gmail.com> Date: Tue, 3 Jul 2018 20:04:27 +0800 Subject: [PATCH 4/6] Keep deprecated k8s node NodeLegacyHostIP as a local constant to keep compatibility with older k8s versions Signed-off-by: jojohappy <sarahdj0917@gmail.com> --- discovery/kubernetes/node.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index ec6733734..c541f3946 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -29,6 +29,10 @@ import ( "k8s.io/client-go/util/workqueue" ) +const ( + NodeLegacyHostIP = "LegacyHostIP" +) + // Node discovers Kubernetes nodes. type Node struct { logger log.Logger @@ -203,6 +207,9 @@ func nodeAddress(node *apiv1.Node) (string, map[apiv1.NodeAddressType][]string, if addresses, ok := m[apiv1.NodeExternalIP]; ok { return addresses[0], m, nil } + if addresses, ok := m[apiv1.NodeAddressType(NodeLegacyHostIP)]; ok { + return addresses[0], m, nil + } if addresses, ok := m[apiv1.NodeHostName]; ok { return addresses[0], m, nil } From f5794b0ff92073f9e2e6a51db8163bbaab987127 Mon Sep 17 00:00:00 2001 From: jojohappy <sarahdj0917@gmail.com> Date: Sat, 7 Jul 2018 10:41:48 +0800 Subject: [PATCH 5/6] Revert the testdata.txt; should be no change Signed-off-by: jojohappy <sarahdj0917@gmail.com> --- pkg/textparse/testdata.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/textparse/testdata.txt b/pkg/textparse/testdata.txt index a1c43edb5..c7f2a7af0 100644 --- a/pkg/textparse/testdata.txt +++ b/pkg/textparse/testdata.txt @@ -525,4 +525,4 @@ prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 prometheus_treecache_watcher_goroutines 0 # HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures. # TYPE prometheus_treecache_zookeeper_failures_total counter -prometheus_treecache_zookeeper_failures_total 0 +prometheus_treecache_zookeeper_failures_total 0 \ No newline at end of file From e060f7755fc185dfa3db6b9410bc38e861aaea56 Mon Sep 17 00:00:00 2001 From: jojohappy <sarahdj0917@gmail.com> Date: Sat, 7 Jul 2018 10:43:50 +0800 Subject: [PATCH 6/6] Keep the comment for NodeLegacyHostIP in the k8s node address priority Signed-off-by: jojohappy <sarahdj0917@gmail.com> --- discovery/kubernetes/node.go | 1 + 1 file changed, 1 insertion(+) diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index c541f3946..a99eca08c 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -192,6 +192,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group { // nodeAddresses returns the provided node's address, based on the priority: // 1. NodeInternalIP // 2. NodeExternalIP +// 3. NodeLegacyHostIP // 3. NodeHostName // // Derived from k8s.io/kubernetes/pkg/util/node/node.go
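For reviewers, below is a minimal standalone sketch of the address-selection order that PATCH 4/6 introduces and PATCH 6/6 documents (InternalIP, then ExternalIP, then the deprecated LegacyHostIP, then Hostname). It is not part of the patch: it uses small local stand-in types instead of the real k8s.io/api/core/v1 and discovery/kubernetes types, so the names nodeAddressType, nodeAddress, and pickNodeAddress are illustrative assumptions only.

package main

import (
	"errors"
	"fmt"
)

type nodeAddressType string

const (
	nodeInternalIP   nodeAddressType = "InternalIP"
	nodeExternalIP   nodeAddressType = "ExternalIP"
	nodeLegacyHostIP nodeAddressType = "LegacyHostIP" // deprecated, only reported by very old kubelets
	nodeHostName     nodeAddressType = "Hostname"
)

// nodeAddress is a stand-in for the Kubernetes NodeAddress struct.
type nodeAddress struct {
	Type    nodeAddressType
	Address string
}

// pickNodeAddress mirrors the priority used by nodeAddress() in
// discovery/kubernetes/node.go: InternalIP > ExternalIP > LegacyHostIP > Hostname.
func pickNodeAddress(addresses []nodeAddress) (string, error) {
	// Group the reported addresses by type, preserving their order.
	m := map[nodeAddressType][]string{}
	for _, a := range addresses {
		m[a.Type] = append(m[a.Type], a.Address)
	}
	// Walk the priority list and return the first address found.
	for _, t := range []nodeAddressType{nodeInternalIP, nodeExternalIP, nodeLegacyHostIP, nodeHostName} {
		if addrs, ok := m[t]; ok && len(addrs) > 0 {
			return addrs[0], nil
		}
	}
	return "", errors.New("host address unknown")
}

func main() {
	addrs := []nodeAddress{
		{Type: nodeHostName, Address: "node-1"},
		{Type: nodeLegacyHostIP, Address: "10.0.0.5"},
	}
	addr, err := pickNodeAddress(addrs)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(addr) // 10.0.0.5: LegacyHostIP outranks the Hostname fallback
}

Running the sketch prints 10.0.0.5, showing why keeping the LegacyHostIP check matters on older clusters: a legacy host IP is still preferred over falling back to the node hostname.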